diff options
author | Grant Likely <grant.likely@secretlab.ca> | 2010-05-22 02:36:56 -0400 |
---|---|---|
committer | Grant Likely <grant.likely@secretlab.ca> | 2010-05-22 02:36:56 -0400 |
commit | cf9b59e9d3e008591d1f54830f570982bb307a0d (patch) | |
tree | 113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/char | |
parent | 44504b2bebf8b5823c59484e73096a7d6574471d (diff) | |
parent | f4b87dee923342505e1ddba8d34ce9de33e75050 (diff) |
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.
Conflicts:
drivers/i2c/busses/i2c-cpm.c
drivers/i2c/busses/i2c-mpc.c
drivers/net/gianfar.c
Also fixed up one line in arch/powerpc/kernel/vio.c to use the
correct node pointer.
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/char')
42 files changed, 5571 insertions, 2744 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 3141dd3b6e53..e21175be25d0 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -276,11 +276,19 @@ config N_HDLC | |||
276 | Allows synchronous HDLC communications with tty device drivers that | 276 | Allows synchronous HDLC communications with tty device drivers that |
277 | support synchronous HDLC such as the Microgate SyncLink adapter. | 277 | support synchronous HDLC such as the Microgate SyncLink adapter. |
278 | 278 | ||
279 | This driver can only be built as a module ( = code which can be | 279 | This driver can be built as a module ( = code which can be |
280 | inserted in and removed from the running kernel whenever you want). | 280 | inserted in and removed from the running kernel whenever you want). |
281 | The module will be called n_hdlc. If you want to do that, say M | 281 | The module will be called n_hdlc. If you want to do that, say M |
282 | here. | 282 | here. |
283 | 283 | ||
284 | config N_GSM | ||
285 | tristate "GSM MUX line discipline support (EXPERIMENTAL)" | ||
286 | depends on EXPERIMENTAL | ||
287 | depends on NET | ||
288 | help | ||
289 | This line discipline provides support for the GSM MUX protocol and | ||
290 | presents the mux as a set of 61 individual tty devices. | ||
291 | |||
284 | config RISCOM8 | 292 | config RISCOM8 |
285 | tristate "SDL RISCom/8 card support" | 293 | tristate "SDL RISCom/8 card support" |
286 | depends on SERIAL_NONSTANDARD | 294 | depends on SERIAL_NONSTANDARD |
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index f957edf7e45d..d39be4cf1f5d 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
@@ -40,6 +40,7 @@ obj-$(CONFIG_SYNCLINK) += synclink.o | |||
40 | obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o | 40 | obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o |
41 | obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o | 41 | obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o |
42 | obj-$(CONFIG_N_HDLC) += n_hdlc.o | 42 | obj-$(CONFIG_N_HDLC) += n_hdlc.o |
43 | obj-$(CONFIG_N_GSM) += n_gsm.o | ||
43 | obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o | 44 | obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o |
44 | obj-$(CONFIG_SX) += sx.o generic_serial.o | 45 | obj-$(CONFIG_SX) += sx.o generic_serial.o |
45 | obj-$(CONFIG_RIO) += rio/ generic_serial.o | 46 | obj-$(CONFIG_RIO) += rio/ generic_serial.o |
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 870f12cfed93..120490949997 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h | |||
@@ -178,86 +178,6 @@ struct agp_bridge_data { | |||
178 | #define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page) | 178 | #define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page) |
179 | 179 | ||
180 | 180 | ||
181 | /* Intel registers */ | ||
182 | #define INTEL_APSIZE 0xb4 | ||
183 | #define INTEL_ATTBASE 0xb8 | ||
184 | #define INTEL_AGPCTRL 0xb0 | ||
185 | #define INTEL_NBXCFG 0x50 | ||
186 | #define INTEL_ERRSTS 0x91 | ||
187 | |||
188 | /* Intel i830 registers */ | ||
189 | #define I830_GMCH_CTRL 0x52 | ||
190 | #define I830_GMCH_ENABLED 0x4 | ||
191 | #define I830_GMCH_MEM_MASK 0x1 | ||
192 | #define I830_GMCH_MEM_64M 0x1 | ||
193 | #define I830_GMCH_MEM_128M 0 | ||
194 | #define I830_GMCH_GMS_MASK 0x70 | ||
195 | #define I830_GMCH_GMS_DISABLED 0x00 | ||
196 | #define I830_GMCH_GMS_LOCAL 0x10 | ||
197 | #define I830_GMCH_GMS_STOLEN_512 0x20 | ||
198 | #define I830_GMCH_GMS_STOLEN_1024 0x30 | ||
199 | #define I830_GMCH_GMS_STOLEN_8192 0x40 | ||
200 | #define I830_RDRAM_CHANNEL_TYPE 0x03010 | ||
201 | #define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) | ||
202 | #define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) | ||
203 | |||
204 | /* This one is for I830MP w. an external graphic card */ | ||
205 | #define INTEL_I830_ERRSTS 0x92 | ||
206 | |||
207 | /* Intel 855GM/852GM registers */ | ||
208 | #define I855_GMCH_GMS_MASK 0xF0 | ||
209 | #define I855_GMCH_GMS_STOLEN_0M 0x0 | ||
210 | #define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) | ||
211 | #define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) | ||
212 | #define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) | ||
213 | #define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) | ||
214 | #define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) | ||
215 | #define I85X_CAPID 0x44 | ||
216 | #define I85X_VARIANT_MASK 0x7 | ||
217 | #define I85X_VARIANT_SHIFT 5 | ||
218 | #define I855_GME 0x0 | ||
219 | #define I855_GM 0x4 | ||
220 | #define I852_GME 0x2 | ||
221 | #define I852_GM 0x5 | ||
222 | |||
223 | /* Intel i845 registers */ | ||
224 | #define INTEL_I845_AGPM 0x51 | ||
225 | #define INTEL_I845_ERRSTS 0xc8 | ||
226 | |||
227 | /* Intel i860 registers */ | ||
228 | #define INTEL_I860_MCHCFG 0x50 | ||
229 | #define INTEL_I860_ERRSTS 0xc8 | ||
230 | |||
231 | /* Intel i810 registers */ | ||
232 | #define I810_GMADDR 0x10 | ||
233 | #define I810_MMADDR 0x14 | ||
234 | #define I810_PTE_BASE 0x10000 | ||
235 | #define I810_PTE_MAIN_UNCACHED 0x00000000 | ||
236 | #define I810_PTE_LOCAL 0x00000002 | ||
237 | #define I810_PTE_VALID 0x00000001 | ||
238 | #define I830_PTE_SYSTEM_CACHED 0x00000006 | ||
239 | #define I810_SMRAM_MISCC 0x70 | ||
240 | #define I810_GFX_MEM_WIN_SIZE 0x00010000 | ||
241 | #define I810_GFX_MEM_WIN_32M 0x00010000 | ||
242 | #define I810_GMS 0x000000c0 | ||
243 | #define I810_GMS_DISABLE 0x00000000 | ||
244 | #define I810_PGETBL_CTL 0x2020 | ||
245 | #define I810_PGETBL_ENABLED 0x00000001 | ||
246 | #define I965_PGETBL_SIZE_MASK 0x0000000e | ||
247 | #define I965_PGETBL_SIZE_512KB (0 << 1) | ||
248 | #define I965_PGETBL_SIZE_256KB (1 << 1) | ||
249 | #define I965_PGETBL_SIZE_128KB (2 << 1) | ||
250 | #define I965_PGETBL_SIZE_1MB (3 << 1) | ||
251 | #define I965_PGETBL_SIZE_2MB (4 << 1) | ||
252 | #define I965_PGETBL_SIZE_1_5MB (5 << 1) | ||
253 | #define G33_PGETBL_SIZE_MASK (3 << 8) | ||
254 | #define G33_PGETBL_SIZE_1M (1 << 8) | ||
255 | #define G33_PGETBL_SIZE_2M (2 << 8) | ||
256 | |||
257 | #define I810_DRAM_CTL 0x3000 | ||
258 | #define I810_DRAM_ROW_0 0x00000001 | ||
259 | #define I810_DRAM_ROW_0_SDRAM 0x00000001 | ||
260 | |||
261 | struct agp_device_ids { | 181 | struct agp_device_ids { |
262 | unsigned short device_id; /* first, to make table easier to read */ | 182 | unsigned short device_id; /* first, to make table easier to read */ |
263 | enum chipset_type chipset; | 183 | enum chipset_type chipset; |
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index d2ce68f27e4b..fd793519ea2b 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c | |||
@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = { | |||
204 | .aperture_sizes = ali_generic_sizes, | 204 | .aperture_sizes = ali_generic_sizes, |
205 | .size_type = U32_APER_SIZE, | 205 | .size_type = U32_APER_SIZE, |
206 | .num_aperture_sizes = 7, | 206 | .num_aperture_sizes = 7, |
207 | .needs_scratch_page = true, | ||
207 | .configure = ali_configure, | 208 | .configure = ali_configure, |
208 | .fetch_size = ali_fetch_size, | 209 | .fetch_size = ali_fetch_size, |
209 | .cleanup = ali_cleanup, | 210 | .cleanup = ali_cleanup, |
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index a7637d72cef6..b6b1568314c8 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c | |||
@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) | |||
142 | { | 142 | { |
143 | struct aper_size_info_lvl2 *value; | 143 | struct aper_size_info_lvl2 *value; |
144 | struct amd_page_map page_dir; | 144 | struct amd_page_map page_dir; |
145 | unsigned long __iomem *cur_gatt; | ||
145 | unsigned long addr; | 146 | unsigned long addr; |
146 | int retval; | 147 | int retval; |
147 | u32 temp; | 148 | u32 temp; |
@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) | |||
178 | readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ | 179 | readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ |
179 | } | 180 | } |
180 | 181 | ||
182 | for (i = 0; i < value->num_entries; i++) { | ||
183 | addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; | ||
184 | cur_gatt = GET_GATT(addr); | ||
185 | writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); | ||
186 | readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ | ||
187 | } | ||
188 | |||
181 | return 0; | 189 | return 0; |
182 | } | 190 | } |
183 | 191 | ||
@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = { | |||
375 | .aperture_sizes = amd_irongate_sizes, | 383 | .aperture_sizes = amd_irongate_sizes, |
376 | .size_type = LVL2_APER_SIZE, | 384 | .size_type = LVL2_APER_SIZE, |
377 | .num_aperture_sizes = 7, | 385 | .num_aperture_sizes = 7, |
386 | .needs_scratch_page = true, | ||
378 | .configure = amd_irongate_configure, | 387 | .configure = amd_irongate_configure, |
379 | .fetch_size = amd_irongate_fetch_size, | 388 | .fetch_size = amd_irongate_fetch_size, |
380 | .cleanup = amd_irongate_cleanup, | 389 | .cleanup = amd_irongate_cleanup, |
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index fd50ead59c79..67ea3a60de74 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = { | |||
210 | .aperture_sizes = amd_8151_sizes, | 210 | .aperture_sizes = amd_8151_sizes, |
211 | .size_type = U32_APER_SIZE, | 211 | .size_type = U32_APER_SIZE, |
212 | .num_aperture_sizes = 7, | 212 | .num_aperture_sizes = 7, |
213 | .needs_scratch_page = true, | ||
213 | .configure = amd_8151_configure, | 214 | .configure = amd_8151_configure, |
214 | .fetch_size = amd64_fetch_size, | 215 | .fetch_size = amd64_fetch_size, |
215 | .cleanup = amd64_cleanup, | 216 | .cleanup = amd64_cleanup, |
@@ -499,6 +500,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, | |||
499 | u8 cap_ptr; | 500 | u8 cap_ptr; |
500 | int err; | 501 | int err; |
501 | 502 | ||
503 | /* The Highlander principle */ | ||
504 | if (agp_bridges_found) | ||
505 | return -ENODEV; | ||
506 | |||
502 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); | 507 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); |
503 | if (!cap_ptr) | 508 | if (!cap_ptr) |
504 | return -ENODEV; | 509 | return -ENODEV; |
@@ -562,6 +567,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev) | |||
562 | amd64_aperture_sizes[bridge->aperture_size_idx].size); | 567 | amd64_aperture_sizes[bridge->aperture_size_idx].size); |
563 | agp_remove_bridge(bridge); | 568 | agp_remove_bridge(bridge); |
564 | agp_put_bridge(bridge); | 569 | agp_put_bridge(bridge); |
570 | |||
571 | agp_bridges_found--; | ||
565 | } | 572 | } |
566 | 573 | ||
567 | #ifdef CONFIG_PM | 574 | #ifdef CONFIG_PM |
@@ -709,6 +716,11 @@ static struct pci_device_id agp_amd64_pci_table[] = { | |||
709 | 716 | ||
710 | MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); | 717 | MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); |
711 | 718 | ||
719 | static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = { | ||
720 | { PCI_DEVICE_CLASS(0, 0) }, | ||
721 | { } | ||
722 | }; | ||
723 | |||
712 | static struct pci_driver agp_amd64_pci_driver = { | 724 | static struct pci_driver agp_amd64_pci_driver = { |
713 | .name = "agpgart-amd64", | 725 | .name = "agpgart-amd64", |
714 | .id_table = agp_amd64_pci_table, | 726 | .id_table = agp_amd64_pci_table, |
@@ -734,7 +746,6 @@ int __init agp_amd64_init(void) | |||
734 | return err; | 746 | return err; |
735 | 747 | ||
736 | if (agp_bridges_found == 0) { | 748 | if (agp_bridges_found == 0) { |
737 | struct pci_dev *dev; | ||
738 | if (!agp_try_unsupported && !agp_try_unsupported_boot) { | 749 | if (!agp_try_unsupported && !agp_try_unsupported_boot) { |
739 | printk(KERN_INFO PFX "No supported AGP bridge found.\n"); | 750 | printk(KERN_INFO PFX "No supported AGP bridge found.\n"); |
740 | #ifdef MODULE | 751 | #ifdef MODULE |
@@ -750,17 +761,10 @@ int __init agp_amd64_init(void) | |||
750 | return -ENODEV; | 761 | return -ENODEV; |
751 | 762 | ||
752 | /* Look for any AGP bridge */ | 763 | /* Look for any AGP bridge */ |
753 | dev = NULL; | 764 | agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; |
754 | err = -ENODEV; | 765 | err = driver_attach(&agp_amd64_pci_driver.driver); |
755 | for_each_pci_dev(dev) { | 766 | if (err == 0 && agp_bridges_found == 0) |
756 | if (!pci_find_capability(dev, PCI_CAP_ID_AGP)) | 767 | err = -ENODEV; |
757 | continue; | ||
758 | /* Only one bridge supported right now */ | ||
759 | if (agp_amd64_probe(dev, NULL) == 0) { | ||
760 | err = 0; | ||
761 | break; | ||
762 | } | ||
763 | } | ||
764 | } | 768 | } |
765 | return err; | 769 | return err; |
766 | } | 770 | } |
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 3b2ecbe86ebe..dc30e2243494 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c | |||
@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) | |||
341 | { | 341 | { |
342 | struct aper_size_info_lvl2 *value; | 342 | struct aper_size_info_lvl2 *value; |
343 | struct ati_page_map page_dir; | 343 | struct ati_page_map page_dir; |
344 | unsigned long __iomem *cur_gatt; | ||
344 | unsigned long addr; | 345 | unsigned long addr; |
345 | int retval; | 346 | int retval; |
346 | u32 temp; | 347 | u32 temp; |
@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) | |||
395 | readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ | 396 | readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ |
396 | } | 397 | } |
397 | 398 | ||
399 | for (i = 0; i < value->num_entries; i++) { | ||
400 | addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; | ||
401 | cur_gatt = GET_GATT(addr); | ||
402 | writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); | ||
403 | } | ||
404 | |||
398 | return 0; | 405 | return 0; |
399 | } | 406 | } |
400 | 407 | ||
@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = { | |||
415 | .aperture_sizes = ati_generic_sizes, | 422 | .aperture_sizes = ati_generic_sizes, |
416 | .size_type = LVL2_APER_SIZE, | 423 | .size_type = LVL2_APER_SIZE, |
417 | .num_aperture_sizes = 7, | 424 | .num_aperture_sizes = 7, |
425 | .needs_scratch_page = true, | ||
418 | .configure = ati_configure, | 426 | .configure = ati_configure, |
419 | .fetch_size = ati_fetch_size, | 427 | .fetch_size = ati_fetch_size, |
420 | .cleanup = ati_cleanup, | 428 | .cleanup = ati_cleanup, |
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index 793f39ea9618..aa109cbe0e6e 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/page-flags.h> | 28 | #include <linux/page-flags.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include "agp.h" | 30 | #include "agp.h" |
31 | #include "intel-agp.h" | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * The real differences to the generic AGP code is | 34 | * The real differences to the generic AGP code is |
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index fb86708e47ed..4b51982fd23a 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c | |||
@@ -1214,7 +1214,7 @@ struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) | |||
1214 | return NULL; | 1214 | return NULL; |
1215 | 1215 | ||
1216 | for (i = 0; i < page_count; i++) | 1216 | for (i = 0; i < page_count; i++) |
1217 | new->pages[i] = 0; | 1217 | new->pages[i] = NULL; |
1218 | new->page_count = 0; | 1218 | new->page_count = 0; |
1219 | new->type = type; | 1219 | new->type = type; |
1220 | new->num_scratch_pages = pages; | 1220 | new->num_scratch_pages = pages; |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index aa4248efc5d8..d836a71bf06d 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -11,1531 +11,13 @@ | |||
11 | #include <linux/agp_backend.h> | 11 | #include <linux/agp_backend.h> |
12 | #include <asm/smp.h> | 12 | #include <asm/smp.h> |
13 | #include "agp.h" | 13 | #include "agp.h" |
14 | #include "intel-agp.h" | ||
15 | |||
16 | #include "intel-gtt.c" | ||
14 | 17 | ||
15 | int intel_agp_enabled; | 18 | int intel_agp_enabled; |
16 | EXPORT_SYMBOL(intel_agp_enabled); | 19 | EXPORT_SYMBOL(intel_agp_enabled); |
17 | 20 | ||
18 | /* | ||
19 | * If we have Intel graphics, we're not going to have anything other than | ||
20 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent | ||
21 | * on the Intel IOMMU support (CONFIG_DMAR). | ||
22 | * Only newer chipsets need to bother with this, of course. | ||
23 | */ | ||
24 | #ifdef CONFIG_DMAR | ||
25 | #define USE_PCI_DMA_API 1 | ||
26 | #endif | ||
27 | |||
28 | #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 | ||
29 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a | ||
30 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 | ||
31 | #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 | ||
32 | #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 | ||
33 | #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 | ||
34 | #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 | ||
35 | #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 | ||
36 | #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 | ||
37 | #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 | ||
38 | #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 | ||
39 | #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 | ||
40 | #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 | ||
41 | #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 | ||
42 | #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC | ||
43 | #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE | ||
44 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 | ||
45 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 | ||
46 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 | ||
47 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 | ||
48 | #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 | ||
49 | #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 | ||
50 | #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 | ||
51 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | ||
52 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | ||
53 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | ||
54 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 | ||
55 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | ||
56 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | ||
57 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | ||
58 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 | ||
59 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 | ||
60 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 | ||
61 | #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 | ||
62 | #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 | ||
63 | #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 | ||
64 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | ||
65 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | ||
66 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 | ||
67 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 | ||
68 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 | ||
69 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | ||
70 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a | ||
71 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 | ||
72 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 | ||
73 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 | ||
74 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 | ||
75 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 | ||
76 | |||
77 | /* cover 915 and 945 variants */ | ||
78 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | ||
79 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ | ||
80 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ | ||
81 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ | ||
82 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ | ||
83 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) | ||
84 | |||
85 | #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ | ||
86 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ | ||
87 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ | ||
88 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ | ||
89 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ | ||
90 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) | ||
91 | |||
92 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | ||
93 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | ||
94 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ | ||
95 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
96 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
97 | |||
98 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
99 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
100 | |||
101 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | ||
102 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
103 | |||
104 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ | ||
105 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | ||
106 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | ||
107 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | ||
108 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | ||
109 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ | ||
110 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ | ||
111 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ | ||
112 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ | ||
113 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ | ||
114 | IS_SNB) | ||
115 | |||
116 | extern int agp_memory_reserved; | ||
117 | |||
118 | |||
119 | /* Intel 815 register */ | ||
120 | #define INTEL_815_APCONT 0x51 | ||
121 | #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF | ||
122 | |||
123 | /* Intel i820 registers */ | ||
124 | #define INTEL_I820_RDCR 0x51 | ||
125 | #define INTEL_I820_ERRSTS 0xc8 | ||
126 | |||
127 | /* Intel i840 registers */ | ||
128 | #define INTEL_I840_MCHCFG 0x50 | ||
129 | #define INTEL_I840_ERRSTS 0xc8 | ||
130 | |||
131 | /* Intel i850 registers */ | ||
132 | #define INTEL_I850_MCHCFG 0x50 | ||
133 | #define INTEL_I850_ERRSTS 0xc8 | ||
134 | |||
135 | /* intel 915G registers */ | ||
136 | #define I915_GMADDR 0x18 | ||
137 | #define I915_MMADDR 0x10 | ||
138 | #define I915_PTEADDR 0x1C | ||
139 | #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) | ||
140 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) | ||
141 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) | ||
142 | #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) | ||
143 | #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) | ||
144 | #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) | ||
145 | #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) | ||
146 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | ||
147 | |||
148 | #define I915_IFPADDR 0x60 | ||
149 | |||
150 | /* Intel 965G registers */ | ||
151 | #define I965_MSAC 0x62 | ||
152 | #define I965_IFPADDR 0x70 | ||
153 | |||
154 | /* Intel 7505 registers */ | ||
155 | #define INTEL_I7505_APSIZE 0x74 | ||
156 | #define INTEL_I7505_NCAPID 0x60 | ||
157 | #define INTEL_I7505_NISTAT 0x6c | ||
158 | #define INTEL_I7505_ATTBASE 0x78 | ||
159 | #define INTEL_I7505_ERRSTS 0x42 | ||
160 | #define INTEL_I7505_AGPCTRL 0x70 | ||
161 | #define INTEL_I7505_MCHCFG 0x50 | ||
162 | |||
163 | #define SNB_GMCH_CTRL 0x50 | ||
164 | #define SNB_GMCH_GMS_STOLEN_MASK 0xF8 | ||
165 | #define SNB_GMCH_GMS_STOLEN_32M (1 << 3) | ||
166 | #define SNB_GMCH_GMS_STOLEN_64M (2 << 3) | ||
167 | #define SNB_GMCH_GMS_STOLEN_96M (3 << 3) | ||
168 | #define SNB_GMCH_GMS_STOLEN_128M (4 << 3) | ||
169 | #define SNB_GMCH_GMS_STOLEN_160M (5 << 3) | ||
170 | #define SNB_GMCH_GMS_STOLEN_192M (6 << 3) | ||
171 | #define SNB_GMCH_GMS_STOLEN_224M (7 << 3) | ||
172 | #define SNB_GMCH_GMS_STOLEN_256M (8 << 3) | ||
173 | #define SNB_GMCH_GMS_STOLEN_288M (9 << 3) | ||
174 | #define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) | ||
175 | #define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) | ||
176 | #define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) | ||
177 | #define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) | ||
178 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) | ||
179 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) | ||
180 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) | ||
181 | #define SNB_GTT_SIZE_0M (0 << 8) | ||
182 | #define SNB_GTT_SIZE_1M (1 << 8) | ||
183 | #define SNB_GTT_SIZE_2M (2 << 8) | ||
184 | #define SNB_GTT_SIZE_MASK (3 << 8) | ||
185 | |||
186 | static const struct aper_size_info_fixed intel_i810_sizes[] = | ||
187 | { | ||
188 | {64, 16384, 4}, | ||
189 | /* The 32M mode still requires a 64k gatt */ | ||
190 | {32, 8192, 4} | ||
191 | }; | ||
192 | |||
193 | #define AGP_DCACHE_MEMORY 1 | ||
194 | #define AGP_PHYS_MEMORY 2 | ||
195 | #define INTEL_AGP_CACHED_MEMORY 3 | ||
196 | |||
197 | static struct gatt_mask intel_i810_masks[] = | ||
198 | { | ||
199 | {.mask = I810_PTE_VALID, .type = 0}, | ||
200 | {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, | ||
201 | {.mask = I810_PTE_VALID, .type = 0}, | ||
202 | {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, | ||
203 | .type = INTEL_AGP_CACHED_MEMORY} | ||
204 | }; | ||
205 | |||
206 | static struct _intel_private { | ||
207 | struct pci_dev *pcidev; /* device one */ | ||
208 | u8 __iomem *registers; | ||
209 | u32 __iomem *gtt; /* I915G */ | ||
210 | int num_dcache_entries; | ||
211 | /* gtt_entries is the number of gtt entries that are already mapped | ||
212 | * to stolen memory. Stolen memory is larger than the memory mapped | ||
213 | * through gtt_entries, as it includes some reserved space for the BIOS | ||
214 | * popup and for the GTT. | ||
215 | */ | ||
216 | int gtt_entries; /* i830+ */ | ||
217 | int gtt_total_size; | ||
218 | union { | ||
219 | void __iomem *i9xx_flush_page; | ||
220 | void *i8xx_flush_page; | ||
221 | }; | ||
222 | struct page *i8xx_page; | ||
223 | struct resource ifp_resource; | ||
224 | int resource_valid; | ||
225 | } intel_private; | ||
226 | |||
227 | #ifdef USE_PCI_DMA_API | ||
228 | static int intel_agp_map_page(struct page *page, dma_addr_t *ret) | ||
229 | { | ||
230 | *ret = pci_map_page(intel_private.pcidev, page, 0, | ||
231 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
232 | if (pci_dma_mapping_error(intel_private.pcidev, *ret)) | ||
233 | return -EINVAL; | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) | ||
238 | { | ||
239 | pci_unmap_page(intel_private.pcidev, dma, | ||
240 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
241 | } | ||
242 | |||
243 | static void intel_agp_free_sglist(struct agp_memory *mem) | ||
244 | { | ||
245 | struct sg_table st; | ||
246 | |||
247 | st.sgl = mem->sg_list; | ||
248 | st.orig_nents = st.nents = mem->page_count; | ||
249 | |||
250 | sg_free_table(&st); | ||
251 | |||
252 | mem->sg_list = NULL; | ||
253 | mem->num_sg = 0; | ||
254 | } | ||
255 | |||
256 | static int intel_agp_map_memory(struct agp_memory *mem) | ||
257 | { | ||
258 | struct sg_table st; | ||
259 | struct scatterlist *sg; | ||
260 | int i; | ||
261 | |||
262 | DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); | ||
263 | |||
264 | if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) | ||
265 | return -ENOMEM; | ||
266 | |||
267 | mem->sg_list = sg = st.sgl; | ||
268 | |||
269 | for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) | ||
270 | sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); | ||
271 | |||
272 | mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, | ||
273 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
274 | if (unlikely(!mem->num_sg)) { | ||
275 | intel_agp_free_sglist(mem); | ||
276 | return -ENOMEM; | ||
277 | } | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | static void intel_agp_unmap_memory(struct agp_memory *mem) | ||
282 | { | ||
283 | DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); | ||
284 | |||
285 | pci_unmap_sg(intel_private.pcidev, mem->sg_list, | ||
286 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
287 | intel_agp_free_sglist(mem); | ||
288 | } | ||
289 | |||
290 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
291 | off_t pg_start, int mask_type) | ||
292 | { | ||
293 | struct scatterlist *sg; | ||
294 | int i, j; | ||
295 | |||
296 | j = pg_start; | ||
297 | |||
298 | WARN_ON(!mem->num_sg); | ||
299 | |||
300 | if (mem->num_sg == mem->page_count) { | ||
301 | for_each_sg(mem->sg_list, sg, mem->page_count, i) { | ||
302 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
303 | sg_dma_address(sg), mask_type), | ||
304 | intel_private.gtt+j); | ||
305 | j++; | ||
306 | } | ||
307 | } else { | ||
308 | /* sg may merge pages, but we have to separate | ||
309 | * per-page addr for GTT */ | ||
310 | unsigned int len, m; | ||
311 | |||
312 | for_each_sg(mem->sg_list, sg, mem->num_sg, i) { | ||
313 | len = sg_dma_len(sg) / PAGE_SIZE; | ||
314 | for (m = 0; m < len; m++) { | ||
315 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
316 | sg_dma_address(sg) + m * PAGE_SIZE, | ||
317 | mask_type), | ||
318 | intel_private.gtt+j); | ||
319 | j++; | ||
320 | } | ||
321 | } | ||
322 | } | ||
323 | readl(intel_private.gtt+j-1); | ||
324 | } | ||
325 | |||
326 | #else | ||
327 | |||
328 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
329 | off_t pg_start, int mask_type) | ||
330 | { | ||
331 | int i, j; | ||
332 | u32 cache_bits = 0; | ||
333 | |||
334 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
335 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
336 | { | ||
337 | cache_bits = I830_PTE_SYSTEM_CACHED; | ||
338 | } | ||
339 | |||
340 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
341 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
342 | page_to_phys(mem->pages[i]), mask_type), | ||
343 | intel_private.gtt+j); | ||
344 | } | ||
345 | |||
346 | readl(intel_private.gtt+j-1); | ||
347 | } | ||
348 | |||
349 | #endif | ||
350 | |||
351 | static int intel_i810_fetch_size(void) | ||
352 | { | ||
353 | u32 smram_miscc; | ||
354 | struct aper_size_info_fixed *values; | ||
355 | |||
356 | pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); | ||
357 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
358 | |||
359 | if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { | ||
360 | dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); | ||
361 | return 0; | ||
362 | } | ||
363 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { | ||
364 | agp_bridge->previous_size = | ||
365 | agp_bridge->current_size = (void *) (values + 1); | ||
366 | agp_bridge->aperture_size_idx = 1; | ||
367 | return values[1].size; | ||
368 | } else { | ||
369 | agp_bridge->previous_size = | ||
370 | agp_bridge->current_size = (void *) (values); | ||
371 | agp_bridge->aperture_size_idx = 0; | ||
372 | return values[0].size; | ||
373 | } | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
/*
 * Program the i810: map the MMIO registers (if not already mapped),
 * detect dedicated video RAM, publish the GART bus address, enable the
 * page table, and point every PTE at the scratch page.
 *
 * Returns 0 on success or -ENOMEM if the register ioremap fails.
 */
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Scrub every GTT entry so stray accesses hit the
		 * harmless scratch page. */
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}
419 | |||
/* Disable the i810 page table, post the write, then drop the MMIO mapping. */
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}
426 | |||
/* The i810 needs no explicit TLB flush; this hook is a no-op. */
static void intel_i810_tlbflush(struct agp_memory *mem)
{
}
431 | |||
432 | static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) | ||
433 | { | ||
434 | return; | ||
435 | } | ||
436 | |||
437 | /* Exists to support ARGB cursors */ | ||
438 | static struct page *i8xx_alloc_pages(void) | ||
439 | { | ||
440 | struct page *page; | ||
441 | |||
442 | page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); | ||
443 | if (page == NULL) | ||
444 | return NULL; | ||
445 | |||
446 | if (set_pages_uc(page, 4) < 0) { | ||
447 | set_pages_wb(page, 4); | ||
448 | __free_pages(page, 2); | ||
449 | return NULL; | ||
450 | } | ||
451 | get_page(page); | ||
452 | atomic_inc(&agp_bridge->current_memory_agp); | ||
453 | return page; | ||
454 | } | ||
455 | |||
456 | static void i8xx_destroy_pages(struct page *page) | ||
457 | { | ||
458 | if (page == NULL) | ||
459 | return; | ||
460 | |||
461 | set_pages_wb(page, 4); | ||
462 | put_page(page); | ||
463 | __free_pages(page, 2); | ||
464 | atomic_dec(&agp_bridge->current_memory_agp); | ||
465 | } | ||
466 | |||
467 | static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, | ||
468 | int type) | ||
469 | { | ||
470 | if (type < AGP_USER_TYPES) | ||
471 | return type; | ||
472 | else if (type == AGP_USER_CACHED_MEMORY) | ||
473 | return INTEL_AGP_CACHED_MEMORY; | ||
474 | else | ||
475 | return 0; | ||
476 | } | ||
477 | |||
/*
 * Map @mem into the i810 GTT at page offset @pg_start.
 *
 * Returns 0 on success, -EBUSY if a target PTE is already live, or
 * -EINVAL for a bad range / type mismatch / unknown mask type.
 *
 * Note the deliberate fall-through at the bottom: the success path
 * reaches "out", sets ret = 0, and then runs through "out_err" so that
 * mem->is_flushed is updated on every exit path.
 */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;


	/* Refuse to overwrite PTEs that are already in use. */
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		/* On-chip dcache: linear mapping, no backing pages. */
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		/* PCI posting. */
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

	agp_bridge->driver->tlb_flush(mem);
out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
540 | |||
541 | static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
542 | int type) | ||
543 | { | ||
544 | int i; | ||
545 | |||
546 | if (mem->page_count == 0) | ||
547 | return 0; | ||
548 | |||
549 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
550 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
551 | } | ||
552 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
553 | |||
554 | agp_bridge->driver->tlb_flush(mem); | ||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | /* | ||
559 | * The i810/i830 requires a physical address to program its mouse | ||
560 | * pointer into hardware. | ||
561 | * However the Xserver still writes to it through the agp aperture. | ||
562 | */ | ||
563 | static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) | ||
564 | { | ||
565 | struct agp_memory *new; | ||
566 | struct page *page; | ||
567 | |||
568 | switch (pg_count) { | ||
569 | case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); | ||
570 | break; | ||
571 | case 4: | ||
572 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
573 | page = i8xx_alloc_pages(); | ||
574 | break; | ||
575 | default: | ||
576 | return NULL; | ||
577 | } | ||
578 | |||
579 | if (page == NULL) | ||
580 | return NULL; | ||
581 | |||
582 | new = agp_create_memory(pg_count); | ||
583 | if (new == NULL) | ||
584 | return NULL; | ||
585 | |||
586 | new->pages[0] = page; | ||
587 | if (pg_count == 4) { | ||
588 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
589 | new->pages[1] = new->pages[0] + 1; | ||
590 | new->pages[2] = new->pages[1] + 1; | ||
591 | new->pages[3] = new->pages[2] + 1; | ||
592 | } | ||
593 | new->page_count = pg_count; | ||
594 | new->num_scratch_pages = pg_count; | ||
595 | new->type = AGP_PHYS_MEMORY; | ||
596 | new->physical = page_to_phys(new->pages[0]); | ||
597 | return new; | ||
598 | } | ||
599 | |||
600 | static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) | ||
601 | { | ||
602 | struct agp_memory *new; | ||
603 | |||
604 | if (type == AGP_DCACHE_MEMORY) { | ||
605 | if (pg_count != intel_private.num_dcache_entries) | ||
606 | return NULL; | ||
607 | |||
608 | new = agp_create_memory(1); | ||
609 | if (new == NULL) | ||
610 | return NULL; | ||
611 | |||
612 | new->type = AGP_DCACHE_MEMORY; | ||
613 | new->page_count = pg_count; | ||
614 | new->num_scratch_pages = 0; | ||
615 | agp_free_page_array(new); | ||
616 | return new; | ||
617 | } | ||
618 | if (type == AGP_PHYS_MEMORY) | ||
619 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
620 | return NULL; | ||
621 | } | ||
622 | |||
/*
 * Free memory obtained via intel_i810_alloc_by_type().  Physical
 * allocations release their backing page(s): the 4-page ARGB-cursor
 * kludge through i8xx_destroy_pages(), a single page through the
 * driver's unmap+free hooks.  Dcache allocations only free the
 * descriptor (they have no page array).
 */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
639 | |||
/* Combine a bus address with the driver's PTE flag bits for @type. */
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
646 | |||
/* Fixed aperture-size table for the i830 family; selected by
 * intel_i830_fetch_size() (indices 0/1) and the 9xx setup code. */
static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};
655 | |||
/*
 * Work out how much memory the BIOS has stolen for graphics — and
 * therefore how many GTT entries are already spoken for — and cache
 * the count (in 4KB entries) in intel_private.gtt_entries.
 *
 * "size" is the reserved space (in KB) at the top of stolen memory
 * (the GTT itself plus 4KB of BIOS popup) which must be subtracted
 * from the raw stolen amount before converting to entries.
 */
static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries = 0;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	int size; /* reserved space (in kb) at the top of stolen memory */

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if (IS_I965) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		/* The 965 has a field telling us the size of the GTT,
		 * which may be larger than what is necessary to map the
		 * aperture.
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4; /* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
		/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, ignore it in stolen gtt entries counting.  However,
		 * 4KB of the stolen memory doesn't get mapped to the GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			gtt_entries = KB(512) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			gtt_entries = MB(1) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			gtt_entries = MB(8) - KB(size);
			break;
		case I830_GMCH_GMS_LOCAL:
			/* Dedicated (local) graphics memory: amount is
			 * derived from the RDRAM channel-type register. */
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			gtt_entries = MB(64) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			gtt_entries = MB(96) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			gtt_entries = MB(128) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			gtt_entries = MB(160) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			gtt_entries = MB(192) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			gtt_entries = MB(224) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			gtt_entries = MB(256) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			gtt_entries = MB(288) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			gtt_entries = MB(320) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			gtt_entries = MB(352) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			gtt_entries = MB(384) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			gtt_entries = MB(416) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			gtt_entries = MB(448) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			gtt_entries = MB(480) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			gtt_entries = MB(512) - KB(size);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			gtt_entries = MB(1) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			gtt_entries = MB(4) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			gtt_entries = MB(8) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			gtt_entries = MB(16) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(48) - KB(size);
			else
				gtt_entries = 0;
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(64) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(128) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(256) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(96) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(160) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(224) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(352) - KB(size);
			else
				gtt_entries = 0;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	}
	if (gtt_entries > 0) {
		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
			 gtt_entries / KB(1), local ? "local" : "stolen");
		gtt_entries /= KB(4);	/* bytes -> 4KB GTT entries */
	} else {
		dev_info(&agp_bridge->dev->dev,
			 "no pre-allocated video memory detected\n");
		gtt_entries = 0;
	}

	intel_private.gtt_entries = gtt_entries;
}
892 | |||
/* Tear down the i8xx chipset-flush page created by intel_i830_setup_flush(). */
static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
902 | |||
/*
 * Allocate and kmap a single zeroed DMA32 page used by
 * intel_i830_chipset_flush() to flush the chipset write buffers.
 * Idempotent; silently leaves the flush mechanism disabled on failure.
 */
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}
917 | |||
/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	/* NOTE(review): assumes intel_i830_setup_flush() succeeded;
	 * pg may be NULL if it did not — confirm callers. */
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
939 | |||
/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 *
 * Maps the MMIO registers, reads the BIOS-programmed page table base
 * into gatt_bus_addr, and counts the stolen-memory GTT entries.
 * Returns 0 or -ENOMEM if the register ioremap fails.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	/* NOTE(review): page_order and num_entries are computed but
	 * never used below — presumably leftovers; confirm and prune. */
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
974 | |||
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	/* Nothing to free: the GTT lives in BIOS-reserved stolen memory. */
	return 0;
}
982 | |||
983 | static int intel_i830_fetch_size(void) | ||
984 | { | ||
985 | u16 gmch_ctrl; | ||
986 | struct aper_size_info_fixed *values; | ||
987 | |||
988 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
989 | |||
990 | if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && | ||
991 | agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
992 | /* 855GM/852GM/865G has 128MB aperture size */ | ||
993 | agp_bridge->previous_size = agp_bridge->current_size = (void *) values; | ||
994 | agp_bridge->aperture_size_idx = 0; | ||
995 | return values[0].size; | ||
996 | } | ||
997 | |||
998 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
999 | |||
1000 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { | ||
1001 | agp_bridge->previous_size = agp_bridge->current_size = (void *) values; | ||
1002 | agp_bridge->aperture_size_idx = 0; | ||
1003 | return values[0].size; | ||
1004 | } else { | ||
1005 | agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1); | ||
1006 | agp_bridge->aperture_size_idx = 1; | ||
1007 | return values[1].size; | ||
1008 | } | ||
1009 | |||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
/*
 * Program the i830: publish the GART bus address, enable the GMCH,
 * turn on the page table, scrub the non-stolen PTEs with the scratch
 * page, then set up the chipset-flush page.  Always returns 0.
 */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Start past the stolen-memory entries; those belong
		 * to the BIOS framebuffer. */
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}
1044 | |||
/* Drop the i830 MMIO register mapping created at gatt-table setup. */
static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}
1049 | |||
/*
 * Map @mem into the i830 GTT at page offset @pg_start.  Rejects ranges
 * overlapping BIOS-stolen entries, out-of-bounds ranges, type
 * mismatches, and unsupported mask types with -EINVAL.
 *
 * Note the deliberate fall-through: the success path reaches "out",
 * sets ret = 0, then runs through "out_err" so mem->is_flushed is
 * updated on every exit.
 */
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	/* PCI posting before the TLB flush. */
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
	agp_bridge->driver->tlb_flush(mem);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
1107 | |||
1108 | static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
1109 | int type) | ||
1110 | { | ||
1111 | int i; | ||
1112 | |||
1113 | if (mem->page_count == 0) | ||
1114 | return 0; | ||
1115 | |||
1116 | if (pg_start < intel_private.gtt_entries) { | ||
1117 | dev_info(&intel_private.pcidev->dev, | ||
1118 | "trying to disable local/stolen memory\n"); | ||
1119 | return -EINVAL; | ||
1120 | } | ||
1121 | |||
1122 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
1123 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
1124 | } | ||
1125 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
1126 | |||
1127 | agp_bridge->driver->tlb_flush(mem); | ||
1128 | return 0; | ||
1129 | } | ||
1130 | |||
1131 | static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) | ||
1132 | { | ||
1133 | if (type == AGP_PHYS_MEMORY) | ||
1134 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
1135 | /* always return NULL for other allocation types for now */ | ||
1136 | return NULL; | ||
1137 | } | ||
1138 | |||
1139 | static int intel_alloc_chipset_flush_resource(void) | ||
1140 | { | ||
1141 | int ret; | ||
1142 | ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, | ||
1143 | PAGE_SIZE, PCIBIOS_MIN_MEM, 0, | ||
1144 | pcibios_align_resource, agp_bridge->dev); | ||
1145 | |||
1146 | return ret; | ||
1147 | } | ||
1148 | |||
1149 | static void intel_i915_setup_chipset_flush(void) | ||
1150 | { | ||
1151 | int ret; | ||
1152 | u32 temp; | ||
1153 | |||
1154 | pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); | ||
1155 | if (!(temp & 0x1)) { | ||
1156 | intel_alloc_chipset_flush_resource(); | ||
1157 | intel_private.resource_valid = 1; | ||
1158 | pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
1159 | } else { | ||
1160 | temp &= ~1; | ||
1161 | |||
1162 | intel_private.resource_valid = 1; | ||
1163 | intel_private.ifp_resource.start = temp; | ||
1164 | intel_private.ifp_resource.end = temp + PAGE_SIZE; | ||
1165 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
1166 | /* some BIOSes reserve this area in a pnp some don't */ | ||
1167 | if (ret) | ||
1168 | intel_private.resource_valid = 0; | ||
1169 | } | ||
1170 | } | ||
1171 | |||
1172 | static void intel_i965_g33_setup_chipset_flush(void) | ||
1173 | { | ||
1174 | u32 temp_hi, temp_lo; | ||
1175 | int ret; | ||
1176 | |||
1177 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); | ||
1178 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); | ||
1179 | |||
1180 | if (!(temp_lo & 0x1)) { | ||
1181 | |||
1182 | intel_alloc_chipset_flush_resource(); | ||
1183 | |||
1184 | intel_private.resource_valid = 1; | ||
1185 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, | ||
1186 | upper_32_bits(intel_private.ifp_resource.start)); | ||
1187 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
1188 | } else { | ||
1189 | u64 l64; | ||
1190 | |||
1191 | temp_lo &= ~0x1; | ||
1192 | l64 = ((u64)temp_hi << 32) | temp_lo; | ||
1193 | |||
1194 | intel_private.resource_valid = 1; | ||
1195 | intel_private.ifp_resource.start = l64; | ||
1196 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; | ||
1197 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
1198 | /* some BIOSes reserve this area in a pnp some don't */ | ||
1199 | if (ret) | ||
1200 | intel_private.resource_valid = 0; | ||
1201 | } | ||
1202 | } | ||
1203 | |||
/*
 * One-time setup of the chipset flush page for 9xx-class hardware.
 * Sandybridge needs none; 965/G33/G4x keep the address in a 64-bit
 * IFPADDR, older 915-class parts in a 32-bit one.  On success the page
 * is ioremapped into intel_private.i9xx_flush_page.
 */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (IS_SNB)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_I965 || IS_G33 || IS_G4X) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start) {
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
		if (!intel_private.i9xx_flush_page)
			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
	}
}
1230 | |||
/*
 * Program the i915: publish the GART bus address, enable the GMCH,
 * turn on the page table, scrub the non-stolen GTT entries with the
 * scratch page, then set up the chipset flush page.  Always returns 0.
 */
static int intel_i915_configure(void)
{
	/* NOTE(review): current_size is computed but not used below —
	 * presumably a leftover; confirm and prune. */
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Scrub only past the BIOS-stolen entries. */
		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
			writel(agp_bridge->scratch_page, intel_private.gtt+i);
		}
		readl(intel_private.gtt+i-1);	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i9xx_setup_flush();

	return 0;
}
1264 | |||
1265 | static void intel_i915_cleanup(void) | ||
1266 | { | ||
1267 | if (intel_private.i9xx_flush_page) | ||
1268 | iounmap(intel_private.i9xx_flush_page); | ||
1269 | if (intel_private.resource_valid) | ||
1270 | release_resource(&intel_private.ifp_resource); | ||
1271 | intel_private.ifp_resource.start = 0; | ||
1272 | intel_private.resource_valid = 0; | ||
1273 | iounmap(intel_private.gtt); | ||
1274 | iounmap(intel_private.registers); | ||
1275 | } | ||
1276 | |||
1277 | static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) | ||
1278 | { | ||
1279 | if (intel_private.i9xx_flush_page) | ||
1280 | writel(1, intel_private.i9xx_flush_page); | ||
1281 | } | ||
1282 | |||
1283 | static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
1284 | int type) | ||
1285 | { | ||
1286 | int num_entries; | ||
1287 | void *temp; | ||
1288 | int ret = -EINVAL; | ||
1289 | int mask_type; | ||
1290 | |||
1291 | if (mem->page_count == 0) | ||
1292 | goto out; | ||
1293 | |||
1294 | temp = agp_bridge->current_size; | ||
1295 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
1296 | |||
1297 | if (pg_start < intel_private.gtt_entries) { | ||
1298 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, | ||
1299 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", | ||
1300 | pg_start, intel_private.gtt_entries); | ||
1301 | |||
1302 | dev_info(&intel_private.pcidev->dev, | ||
1303 | "trying to insert into local/stolen memory\n"); | ||
1304 | goto out_err; | ||
1305 | } | ||
1306 | |||
1307 | if ((pg_start + mem->page_count) > num_entries) | ||
1308 | goto out_err; | ||
1309 | |||
1310 | /* The i915 can't check the GTT for entries since it's read only; | ||
1311 | * depend on the caller to make the correct offset decisions. | ||
1312 | */ | ||
1313 | |||
1314 | if (type != mem->type) | ||
1315 | goto out_err; | ||
1316 | |||
1317 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
1318 | |||
1319 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | ||
1320 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
1321 | goto out_err; | ||
1322 | |||
1323 | if (!mem->is_flushed) | ||
1324 | global_cache_flush(); | ||
1325 | |||
1326 | intel_agp_insert_sg_entries(mem, pg_start, mask_type); | ||
1327 | agp_bridge->driver->tlb_flush(mem); | ||
1328 | |||
1329 | out: | ||
1330 | ret = 0; | ||
1331 | out_err: | ||
1332 | mem->is_flushed = true; | ||
1333 | return ret; | ||
1334 | } | ||
1335 | |||
1336 | static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
1337 | int type) | ||
1338 | { | ||
1339 | int i; | ||
1340 | |||
1341 | if (mem->page_count == 0) | ||
1342 | return 0; | ||
1343 | |||
1344 | if (pg_start < intel_private.gtt_entries) { | ||
1345 | dev_info(&intel_private.pcidev->dev, | ||
1346 | "trying to disable local/stolen memory\n"); | ||
1347 | return -EINVAL; | ||
1348 | } | ||
1349 | |||
1350 | for (i = pg_start; i < (mem->page_count + pg_start); i++) | ||
1351 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1352 | |||
1353 | readl(intel_private.gtt+i-1); | ||
1354 | |||
1355 | agp_bridge->driver->tlb_flush(mem); | ||
1356 | return 0; | ||
1357 | } | ||
1358 | |||
1359 | /* Return the aperture size by just checking the resource length. The effect | ||
1360 | * described in the spec of the MSAC registers is just changing of the | ||
1361 | * resource size. | ||
1362 | */ | ||
1363 | static int intel_i9xx_fetch_size(void) | ||
1364 | { | ||
1365 | int num_sizes = ARRAY_SIZE(intel_i830_sizes); | ||
1366 | int aper_size; /* size in megabytes */ | ||
1367 | int i; | ||
1368 | |||
1369 | aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); | ||
1370 | |||
1371 | for (i = 0; i < num_sizes; i++) { | ||
1372 | if (aper_size == intel_i830_sizes[i].size) { | ||
1373 | agp_bridge->current_size = intel_i830_sizes + i; | ||
1374 | agp_bridge->previous_size = agp_bridge->current_size; | ||
1375 | return aper_size; | ||
1376 | } | ||
1377 | } | ||
1378 | |||
1379 | return 0; | ||
1380 | } | ||
1381 | |||
1382 | /* The intel i915 automatically initializes the agp aperture during POST. | ||
1383 | * Use the memory already set aside for in the GTT. | ||
1384 | */ | ||
1385 | static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | ||
1386 | { | ||
1387 | int page_order; | ||
1388 | struct aper_size_info_fixed *size; | ||
1389 | int num_entries; | ||
1390 | u32 temp, temp2; | ||
1391 | int gtt_map_size = 256 * 1024; | ||
1392 | |||
1393 | size = agp_bridge->current_size; | ||
1394 | page_order = size->page_order; | ||
1395 | num_entries = size->num_entries; | ||
1396 | agp_bridge->gatt_table_real = NULL; | ||
1397 | |||
1398 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1399 | pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); | ||
1400 | |||
1401 | if (IS_G33) | ||
1402 | gtt_map_size = 1024 * 1024; /* 1M on G33 */ | ||
1403 | intel_private.gtt = ioremap(temp2, gtt_map_size); | ||
1404 | if (!intel_private.gtt) | ||
1405 | return -ENOMEM; | ||
1406 | |||
1407 | intel_private.gtt_total_size = gtt_map_size / 4; | ||
1408 | |||
1409 | temp &= 0xfff80000; | ||
1410 | |||
1411 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1412 | if (!intel_private.registers) { | ||
1413 | iounmap(intel_private.gtt); | ||
1414 | return -ENOMEM; | ||
1415 | } | ||
1416 | |||
1417 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1418 | global_cache_flush(); /* FIXME: ? */ | ||
1419 | |||
1420 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1421 | intel_i830_init_gtt_entries(); | ||
1422 | |||
1423 | agp_bridge->gatt_table = NULL; | ||
1424 | |||
1425 | agp_bridge->gatt_bus_addr = temp; | ||
1426 | |||
1427 | return 0; | ||
1428 | } | ||
1429 | |||
1430 | /* | ||
1431 | * The i965 supports 36-bit physical addresses, but to keep | ||
1432 | * the format of the GTT the same, the bits that don't fit | ||
1433 | * in a 32-bit word are shifted down to bits 4..7. | ||
1434 | * | ||
1435 | * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" | ||
1436 | * is always zero on 32-bit architectures, so no need to make | ||
1437 | * this conditional. | ||
1438 | */ | ||
1439 | static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | ||
1440 | dma_addr_t addr, int type) | ||
1441 | { | ||
1442 | /* Shift high bits down */ | ||
1443 | addr |= (addr >> 28) & 0xf0; | ||
1444 | |||
1445 | /* Type checking must be done elsewhere */ | ||
1446 | return addr | bridge->driver->masks[type].mask; | ||
1447 | } | ||
1448 | |||
1449 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | ||
1450 | { | ||
1451 | u16 snb_gmch_ctl; | ||
1452 | |||
1453 | switch (agp_bridge->dev->device) { | ||
1454 | case PCI_DEVICE_ID_INTEL_GM45_HB: | ||
1455 | case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: | ||
1456 | case PCI_DEVICE_ID_INTEL_Q45_HB: | ||
1457 | case PCI_DEVICE_ID_INTEL_G45_HB: | ||
1458 | case PCI_DEVICE_ID_INTEL_G41_HB: | ||
1459 | case PCI_DEVICE_ID_INTEL_B43_HB: | ||
1460 | case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: | ||
1461 | case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: | ||
1462 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: | ||
1463 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: | ||
1464 | *gtt_offset = *gtt_size = MB(2); | ||
1465 | break; | ||
1466 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: | ||
1467 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: | ||
1468 | *gtt_offset = MB(2); | ||
1469 | |||
1470 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
1471 | switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { | ||
1472 | default: | ||
1473 | case SNB_GTT_SIZE_0M: | ||
1474 | printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); | ||
1475 | *gtt_size = MB(0); | ||
1476 | break; | ||
1477 | case SNB_GTT_SIZE_1M: | ||
1478 | *gtt_size = MB(1); | ||
1479 | break; | ||
1480 | case SNB_GTT_SIZE_2M: | ||
1481 | *gtt_size = MB(2); | ||
1482 | break; | ||
1483 | } | ||
1484 | break; | ||
1485 | default: | ||
1486 | *gtt_offset = *gtt_size = KB(512); | ||
1487 | } | ||
1488 | } | ||
1489 | |||
1490 | /* The intel i965 automatically initializes the agp aperture during POST. | ||
1491 | * Use the memory already set aside for in the GTT. | ||
1492 | */ | ||
1493 | static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | ||
1494 | { | ||
1495 | int page_order; | ||
1496 | struct aper_size_info_fixed *size; | ||
1497 | int num_entries; | ||
1498 | u32 temp; | ||
1499 | int gtt_offset, gtt_size; | ||
1500 | |||
1501 | size = agp_bridge->current_size; | ||
1502 | page_order = size->page_order; | ||
1503 | num_entries = size->num_entries; | ||
1504 | agp_bridge->gatt_table_real = NULL; | ||
1505 | |||
1506 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1507 | |||
1508 | temp &= 0xfff00000; | ||
1509 | |||
1510 | intel_i965_get_gtt_range(>t_offset, >t_size); | ||
1511 | |||
1512 | intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); | ||
1513 | |||
1514 | if (!intel_private.gtt) | ||
1515 | return -ENOMEM; | ||
1516 | |||
1517 | intel_private.gtt_total_size = gtt_size / 4; | ||
1518 | |||
1519 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1520 | if (!intel_private.registers) { | ||
1521 | iounmap(intel_private.gtt); | ||
1522 | return -ENOMEM; | ||
1523 | } | ||
1524 | |||
1525 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1526 | global_cache_flush(); /* FIXME: ? */ | ||
1527 | |||
1528 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1529 | intel_i830_init_gtt_entries(); | ||
1530 | |||
1531 | agp_bridge->gatt_table = NULL; | ||
1532 | |||
1533 | agp_bridge->gatt_bus_addr = temp; | ||
1534 | |||
1535 | return 0; | ||
1536 | } | ||
1537 | |||
1538 | |||
1539 | static int intel_fetch_size(void) | 21 | static int intel_fetch_size(void) |
1540 | { | 22 | { |
1541 | int i; | 23 | int i; |
@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = { | |||
1982 | .aperture_sizes = intel_generic_sizes, | 464 | .aperture_sizes = intel_generic_sizes, |
1983 | .size_type = U16_APER_SIZE, | 465 | .size_type = U16_APER_SIZE, |
1984 | .num_aperture_sizes = 7, | 466 | .num_aperture_sizes = 7, |
467 | .needs_scratch_page = true, | ||
1985 | .configure = intel_configure, | 468 | .configure = intel_configure, |
1986 | .fetch_size = intel_fetch_size, | 469 | .fetch_size = intel_fetch_size, |
1987 | .cleanup = intel_cleanup, | 470 | .cleanup = intel_cleanup, |
@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = { | |||
2003 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 486 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2004 | }; | 487 | }; |
2005 | 488 | ||
2006 | static const struct agp_bridge_driver intel_810_driver = { | ||
2007 | .owner = THIS_MODULE, | ||
2008 | .aperture_sizes = intel_i810_sizes, | ||
2009 | .size_type = FIXED_APER_SIZE, | ||
2010 | .num_aperture_sizes = 2, | ||
2011 | .needs_scratch_page = true, | ||
2012 | .configure = intel_i810_configure, | ||
2013 | .fetch_size = intel_i810_fetch_size, | ||
2014 | .cleanup = intel_i810_cleanup, | ||
2015 | .tlb_flush = intel_i810_tlbflush, | ||
2016 | .mask_memory = intel_i810_mask_memory, | ||
2017 | .masks = intel_i810_masks, | ||
2018 | .agp_enable = intel_i810_agp_enable, | ||
2019 | .cache_flush = global_cache_flush, | ||
2020 | .create_gatt_table = agp_generic_create_gatt_table, | ||
2021 | .free_gatt_table = agp_generic_free_gatt_table, | ||
2022 | .insert_memory = intel_i810_insert_entries, | ||
2023 | .remove_memory = intel_i810_remove_entries, | ||
2024 | .alloc_by_type = intel_i810_alloc_by_type, | ||
2025 | .free_by_type = intel_i810_free_by_type, | ||
2026 | .agp_alloc_page = agp_generic_alloc_page, | ||
2027 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2028 | .agp_destroy_page = agp_generic_destroy_page, | ||
2029 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2030 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
2031 | }; | ||
2032 | |||
2033 | static const struct agp_bridge_driver intel_815_driver = { | 489 | static const struct agp_bridge_driver intel_815_driver = { |
2034 | .owner = THIS_MODULE, | 490 | .owner = THIS_MODULE, |
2035 | .aperture_sizes = intel_815_sizes, | 491 | .aperture_sizes = intel_815_sizes, |
2036 | .size_type = U8_APER_SIZE, | 492 | .size_type = U8_APER_SIZE, |
2037 | .num_aperture_sizes = 2, | 493 | .num_aperture_sizes = 2, |
494 | .needs_scratch_page = true, | ||
2038 | .configure = intel_815_configure, | 495 | .configure = intel_815_configure, |
2039 | .fetch_size = intel_815_fetch_size, | 496 | .fetch_size = intel_815_fetch_size, |
2040 | .cleanup = intel_8xx_cleanup, | 497 | .cleanup = intel_8xx_cleanup, |
@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = { | |||
2056 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 513 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2057 | }; | 514 | }; |
2058 | 515 | ||
2059 | static const struct agp_bridge_driver intel_830_driver = { | ||
2060 | .owner = THIS_MODULE, | ||
2061 | .aperture_sizes = intel_i830_sizes, | ||
2062 | .size_type = FIXED_APER_SIZE, | ||
2063 | .num_aperture_sizes = 4, | ||
2064 | .needs_scratch_page = true, | ||
2065 | .configure = intel_i830_configure, | ||
2066 | .fetch_size = intel_i830_fetch_size, | ||
2067 | .cleanup = intel_i830_cleanup, | ||
2068 | .tlb_flush = intel_i810_tlbflush, | ||
2069 | .mask_memory = intel_i810_mask_memory, | ||
2070 | .masks = intel_i810_masks, | ||
2071 | .agp_enable = intel_i810_agp_enable, | ||
2072 | .cache_flush = global_cache_flush, | ||
2073 | .create_gatt_table = intel_i830_create_gatt_table, | ||
2074 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2075 | .insert_memory = intel_i830_insert_entries, | ||
2076 | .remove_memory = intel_i830_remove_entries, | ||
2077 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2078 | .free_by_type = intel_i810_free_by_type, | ||
2079 | .agp_alloc_page = agp_generic_alloc_page, | ||
2080 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2081 | .agp_destroy_page = agp_generic_destroy_page, | ||
2082 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2083 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2084 | .chipset_flush = intel_i830_chipset_flush, | ||
2085 | }; | ||
2086 | |||
2087 | static const struct agp_bridge_driver intel_820_driver = { | 516 | static const struct agp_bridge_driver intel_820_driver = { |
2088 | .owner = THIS_MODULE, | 517 | .owner = THIS_MODULE, |
2089 | .aperture_sizes = intel_8xx_sizes, | 518 | .aperture_sizes = intel_8xx_sizes, |
2090 | .size_type = U8_APER_SIZE, | 519 | .size_type = U8_APER_SIZE, |
2091 | .num_aperture_sizes = 7, | 520 | .num_aperture_sizes = 7, |
521 | .needs_scratch_page = true, | ||
2092 | .configure = intel_820_configure, | 522 | .configure = intel_820_configure, |
2093 | .fetch_size = intel_8xx_fetch_size, | 523 | .fetch_size = intel_8xx_fetch_size, |
2094 | .cleanup = intel_820_cleanup, | 524 | .cleanup = intel_820_cleanup, |
@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = { | |||
2115 | .aperture_sizes = intel_830mp_sizes, | 545 | .aperture_sizes = intel_830mp_sizes, |
2116 | .size_type = U8_APER_SIZE, | 546 | .size_type = U8_APER_SIZE, |
2117 | .num_aperture_sizes = 4, | 547 | .num_aperture_sizes = 4, |
548 | .needs_scratch_page = true, | ||
2118 | .configure = intel_830mp_configure, | 549 | .configure = intel_830mp_configure, |
2119 | .fetch_size = intel_8xx_fetch_size, | 550 | .fetch_size = intel_8xx_fetch_size, |
2120 | .cleanup = intel_8xx_cleanup, | 551 | .cleanup = intel_8xx_cleanup, |
@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = { | |||
2141 | .aperture_sizes = intel_8xx_sizes, | 572 | .aperture_sizes = intel_8xx_sizes, |
2142 | .size_type = U8_APER_SIZE, | 573 | .size_type = U8_APER_SIZE, |
2143 | .num_aperture_sizes = 7, | 574 | .num_aperture_sizes = 7, |
575 | .needs_scratch_page = true, | ||
2144 | .configure = intel_840_configure, | 576 | .configure = intel_840_configure, |
2145 | .fetch_size = intel_8xx_fetch_size, | 577 | .fetch_size = intel_8xx_fetch_size, |
2146 | .cleanup = intel_8xx_cleanup, | 578 | .cleanup = intel_8xx_cleanup, |
@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = { | |||
2167 | .aperture_sizes = intel_8xx_sizes, | 599 | .aperture_sizes = intel_8xx_sizes, |
2168 | .size_type = U8_APER_SIZE, | 600 | .size_type = U8_APER_SIZE, |
2169 | .num_aperture_sizes = 7, | 601 | .num_aperture_sizes = 7, |
602 | .needs_scratch_page = true, | ||
2170 | .configure = intel_845_configure, | 603 | .configure = intel_845_configure, |
2171 | .fetch_size = intel_8xx_fetch_size, | 604 | .fetch_size = intel_8xx_fetch_size, |
2172 | .cleanup = intel_8xx_cleanup, | 605 | .cleanup = intel_8xx_cleanup, |
@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = { | |||
2193 | .aperture_sizes = intel_8xx_sizes, | 626 | .aperture_sizes = intel_8xx_sizes, |
2194 | .size_type = U8_APER_SIZE, | 627 | .size_type = U8_APER_SIZE, |
2195 | .num_aperture_sizes = 7, | 628 | .num_aperture_sizes = 7, |
629 | .needs_scratch_page = true, | ||
2196 | .configure = intel_850_configure, | 630 | .configure = intel_850_configure, |
2197 | .fetch_size = intel_8xx_fetch_size, | 631 | .fetch_size = intel_8xx_fetch_size, |
2198 | .cleanup = intel_8xx_cleanup, | 632 | .cleanup = intel_8xx_cleanup, |
@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = { | |||
2219 | .aperture_sizes = intel_8xx_sizes, | 653 | .aperture_sizes = intel_8xx_sizes, |
2220 | .size_type = U8_APER_SIZE, | 654 | .size_type = U8_APER_SIZE, |
2221 | .num_aperture_sizes = 7, | 655 | .num_aperture_sizes = 7, |
656 | .needs_scratch_page = true, | ||
2222 | .configure = intel_860_configure, | 657 | .configure = intel_860_configure, |
2223 | .fetch_size = intel_8xx_fetch_size, | 658 | .fetch_size = intel_8xx_fetch_size, |
2224 | .cleanup = intel_8xx_cleanup, | 659 | .cleanup = intel_8xx_cleanup, |
@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = { | |||
2240 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 675 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2241 | }; | 676 | }; |
2242 | 677 | ||
2243 | static const struct agp_bridge_driver intel_915_driver = { | ||
2244 | .owner = THIS_MODULE, | ||
2245 | .aperture_sizes = intel_i830_sizes, | ||
2246 | .size_type = FIXED_APER_SIZE, | ||
2247 | .num_aperture_sizes = 4, | ||
2248 | .needs_scratch_page = true, | ||
2249 | .configure = intel_i915_configure, | ||
2250 | .fetch_size = intel_i9xx_fetch_size, | ||
2251 | .cleanup = intel_i915_cleanup, | ||
2252 | .tlb_flush = intel_i810_tlbflush, | ||
2253 | .mask_memory = intel_i810_mask_memory, | ||
2254 | .masks = intel_i810_masks, | ||
2255 | .agp_enable = intel_i810_agp_enable, | ||
2256 | .cache_flush = global_cache_flush, | ||
2257 | .create_gatt_table = intel_i915_create_gatt_table, | ||
2258 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2259 | .insert_memory = intel_i915_insert_entries, | ||
2260 | .remove_memory = intel_i915_remove_entries, | ||
2261 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2262 | .free_by_type = intel_i810_free_by_type, | ||
2263 | .agp_alloc_page = agp_generic_alloc_page, | ||
2264 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2265 | .agp_destroy_page = agp_generic_destroy_page, | ||
2266 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2267 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2268 | .chipset_flush = intel_i915_chipset_flush, | ||
2269 | #ifdef USE_PCI_DMA_API | ||
2270 | .agp_map_page = intel_agp_map_page, | ||
2271 | .agp_unmap_page = intel_agp_unmap_page, | ||
2272 | .agp_map_memory = intel_agp_map_memory, | ||
2273 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2274 | #endif | ||
2275 | }; | ||
2276 | |||
2277 | static const struct agp_bridge_driver intel_i965_driver = { | ||
2278 | .owner = THIS_MODULE, | ||
2279 | .aperture_sizes = intel_i830_sizes, | ||
2280 | .size_type = FIXED_APER_SIZE, | ||
2281 | .num_aperture_sizes = 4, | ||
2282 | .needs_scratch_page = true, | ||
2283 | .configure = intel_i915_configure, | ||
2284 | .fetch_size = intel_i9xx_fetch_size, | ||
2285 | .cleanup = intel_i915_cleanup, | ||
2286 | .tlb_flush = intel_i810_tlbflush, | ||
2287 | .mask_memory = intel_i965_mask_memory, | ||
2288 | .masks = intel_i810_masks, | ||
2289 | .agp_enable = intel_i810_agp_enable, | ||
2290 | .cache_flush = global_cache_flush, | ||
2291 | .create_gatt_table = intel_i965_create_gatt_table, | ||
2292 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2293 | .insert_memory = intel_i915_insert_entries, | ||
2294 | .remove_memory = intel_i915_remove_entries, | ||
2295 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2296 | .free_by_type = intel_i810_free_by_type, | ||
2297 | .agp_alloc_page = agp_generic_alloc_page, | ||
2298 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2299 | .agp_destroy_page = agp_generic_destroy_page, | ||
2300 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2301 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2302 | .chipset_flush = intel_i915_chipset_flush, | ||
2303 | #ifdef USE_PCI_DMA_API | ||
2304 | .agp_map_page = intel_agp_map_page, | ||
2305 | .agp_unmap_page = intel_agp_unmap_page, | ||
2306 | .agp_map_memory = intel_agp_map_memory, | ||
2307 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2308 | #endif | ||
2309 | }; | ||
2310 | |||
2311 | static const struct agp_bridge_driver intel_7505_driver = { | 678 | static const struct agp_bridge_driver intel_7505_driver = { |
2312 | .owner = THIS_MODULE, | 679 | .owner = THIS_MODULE, |
2313 | .aperture_sizes = intel_8xx_sizes, | 680 | .aperture_sizes = intel_8xx_sizes, |
2314 | .size_type = U8_APER_SIZE, | 681 | .size_type = U8_APER_SIZE, |
2315 | .num_aperture_sizes = 7, | 682 | .num_aperture_sizes = 7, |
683 | .needs_scratch_page = true, | ||
2316 | .configure = intel_7505_configure, | 684 | .configure = intel_7505_configure, |
2317 | .fetch_size = intel_8xx_fetch_size, | 685 | .fetch_size = intel_8xx_fetch_size, |
2318 | .cleanup = intel_8xx_cleanup, | 686 | .cleanup = intel_8xx_cleanup, |
@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = { | |||
2334 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 702 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2335 | }; | 703 | }; |
2336 | 704 | ||
2337 | static const struct agp_bridge_driver intel_g33_driver = { | ||
2338 | .owner = THIS_MODULE, | ||
2339 | .aperture_sizes = intel_i830_sizes, | ||
2340 | .size_type = FIXED_APER_SIZE, | ||
2341 | .num_aperture_sizes = 4, | ||
2342 | .needs_scratch_page = true, | ||
2343 | .configure = intel_i915_configure, | ||
2344 | .fetch_size = intel_i9xx_fetch_size, | ||
2345 | .cleanup = intel_i915_cleanup, | ||
2346 | .tlb_flush = intel_i810_tlbflush, | ||
2347 | .mask_memory = intel_i965_mask_memory, | ||
2348 | .masks = intel_i810_masks, | ||
2349 | .agp_enable = intel_i810_agp_enable, | ||
2350 | .cache_flush = global_cache_flush, | ||
2351 | .create_gatt_table = intel_i915_create_gatt_table, | ||
2352 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2353 | .insert_memory = intel_i915_insert_entries, | ||
2354 | .remove_memory = intel_i915_remove_entries, | ||
2355 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2356 | .free_by_type = intel_i810_free_by_type, | ||
2357 | .agp_alloc_page = agp_generic_alloc_page, | ||
2358 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2359 | .agp_destroy_page = agp_generic_destroy_page, | ||
2360 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2361 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2362 | .chipset_flush = intel_i915_chipset_flush, | ||
2363 | #ifdef USE_PCI_DMA_API | ||
2364 | .agp_map_page = intel_agp_map_page, | ||
2365 | .agp_unmap_page = intel_agp_unmap_page, | ||
2366 | .agp_map_memory = intel_agp_map_memory, | ||
2367 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2368 | #endif | ||
2369 | }; | ||
2370 | |||
2371 | static int find_gmch(u16 device) | 705 | static int find_gmch(u16 device) |
2372 | { | 706 | { |
2373 | struct pci_dev *gmch_device; | 707 | struct pci_dev *gmch_device; |
@@ -2392,103 +726,137 @@ static int find_gmch(u16 device) | |||
2392 | static const struct intel_driver_description { | 726 | static const struct intel_driver_description { |
2393 | unsigned int chip_id; | 727 | unsigned int chip_id; |
2394 | unsigned int gmch_chip_id; | 728 | unsigned int gmch_chip_id; |
2395 | unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */ | ||
2396 | char *name; | 729 | char *name; |
2397 | const struct agp_bridge_driver *driver; | 730 | const struct agp_bridge_driver *driver; |
2398 | const struct agp_bridge_driver *gmch_driver; | 731 | const struct agp_bridge_driver *gmch_driver; |
2399 | } intel_agp_chipsets[] = { | 732 | } intel_agp_chipsets[] = { |
2400 | { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL }, | 733 | { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL }, |
2401 | { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL }, | 734 | { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL }, |
2402 | { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL }, | 735 | { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL }, |
2403 | { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810", | 736 | { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810", |
2404 | NULL, &intel_810_driver }, | 737 | NULL, &intel_810_driver }, |
2405 | { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810", | 738 | { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810", |
2406 | NULL, &intel_810_driver }, | 739 | NULL, &intel_810_driver }, |
2407 | { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810", | 740 | { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810", |
2408 | NULL, &intel_810_driver }, | 741 | NULL, &intel_810_driver }, |
2409 | { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815", | 742 | { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815", |
2410 | &intel_815_driver, &intel_810_driver }, | 743 | &intel_815_driver, &intel_810_driver }, |
2411 | { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL }, | 744 | { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL }, |
2412 | { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL }, | 745 | { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL }, |
2413 | { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M", | 746 | { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M", |
2414 | &intel_830mp_driver, &intel_830_driver }, | 747 | &intel_830mp_driver, &intel_830_driver }, |
2415 | { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL }, | 748 | { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL }, |
2416 | { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL }, | 749 | { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL }, |
2417 | { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", | 750 | { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M", |
2418 | &intel_845_driver, &intel_830_driver }, | 751 | &intel_845_driver, &intel_830_driver }, |
2419 | { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, | 752 | { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL }, |
2420 | { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854", | 753 | { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854", |
2421 | &intel_845_driver, &intel_830_driver }, | 754 | &intel_845_driver, &intel_830_driver }, |
2422 | { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, | 755 | { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL }, |
2423 | { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", | 756 | { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", |
2424 | &intel_845_driver, &intel_830_driver }, | 757 | &intel_845_driver, &intel_830_driver }, |
2425 | { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL }, | 758 | { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL }, |
2426 | { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865", | 759 | { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865", |
2427 | &intel_845_driver, &intel_830_driver }, | 760 | &intel_845_driver, &intel_830_driver }, |
2428 | { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL }, | 761 | { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL }, |
2429 | { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)", | 762 | { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", |
2430 | NULL, &intel_915_driver }, | 763 | NULL, &intel_915_driver }, |
2431 | { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G", | 764 | { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G", |
2432 | NULL, &intel_915_driver }, | 765 | NULL, &intel_915_driver }, |
2433 | { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM", | 766 | { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", |
2434 | NULL, &intel_915_driver }, | 767 | NULL, &intel_915_driver }, |
2435 | { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", | 768 | { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G", |
2436 | NULL, &intel_915_driver }, | 769 | NULL, &intel_915_driver }, |
2437 | { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM", | 770 | { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", |
2438 | NULL, &intel_915_driver }, | 771 | NULL, &intel_915_driver }, |
2439 | { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", | 772 | { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", |
2440 | NULL, &intel_915_driver }, | 773 | NULL, &intel_915_driver }, |
2441 | { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", | 774 | { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", |
2442 | NULL, &intel_i965_driver }, | 775 | NULL, &intel_i965_driver }, |
2443 | { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35", | 776 | { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35", |
2444 | NULL, &intel_i965_driver }, | 777 | NULL, &intel_i965_driver }, |
2445 | { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", | 778 | { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", |
2446 | NULL, &intel_i965_driver }, | 779 | NULL, &intel_i965_driver }, |
2447 | { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", | 780 | { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G", |
2448 | NULL, &intel_i965_driver }, | 781 | NULL, &intel_i965_driver }, |
2449 | { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM", | 782 | { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", |
2450 | NULL, &intel_i965_driver }, | 783 | NULL, &intel_i965_driver }, |
2451 | { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", | 784 | { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", |
2452 | NULL, &intel_i965_driver }, | 785 | NULL, &intel_i965_driver }, |
2453 | { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, | 786 | { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL }, |
2454 | { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, | 787 | { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL }, |
2455 | { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33", | 788 | { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33", |
2456 | NULL, &intel_g33_driver }, | 789 | NULL, &intel_g33_driver }, |
2457 | { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35", | 790 | { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", |
2458 | NULL, &intel_g33_driver }, | 791 | NULL, &intel_g33_driver }, |
2459 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", | 792 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", |
2460 | NULL, &intel_g33_driver }, | 793 | NULL, &intel_g33_driver }, |
2461 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150", | 794 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", |
2462 | NULL, &intel_g33_driver }, | 795 | NULL, &intel_g33_driver }, |
2463 | { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150", | 796 | { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", |
2464 | NULL, &intel_g33_driver }, | 797 | NULL, &intel_g33_driver }, |
2465 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, | 798 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, |
2466 | "GM45", NULL, &intel_i965_driver }, | 799 | "GM45", NULL, &intel_i965_driver }, |
2467 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0, | 800 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, |
2468 | "Eaglelake", NULL, &intel_i965_driver }, | 801 | "Eaglelake", NULL, &intel_i965_driver }, |
2469 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, | 802 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, |
2470 | "Q45/Q43", NULL, &intel_i965_driver }, | 803 | "Q45/Q43", NULL, &intel_i965_driver }, |
2471 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, | 804 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, |
2472 | "G45/G43", NULL, &intel_i965_driver }, | 805 | "G45/G43", NULL, &intel_i965_driver }, |
2473 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, | 806 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, |
2474 | "B43", NULL, &intel_i965_driver }, | 807 | "B43", NULL, &intel_i965_driver }, |
2475 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, | 808 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, |
2476 | "G41", NULL, &intel_i965_driver }, | 809 | "G41", NULL, &intel_i965_driver }, |
2477 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, | 810 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, |
2478 | "HD Graphics", NULL, &intel_i965_driver }, | 811 | "HD Graphics", NULL, &intel_i965_driver }, |
2479 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 812 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2480 | "HD Graphics", NULL, &intel_i965_driver }, | 813 | "HD Graphics", NULL, &intel_i965_driver }, |
2481 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 814 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2482 | "HD Graphics", NULL, &intel_i965_driver }, | 815 | "HD Graphics", NULL, &intel_i965_driver }, |
2483 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 816 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2484 | "HD Graphics", NULL, &intel_i965_driver }, | 817 | "HD Graphics", NULL, &intel_i965_driver }, |
2485 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0, | 818 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, |
2486 | "Sandybridge", NULL, &intel_i965_driver }, | 819 | "Sandybridge", NULL, &intel_i965_driver }, |
2487 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0, | 820 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, |
2488 | "Sandybridge", NULL, &intel_i965_driver }, | 821 | "Sandybridge", NULL, &intel_i965_driver }, |
2489 | { 0, 0, 0, NULL, NULL, NULL } | 822 | { 0, 0, NULL, NULL, NULL } |
2490 | }; | 823 | }; |
2491 | 824 | ||
825 | static int __devinit intel_gmch_probe(struct pci_dev *pdev, | ||
826 | struct agp_bridge_data *bridge) | ||
827 | { | ||
828 | int i; | ||
829 | bridge->driver = NULL; | ||
830 | |||
831 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { | ||
832 | if ((intel_agp_chipsets[i].gmch_chip_id != 0) && | ||
833 | find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { | ||
834 | bridge->driver = | ||
835 | intel_agp_chipsets[i].gmch_driver; | ||
836 | break; | ||
837 | } | ||
838 | } | ||
839 | |||
840 | if (!bridge->driver) | ||
841 | return 0; | ||
842 | |||
843 | bridge->dev_private_data = &intel_private; | ||
844 | bridge->dev = pdev; | ||
845 | |||
846 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); | ||
847 | |||
848 | if (bridge->driver->mask_memory == intel_i965_mask_memory) { | ||
849 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) | ||
850 | dev_err(&intel_private.pcidev->dev, | ||
851 | "set gfx device dma mask 36bit failed!\n"); | ||
852 | else | ||
853 | pci_set_consistent_dma_mask(intel_private.pcidev, | ||
854 | DMA_BIT_MASK(36)); | ||
855 | } | ||
856 | |||
857 | return 1; | ||
858 | } | ||
859 | |||
2492 | static int __devinit agp_intel_probe(struct pci_dev *pdev, | 860 | static int __devinit agp_intel_probe(struct pci_dev *pdev, |
2493 | const struct pci_device_id *ent) | 861 | const struct pci_device_id *ent) |
2494 | { | 862 | { |
@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2503 | if (!bridge) | 871 | if (!bridge) |
2504 | return -ENOMEM; | 872 | return -ENOMEM; |
2505 | 873 | ||
874 | bridge->capndx = cap_ptr; | ||
875 | |||
876 | if (intel_gmch_probe(pdev, bridge)) | ||
877 | goto found_gmch; | ||
878 | |||
2506 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { | 879 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { |
2507 | /* In case that multiple models of gfx chip may | 880 | /* In case that multiple models of gfx chip may |
2508 | stand on same host bridge type, this can be | 881 | stand on same host bridge type, this can be |
2509 | sure we detect the right IGD. */ | 882 | sure we detect the right IGD. */ |
2510 | if (pdev->device == intel_agp_chipsets[i].chip_id) { | 883 | if (pdev->device == intel_agp_chipsets[i].chip_id) { |
2511 | if ((intel_agp_chipsets[i].gmch_chip_id != 0) && | 884 | bridge->driver = intel_agp_chipsets[i].driver; |
2512 | find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { | 885 | break; |
2513 | bridge->driver = | ||
2514 | intel_agp_chipsets[i].gmch_driver; | ||
2515 | break; | ||
2516 | } else if (intel_agp_chipsets[i].multi_gmch_chip) { | ||
2517 | continue; | ||
2518 | } else { | ||
2519 | bridge->driver = intel_agp_chipsets[i].driver; | ||
2520 | break; | ||
2521 | } | ||
2522 | } | 886 | } |
2523 | } | 887 | } |
2524 | 888 | ||
@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2530 | return -ENODEV; | 894 | return -ENODEV; |
2531 | } | 895 | } |
2532 | 896 | ||
2533 | if (bridge->driver == NULL) { | 897 | if (!bridge->driver) { |
2534 | /* bridge has no AGP and no IGD detected */ | ||
2535 | if (cap_ptr) | 898 | if (cap_ptr) |
2536 | dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", | 899 | dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", |
2537 | intel_agp_chipsets[i].gmch_chip_id); | 900 | intel_agp_chipsets[i].gmch_chip_id); |
2538 | agp_put_bridge(bridge); | 901 | agp_put_bridge(bridge); |
2539 | return -ENODEV; | 902 | return -ENODEV; |
2540 | } | 903 | } |
2541 | 904 | ||
2542 | bridge->dev = pdev; | 905 | bridge->dev = pdev; |
2543 | bridge->capndx = cap_ptr; | 906 | bridge->dev_private_data = NULL; |
2544 | bridge->dev_private_data = &intel_private; | ||
2545 | 907 | ||
2546 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); | 908 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); |
2547 | 909 | ||
@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2577 | &bridge->mode); | 939 | &bridge->mode); |
2578 | } | 940 | } |
2579 | 941 | ||
2580 | if (bridge->driver->mask_memory == intel_i965_mask_memory) { | 942 | found_gmch: |
2581 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) | ||
2582 | dev_err(&intel_private.pcidev->dev, | ||
2583 | "set gfx device dma mask 36bit failed!\n"); | ||
2584 | else | ||
2585 | pci_set_consistent_dma_mask(intel_private.pcidev, | ||
2586 | DMA_BIT_MASK(36)); | ||
2587 | } | ||
2588 | |||
2589 | pci_set_drvdata(pdev, bridge); | 943 | pci_set_drvdata(pdev, bridge); |
2590 | err = agp_add_bridge(bridge); | 944 | err = agp_add_bridge(bridge); |
2591 | if (!err) | 945 | if (!err) |
@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev) | |||
2611 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); | 965 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); |
2612 | int ret_val; | 966 | int ret_val; |
2613 | 967 | ||
2614 | if (bridge->driver == &intel_generic_driver) | 968 | bridge->driver->configure(); |
2615 | intel_configure(); | ||
2616 | else if (bridge->driver == &intel_850_driver) | ||
2617 | intel_850_configure(); | ||
2618 | else if (bridge->driver == &intel_845_driver) | ||
2619 | intel_845_configure(); | ||
2620 | else if (bridge->driver == &intel_830mp_driver) | ||
2621 | intel_830mp_configure(); | ||
2622 | else if (bridge->driver == &intel_915_driver) | ||
2623 | intel_i915_configure(); | ||
2624 | else if (bridge->driver == &intel_830_driver) | ||
2625 | intel_i830_configure(); | ||
2626 | else if (bridge->driver == &intel_810_driver) | ||
2627 | intel_i810_configure(); | ||
2628 | else if (bridge->driver == &intel_i965_driver) | ||
2629 | intel_i915_configure(); | ||
2630 | 969 | ||
2631 | ret_val = agp_rebind_memory(); | 970 | ret_val = agp_rebind_memory(); |
2632 | if (ret_val != 0) | 971 | if (ret_val != 0) |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h new file mode 100644 index 000000000000..2547465d4658 --- /dev/null +++ b/drivers/char/agp/intel-agp.h | |||
@@ -0,0 +1,239 @@ | |||
1 | /* | ||
2 | * Common Intel AGPGART and GTT definitions. | ||
3 | */ | ||
4 | |||
5 | /* Intel registers */ | ||
6 | #define INTEL_APSIZE 0xb4 | ||
7 | #define INTEL_ATTBASE 0xb8 | ||
8 | #define INTEL_AGPCTRL 0xb0 | ||
9 | #define INTEL_NBXCFG 0x50 | ||
10 | #define INTEL_ERRSTS 0x91 | ||
11 | |||
12 | /* Intel i830 registers */ | ||
13 | #define I830_GMCH_CTRL 0x52 | ||
14 | #define I830_GMCH_ENABLED 0x4 | ||
15 | #define I830_GMCH_MEM_MASK 0x1 | ||
16 | #define I830_GMCH_MEM_64M 0x1 | ||
17 | #define I830_GMCH_MEM_128M 0 | ||
18 | #define I830_GMCH_GMS_MASK 0x70 | ||
19 | #define I830_GMCH_GMS_DISABLED 0x00 | ||
20 | #define I830_GMCH_GMS_LOCAL 0x10 | ||
21 | #define I830_GMCH_GMS_STOLEN_512 0x20 | ||
22 | #define I830_GMCH_GMS_STOLEN_1024 0x30 | ||
23 | #define I830_GMCH_GMS_STOLEN_8192 0x40 | ||
24 | #define I830_RDRAM_CHANNEL_TYPE 0x03010 | ||
25 | #define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) | ||
26 | #define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) | ||
27 | |||
28 | /* This one is for I830MP w. an external graphic card */ | ||
29 | #define INTEL_I830_ERRSTS 0x92 | ||
30 | |||
31 | /* Intel 855GM/852GM registers */ | ||
32 | #define I855_GMCH_GMS_MASK 0xF0 | ||
33 | #define I855_GMCH_GMS_STOLEN_0M 0x0 | ||
34 | #define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) | ||
35 | #define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) | ||
36 | #define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) | ||
37 | #define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) | ||
38 | #define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) | ||
39 | #define I85X_CAPID 0x44 | ||
40 | #define I85X_VARIANT_MASK 0x7 | ||
41 | #define I85X_VARIANT_SHIFT 5 | ||
42 | #define I855_GME 0x0 | ||
43 | #define I855_GM 0x4 | ||
44 | #define I852_GME 0x2 | ||
45 | #define I852_GM 0x5 | ||
46 | |||
47 | /* Intel i845 registers */ | ||
48 | #define INTEL_I845_AGPM 0x51 | ||
49 | #define INTEL_I845_ERRSTS 0xc8 | ||
50 | |||
51 | /* Intel i860 registers */ | ||
52 | #define INTEL_I860_MCHCFG 0x50 | ||
53 | #define INTEL_I860_ERRSTS 0xc8 | ||
54 | |||
55 | /* Intel i810 registers */ | ||
56 | #define I810_GMADDR 0x10 | ||
57 | #define I810_MMADDR 0x14 | ||
58 | #define I810_PTE_BASE 0x10000 | ||
59 | #define I810_PTE_MAIN_UNCACHED 0x00000000 | ||
60 | #define I810_PTE_LOCAL 0x00000002 | ||
61 | #define I810_PTE_VALID 0x00000001 | ||
62 | #define I830_PTE_SYSTEM_CACHED 0x00000006 | ||
63 | #define I810_SMRAM_MISCC 0x70 | ||
64 | #define I810_GFX_MEM_WIN_SIZE 0x00010000 | ||
65 | #define I810_GFX_MEM_WIN_32M 0x00010000 | ||
66 | #define I810_GMS 0x000000c0 | ||
67 | #define I810_GMS_DISABLE 0x00000000 | ||
68 | #define I810_PGETBL_CTL 0x2020 | ||
69 | #define I810_PGETBL_ENABLED 0x00000001 | ||
70 | #define I965_PGETBL_SIZE_MASK 0x0000000e | ||
71 | #define I965_PGETBL_SIZE_512KB (0 << 1) | ||
72 | #define I965_PGETBL_SIZE_256KB (1 << 1) | ||
73 | #define I965_PGETBL_SIZE_128KB (2 << 1) | ||
74 | #define I965_PGETBL_SIZE_1MB (3 << 1) | ||
75 | #define I965_PGETBL_SIZE_2MB (4 << 1) | ||
76 | #define I965_PGETBL_SIZE_1_5MB (5 << 1) | ||
77 | #define G33_PGETBL_SIZE_MASK (3 << 8) | ||
78 | #define G33_PGETBL_SIZE_1M (1 << 8) | ||
79 | #define G33_PGETBL_SIZE_2M (2 << 8) | ||
80 | |||
81 | #define I810_DRAM_CTL 0x3000 | ||
82 | #define I810_DRAM_ROW_0 0x00000001 | ||
83 | #define I810_DRAM_ROW_0_SDRAM 0x00000001 | ||
84 | |||
85 | /* Intel 815 register */ | ||
86 | #define INTEL_815_APCONT 0x51 | ||
87 | #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF | ||
88 | |||
89 | /* Intel i820 registers */ | ||
90 | #define INTEL_I820_RDCR 0x51 | ||
91 | #define INTEL_I820_ERRSTS 0xc8 | ||
92 | |||
93 | /* Intel i840 registers */ | ||
94 | #define INTEL_I840_MCHCFG 0x50 | ||
95 | #define INTEL_I840_ERRSTS 0xc8 | ||
96 | |||
97 | /* Intel i850 registers */ | ||
98 | #define INTEL_I850_MCHCFG 0x50 | ||
99 | #define INTEL_I850_ERRSTS 0xc8 | ||
100 | |||
101 | /* intel 915G registers */ | ||
102 | #define I915_GMADDR 0x18 | ||
103 | #define I915_MMADDR 0x10 | ||
104 | #define I915_PTEADDR 0x1C | ||
105 | #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) | ||
106 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) | ||
107 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) | ||
108 | #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) | ||
109 | #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) | ||
110 | #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) | ||
111 | #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) | ||
112 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | ||
113 | |||
114 | #define I915_IFPADDR 0x60 | ||
115 | |||
116 | /* Intel 965G registers */ | ||
117 | #define I965_MSAC 0x62 | ||
118 | #define I965_IFPADDR 0x70 | ||
119 | |||
120 | /* Intel 7505 registers */ | ||
121 | #define INTEL_I7505_APSIZE 0x74 | ||
122 | #define INTEL_I7505_NCAPID 0x60 | ||
123 | #define INTEL_I7505_NISTAT 0x6c | ||
124 | #define INTEL_I7505_ATTBASE 0x78 | ||
125 | #define INTEL_I7505_ERRSTS 0x42 | ||
126 | #define INTEL_I7505_AGPCTRL 0x70 | ||
127 | #define INTEL_I7505_MCHCFG 0x50 | ||
128 | |||
129 | #define SNB_GMCH_CTRL 0x50 | ||
130 | #define SNB_GMCH_GMS_STOLEN_MASK 0xF8 | ||
131 | #define SNB_GMCH_GMS_STOLEN_32M (1 << 3) | ||
132 | #define SNB_GMCH_GMS_STOLEN_64M (2 << 3) | ||
133 | #define SNB_GMCH_GMS_STOLEN_96M (3 << 3) | ||
134 | #define SNB_GMCH_GMS_STOLEN_128M (4 << 3) | ||
135 | #define SNB_GMCH_GMS_STOLEN_160M (5 << 3) | ||
136 | #define SNB_GMCH_GMS_STOLEN_192M (6 << 3) | ||
137 | #define SNB_GMCH_GMS_STOLEN_224M (7 << 3) | ||
138 | #define SNB_GMCH_GMS_STOLEN_256M (8 << 3) | ||
139 | #define SNB_GMCH_GMS_STOLEN_288M (9 << 3) | ||
140 | #define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) | ||
141 | #define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) | ||
142 | #define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) | ||
143 | #define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) | ||
144 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) | ||
145 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) | ||
146 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) | ||
147 | #define SNB_GTT_SIZE_0M (0 << 8) | ||
148 | #define SNB_GTT_SIZE_1M (1 << 8) | ||
149 | #define SNB_GTT_SIZE_2M (2 << 8) | ||
150 | #define SNB_GTT_SIZE_MASK (3 << 8) | ||
151 | |||
152 | /* pci devices ids */ | ||
153 | #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 | ||
154 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a | ||
155 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 | ||
156 | #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 | ||
157 | #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 | ||
158 | #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 | ||
159 | #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 | ||
160 | #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 | ||
161 | #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 | ||
162 | #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 | ||
163 | #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 | ||
164 | #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 | ||
165 | #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 | ||
166 | #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 | ||
167 | #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC | ||
168 | #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE | ||
169 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 | ||
170 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 | ||
171 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 | ||
172 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 | ||
173 | #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 | ||
174 | #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 | ||
175 | #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 | ||
176 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | ||
177 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | ||
178 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | ||
179 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 | ||
180 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | ||
181 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | ||
182 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | ||
183 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 | ||
184 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 | ||
185 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 | ||
186 | #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 | ||
187 | #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 | ||
188 | #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 | ||
189 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | ||
190 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | ||
191 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 | ||
192 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 | ||
193 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 | ||
194 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | ||
195 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a | ||
196 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 | ||
197 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 | ||
198 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 | ||
199 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 | ||
200 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 | ||
201 | |||
202 | /* cover 915 and 945 variants */ | ||
203 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | ||
204 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ | ||
205 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ | ||
206 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ | ||
207 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ | ||
208 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) | ||
209 | |||
210 | #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ | ||
211 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ | ||
212 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ | ||
213 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ | ||
214 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ | ||
215 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) | ||
216 | |||
217 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | ||
218 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | ||
219 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ | ||
220 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
221 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
222 | |||
223 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
224 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
225 | |||
226 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | ||
227 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
228 | |||
229 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ | ||
230 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | ||
231 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | ||
232 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | ||
233 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | ||
234 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ | ||
235 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ | ||
236 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ | ||
237 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ | ||
238 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ | ||
239 | IS_SNB) | ||
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c new file mode 100644 index 000000000000..e8ea6825822c --- /dev/null +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -0,0 +1,1516 @@ | |||
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */
17 | |||
18 | /* | ||
19 | * If we have Intel graphics, we're not going to have anything other than | ||
20 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent | ||
21 | * on the Intel IOMMU support (CONFIG_DMAR). | ||
22 | * Only newer chipsets need to bother with this, of course. | ||
23 | */ | ||
24 | #ifdef CONFIG_DMAR | ||
25 | #define USE_PCI_DMA_API 1 | ||
26 | #endif | ||
27 | |||
/* i810 fixed aperture modes: {aperture MB, GTT entries, page order} */
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};
34 | |||
/* Driver-private agp memory types (beyond the generic AGP_NORMAL_MEMORY) */
#define AGP_DCACHE_MEMORY	1	/* i810 on-chip display cache */
#define AGP_PHYS_MEMORY		2	/* physically contiguous pages (hw cursor) */
#define INTEL_AGP_CACHED_MEMORY 3	/* snooped/system-cached PTEs */

/* PTE flag masks, indexed by the mask types above */
static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};
47 | |||
/* Driver-global state; there is only ever one Intel GMCH per system. */
static struct _intel_private {
	struct pci_dev *pcidev;	/* device one */
	u8 __iomem *registers;	/* GMCH MMIO register mapping */
	u32 __iomem *gtt; /* I915G */
	int num_dcache_entries;	/* i810 display-cache pages (0 if none) */
	/* gtt_entries is the number of gtt entries that are already mapped
	 * to stolen memory. Stolen memory is larger than the memory mapped
	 * through gtt_entries, as it includes some reserved space for the BIOS
	 * popup and for the GTT.
	 */
	int gtt_entries;			/* i830+ */
	int gtt_total_size;
	union {
		void __iomem *i9xx_flush_page;	/* i9xx: MMIO-mapped flush page */
		void *i8xx_flush_page;		/* i8xx: kmapped flush page */
	};
	struct page *i8xx_page;		/* backing page for i8xx_flush_page */
	struct resource ifp_resource;	/* i9xx flush-page resource — see setup code */
	int resource_valid;		/* set once ifp_resource has been claimed */
} intel_private;
68 | |||
69 | #ifdef USE_PCI_DMA_API | ||
70 | static int intel_agp_map_page(struct page *page, dma_addr_t *ret) | ||
71 | { | ||
72 | *ret = pci_map_page(intel_private.pcidev, page, 0, | ||
73 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
74 | if (pci_dma_mapping_error(intel_private.pcidev, *ret)) | ||
75 | return -EINVAL; | ||
76 | return 0; | ||
77 | } | ||
78 | |||
/* Undo intel_agp_map_page(); @page is unused, only the dma handle matters. */
static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
	pci_unmap_page(intel_private.pcidev, dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
84 | |||
85 | static void intel_agp_free_sglist(struct agp_memory *mem) | ||
86 | { | ||
87 | struct sg_table st; | ||
88 | |||
89 | st.sgl = mem->sg_list; | ||
90 | st.orig_nents = st.nents = mem->page_count; | ||
91 | |||
92 | sg_free_table(&st); | ||
93 | |||
94 | mem->sg_list = NULL; | ||
95 | mem->num_sg = 0; | ||
96 | } | ||
97 | |||
98 | static int intel_agp_map_memory(struct agp_memory *mem) | ||
99 | { | ||
100 | struct sg_table st; | ||
101 | struct scatterlist *sg; | ||
102 | int i; | ||
103 | |||
104 | DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); | ||
105 | |||
106 | if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) | ||
107 | return -ENOMEM; | ||
108 | |||
109 | mem->sg_list = sg = st.sgl; | ||
110 | |||
111 | for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) | ||
112 | sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); | ||
113 | |||
114 | mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, | ||
115 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
116 | if (unlikely(!mem->num_sg)) { | ||
117 | intel_agp_free_sglist(mem); | ||
118 | return -ENOMEM; | ||
119 | } | ||
120 | return 0; | ||
121 | } | ||
122 | |||
/* Undo intel_agp_map_memory(): DMA-unmap and free @mem's scatterlist. */
static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}
131 | |||
/* Write DMA-mapped PTEs for @mem into the GTT starting at @pg_start.
 * Fast path when the sg list was not merged (one sg entry per page);
 * otherwise walk each sg entry and emit one PTE per contained page. */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
			       intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
						sg_dma_address(sg) + m * PAGE_SIZE,
						mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
	/* PCI posting: read back the last PTE written. */
	readl(intel_private.gtt+j-1);
}
167 | |||
168 | #else | ||
169 | |||
170 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
171 | off_t pg_start, int mask_type) | ||
172 | { | ||
173 | int i, j; | ||
174 | u32 cache_bits = 0; | ||
175 | |||
176 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
177 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
178 | { | ||
179 | cache_bits = I830_PTE_SYSTEM_CACHED; | ||
180 | } | ||
181 | |||
182 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
183 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
184 | page_to_phys(mem->pages[i]), mask_type), | ||
185 | intel_private.gtt+j); | ||
186 | } | ||
187 | |||
188 | readl(intel_private.gtt+j-1); | ||
189 | } | ||
190 | |||
191 | #endif | ||
192 | |||
193 | static int intel_i810_fetch_size(void) | ||
194 | { | ||
195 | u32 smram_miscc; | ||
196 | struct aper_size_info_fixed *values; | ||
197 | |||
198 | pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); | ||
199 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
200 | |||
201 | if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { | ||
202 | dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); | ||
203 | return 0; | ||
204 | } | ||
205 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { | ||
206 | agp_bridge->current_size = (void *) (values + 1); | ||
207 | agp_bridge->aperture_size_idx = 1; | ||
208 | return values[1].size; | ||
209 | } else { | ||
210 | agp_bridge->current_size = (void *) (values); | ||
211 | agp_bridge->aperture_size_idx = 0; | ||
212 | return values[0].size; | ||
213 | } | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
/* Program the i810: map the MMIO registers (if not already mapped),
 * detect dedicated video ram, enable the page table, and park every
 * GTT entry on the scratch page. Returns 0 or -ENOMEM. */
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;	/* MMIO base is 512K-aligned */

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Point every GTT entry at the scratch page. */
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
	}
	global_cache_flush();
	return 0;
}
259 | |||
/* Disable the GTT page table and release the MMIO register mapping. */
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}
266 | |||
267 | static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) | ||
268 | { | ||
269 | return; | ||
270 | } | ||
271 | |||
272 | /* Exists to support ARGB cursors */ | ||
273 | static struct page *i8xx_alloc_pages(void) | ||
274 | { | ||
275 | struct page *page; | ||
276 | |||
277 | page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); | ||
278 | if (page == NULL) | ||
279 | return NULL; | ||
280 | |||
281 | if (set_pages_uc(page, 4) < 0) { | ||
282 | set_pages_wb(page, 4); | ||
283 | __free_pages(page, 2); | ||
284 | return NULL; | ||
285 | } | ||
286 | get_page(page); | ||
287 | atomic_inc(&agp_bridge->current_memory_agp); | ||
288 | return page; | ||
289 | } | ||
290 | |||
291 | static void i8xx_destroy_pages(struct page *page) | ||
292 | { | ||
293 | if (page == NULL) | ||
294 | return; | ||
295 | |||
296 | set_pages_wb(page, 4); | ||
297 | put_page(page); | ||
298 | __free_pages(page, 2); | ||
299 | atomic_dec(&agp_bridge->current_memory_agp); | ||
300 | } | ||
301 | |||
302 | static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, | ||
303 | int type) | ||
304 | { | ||
305 | if (type < AGP_USER_TYPES) | ||
306 | return type; | ||
307 | else if (type == AGP_USER_CACHED_MEMORY) | ||
308 | return INTEL_AGP_CACHED_MEMORY; | ||
309 | else | ||
310 | return 0; | ||
311 | } | ||
312 | |||
/* Bind @mem into the GTT at @pg_start. Handles the i810 on-chip display
 * cache (DCACHE) and physical/normal page memory. Returns 0 on success,
 * -EBUSY if a target entry is occupied, -EINVAL on range/type errors. */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;


	/* Every target entry must currently be empty. */
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		/* Map the display cache linearly: entry i -> dcache page i. */
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting */
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI posting */
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
374 | |||
375 | static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
376 | int type) | ||
377 | { | ||
378 | int i; | ||
379 | |||
380 | if (mem->page_count == 0) | ||
381 | return 0; | ||
382 | |||
383 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
384 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
385 | } | ||
386 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
387 | |||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * The i810/i830 requires a physical address to program its mouse | ||
393 | * pointer into hardware. | ||
394 | * However the Xserver still writes to it through the agp aperture. | ||
395 | */ | ||
396 | static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) | ||
397 | { | ||
398 | struct agp_memory *new; | ||
399 | struct page *page; | ||
400 | |||
401 | switch (pg_count) { | ||
402 | case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); | ||
403 | break; | ||
404 | case 4: | ||
405 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
406 | page = i8xx_alloc_pages(); | ||
407 | break; | ||
408 | default: | ||
409 | return NULL; | ||
410 | } | ||
411 | |||
412 | if (page == NULL) | ||
413 | return NULL; | ||
414 | |||
415 | new = agp_create_memory(pg_count); | ||
416 | if (new == NULL) | ||
417 | return NULL; | ||
418 | |||
419 | new->pages[0] = page; | ||
420 | if (pg_count == 4) { | ||
421 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
422 | new->pages[1] = new->pages[0] + 1; | ||
423 | new->pages[2] = new->pages[1] + 1; | ||
424 | new->pages[3] = new->pages[2] + 1; | ||
425 | } | ||
426 | new->page_count = pg_count; | ||
427 | new->num_scratch_pages = pg_count; | ||
428 | new->type = AGP_PHYS_MEMORY; | ||
429 | new->physical = page_to_phys(new->pages[0]); | ||
430 | return new; | ||
431 | } | ||
432 | |||
433 | static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) | ||
434 | { | ||
435 | struct agp_memory *new; | ||
436 | |||
437 | if (type == AGP_DCACHE_MEMORY) { | ||
438 | if (pg_count != intel_private.num_dcache_entries) | ||
439 | return NULL; | ||
440 | |||
441 | new = agp_create_memory(1); | ||
442 | if (new == NULL) | ||
443 | return NULL; | ||
444 | |||
445 | new->type = AGP_DCACHE_MEMORY; | ||
446 | new->page_count = pg_count; | ||
447 | new->num_scratch_pages = 0; | ||
448 | agp_free_page_array(new); | ||
449 | return new; | ||
450 | } | ||
451 | if (type == AGP_PHYS_MEMORY) | ||
452 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
453 | return NULL; | ||
454 | } | ||
455 | |||
/* Free memory from intel_i810_alloc_by_type(). Physical allocations
 * release their backing pages (one order-2 block for the 4-page cursor
 * case); dcache allocations have no pages to free. */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
472 | |||
/* OR the PTE flag bits for @type into @addr to form a GTT entry. */
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
479 | |||
/* i830+ fixed aperture modes: {aperture MB, GTT entries, page order} */
static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};
488 | |||
/* Work out how many GTT entries at the start of the aperture are already
 * claimed by BIOS-stolen memory and record the count (in 4KB pages) in
 * intel_private.gtt_entries. Must run after the MMIO registers are
 * mapped, since some chipsets read the GTT size from PGETBL_CTL. */
static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries = 0;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	int size; /* reserved space (in kb) at the top of stolen memory */

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	/* Step 1: how much of stolen memory (in KB) is unusable as
	 * graphics memory — the GTT itself plus the BIOS popup. */
	if (IS_I965) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		/* The 965 has a field telling us the size of the GTT,
		 * which may be larger than what is necessary to map the
		 * aperture.
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4; /* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
	/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, ignore it in stolen gtt entries counting. However,
		 * 4KB of the stolen memory doesn't get mapped to the GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	/* Step 2: how much memory the BIOS stole, per chipset family. */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			gtt_entries = KB(512) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			gtt_entries = MB(1) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			gtt_entries = MB(8) - KB(size);
			break;
		case I830_GMCH_GMS_LOCAL:
			/* Dedicated (local) graphics memory, sized from the
			 * RDRAM channel-type register. */
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			gtt_entries = MB(64) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			gtt_entries = MB(96) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			gtt_entries = MB(128) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			gtt_entries = MB(160) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			gtt_entries = MB(192) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			gtt_entries = MB(224) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			gtt_entries = MB(256) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			gtt_entries = MB(288) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			gtt_entries = MB(320) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			gtt_entries = MB(352) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			gtt_entries = MB(384) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			gtt_entries = MB(416) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			gtt_entries = MB(448) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			gtt_entries = MB(480) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			gtt_entries = MB(512) - KB(size);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			gtt_entries = MB(1) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			gtt_entries = MB(4) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			gtt_entries = MB(8) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			gtt_entries = MB(16) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(48) - KB(size);
			else
				gtt_entries = 0;
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(64) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(128) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(256) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(96) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(160) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(224) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(352) - KB(size);
			else
				gtt_entries = 0;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	}
	/* Step 3: convert from KB of stolen memory to GTT entries (4KB each). */
	if (gtt_entries > 0) {
		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
		       gtt_entries / KB(1), local ? "local" : "stolen");
		gtt_entries /= KB(4);
	} else {
		dev_info(&agp_bridge->dev->dev,
		       "no pre-allocated video memory detected\n");
		gtt_entries = 0;
	}

	intel_private.gtt_entries = gtt_entries;
}
725 | |||
/* Tear down the i8xx chipset-flush page set up by intel_i830_setup_flush().
 * Order matters: kunmap before the page is unmapped from agp and freed. */
static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
735 | |||
/* Allocate and kmap a page used to flush the i8xx chipset write buffer.
 * Best effort: on failure the flush page simply stays NULL. */
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		/* NOTE(review): this path calls kunmap() on a page that was
		 * never successfully mapped — harmless in practice since
		 * kmap() of a lowmem page cannot fail, but worth confirming. */
		intel_i830_fini_flush();
}
750 | |||
751 | /* The chipset_flush interface needs to get data that has already been | ||
752 | * flushed out of the CPU all the way out to main memory, because the GPU | ||
753 | * doesn't snoop those buffers. | ||
754 | * | ||
755 | * The 8xx series doesn't have the same lovely interface for flushing the | ||
756 | * chipset write buffers that the later chips do. According to the 865 | ||
757 | * specs, it's 64 octwords, or 1KB. So, to get those previous things in | ||
758 | * that buffer out, we just fill 1KB and clflush it out, on the assumption | ||
759 | * that it'll push whatever was in there out. It appears to work. | ||
760 | */ | ||
761 | static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) | ||
762 | { | ||
763 | unsigned int *pg = intel_private.i8xx_flush_page; | ||
764 | |||
765 | memset(pg, 0, 1024); | ||
766 | |||
767 | if (cpu_has_clflush) | ||
768 | clflush_cache_range(pg, 1024); | ||
769 | else if (wbinvd_on_all_cpus() != 0) | ||
770 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); | ||
771 | } | ||
772 | |||
/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 *
 * Maps the MMIO registers, reads the BIOS-programmed GTT base out of
 * PGETBL_CTL, and counts the stolen-memory entries. No table is
 * allocated by the driver itself. Returns 0 or -ENOMEM. */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	/* page_order/num_entries are computed but not used below —
	 * presumably kept for symmetry with other create_gatt_table
	 * implementations. */
	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;	/* MMIO base is 512K-aligned */

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	/* The BIOS-programmed GTT base lives in PGETBL_CTL. */
	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
807 | |||
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 *
 * Nothing to do: the table lives in stolen memory owned by the BIOS,
 * so the driver never frees it. */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
815 | |||
816 | static int intel_i830_fetch_size(void) | ||
817 | { | ||
818 | u16 gmch_ctrl; | ||
819 | struct aper_size_info_fixed *values; | ||
820 | |||
821 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
822 | |||
823 | if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && | ||
824 | agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
825 | /* 855GM/852GM/865G has 128MB aperture size */ | ||
826 | agp_bridge->current_size = (void *) values; | ||
827 | agp_bridge->aperture_size_idx = 0; | ||
828 | return values[0].size; | ||
829 | } | ||
830 | |||
831 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
832 | |||
833 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { | ||
834 | agp_bridge->current_size = (void *) values; | ||
835 | agp_bridge->aperture_size_idx = 0; | ||
836 | return values[0].size; | ||
837 | } else { | ||
838 | agp_bridge->current_size = (void *) (values + 1); | ||
839 | agp_bridge->aperture_size_idx = 1; | ||
840 | return values[1].size; | ||
841 | } | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
/* Program the i830: latch the aperture base, enable the GMCH and page
 * table, park the non-stolen GTT entries on the scratch page, and set
 * up the chipset-flush page. Always returns 0. */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Skip the stolen-memory entries; only clear the rest. */
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}
877 | |||
/* Release the MMIO mapping set up by intel_i830_create_gatt_table(). */
static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}
882 | |||
/* Bind @mem into the GTT at @pg_start, refusing the stolen-memory range
 * at the start of the aperture. Returns 0 on success, -EINVAL on range,
 * overlap-with-stolen, or type errors. */
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI posting */

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
939 | |||
940 | static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
941 | int type) | ||
942 | { | ||
943 | int i; | ||
944 | |||
945 | if (mem->page_count == 0) | ||
946 | return 0; | ||
947 | |||
948 | if (pg_start < intel_private.gtt_entries) { | ||
949 | dev_info(&intel_private.pcidev->dev, | ||
950 | "trying to disable local/stolen memory\n"); | ||
951 | return -EINVAL; | ||
952 | } | ||
953 | |||
954 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
955 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
956 | } | ||
957 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
958 | |||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) | ||
963 | { | ||
964 | if (type == AGP_PHYS_MEMORY) | ||
965 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
966 | /* always return NULL for other allocation types for now */ | ||
967 | return NULL; | ||
968 | } | ||
969 | |||
970 | static int intel_alloc_chipset_flush_resource(void) | ||
971 | { | ||
972 | int ret; | ||
973 | ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, | ||
974 | PAGE_SIZE, PCIBIOS_MIN_MEM, 0, | ||
975 | pcibios_align_resource, agp_bridge->dev); | ||
976 | |||
977 | return ret; | ||
978 | } | ||
979 | |||
/*
 * Locate (or allocate) the i915 chipset flush page.  If the BIOS has not
 * programmed IFPADDR yet (enable bit 0 clear), allocate a fresh page of
 * bus space and write it back enabled; otherwise reuse the BIOS-provided
 * address and try to claim it in the iomem resource tree.
 */
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;	/* bit 0 is the enable flag, not part of the address */

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		/* NOTE(review): struct resource ends are inclusive, so
		 * "temp + PAGE_SIZE" reaches one byte into the following
		 * page - confirm whether this is intended. */
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
1002 | |||
/*
 * 965/G33 variant of the chipset flush page setup: the flush address is
 * a 64-bit value split across IFPADDR (low dword + enable bit 0) and
 * IFPADDR+4 (high dword).
 */
static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {
		/* BIOS left it unset: allocate our own page and enable it. */
		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;	/* strip the enable bit */
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		/* NOTE(review): resource .end is inclusive; l64 + PAGE_SIZE
		 * overlaps the next page by one byte - confirm. */
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
1034 | |||
1035 | static void intel_i9xx_setup_flush(void) | ||
1036 | { | ||
1037 | /* return if already configured */ | ||
1038 | if (intel_private.ifp_resource.start) | ||
1039 | return; | ||
1040 | |||
1041 | if (IS_SNB) | ||
1042 | return; | ||
1043 | |||
1044 | /* setup a resource for this object */ | ||
1045 | intel_private.ifp_resource.name = "Intel Flush Page"; | ||
1046 | intel_private.ifp_resource.flags = IORESOURCE_MEM; | ||
1047 | |||
1048 | /* Setup chipset flush for 915 */ | ||
1049 | if (IS_I965 || IS_G33 || IS_G4X) { | ||
1050 | intel_i965_g33_setup_chipset_flush(); | ||
1051 | } else { | ||
1052 | intel_i915_setup_chipset_flush(); | ||
1053 | } | ||
1054 | |||
1055 | if (intel_private.ifp_resource.start) { | ||
1056 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); | ||
1057 | if (!intel_private.i9xx_flush_page) | ||
1058 | dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); | ||
1059 | } | ||
1060 | } | ||
1061 | |||
1062 | static int intel_i915_configure(void) | ||
1063 | { | ||
1064 | struct aper_size_info_fixed *current_size; | ||
1065 | u32 temp; | ||
1066 | u16 gmch_ctrl; | ||
1067 | int i; | ||
1068 | |||
1069 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
1070 | |||
1071 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); | ||
1072 | |||
1073 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
1074 | |||
1075 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
1076 | gmch_ctrl |= I830_GMCH_ENABLED; | ||
1077 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); | ||
1078 | |||
1079 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
1080 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
1081 | |||
1082 | if (agp_bridge->driver->needs_scratch_page) { | ||
1083 | for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { | ||
1084 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1085 | } | ||
1086 | readl(intel_private.gtt+i-1); /* PCI Posting. */ | ||
1087 | } | ||
1088 | |||
1089 | global_cache_flush(); | ||
1090 | |||
1091 | intel_i9xx_setup_flush(); | ||
1092 | |||
1093 | return 0; | ||
1094 | } | ||
1095 | |||
/*
 * Undo intel_i915 configure/create_gatt_table: unmap the flush page,
 * drop the flush-page resource if we claimed it, then unmap the GTT
 * and MMIO register windows.
 */
static void intel_i915_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	/* Reset state so a later setup starts from scratch. */
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
}
1107 | |||
/* Trigger a chipset-level flush by writing to the mapped flush page
 * (no-op if no flush page could be set up). */
static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
1113 | |||
/*
 * Bind mem's pages into the GTT at pg_start via the scatter/gather
 * helper.  Rejects requests into the stolen-memory region, out-of-range
 * requests, mismatched types and unsupported mask types.
 * Returns 0 on success, -EINVAL on validation failure.
 */
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	/* Empty request: trivially successful. */
	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	/* Refuse to touch the stolen-memory region at the start of the GTT. */
	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

out:
	ret = 0;
out_err:
	/* NOTE(review): is_flushed is set even on error paths that never
	 * reached global_cache_flush() - looks historical, confirm. */
	mem->is_flushed = true;
	return ret;
}
1165 | |||
1166 | static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
1167 | int type) | ||
1168 | { | ||
1169 | int i; | ||
1170 | |||
1171 | if (mem->page_count == 0) | ||
1172 | return 0; | ||
1173 | |||
1174 | if (pg_start < intel_private.gtt_entries) { | ||
1175 | dev_info(&intel_private.pcidev->dev, | ||
1176 | "trying to disable local/stolen memory\n"); | ||
1177 | return -EINVAL; | ||
1178 | } | ||
1179 | |||
1180 | for (i = pg_start; i < (mem->page_count + pg_start); i++) | ||
1181 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1182 | |||
1183 | readl(intel_private.gtt+i-1); | ||
1184 | |||
1185 | return 0; | ||
1186 | } | ||
1187 | |||
1188 | /* Return the aperture size by just checking the resource length. The effect | ||
1189 | * described in the spec of the MSAC registers is just changing of the | ||
1190 | * resource size. | ||
1191 | */ | ||
1192 | static int intel_i9xx_fetch_size(void) | ||
1193 | { | ||
1194 | int num_sizes = ARRAY_SIZE(intel_i830_sizes); | ||
1195 | int aper_size; /* size in megabytes */ | ||
1196 | int i; | ||
1197 | |||
1198 | aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); | ||
1199 | |||
1200 | for (i = 0; i < num_sizes; i++) { | ||
1201 | if (aper_size == intel_i830_sizes[i].size) { | ||
1202 | agp_bridge->current_size = intel_i830_sizes + i; | ||
1203 | return aper_size; | ||
1204 | } | ||
1205 | } | ||
1206 | |||
1207 | return 0; | ||
1208 | } | ||
1209 | |||
1210 | /* The intel i915 automatically initializes the agp aperture during POST. | ||
1211 | * Use the memory already set aside for in the GTT. | ||
1212 | */ | ||
1213 | static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | ||
1214 | { | ||
1215 | int page_order; | ||
1216 | struct aper_size_info_fixed *size; | ||
1217 | int num_entries; | ||
1218 | u32 temp, temp2; | ||
1219 | int gtt_map_size = 256 * 1024; | ||
1220 | |||
1221 | size = agp_bridge->current_size; | ||
1222 | page_order = size->page_order; | ||
1223 | num_entries = size->num_entries; | ||
1224 | agp_bridge->gatt_table_real = NULL; | ||
1225 | |||
1226 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1227 | pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); | ||
1228 | |||
1229 | if (IS_G33) | ||
1230 | gtt_map_size = 1024 * 1024; /* 1M on G33 */ | ||
1231 | intel_private.gtt = ioremap(temp2, gtt_map_size); | ||
1232 | if (!intel_private.gtt) | ||
1233 | return -ENOMEM; | ||
1234 | |||
1235 | intel_private.gtt_total_size = gtt_map_size / 4; | ||
1236 | |||
1237 | temp &= 0xfff80000; | ||
1238 | |||
1239 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1240 | if (!intel_private.registers) { | ||
1241 | iounmap(intel_private.gtt); | ||
1242 | return -ENOMEM; | ||
1243 | } | ||
1244 | |||
1245 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1246 | global_cache_flush(); /* FIXME: ? */ | ||
1247 | |||
1248 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1249 | intel_i830_init_gtt_entries(); | ||
1250 | |||
1251 | agp_bridge->gatt_table = NULL; | ||
1252 | |||
1253 | agp_bridge->gatt_bus_addr = temp; | ||
1254 | |||
1255 | return 0; | ||
1256 | } | ||
1257 | |||
1258 | /* | ||
1259 | * The i965 supports 36-bit physical addresses, but to keep | ||
1260 | * the format of the GTT the same, the bits that don't fit | ||
1261 | * in a 32-bit word are shifted down to bits 4..7. | ||
1262 | * | ||
1263 | * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" | ||
1264 | * is always zero on 32-bit architectures, so no need to make | ||
1265 | * this conditional. | ||
1266 | */ | ||
1267 | static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | ||
1268 | dma_addr_t addr, int type) | ||
1269 | { | ||
1270 | /* Shift high bits down */ | ||
1271 | addr |= (addr >> 28) & 0xf0; | ||
1272 | |||
1273 | /* Type checking must be done elsewhere */ | ||
1274 | return addr | bridge->driver->masks[type].mask; | ||
1275 | } | ||
1276 | |||
/*
 * Report where the GTT sits (byte offset from the MMIO base) and how
 * large its mapping is, keyed off the host-bridge PCI device ID.  On
 * Sandybridge the GTT size must additionally be read out of the GMCH
 * control register.
 */
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
	u16 snb_gmch_ctl;

	switch (agp_bridge->dev->device) {
	case PCI_DEVICE_ID_INTEL_GM45_HB:
	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
	case PCI_DEVICE_ID_INTEL_Q45_HB:
	case PCI_DEVICE_ID_INTEL_G45_HB:
	case PCI_DEVICE_ID_INTEL_G41_HB:
	case PCI_DEVICE_ID_INTEL_B43_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
		/* G4x/Ironlake class: fixed 2MB offset and size. */
		*gtt_offset = *gtt_size = MB(2);
		break;
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
		*gtt_offset = MB(2);

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		/* Unknown encodings deliberately fall through to "0M". */
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			*gtt_size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			*gtt_size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			*gtt_size = MB(2);
			break;
		}
		break;
	default:
		/* Original 965 class: 512KB offset and size. */
		*gtt_offset = *gtt_size = KB(512);
	}
}
1317 | |||
1318 | /* The intel i965 automatically initializes the agp aperture during POST. | ||
1319 | * Use the memory already set aside for in the GTT. | ||
1320 | */ | ||
1321 | static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | ||
1322 | { | ||
1323 | int page_order; | ||
1324 | struct aper_size_info_fixed *size; | ||
1325 | int num_entries; | ||
1326 | u32 temp; | ||
1327 | int gtt_offset, gtt_size; | ||
1328 | |||
1329 | size = agp_bridge->current_size; | ||
1330 | page_order = size->page_order; | ||
1331 | num_entries = size->num_entries; | ||
1332 | agp_bridge->gatt_table_real = NULL; | ||
1333 | |||
1334 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1335 | |||
1336 | temp &= 0xfff00000; | ||
1337 | |||
1338 | intel_i965_get_gtt_range(>t_offset, >t_size); | ||
1339 | |||
1340 | intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); | ||
1341 | |||
1342 | if (!intel_private.gtt) | ||
1343 | return -ENOMEM; | ||
1344 | |||
1345 | intel_private.gtt_total_size = gtt_size / 4; | ||
1346 | |||
1347 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1348 | if (!intel_private.registers) { | ||
1349 | iounmap(intel_private.gtt); | ||
1350 | return -ENOMEM; | ||
1351 | } | ||
1352 | |||
1353 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1354 | global_cache_flush(); /* FIXME: ? */ | ||
1355 | |||
1356 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1357 | intel_i830_init_gtt_entries(); | ||
1358 | |||
1359 | agp_bridge->gatt_table = NULL; | ||
1360 | |||
1361 | agp_bridge->gatt_bus_addr = temp; | ||
1362 | |||
1363 | return 0; | ||
1364 | } | ||
1365 | |||
/* Method table for i810-family bridges: generic GATT handling combined
 * with i810-specific configure/insert/remove/alloc hooks. */
static const struct agp_bridge_driver intel_810_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i810_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 2,
	.needs_scratch_page = true,
	.configure = intel_i810_configure,
	.fetch_size = intel_i810_fetch_size,
	.cleanup = intel_i810_cleanup,
	.mask_memory = intel_i810_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = agp_generic_create_gatt_table,
	.free_gatt_table = agp_generic_free_gatt_table,
	.insert_memory = intel_i810_insert_entries,
	.remove_memory = intel_i810_remove_entries,
	.alloc_by_type = intel_i810_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
1391 | |||
/* Method table for i830-family bridges: i830 GATT/insert/remove paths
 * plus the i830 chipset-flush hook. */
static const struct agp_bridge_driver intel_830_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i830_configure,
	.fetch_size = intel_i830_fetch_size,
	.cleanup = intel_i830_cleanup,
	.mask_memory = intel_i810_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i830_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i830_insert_entries,
	.remove_memory = intel_i830_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i830_chipset_flush,
};
1418 | |||
/* Method table for i915-family bridges: i915 configure/insert/remove,
 * i9xx fetch_size, and (optionally) the PCI DMA API mapping hooks. */
static const struct agp_bridge_driver intel_915_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i915_configure,
	.fetch_size = intel_i9xx_fetch_size,
	.cleanup = intel_i915_cleanup,
	.mask_memory = intel_i810_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i915_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i915_insert_entries,
	.remove_memory = intel_i915_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page = intel_agp_map_page,
	.agp_unmap_page = intel_agp_unmap_page,
	.agp_map_memory = intel_agp_map_memory,
	.agp_unmap_memory = intel_agp_unmap_memory,
#endif
};
1451 | |||
/* Method table for i965-family bridges: shares the i915 paths but uses
 * the 36-bit-aware i965 mask_memory and GATT-table setup. */
static const struct agp_bridge_driver intel_i965_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i915_configure,
	.fetch_size = intel_i9xx_fetch_size,
	.cleanup = intel_i915_cleanup,
	.mask_memory = intel_i965_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i965_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i915_insert_entries,
	.remove_memory = intel_i915_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page = intel_agp_map_page,
	.agp_unmap_page = intel_agp_unmap_page,
	.agp_map_memory = intel_agp_map_memory,
	.agp_unmap_memory = intel_agp_unmap_memory,
#endif
};
1484 | |||
/* Method table for G33-family bridges: i915 configure/GATT paths with
 * the i965 mask_memory (G33 shares the shifted-high-bits PTE format). */
static const struct agp_bridge_driver intel_g33_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i915_configure,
	.fetch_size = intel_i9xx_fetch_size,
	.cleanup = intel_i915_cleanup,
	.mask_memory = intel_i965_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i915_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i915_insert_entries,
	.remove_memory = intel_i915_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page = intel_agp_map_page,
	.agp_unmap_page = intel_agp_unmap_page,
	.agp_map_memory = intel_agp_map_memory,
	.agp_unmap_memory = intel_agp_unmap_memory,
#endif
};
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index 10f24e349a26..b9734a978186 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c | |||
@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = { | |||
310 | .aperture_sizes = nvidia_generic_sizes, | 310 | .aperture_sizes = nvidia_generic_sizes, |
311 | .size_type = U8_APER_SIZE, | 311 | .size_type = U8_APER_SIZE, |
312 | .num_aperture_sizes = 5, | 312 | .num_aperture_sizes = 5, |
313 | .needs_scratch_page = true, | ||
313 | .configure = nvidia_configure, | 314 | .configure = nvidia_configure, |
314 | .fetch_size = nvidia_fetch_size, | 315 | .fetch_size = nvidia_fetch_size, |
315 | .cleanup = nvidia_cleanup, | 316 | .cleanup = nvidia_cleanup, |
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index 6c3837a0184d..29aacd81de78 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c | |||
@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = { | |||
125 | .aperture_sizes = sis_generic_sizes, | 125 | .aperture_sizes = sis_generic_sizes, |
126 | .size_type = U8_APER_SIZE, | 126 | .size_type = U8_APER_SIZE, |
127 | .num_aperture_sizes = 7, | 127 | .num_aperture_sizes = 7, |
128 | .needs_scratch_page = true, | ||
128 | .configure = sis_configure, | 129 | .configure = sis_configure, |
129 | .fetch_size = sis_fetch_size, | 130 | .fetch_size = sis_fetch_size, |
130 | .cleanup = sis_cleanup, | 131 | .cleanup = sis_cleanup, |
@@ -415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = { | |||
415 | .subvendor = PCI_ANY_ID, | 416 | .subvendor = PCI_ANY_ID, |
416 | .subdevice = PCI_ANY_ID, | 417 | .subdevice = PCI_ANY_ID, |
417 | }, | 418 | }, |
418 | { | ||
419 | .class = (PCI_CLASS_BRIDGE_HOST << 8), | ||
420 | .class_mask = ~0, | ||
421 | .vendor = PCI_VENDOR_ID_SI, | ||
422 | .device = PCI_DEVICE_ID_SI_760, | ||
423 | .subvendor = PCI_ANY_ID, | ||
424 | .subdevice = PCI_ANY_ID, | ||
425 | }, | ||
426 | { } | 419 | { } |
427 | }; | 420 | }; |
428 | 421 | ||
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 6f48931ac1ce..95db71360d24 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c | |||
@@ -28,6 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | static int uninorth_rev; | 29 | static int uninorth_rev; |
30 | static int is_u3; | 30 | static int is_u3; |
31 | static u32 scratch_value; | ||
31 | 32 | ||
32 | #define DEFAULT_APERTURE_SIZE 256 | 33 | #define DEFAULT_APERTURE_SIZE 256 |
33 | #define DEFAULT_APERTURE_STRING "256" | 34 | #define DEFAULT_APERTURE_STRING "256" |
@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty | |||
172 | 173 | ||
173 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; | 174 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; |
174 | for (i = 0; i < mem->page_count; ++i) { | 175 | for (i = 0; i < mem->page_count; ++i) { |
175 | if (gp[i]) { | 176 | if (gp[i] != scratch_value) { |
176 | dev_info(&agp_bridge->dev->dev, | 177 | dev_info(&agp_bridge->dev->dev, |
177 | "uninorth_insert_memory: entry 0x%x occupied (%x)\n", | 178 | "uninorth_insert_memory: entry 0x%x occupied (%x)\n", |
178 | i, gp[i]); | 179 | i, gp[i]); |
@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
214 | return 0; | 215 | return 0; |
215 | 216 | ||
216 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; | 217 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; |
217 | for (i = 0; i < mem->page_count; ++i) | 218 | for (i = 0; i < mem->page_count; ++i) { |
218 | gp[i] = 0; | 219 | gp[i] = scratch_value; |
220 | } | ||
219 | mb(); | 221 | mb(); |
220 | uninorth_tlbflush(mem); | 222 | uninorth_tlbflush(mem); |
221 | 223 | ||
@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge) | |||
421 | 423 | ||
422 | bridge->gatt_bus_addr = virt_to_phys(table); | 424 | bridge->gatt_bus_addr = virt_to_phys(table); |
423 | 425 | ||
426 | if (is_u3) | ||
427 | scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL; | ||
428 | else | ||
429 | scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) | | ||
430 | 0x1UL); | ||
424 | for (i = 0; i < num_entries; i++) | 431 | for (i = 0; i < num_entries; i++) |
425 | bridge->gatt_table[i] = 0; | 432 | bridge->gatt_table[i] = scratch_value; |
426 | 433 | ||
427 | return 0; | 434 | return 0; |
428 | 435 | ||
@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = { | |||
519 | .agp_destroy_pages = agp_generic_destroy_pages, | 526 | .agp_destroy_pages = agp_generic_destroy_pages, |
520 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 527 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
521 | .cant_use_aperture = true, | 528 | .cant_use_aperture = true, |
529 | .needs_scratch_page = true, | ||
522 | }; | 530 | }; |
523 | 531 | ||
524 | const struct agp_bridge_driver u3_agp_driver = { | 532 | const struct agp_bridge_driver u3_agp_driver = { |
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index d3bd243867fc..df67e80019d2 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c | |||
@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = { | |||
175 | .aperture_sizes = agp3_generic_sizes, | 175 | .aperture_sizes = agp3_generic_sizes, |
176 | .size_type = U8_APER_SIZE, | 176 | .size_type = U8_APER_SIZE, |
177 | .num_aperture_sizes = 10, | 177 | .num_aperture_sizes = 10, |
178 | .needs_scratch_page = true, | ||
178 | .configure = via_configure_agp3, | 179 | .configure = via_configure_agp3, |
179 | .fetch_size = via_fetch_size_agp3, | 180 | .fetch_size = via_fetch_size_agp3, |
180 | .cleanup = via_cleanup_agp3, | 181 | .cleanup = via_cleanup_agp3, |
@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = { | |||
201 | .aperture_sizes = via_generic_sizes, | 202 | .aperture_sizes = via_generic_sizes, |
202 | .size_type = U8_APER_SIZE, | 203 | .size_type = U8_APER_SIZE, |
203 | .num_aperture_sizes = 9, | 204 | .num_aperture_sizes = 9, |
205 | .needs_scratch_page = true, | ||
204 | .configure = via_configure, | 206 | .configure = via_configure, |
205 | .fetch_size = via_fetch_size, | 207 | .fetch_size = via_fetch_size, |
206 | .cleanup = via_cleanup, | 208 | .cleanup = via_cleanup, |
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c index 7fef305774de..89d871ef8c2f 100644 --- a/drivers/char/bsr.c +++ b/drivers/char/bsr.c | |||
@@ -253,7 +253,7 @@ static int bsr_add_node(struct device_node *bn) | |||
253 | 253 | ||
254 | cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev, | 254 | cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev, |
255 | cur, cur->bsr_name); | 255 | cur, cur->bsr_name); |
256 | if (!cur->bsr_device) { | 256 | if (IS_ERR(cur->bsr_device)) { |
257 | printk(KERN_ERR "device_create failed for %s\n", | 257 | printk(KERN_ERR "device_create failed for %s\n", |
258 | cur->bsr_name); | 258 | cur->bsr_name); |
259 | cdev_del(&cur->bsr_cdev); | 259 | cdev_del(&cur->bsr_cdev); |
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 64fe0a793efd..75f1cbd61c17 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c | |||
@@ -32,7 +32,7 @@ static bool busy; | |||
32 | static void random_recv_done(struct virtqueue *vq) | 32 | static void random_recv_done(struct virtqueue *vq) |
33 | { | 33 | { |
34 | /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ | 34 | /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ |
35 | if (!vq->vq_ops->get_buf(vq, &data_avail)) | 35 | if (!virtqueue_get_buf(vq, &data_avail)) |
36 | return; | 36 | return; |
37 | 37 | ||
38 | complete(&have_data); | 38 | complete(&have_data); |
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size) | |||
46 | sg_init_one(&sg, buf, size); | 46 | sg_init_one(&sg, buf, size); |
47 | 47 | ||
48 | /* There should always be room for one buffer. */ | 48 | /* There should always be room for one buffer. */ |
49 | if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0) | 49 | if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0) |
50 | BUG(); | 50 | BUG(); |
51 | 51 | ||
52 | vq->vq_ops->kick(vq); | 52 | virtqueue_kick(vq); |
53 | } | 53 | } |
54 | 54 | ||
55 | static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) | 55 | static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) |
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index fc8cf7ac7f2b..4cd8b227c11f 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
24 | #include <linux/dmi.h> | 24 | #include <linux/dmi.h> |
25 | #include <linux/capability.h> | 25 | #include <linux/capability.h> |
26 | #include <linux/smp_lock.h> | ||
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
27 | #include <asm/io.h> | 28 | #include <asm/io.h> |
28 | 29 | ||
@@ -82,8 +83,7 @@ module_param(fan_mult, int, 0); | |||
82 | MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with"); | 83 | MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with"); |
83 | 84 | ||
84 | static int i8k_open_fs(struct inode *inode, struct file *file); | 85 | static int i8k_open_fs(struct inode *inode, struct file *file); |
85 | static int i8k_ioctl(struct inode *, struct file *, unsigned int, | 86 | static long i8k_ioctl(struct file *, unsigned int, unsigned long); |
86 | unsigned long); | ||
87 | 87 | ||
88 | static const struct file_operations i8k_fops = { | 88 | static const struct file_operations i8k_fops = { |
89 | .owner = THIS_MODULE, | 89 | .owner = THIS_MODULE, |
@@ -91,7 +91,7 @@ static const struct file_operations i8k_fops = { | |||
91 | .read = seq_read, | 91 | .read = seq_read, |
92 | .llseek = seq_lseek, | 92 | .llseek = seq_lseek, |
93 | .release = single_release, | 93 | .release = single_release, |
94 | .ioctl = i8k_ioctl, | 94 | .unlocked_ioctl = i8k_ioctl, |
95 | }; | 95 | }; |
96 | 96 | ||
97 | struct smm_regs { | 97 | struct smm_regs { |
@@ -307,8 +307,8 @@ static int i8k_get_dell_signature(int req_fn) | |||
307 | return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1; | 307 | return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1; |
308 | } | 308 | } |
309 | 309 | ||
310 | static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, | 310 | static int |
311 | unsigned long arg) | 311 | i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) |
312 | { | 312 | { |
313 | int val = 0; | 313 | int val = 0; |
314 | int speed; | 314 | int speed; |
@@ -395,6 +395,17 @@ static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, | |||
395 | return 0; | 395 | return 0; |
396 | } | 396 | } |
397 | 397 | ||
398 | static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | ||
399 | { | ||
400 | long ret; | ||
401 | |||
402 | lock_kernel(); | ||
403 | ret = i8k_ioctl_unlocked(fp, cmd, arg); | ||
404 | unlock_kernel(); | ||
405 | |||
406 | return ret; | ||
407 | } | ||
408 | |||
398 | /* | 409 | /* |
399 | * Print the information for /proc/i8k. | 410 | * Print the information for /proc/i8k. |
400 | */ | 411 | */ |
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 0fa2e4a0835d..98310e1aae30 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -879,8 +879,8 @@ static int isicom_open(struct tty_struct *tty, struct file *filp) | |||
879 | if (tport == NULL) | 879 | if (tport == NULL) |
880 | return -ENODEV; | 880 | return -ENODEV; |
881 | port = container_of(tport, struct isi_port, port); | 881 | port = container_of(tport, struct isi_port, port); |
882 | card = &isi_card[BOARD(tty->index)]; | ||
883 | 882 | ||
883 | tty->driver_data = port; | ||
884 | return tty_port_open(tport, tty, filp); | 884 | return tty_port_open(tport, tty, filp); |
885 | } | 885 | } |
886 | 886 | ||
@@ -936,7 +936,12 @@ static void isicom_shutdown(struct tty_port *port) | |||
936 | static void isicom_close(struct tty_struct *tty, struct file *filp) | 936 | static void isicom_close(struct tty_struct *tty, struct file *filp) |
937 | { | 937 | { |
938 | struct isi_port *ip = tty->driver_data; | 938 | struct isi_port *ip = tty->driver_data; |
939 | struct tty_port *port = &ip->port; | 939 | struct tty_port *port; |
940 | |||
941 | if (ip == NULL) | ||
942 | return; | ||
943 | |||
944 | port = &ip->port; | ||
940 | if (isicom_paranoia_check(ip, tty->name, "isicom_close")) | 945 | if (isicom_paranoia_check(ip, tty->name, "isicom_close")) |
941 | return; | 946 | return; |
942 | tty_port_close(port, tty, filp); | 947 | tty_port_close(port, tty, filp); |
@@ -1568,11 +1573,16 @@ static int __devinit isicom_probe(struct pci_dev *pdev, | |||
1568 | dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); | 1573 | dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); |
1569 | 1574 | ||
1570 | /* allot the first empty slot in the array */ | 1575 | /* allot the first empty slot in the array */ |
1571 | for (index = 0; index < BOARD_COUNT; index++) | 1576 | for (index = 0; index < BOARD_COUNT; index++) { |
1572 | if (isi_card[index].base == 0) { | 1577 | if (isi_card[index].base == 0) { |
1573 | board = &isi_card[index]; | 1578 | board = &isi_card[index]; |
1574 | break; | 1579 | break; |
1575 | } | 1580 | } |
1581 | } | ||
1582 | if (index == BOARD_COUNT) { | ||
1583 | retval = -ENODEV; | ||
1584 | goto err_disable; | ||
1585 | } | ||
1576 | 1586 | ||
1577 | board->index = index; | 1587 | board->index = index; |
1578 | board->base = pci_resource_start(pdev, 3); | 1588 | board->base = pci_resource_start(pdev, 3); |
@@ -1619,6 +1629,7 @@ errunrr: | |||
1619 | errdec: | 1629 | errdec: |
1620 | board->base = 0; | 1630 | board->base = 0; |
1621 | card_count--; | 1631 | card_count--; |
1632 | err_disable: | ||
1622 | pci_disable_device(pdev); | 1633 | pci_disable_device(pdev); |
1623 | err: | 1634 | err: |
1624 | return retval; | 1635 | return retval; |
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index 4cd6c527ee41..4e395c956a09 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c | |||
@@ -827,6 +827,8 @@ static int stli_open(struct tty_struct *tty, struct file *filp) | |||
827 | return -ENODEV; | 827 | return -ENODEV; |
828 | if (portp->devnr < 1) | 828 | if (portp->devnr < 1) |
829 | return -ENODEV; | 829 | return -ENODEV; |
830 | |||
831 | tty->driver_data = portp; | ||
830 | return tty_port_open(&portp->port, tty, filp); | 832 | return tty_port_open(&portp->port, tty, filp); |
831 | } | 833 | } |
832 | 834 | ||
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c index ada25bb8941e..54109dc9240c 100644 --- a/drivers/char/keyboard.c +++ b/drivers/char/keyboard.c | |||
@@ -24,6 +24,8 @@ | |||
24 | * 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik) | 24 | * 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik) |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
28 | |||
27 | #include <linux/consolemap.h> | 29 | #include <linux/consolemap.h> |
28 | #include <linux/module.h> | 30 | #include <linux/module.h> |
29 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
@@ -38,7 +40,6 @@ | |||
38 | #include <linux/kbd_kern.h> | 40 | #include <linux/kbd_kern.h> |
39 | #include <linux/kbd_diacr.h> | 41 | #include <linux/kbd_diacr.h> |
40 | #include <linux/vt_kern.h> | 42 | #include <linux/vt_kern.h> |
41 | #include <linux/sysrq.h> | ||
42 | #include <linux/input.h> | 43 | #include <linux/input.h> |
43 | #include <linux/reboot.h> | 44 | #include <linux/reboot.h> |
44 | #include <linux/notifier.h> | 45 | #include <linux/notifier.h> |
@@ -82,8 +83,7 @@ void compute_shiftstate(void); | |||
82 | typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value, | 83 | typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value, |
83 | char up_flag); | 84 | char up_flag); |
84 | static k_handler_fn K_HANDLERS; | 85 | static k_handler_fn K_HANDLERS; |
85 | k_handler_fn *k_handler[16] = { K_HANDLERS }; | 86 | static k_handler_fn *k_handler[16] = { K_HANDLERS }; |
86 | EXPORT_SYMBOL_GPL(k_handler); | ||
87 | 87 | ||
88 | #define FN_HANDLERS\ | 88 | #define FN_HANDLERS\ |
89 | fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\ | 89 | fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\ |
@@ -133,7 +133,7 @@ static struct input_handler kbd_handler; | |||
133 | static DEFINE_SPINLOCK(kbd_event_lock); | 133 | static DEFINE_SPINLOCK(kbd_event_lock); |
134 | static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ | 134 | static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ |
135 | static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */ | 135 | static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */ |
136 | static int dead_key_next; | 136 | static bool dead_key_next; |
137 | static int npadch = -1; /* -1 or number assembled on pad */ | 137 | static int npadch = -1; /* -1 or number assembled on pad */ |
138 | static unsigned int diacr; | 138 | static unsigned int diacr; |
139 | static char rep; /* flag telling character repeat */ | 139 | static char rep; /* flag telling character repeat */ |
@@ -147,22 +147,6 @@ static struct ledptr { | |||
147 | unsigned char valid:1; | 147 | unsigned char valid:1; |
148 | } ledptrs[3]; | 148 | } ledptrs[3]; |
149 | 149 | ||
150 | /* Simple translation table for the SysRq keys */ | ||
151 | |||
152 | #ifdef CONFIG_MAGIC_SYSRQ | ||
153 | unsigned char kbd_sysrq_xlate[KEY_MAX + 1] = | ||
154 | "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */ | ||
155 | "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */ | ||
156 | "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */ | ||
157 | "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */ | ||
158 | "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */ | ||
159 | "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ | ||
160 | "\r\000/"; /* 0x60 - 0x6f */ | ||
161 | static int sysrq_down; | ||
162 | static int sysrq_alt_use; | ||
163 | #endif | ||
164 | static int sysrq_alt; | ||
165 | |||
166 | /* | 150 | /* |
167 | * Notifier list for console keyboard events | 151 | * Notifier list for console keyboard events |
168 | */ | 152 | */ |
@@ -361,8 +345,8 @@ static void to_utf8(struct vc_data *vc, uint c) | |||
361 | /* 110***** 10****** */ | 345 | /* 110***** 10****** */ |
362 | put_queue(vc, 0xc0 | (c >> 6)); | 346 | put_queue(vc, 0xc0 | (c >> 6)); |
363 | put_queue(vc, 0x80 | (c & 0x3f)); | 347 | put_queue(vc, 0x80 | (c & 0x3f)); |
364 | } else if (c < 0x10000) { | 348 | } else if (c < 0x10000) { |
365 | if (c >= 0xD800 && c < 0xE000) | 349 | if (c >= 0xD800 && c < 0xE000) |
366 | return; | 350 | return; |
367 | if (c == 0xFFFF) | 351 | if (c == 0xFFFF) |
368 | return; | 352 | return; |
@@ -370,7 +354,7 @@ static void to_utf8(struct vc_data *vc, uint c) | |||
370 | put_queue(vc, 0xe0 | (c >> 12)); | 354 | put_queue(vc, 0xe0 | (c >> 12)); |
371 | put_queue(vc, 0x80 | ((c >> 6) & 0x3f)); | 355 | put_queue(vc, 0x80 | ((c >> 6) & 0x3f)); |
372 | put_queue(vc, 0x80 | (c & 0x3f)); | 356 | put_queue(vc, 0x80 | (c & 0x3f)); |
373 | } else if (c < 0x110000) { | 357 | } else if (c < 0x110000) { |
374 | /* 11110*** 10****** 10****** 10****** */ | 358 | /* 11110*** 10****** 10****** 10****** */ |
375 | put_queue(vc, 0xf0 | (c >> 18)); | 359 | put_queue(vc, 0xf0 | (c >> 18)); |
376 | put_queue(vc, 0x80 | ((c >> 12) & 0x3f)); | 360 | put_queue(vc, 0x80 | ((c >> 12) & 0x3f)); |
@@ -469,6 +453,7 @@ static void fn_enter(struct vc_data *vc) | |||
469 | } | 453 | } |
470 | diacr = 0; | 454 | diacr = 0; |
471 | } | 455 | } |
456 | |||
472 | put_queue(vc, 13); | 457 | put_queue(vc, 13); |
473 | if (vc_kbd_mode(kbd, VC_CRLF)) | 458 | if (vc_kbd_mode(kbd, VC_CRLF)) |
474 | put_queue(vc, 10); | 459 | put_queue(vc, 10); |
@@ -478,6 +463,7 @@ static void fn_caps_toggle(struct vc_data *vc) | |||
478 | { | 463 | { |
479 | if (rep) | 464 | if (rep) |
480 | return; | 465 | return; |
466 | |||
481 | chg_vc_kbd_led(kbd, VC_CAPSLOCK); | 467 | chg_vc_kbd_led(kbd, VC_CAPSLOCK); |
482 | } | 468 | } |
483 | 469 | ||
@@ -485,12 +471,14 @@ static void fn_caps_on(struct vc_data *vc) | |||
485 | { | 471 | { |
486 | if (rep) | 472 | if (rep) |
487 | return; | 473 | return; |
474 | |||
488 | set_vc_kbd_led(kbd, VC_CAPSLOCK); | 475 | set_vc_kbd_led(kbd, VC_CAPSLOCK); |
489 | } | 476 | } |
490 | 477 | ||
491 | static void fn_show_ptregs(struct vc_data *vc) | 478 | static void fn_show_ptregs(struct vc_data *vc) |
492 | { | 479 | { |
493 | struct pt_regs *regs = get_irq_regs(); | 480 | struct pt_regs *regs = get_irq_regs(); |
481 | |||
494 | if (regs) | 482 | if (regs) |
495 | show_regs(regs); | 483 | show_regs(regs); |
496 | } | 484 | } |
@@ -515,7 +503,7 @@ static void fn_hold(struct vc_data *vc) | |||
515 | 503 | ||
516 | static void fn_num(struct vc_data *vc) | 504 | static void fn_num(struct vc_data *vc) |
517 | { | 505 | { |
518 | if (vc_kbd_mode(kbd,VC_APPLIC)) | 506 | if (vc_kbd_mode(kbd, VC_APPLIC)) |
519 | applkey(vc, 'P', 1); | 507 | applkey(vc, 'P', 1); |
520 | else | 508 | else |
521 | fn_bare_num(vc); | 509 | fn_bare_num(vc); |
@@ -610,7 +598,7 @@ static void fn_boot_it(struct vc_data *vc) | |||
610 | 598 | ||
611 | static void fn_compose(struct vc_data *vc) | 599 | static void fn_compose(struct vc_data *vc) |
612 | { | 600 | { |
613 | dead_key_next = 1; | 601 | dead_key_next = true; |
614 | } | 602 | } |
615 | 603 | ||
616 | static void fn_spawn_con(struct vc_data *vc) | 604 | static void fn_spawn_con(struct vc_data *vc) |
@@ -657,7 +645,7 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag) | |||
657 | 645 | ||
658 | static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag) | 646 | static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag) |
659 | { | 647 | { |
660 | printk(KERN_ERR "keyboard.c: k_lowercase was called - impossible\n"); | 648 | pr_err("k_lowercase was called - impossible\n"); |
661 | } | 649 | } |
662 | 650 | ||
663 | static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag) | 651 | static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag) |
@@ -669,7 +657,7 @@ static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag) | |||
669 | value = handle_diacr(vc, value); | 657 | value = handle_diacr(vc, value); |
670 | 658 | ||
671 | if (dead_key_next) { | 659 | if (dead_key_next) { |
672 | dead_key_next = 0; | 660 | dead_key_next = false; |
673 | diacr = value; | 661 | diacr = value; |
674 | return; | 662 | return; |
675 | } | 663 | } |
@@ -691,6 +679,7 @@ static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag) | |||
691 | { | 679 | { |
692 | if (up_flag) | 680 | if (up_flag) |
693 | return; | 681 | return; |
682 | |||
694 | diacr = (diacr ? handle_diacr(vc, value) : value); | 683 | diacr = (diacr ? handle_diacr(vc, value) : value); |
695 | } | 684 | } |
696 | 685 | ||
@@ -710,29 +699,28 @@ static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag) | |||
710 | static void k_dead(struct vc_data *vc, unsigned char value, char up_flag) | 699 | static void k_dead(struct vc_data *vc, unsigned char value, char up_flag) |
711 | { | 700 | { |
712 | static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' }; | 701 | static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' }; |
713 | value = ret_diacr[value]; | 702 | |
714 | k_deadunicode(vc, value, up_flag); | 703 | k_deadunicode(vc, ret_diacr[value], up_flag); |
715 | } | 704 | } |
716 | 705 | ||
717 | static void k_cons(struct vc_data *vc, unsigned char value, char up_flag) | 706 | static void k_cons(struct vc_data *vc, unsigned char value, char up_flag) |
718 | { | 707 | { |
719 | if (up_flag) | 708 | if (up_flag) |
720 | return; | 709 | return; |
710 | |||
721 | set_console(value); | 711 | set_console(value); |
722 | } | 712 | } |
723 | 713 | ||
724 | static void k_fn(struct vc_data *vc, unsigned char value, char up_flag) | 714 | static void k_fn(struct vc_data *vc, unsigned char value, char up_flag) |
725 | { | 715 | { |
726 | unsigned v; | ||
727 | |||
728 | if (up_flag) | 716 | if (up_flag) |
729 | return; | 717 | return; |
730 | v = value; | 718 | |
731 | if (v < ARRAY_SIZE(func_table)) { | 719 | if ((unsigned)value < ARRAY_SIZE(func_table)) { |
732 | if (func_table[value]) | 720 | if (func_table[value]) |
733 | puts_queue(vc, func_table[value]); | 721 | puts_queue(vc, func_table[value]); |
734 | } else | 722 | } else |
735 | printk(KERN_ERR "k_fn called with value=%d\n", value); | 723 | pr_err("k_fn called with value=%d\n", value); |
736 | } | 724 | } |
737 | 725 | ||
738 | static void k_cur(struct vc_data *vc, unsigned char value, char up_flag) | 726 | static void k_cur(struct vc_data *vc, unsigned char value, char up_flag) |
@@ -741,6 +729,7 @@ static void k_cur(struct vc_data *vc, unsigned char value, char up_flag) | |||
741 | 729 | ||
742 | if (up_flag) | 730 | if (up_flag) |
743 | return; | 731 | return; |
732 | |||
744 | applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE)); | 733 | applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE)); |
745 | } | 734 | } |
746 | 735 | ||
@@ -758,43 +747,45 @@ static void k_pad(struct vc_data *vc, unsigned char value, char up_flag) | |||
758 | return; | 747 | return; |
759 | } | 748 | } |
760 | 749 | ||
761 | if (!vc_kbd_led(kbd, VC_NUMLOCK)) | 750 | if (!vc_kbd_led(kbd, VC_NUMLOCK)) { |
751 | |||
762 | switch (value) { | 752 | switch (value) { |
763 | case KVAL(K_PCOMMA): | 753 | case KVAL(K_PCOMMA): |
764 | case KVAL(K_PDOT): | 754 | case KVAL(K_PDOT): |
765 | k_fn(vc, KVAL(K_REMOVE), 0); | 755 | k_fn(vc, KVAL(K_REMOVE), 0); |
766 | return; | 756 | return; |
767 | case KVAL(K_P0): | 757 | case KVAL(K_P0): |
768 | k_fn(vc, KVAL(K_INSERT), 0); | 758 | k_fn(vc, KVAL(K_INSERT), 0); |
769 | return; | 759 | return; |
770 | case KVAL(K_P1): | 760 | case KVAL(K_P1): |
771 | k_fn(vc, KVAL(K_SELECT), 0); | 761 | k_fn(vc, KVAL(K_SELECT), 0); |
772 | return; | 762 | return; |
773 | case KVAL(K_P2): | 763 | case KVAL(K_P2): |
774 | k_cur(vc, KVAL(K_DOWN), 0); | 764 | k_cur(vc, KVAL(K_DOWN), 0); |
775 | return; | 765 | return; |
776 | case KVAL(K_P3): | 766 | case KVAL(K_P3): |
777 | k_fn(vc, KVAL(K_PGDN), 0); | 767 | k_fn(vc, KVAL(K_PGDN), 0); |
778 | return; | 768 | return; |
779 | case KVAL(K_P4): | 769 | case KVAL(K_P4): |
780 | k_cur(vc, KVAL(K_LEFT), 0); | 770 | k_cur(vc, KVAL(K_LEFT), 0); |
781 | return; | 771 | return; |
782 | case KVAL(K_P6): | 772 | case KVAL(K_P6): |
783 | k_cur(vc, KVAL(K_RIGHT), 0); | 773 | k_cur(vc, KVAL(K_RIGHT), 0); |
784 | return; | 774 | return; |
785 | case KVAL(K_P7): | 775 | case KVAL(K_P7): |
786 | k_fn(vc, KVAL(K_FIND), 0); | 776 | k_fn(vc, KVAL(K_FIND), 0); |
787 | return; | 777 | return; |
788 | case KVAL(K_P8): | 778 | case KVAL(K_P8): |
789 | k_cur(vc, KVAL(K_UP), 0); | 779 | k_cur(vc, KVAL(K_UP), 0); |
790 | return; | 780 | return; |
791 | case KVAL(K_P9): | 781 | case KVAL(K_P9): |
792 | k_fn(vc, KVAL(K_PGUP), 0); | 782 | k_fn(vc, KVAL(K_PGUP), 0); |
793 | return; | 783 | return; |
794 | case KVAL(K_P5): | 784 | case KVAL(K_P5): |
795 | applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC)); | 785 | applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC)); |
796 | return; | 786 | return; |
797 | } | 787 | } |
788 | } | ||
798 | 789 | ||
799 | put_queue(vc, pad_chars[value]); | 790 | put_queue(vc, pad_chars[value]); |
800 | if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF)) | 791 | if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF)) |
@@ -880,6 +871,7 @@ static void k_lock(struct vc_data *vc, unsigned char value, char up_flag) | |||
880 | { | 871 | { |
881 | if (up_flag || rep) | 872 | if (up_flag || rep) |
882 | return; | 873 | return; |
874 | |||
883 | chg_vc_kbd_lock(kbd, value); | 875 | chg_vc_kbd_lock(kbd, value); |
884 | } | 876 | } |
885 | 877 | ||
@@ -888,6 +880,7 @@ static void k_slock(struct vc_data *vc, unsigned char value, char up_flag) | |||
888 | k_shift(vc, value, up_flag); | 880 | k_shift(vc, value, up_flag); |
889 | if (up_flag || rep) | 881 | if (up_flag || rep) |
890 | return; | 882 | return; |
883 | |||
891 | chg_vc_kbd_slock(kbd, value); | 884 | chg_vc_kbd_slock(kbd, value); |
892 | /* try to make Alt, oops, AltGr and such work */ | 885 | /* try to make Alt, oops, AltGr and such work */ |
893 | if (!key_maps[kbd->lockstate ^ kbd->slockstate]) { | 886 | if (!key_maps[kbd->lockstate ^ kbd->slockstate]) { |
@@ -925,12 +918,12 @@ static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag) | |||
925 | 918 | ||
926 | static void k_brl(struct vc_data *vc, unsigned char value, char up_flag) | 919 | static void k_brl(struct vc_data *vc, unsigned char value, char up_flag) |
927 | { | 920 | { |
928 | static unsigned pressed,committing; | 921 | static unsigned pressed, committing; |
929 | static unsigned long releasestart; | 922 | static unsigned long releasestart; |
930 | 923 | ||
931 | if (kbd->kbdmode != VC_UNICODE) { | 924 | if (kbd->kbdmode != VC_UNICODE) { |
932 | if (!up_flag) | 925 | if (!up_flag) |
933 | printk("keyboard mode must be unicode for braille patterns\n"); | 926 | pr_warning("keyboard mode must be unicode for braille patterns\n"); |
934 | return; | 927 | return; |
935 | } | 928 | } |
936 | 929 | ||
@@ -942,32 +935,28 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag) | |||
942 | if (value > 8) | 935 | if (value > 8) |
943 | return; | 936 | return; |
944 | 937 | ||
945 | if (up_flag) { | 938 | if (!up_flag) { |
946 | if (brl_timeout) { | ||
947 | if (!committing || | ||
948 | time_after(jiffies, | ||
949 | releasestart + msecs_to_jiffies(brl_timeout))) { | ||
950 | committing = pressed; | ||
951 | releasestart = jiffies; | ||
952 | } | ||
953 | pressed &= ~(1 << (value - 1)); | ||
954 | if (!pressed) { | ||
955 | if (committing) { | ||
956 | k_brlcommit(vc, committing, 0); | ||
957 | committing = 0; | ||
958 | } | ||
959 | } | ||
960 | } else { | ||
961 | if (committing) { | ||
962 | k_brlcommit(vc, committing, 0); | ||
963 | committing = 0; | ||
964 | } | ||
965 | pressed &= ~(1 << (value - 1)); | ||
966 | } | ||
967 | } else { | ||
968 | pressed |= 1 << (value - 1); | 939 | pressed |= 1 << (value - 1); |
969 | if (!brl_timeout) | 940 | if (!brl_timeout) |
970 | committing = pressed; | 941 | committing = pressed; |
942 | } else if (brl_timeout) { | ||
943 | if (!committing || | ||
944 | time_after(jiffies, | ||
945 | releasestart + msecs_to_jiffies(brl_timeout))) { | ||
946 | committing = pressed; | ||
947 | releasestart = jiffies; | ||
948 | } | ||
949 | pressed &= ~(1 << (value - 1)); | ||
950 | if (!pressed && committing) { | ||
951 | k_brlcommit(vc, committing, 0); | ||
952 | committing = 0; | ||
953 | } | ||
954 | } else { | ||
955 | if (committing) { | ||
956 | k_brlcommit(vc, committing, 0); | ||
957 | committing = 0; | ||
958 | } | ||
959 | pressed &= ~(1 << (value - 1)); | ||
971 | } | 960 | } |
972 | } | 961 | } |
973 | 962 | ||
@@ -988,6 +977,7 @@ void setledstate(struct kbd_struct *kbd, unsigned int led) | |||
988 | kbd->ledmode = LED_SHOW_IOCTL; | 977 | kbd->ledmode = LED_SHOW_IOCTL; |
989 | } else | 978 | } else |
990 | kbd->ledmode = LED_SHOW_FLAGS; | 979 | kbd->ledmode = LED_SHOW_FLAGS; |
980 | |||
991 | set_leds(); | 981 | set_leds(); |
992 | } | 982 | } |
993 | 983 | ||
@@ -1075,7 +1065,7 @@ static const unsigned short x86_keycodes[256] = | |||
1075 | 332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 }; | 1065 | 332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 }; |
1076 | 1066 | ||
1077 | #ifdef CONFIG_SPARC | 1067 | #ifdef CONFIG_SPARC |
1078 | static int sparc_l1_a_state = 0; | 1068 | static int sparc_l1_a_state; |
1079 | extern void sun_do_break(void); | 1069 | extern void sun_do_break(void); |
1080 | #endif | 1070 | #endif |
1081 | 1071 | ||
@@ -1085,52 +1075,54 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode, | |||
1085 | int code; | 1075 | int code; |
1086 | 1076 | ||
1087 | switch (keycode) { | 1077 | switch (keycode) { |
1088 | case KEY_PAUSE: | ||
1089 | put_queue(vc, 0xe1); | ||
1090 | put_queue(vc, 0x1d | up_flag); | ||
1091 | put_queue(vc, 0x45 | up_flag); | ||
1092 | break; | ||
1093 | 1078 | ||
1094 | case KEY_HANGEUL: | 1079 | case KEY_PAUSE: |
1095 | if (!up_flag) | 1080 | put_queue(vc, 0xe1); |
1096 | put_queue(vc, 0xf2); | 1081 | put_queue(vc, 0x1d | up_flag); |
1097 | break; | 1082 | put_queue(vc, 0x45 | up_flag); |
1083 | break; | ||
1098 | 1084 | ||
1099 | case KEY_HANJA: | 1085 | case KEY_HANGEUL: |
1100 | if (!up_flag) | 1086 | if (!up_flag) |
1101 | put_queue(vc, 0xf1); | 1087 | put_queue(vc, 0xf2); |
1102 | break; | 1088 | break; |
1103 | 1089 | ||
1104 | case KEY_SYSRQ: | 1090 | case KEY_HANJA: |
1105 | /* | 1091 | if (!up_flag) |
1106 | * Real AT keyboards (that's what we're trying | 1092 | put_queue(vc, 0xf1); |
1107 | * to emulate here emit 0xe0 0x2a 0xe0 0x37 when | 1093 | break; |
1108 | * pressing PrtSc/SysRq alone, but simply 0x54 | ||
1109 | * when pressing Alt+PrtSc/SysRq. | ||
1110 | */ | ||
1111 | if (sysrq_alt) { | ||
1112 | put_queue(vc, 0x54 | up_flag); | ||
1113 | } else { | ||
1114 | put_queue(vc, 0xe0); | ||
1115 | put_queue(vc, 0x2a | up_flag); | ||
1116 | put_queue(vc, 0xe0); | ||
1117 | put_queue(vc, 0x37 | up_flag); | ||
1118 | } | ||
1119 | break; | ||
1120 | 1094 | ||
1121 | default: | 1095 | case KEY_SYSRQ: |
1122 | if (keycode > 255) | 1096 | /* |
1123 | return -1; | 1097 | * Real AT keyboards (that's what we're trying |
1098 | * to emulate here emit 0xe0 0x2a 0xe0 0x37 when | ||
1099 | * pressing PrtSc/SysRq alone, but simply 0x54 | ||
1100 | * when pressing Alt+PrtSc/SysRq. | ||
1101 | */ | ||
1102 | if (test_bit(KEY_LEFTALT, key_down) || | ||
1103 | test_bit(KEY_RIGHTALT, key_down)) { | ||
1104 | put_queue(vc, 0x54 | up_flag); | ||
1105 | } else { | ||
1106 | put_queue(vc, 0xe0); | ||
1107 | put_queue(vc, 0x2a | up_flag); | ||
1108 | put_queue(vc, 0xe0); | ||
1109 | put_queue(vc, 0x37 | up_flag); | ||
1110 | } | ||
1111 | break; | ||
1124 | 1112 | ||
1125 | code = x86_keycodes[keycode]; | 1113 | default: |
1126 | if (!code) | 1114 | if (keycode > 255) |
1127 | return -1; | 1115 | return -1; |
1128 | 1116 | ||
1129 | if (code & 0x100) | 1117 | code = x86_keycodes[keycode]; |
1130 | put_queue(vc, 0xe0); | 1118 | if (!code) |
1131 | put_queue(vc, (code & 0x7f) | up_flag); | 1119 | return -1; |
1132 | 1120 | ||
1133 | break; | 1121 | if (code & 0x100) |
1122 | put_queue(vc, 0xe0); | ||
1123 | put_queue(vc, (code & 0x7f) | up_flag); | ||
1124 | |||
1125 | break; | ||
1134 | } | 1126 | } |
1135 | 1127 | ||
1136 | return 0; | 1128 | return 0; |
@@ -1153,6 +1145,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode, unsigned char u | |||
1153 | static void kbd_rawcode(unsigned char data) | 1145 | static void kbd_rawcode(unsigned char data) |
1154 | { | 1146 | { |
1155 | struct vc_data *vc = vc_cons[fg_console].d; | 1147 | struct vc_data *vc = vc_cons[fg_console].d; |
1148 | |||
1156 | kbd = kbd_table + vc->vc_num; | 1149 | kbd = kbd_table + vc->vc_num; |
1157 | if (kbd->kbdmode == VC_RAW) | 1150 | if (kbd->kbdmode == VC_RAW) |
1158 | put_queue(vc, data); | 1151 | put_queue(vc, data); |
@@ -1162,10 +1155,12 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) | |||
1162 | { | 1155 | { |
1163 | struct vc_data *vc = vc_cons[fg_console].d; | 1156 | struct vc_data *vc = vc_cons[fg_console].d; |
1164 | unsigned short keysym, *key_map; | 1157 | unsigned short keysym, *key_map; |
1165 | unsigned char type, raw_mode; | 1158 | unsigned char type; |
1159 | bool raw_mode; | ||
1166 | struct tty_struct *tty; | 1160 | struct tty_struct *tty; |
1167 | int shift_final; | 1161 | int shift_final; |
1168 | struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down }; | 1162 | struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down }; |
1163 | int rc; | ||
1169 | 1164 | ||
1170 | tty = vc->vc_tty; | 1165 | tty = vc->vc_tty; |
1171 | 1166 | ||
@@ -1176,8 +1171,6 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) | |||
1176 | 1171 | ||
1177 | kbd = kbd_table + vc->vc_num; | 1172 | kbd = kbd_table + vc->vc_num; |
1178 | 1173 | ||
1179 | if (keycode == KEY_LEFTALT || keycode == KEY_RIGHTALT) | ||
1180 | sysrq_alt = down ? keycode : 0; | ||
1181 | #ifdef CONFIG_SPARC | 1174 | #ifdef CONFIG_SPARC |
1182 | if (keycode == KEY_STOP) | 1175 | if (keycode == KEY_STOP) |
1183 | sparc_l1_a_state = down; | 1176 | sparc_l1_a_state = down; |
@@ -1185,29 +1178,16 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) | |||
1185 | 1178 | ||
1186 | rep = (down == 2); | 1179 | rep = (down == 2); |
1187 | 1180 | ||
1188 | if ((raw_mode = (kbd->kbdmode == VC_RAW)) && !hw_raw) | 1181 | raw_mode = (kbd->kbdmode == VC_RAW); |
1182 | if (raw_mode && !hw_raw) | ||
1189 | if (emulate_raw(vc, keycode, !down << 7)) | 1183 | if (emulate_raw(vc, keycode, !down << 7)) |
1190 | if (keycode < BTN_MISC && printk_ratelimit()) | 1184 | if (keycode < BTN_MISC && printk_ratelimit()) |
1191 | printk(KERN_WARNING "keyboard.c: can't emulate rawmode for keycode %d\n", keycode); | 1185 | pr_warning("can't emulate rawmode for keycode %d\n", |
1186 | keycode); | ||
1192 | 1187 | ||
1193 | #ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */ | ||
1194 | if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) { | ||
1195 | if (!sysrq_down) { | ||
1196 | sysrq_down = down; | ||
1197 | sysrq_alt_use = sysrq_alt; | ||
1198 | } | ||
1199 | return; | ||
1200 | } | ||
1201 | if (sysrq_down && !down && keycode == sysrq_alt_use) | ||
1202 | sysrq_down = 0; | ||
1203 | if (sysrq_down && down && !rep) { | ||
1204 | handle_sysrq(kbd_sysrq_xlate[keycode], tty); | ||
1205 | return; | ||
1206 | } | ||
1207 | #endif | ||
1208 | #ifdef CONFIG_SPARC | 1188 | #ifdef CONFIG_SPARC |
1209 | if (keycode == KEY_A && sparc_l1_a_state) { | 1189 | if (keycode == KEY_A && sparc_l1_a_state) { |
1210 | sparc_l1_a_state = 0; | 1190 | sparc_l1_a_state = false; |
1211 | sun_do_break(); | 1191 | sun_do_break(); |
1212 | } | 1192 | } |
1213 | #endif | 1193 | #endif |
@@ -1229,7 +1209,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) | |||
1229 | put_queue(vc, (keycode >> 7) | 0x80); | 1209 | put_queue(vc, (keycode >> 7) | 0x80); |
1230 | put_queue(vc, keycode | 0x80); | 1210 | put_queue(vc, keycode | 0x80); |
1231 | } | 1211 | } |
1232 | raw_mode = 1; | 1212 | raw_mode = true; |
1233 | } | 1213 | } |
1234 | 1214 | ||
1235 | if (down) | 1215 | if (down) |
@@ -1252,29 +1232,32 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) | |||
1252 | param.ledstate = kbd->ledflagstate; | 1232 | param.ledstate = kbd->ledflagstate; |
1253 | key_map = key_maps[shift_final]; | 1233 | key_map = key_maps[shift_final]; |
1254 | 1234 | ||
1255 | if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYCODE, ¶m) == NOTIFY_STOP || !key_map) { | 1235 | rc = atomic_notifier_call_chain(&keyboard_notifier_list, |
1256 | atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNBOUND_KEYCODE, ¶m); | 1236 | KBD_KEYCODE, ¶m); |
1237 | if (rc == NOTIFY_STOP || !key_map) { | ||
1238 | atomic_notifier_call_chain(&keyboard_notifier_list, | ||
1239 | KBD_UNBOUND_KEYCODE, ¶m); | ||
1257 | compute_shiftstate(); | 1240 | compute_shiftstate(); |
1258 | kbd->slockstate = 0; | 1241 | kbd->slockstate = 0; |
1259 | return; | 1242 | return; |
1260 | } | 1243 | } |
1261 | 1244 | ||
1262 | if (keycode >= NR_KEYS) | 1245 | if (keycode < NR_KEYS) |
1263 | if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8) | ||
1264 | keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1)); | ||
1265 | else | ||
1266 | return; | ||
1267 | else | ||
1268 | keysym = key_map[keycode]; | 1246 | keysym = key_map[keycode]; |
1247 | else if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8) | ||
1248 | keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1)); | ||
1249 | else | ||
1250 | return; | ||
1269 | 1251 | ||
1270 | type = KTYP(keysym); | 1252 | type = KTYP(keysym); |
1271 | 1253 | ||
1272 | if (type < 0xf0) { | 1254 | if (type < 0xf0) { |
1273 | param.value = keysym; | 1255 | param.value = keysym; |
1274 | if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNICODE, ¶m) == NOTIFY_STOP) | 1256 | rc = atomic_notifier_call_chain(&keyboard_notifier_list, |
1275 | return; | 1257 | KBD_UNICODE, ¶m); |
1276 | if (down && !raw_mode) | 1258 | if (rc != NOTIFY_STOP) |
1277 | to_utf8(vc, keysym); | 1259 | if (down && !raw_mode) |
1260 | to_utf8(vc, keysym); | ||
1278 | return; | 1261 | return; |
1279 | } | 1262 | } |
1280 | 1263 | ||
@@ -1288,9 +1271,11 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) | |||
1288 | keysym = key_map[keycode]; | 1271 | keysym = key_map[keycode]; |
1289 | } | 1272 | } |
1290 | } | 1273 | } |
1291 | param.value = keysym; | ||
1292 | 1274 | ||
1293 | if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYSYM, ¶m) == NOTIFY_STOP) | 1275 | param.value = keysym; |
1276 | rc = atomic_notifier_call_chain(&keyboard_notifier_list, | ||
1277 | KBD_KEYSYM, ¶m); | ||
1278 | if (rc == NOTIFY_STOP) | ||
1294 | return; | 1279 | return; |
1295 | 1280 | ||
1296 | if (raw_mode && type != KT_SPEC && type != KT_SHIFT) | 1281 | if (raw_mode && type != KT_SPEC && type != KT_SHIFT) |
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 47023053ee85..d2692d443f7b 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
@@ -1011,6 +1011,7 @@ static int mxser_open(struct tty_struct *tty, struct file *filp) | |||
1011 | if (!info->ioaddr) | 1011 | if (!info->ioaddr) |
1012 | return -ENODEV; | 1012 | return -ENODEV; |
1013 | 1013 | ||
1014 | tty->driver_data = info; | ||
1014 | return tty_port_open(&info->port, tty, filp); | 1015 | return tty_port_open(&info->port, tty, filp); |
1015 | } | 1016 | } |
1016 | 1017 | ||
@@ -1074,7 +1075,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp) | |||
1074 | struct mxser_port *info = tty->driver_data; | 1075 | struct mxser_port *info = tty->driver_data; |
1075 | struct tty_port *port = &info->port; | 1076 | struct tty_port *port = &info->port; |
1076 | 1077 | ||
1077 | if (tty->index == MXSER_PORTS) | 1078 | if (tty->index == MXSER_PORTS || info == NULL) |
1078 | return; | 1079 | return; |
1079 | if (tty_port_close_start(port, tty, filp) == 0) | 1080 | if (tty_port_close_start(port, tty, filp) == 0) |
1080 | return; | 1081 | return; |
diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c new file mode 100644 index 000000000000..c4161d5e053d --- /dev/null +++ b/drivers/char/n_gsm.c | |||
@@ -0,0 +1,2763 @@ | |||
1 | /* | ||
2 | * n_gsm.c GSM 0710 tty multiplexor | ||
3 | * Copyright (c) 2009/10 Intel Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | * | ||
18 | * * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE * | ||
19 | * | ||
20 | * TO DO: | ||
21 | * Mostly done: ioctls for setting modes/timing | ||
22 | * Partly done: hooks so you can pull off frames to non tty devs | ||
23 | * Restart DLCI 0 when it closes ? | ||
24 | * Test basic encoding | ||
25 | * Improve the tx engine | ||
26 | * Resolve tx side locking by adding a queue_head and routing | ||
27 | * all control traffic via it | ||
28 | * General tidy/document | ||
29 | * Review the locking/move to refcounts more (mux now moved to an | ||
30 | * alloc/free model ready) | ||
31 | * Use newest tty open/close port helpers and install hooks | ||
32 | * What to do about power functions ? | ||
33 | * Termios setting and negotiation | ||
34 | * Do we need a 'which mux are you' ioctl to correlate mux and tty sets | ||
35 | * | ||
36 | */ | ||
37 | |||
38 | #include <linux/types.h> | ||
39 | #include <linux/major.h> | ||
40 | #include <linux/errno.h> | ||
41 | #include <linux/signal.h> | ||
42 | #include <linux/fcntl.h> | ||
43 | #include <linux/sched.h> | ||
44 | #include <linux/interrupt.h> | ||
45 | #include <linux/tty.h> | ||
46 | #include <linux/timer.h> | ||
47 | #include <linux/ctype.h> | ||
48 | #include <linux/mm.h> | ||
49 | #include <linux/string.h> | ||
50 | #include <linux/slab.h> | ||
51 | #include <linux/poll.h> | ||
52 | #include <linux/bitops.h> | ||
53 | #include <linux/file.h> | ||
54 | #include <linux/uaccess.h> | ||
55 | #include <linux/module.h> | ||
56 | #include <linux/timer.h> | ||
57 | #include <linux/tty_flip.h> | ||
58 | #include <linux/tty_driver.h> | ||
59 | #include <linux/serial.h> | ||
60 | #include <linux/kfifo.h> | ||
61 | #include <linux/skbuff.h> | ||
62 | #include <linux/gsmmux.h> | ||
63 | |||
64 | static int debug; | ||
65 | module_param(debug, int, 0600); | ||
66 | |||
67 | #define T1 (HZ/10) | ||
68 | #define T2 (HZ/3) | ||
69 | #define N2 3 | ||
70 | |||
71 | /* Use long timers for testing at low speed with debug on */ | ||
72 | #ifdef DEBUG_TIMING | ||
73 | #define T1 HZ | ||
74 | #define T2 (2 * HZ) | ||
75 | #endif | ||
76 | |||
77 | /* Semi-arbitary buffer size limits. 0710 is normally run with 32-64 byte | ||
78 | limits so this is plenty */ | ||
79 | #define MAX_MRU 512 | ||
80 | #define MAX_MTU 512 | ||
81 | |||
82 | /* | ||
83 | * Each block of data we have queued to go out is in the form of | ||
84 | * a gsm_msg which holds everything we need in a link layer independant | ||
85 | * format | ||
86 | */ | ||
87 | |||
88 | struct gsm_msg { | ||
89 | struct gsm_msg *next; | ||
90 | u8 addr; /* DLCI address + flags */ | ||
91 | u8 ctrl; /* Control byte + flags */ | ||
92 | unsigned int len; /* Length of data block (can be zero) */ | ||
93 | unsigned char *data; /* Points into buffer but not at the start */ | ||
94 | unsigned char buffer[0]; | ||
95 | }; | ||
96 | |||
97 | /* | ||
98 | * Each active data link has a gsm_dlci structure associated which ties | ||
99 | * the link layer to an optional tty (if the tty side is open). To avoid | ||
100 | * complexity right now these are only ever freed up when the mux is | ||
101 | * shut down. | ||
102 | * | ||
103 | * At the moment we don't free DLCI objects until the mux is torn down | ||
104 | * this avoid object life time issues but might be worth review later. | ||
105 | */ | ||
106 | |||
107 | struct gsm_dlci { | ||
108 | struct gsm_mux *gsm; | ||
109 | int addr; | ||
110 | int state; | ||
111 | #define DLCI_CLOSED 0 | ||
112 | #define DLCI_OPENING 1 /* Sending SABM not seen UA */ | ||
113 | #define DLCI_OPEN 2 /* SABM/UA complete */ | ||
114 | #define DLCI_CLOSING 3 /* Sending DISC not seen UA/DM */ | ||
115 | |||
116 | /* Link layer */ | ||
117 | spinlock_t lock; /* Protects the internal state */ | ||
118 | struct timer_list t1; /* Retransmit timer for SABM and UA */ | ||
119 | int retries; | ||
120 | /* Uplink tty if active */ | ||
121 | struct tty_port port; /* The tty bound to this DLCI if there is one */ | ||
122 | struct kfifo *fifo; /* Queue fifo for the DLCI */ | ||
123 | struct kfifo _fifo; /* For new fifo API porting only */ | ||
124 | int adaption; /* Adaption layer in use */ | ||
125 | u32 modem_rx; /* Our incoming virtual modem lines */ | ||
126 | u32 modem_tx; /* Our outgoing modem lines */ | ||
127 | int dead; /* Refuse re-open */ | ||
128 | /* Flow control */ | ||
129 | int throttled; /* Private copy of throttle state */ | ||
130 | int constipated; /* Throttle status for outgoing */ | ||
131 | /* Packetised I/O */ | ||
132 | struct sk_buff *skb; /* Frame being sent */ | ||
133 | struct sk_buff_head skb_list; /* Queued frames */ | ||
134 | /* Data handling callback */ | ||
135 | void (*data)(struct gsm_dlci *dlci, u8 *data, int len); | ||
136 | }; | ||
137 | |||
138 | /* DLCI 0, 62/63 are special or reseved see gsmtty_open */ | ||
139 | |||
140 | #define NUM_DLCI 64 | ||
141 | |||
142 | /* | ||
143 | * DLCI 0 is used to pass control blocks out of band of the data | ||
144 | * flow (and with a higher link priority). One command can be outstanding | ||
145 | * at a time and we use this structure to manage them. They are created | ||
146 | * and destroyed by the user context, and updated by the receive paths | ||
147 | * and timers | ||
148 | */ | ||
149 | |||
150 | struct gsm_control { | ||
151 | u8 cmd; /* Command we are issuing */ | ||
152 | u8 *data; /* Data for the command in case we retransmit */ | ||
153 | int len; /* Length of block for retransmission */ | ||
154 | int done; /* Done flag */ | ||
155 | int error; /* Error if any */ | ||
156 | }; | ||
157 | |||
158 | /* | ||
159 | * Each GSM mux we have is represented by this structure. If we are | ||
160 | * operating as an ldisc then we use this structure as our ldisc | ||
161 | * state. We need to sort out lifetimes and locking with respect | ||
162 | * to the gsm mux array. For now we don't free DLCI objects that | ||
163 | * have been instantiated until the mux itself is terminated. | ||
164 | * | ||
165 | * To consider further: tty open versus mux shutdown. | ||
166 | */ | ||
167 | |||
168 | struct gsm_mux { | ||
169 | struct tty_struct *tty; /* The tty our ldisc is bound to */ | ||
170 | spinlock_t lock; | ||
171 | |||
172 | /* Events on the GSM channel */ | ||
173 | wait_queue_head_t event; | ||
174 | |||
175 | /* Bits for GSM mode decoding */ | ||
176 | |||
177 | /* Framing Layer */ | ||
178 | unsigned char *buf; | ||
179 | int state; | ||
180 | #define GSM_SEARCH 0 | ||
181 | #define GSM_START 1 | ||
182 | #define GSM_ADDRESS 2 | ||
183 | #define GSM_CONTROL 3 | ||
184 | #define GSM_LEN 4 | ||
185 | #define GSM_DATA 5 | ||
186 | #define GSM_FCS 6 | ||
187 | #define GSM_OVERRUN 7 | ||
188 | unsigned int len; | ||
189 | unsigned int address; | ||
190 | unsigned int count; | ||
191 | int escape; | ||
192 | int encoding; | ||
193 | u8 control; | ||
194 | u8 fcs; | ||
195 | u8 *txframe; /* TX framing buffer */ | ||
196 | |||
197 | /* Methods for the receiver side */ | ||
198 | void (*receive)(struct gsm_mux *gsm, u8 ch); | ||
199 | void (*error)(struct gsm_mux *gsm, u8 ch, u8 flag); | ||
200 | /* And transmit side */ | ||
201 | int (*output)(struct gsm_mux *mux, u8 *data, int len); | ||
202 | |||
203 | /* Link Layer */ | ||
204 | unsigned int mru; | ||
205 | unsigned int mtu; | ||
206 | int initiator; /* Did we initiate connection */ | ||
207 | int dead; /* Has the mux been shut down */ | ||
208 | struct gsm_dlci *dlci[NUM_DLCI]; | ||
209 | int constipated; /* Asked by remote to shut up */ | ||
210 | |||
211 | spinlock_t tx_lock; | ||
212 | unsigned int tx_bytes; /* TX data outstanding */ | ||
213 | #define TX_THRESH_HI 8192 | ||
214 | #define TX_THRESH_LO 2048 | ||
215 | struct gsm_msg *tx_head; /* Pending data packets */ | ||
216 | struct gsm_msg *tx_tail; | ||
217 | |||
218 | /* Control messages */ | ||
219 | struct timer_list t2_timer; /* Retransmit timer for commands */ | ||
220 | int cretries; /* Command retry counter */ | ||
221 | struct gsm_control *pending_cmd;/* Our current pending command */ | ||
222 | spinlock_t control_lock; /* Protects the pending command */ | ||
223 | |||
224 | /* Configuration */ | ||
225 | int adaption; /* 1 or 2 supported */ | ||
226 | u8 ftype; /* UI or UIH */ | ||
227 | int t1, t2; /* Timers in 1/100th of a sec */ | ||
228 | int n2; /* Retry count */ | ||
229 | |||
230 | /* Statistics (not currently exposed) */ | ||
231 | unsigned long bad_fcs; | ||
232 | unsigned long malformed; | ||
233 | unsigned long io_error; | ||
234 | unsigned long bad_size; | ||
235 | unsigned long unsupported; | ||
236 | }; | ||
237 | |||
238 | |||
239 | /* | ||
240 | * Mux objects - needed so that we can translate a tty index into the | ||
241 | * relevant mux and DLCI. | ||
242 | */ | ||
243 | |||
244 | #define MAX_MUX 4 /* 256 minors */ | ||
245 | static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */ | ||
246 | static spinlock_t gsm_mux_lock; | ||
247 | |||
248 | /* | ||
249 | * This section of the driver logic implements the GSM encodings | ||
250 | * both the basic and the 'advanced'. Reliable transport is not | ||
251 | * supported. | ||
252 | */ | ||
253 | |||
254 | #define CR 0x02 | ||
255 | #define EA 0x01 | ||
256 | #define PF 0x10 | ||
257 | |||
258 | /* I is special: the rest are ..*/ | ||
259 | #define RR 0x01 | ||
260 | #define UI 0x03 | ||
261 | #define RNR 0x05 | ||
262 | #define REJ 0x09 | ||
263 | #define DM 0x0F | ||
264 | #define SABM 0x2F | ||
265 | #define DISC 0x43 | ||
266 | #define UA 0x63 | ||
267 | #define UIH 0xEF | ||
268 | |||
269 | /* Channel commands */ | ||
270 | #define CMD_NSC 0x09 | ||
271 | #define CMD_TEST 0x11 | ||
272 | #define CMD_PSC 0x21 | ||
273 | #define CMD_RLS 0x29 | ||
274 | #define CMD_FCOFF 0x31 | ||
275 | #define CMD_PN 0x41 | ||
276 | #define CMD_RPN 0x49 | ||
277 | #define CMD_FCON 0x51 | ||
278 | #define CMD_CLD 0x61 | ||
279 | #define CMD_SNC 0x69 | ||
280 | #define CMD_MSC 0x71 | ||
281 | |||
282 | /* Virtual modem bits */ | ||
283 | #define MDM_FC 0x01 | ||
284 | #define MDM_RTC 0x02 | ||
285 | #define MDM_RTR 0x04 | ||
286 | #define MDM_IC 0x20 | ||
287 | #define MDM_DV 0x40 | ||
288 | |||
289 | #define GSM0_SOF 0xF9 | ||
290 | #define GSM1_SOF 0x7E | ||
291 | #define GSM1_ESCAPE 0x7D | ||
292 | #define GSM1_ESCAPE_BITS 0x20 | ||
293 | #define XON 0x11 | ||
294 | #define XOFF 0x13 | ||
295 | |||
296 | static const struct tty_port_operations gsm_port_ops; | ||
297 | |||
298 | /* | ||
299 | * CRC table for GSM 0710 | ||
300 | */ | ||
301 | |||
302 | static const u8 gsm_fcs8[256] = { | ||
303 | 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75, | ||
304 | 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B, | ||
305 | 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69, | ||
306 | 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67, | ||
307 | 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D, | ||
308 | 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43, | ||
309 | 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51, | ||
310 | 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F, | ||
311 | 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05, | ||
312 | 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B, | ||
313 | 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19, | ||
314 | 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17, | ||
315 | 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D, | ||
316 | 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33, | ||
317 | 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21, | ||
318 | 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F, | ||
319 | 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95, | ||
320 | 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B, | ||
321 | 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89, | ||
322 | 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87, | ||
323 | 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD, | ||
324 | 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3, | ||
325 | 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1, | ||
326 | 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF, | ||
327 | 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5, | ||
328 | 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB, | ||
329 | 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9, | ||
330 | 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7, | ||
331 | 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD, | ||
332 | 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3, | ||
333 | 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1, | ||
334 | 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF | ||
335 | }; | ||
336 | |||
337 | #define INIT_FCS 0xFF | ||
338 | #define GOOD_FCS 0xCF | ||
339 | |||
340 | /** | ||
341 | * gsm_fcs_add - update FCS | ||
342 | * @fcs: Current FCS | ||
343 | * @c: Next data | ||
344 | * | ||
345 | * Update the FCS to include c. Uses the algorithm in the specification | ||
346 | * notes. | ||
347 | */ | ||
348 | |||
349 | static inline u8 gsm_fcs_add(u8 fcs, u8 c) | ||
350 | { | ||
351 | return gsm_fcs8[fcs ^ c]; | ||
352 | } | ||
353 | |||
354 | /** | ||
355 | * gsm_fcs_add_block - update FCS for a block | ||
356 | * @fcs: Current FCS | ||
357 | * @c: buffer of data | ||
358 | * @len: length of buffer | ||
359 | * | ||
360 | * Update the FCS to include c. Uses the algorithm in the specification | ||
361 | * notes. | ||
362 | */ | ||
363 | |||
364 | static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len) | ||
365 | { | ||
366 | while (len--) | ||
367 | fcs = gsm_fcs8[fcs ^ *c++]; | ||
368 | return fcs; | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * gsm_read_ea - read a byte into an EA | ||
373 | * @val: variable holding value | ||
374 | * c: byte going into the EA | ||
375 | * | ||
376 | * Processes one byte of an EA. Updates the passed variable | ||
377 | * and returns 1 if the EA is now completely read | ||
378 | */ | ||
379 | |||
380 | static int gsm_read_ea(unsigned int *val, u8 c) | ||
381 | { | ||
382 | /* Add the next 7 bits into the value */ | ||
383 | *val <<= 7; | ||
384 | *val |= c >> 1; | ||
385 | /* Was this the last byte of the EA 1 = yes*/ | ||
386 | return c & EA; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * gsm_encode_modem - encode modem data bits | ||
391 | * @dlci: DLCI to encode from | ||
392 | * | ||
393 | * Returns the correct GSM encoded modem status bits (6 bit field) for | ||
394 | * the current status of the DLCI and attached tty object | ||
395 | */ | ||
396 | |||
397 | static u8 gsm_encode_modem(const struct gsm_dlci *dlci) | ||
398 | { | ||
399 | u8 modembits = 0; | ||
400 | /* FC is true flow control not modem bits */ | ||
401 | if (dlci->throttled) | ||
402 | modembits |= MDM_FC; | ||
403 | if (dlci->modem_tx & TIOCM_DTR) | ||
404 | modembits |= MDM_RTC; | ||
405 | if (dlci->modem_tx & TIOCM_RTS) | ||
406 | modembits |= MDM_RTR; | ||
407 | if (dlci->modem_tx & TIOCM_RI) | ||
408 | modembits |= MDM_IC; | ||
409 | if (dlci->modem_tx & TIOCM_CD) | ||
410 | modembits |= MDM_DV; | ||
411 | return modembits; | ||
412 | } | ||
413 | |||
414 | /** | ||
415 | * gsm_print_packet - display a frame for debug | ||
416 | * @hdr: header to print before decode | ||
417 | * @addr: address EA from the frame | ||
418 | * @cr: C/R bit from the frame | ||
419 | * @control: control including PF bit | ||
420 | * @data: following data bytes | ||
421 | * @dlen: length of data | ||
422 | * | ||
423 | * Displays a packet in human readable format for debugging purposes. The | ||
424 | * style is based on amateur radio LAP-B dump display. | ||
425 | */ | ||
426 | |||
427 | static void gsm_print_packet(const char *hdr, int addr, int cr, | ||
428 | u8 control, const u8 *data, int dlen) | ||
429 | { | ||
430 | if (!(debug & 1)) | ||
431 | return; | ||
432 | |||
433 | printk(KERN_INFO "%s %d) %c: ", hdr, addr, "RC"[cr]); | ||
434 | |||
435 | switch (control & ~PF) { | ||
436 | case SABM: | ||
437 | printk(KERN_CONT "SABM"); | ||
438 | break; | ||
439 | case UA: | ||
440 | printk(KERN_CONT "UA"); | ||
441 | break; | ||
442 | case DISC: | ||
443 | printk(KERN_CONT "DISC"); | ||
444 | break; | ||
445 | case DM: | ||
446 | printk(KERN_CONT "DM"); | ||
447 | break; | ||
448 | case UI: | ||
449 | printk(KERN_CONT "UI"); | ||
450 | break; | ||
451 | case UIH: | ||
452 | printk(KERN_CONT "UIH"); | ||
453 | break; | ||
454 | default: | ||
455 | if (!(control & 0x01)) { | ||
456 | printk(KERN_CONT "I N(S)%d N(R)%d", | ||
457 | (control & 0x0E) >> 1, (control & 0xE)>> 5); | ||
458 | } else switch (control & 0x0F) { | ||
459 | case RR: | ||
460 | printk("RR(%d)", (control & 0xE0) >> 5); | ||
461 | break; | ||
462 | case RNR: | ||
463 | printk("RNR(%d)", (control & 0xE0) >> 5); | ||
464 | break; | ||
465 | case REJ: | ||
466 | printk("REJ(%d)", (control & 0xE0) >> 5); | ||
467 | break; | ||
468 | default: | ||
469 | printk(KERN_CONT "[%02X]", control); | ||
470 | } | ||
471 | } | ||
472 | |||
473 | if (control & PF) | ||
474 | printk(KERN_CONT "(P)"); | ||
475 | else | ||
476 | printk(KERN_CONT "(F)"); | ||
477 | |||
478 | if (dlen) { | ||
479 | int ct = 0; | ||
480 | while (dlen--) { | ||
481 | if (ct % 8 == 0) | ||
482 | printk(KERN_CONT "\n "); | ||
483 | printk(KERN_CONT "%02X ", *data++); | ||
484 | ct++; | ||
485 | } | ||
486 | } | ||
487 | printk(KERN_CONT "\n"); | ||
488 | } | ||
489 | |||
490 | |||
491 | /* | ||
492 | * Link level transmission side | ||
493 | */ | ||
494 | |||
495 | /** | ||
496 | * gsm_stuff_packet - bytestuff a packet | ||
497 | * @ibuf: input | ||
498 | * @obuf: output | ||
499 | * @len: length of input | ||
500 | * | ||
501 | * Expand a buffer by bytestuffing it. The worst case size change | ||
502 | * is doubling and the caller is responsible for handing out | ||
503 | * suitable sized buffers. | ||
504 | */ | ||
505 | |||
506 | static int gsm_stuff_frame(const u8 *input, u8 *output, int len) | ||
507 | { | ||
508 | int olen = 0; | ||
509 | while (len--) { | ||
510 | if (*input == GSM1_SOF || *input == GSM1_ESCAPE | ||
511 | || *input == XON || *input == XOFF) { | ||
512 | *output++ = GSM1_ESCAPE; | ||
513 | *output++ = *input++ ^ GSM1_ESCAPE_BITS; | ||
514 | olen++; | ||
515 | } else | ||
516 | *output++ = *input++; | ||
517 | olen++; | ||
518 | } | ||
519 | return olen; | ||
520 | } | ||
521 | |||
522 | static void hex_packet(const unsigned char *p, int len) | ||
523 | { | ||
524 | int i; | ||
525 | for (i = 0; i < len; i++) { | ||
526 | if (i && (i % 16) == 0) | ||
527 | printk("\n"); | ||
528 | printk("%02X ", *p++); | ||
529 | } | ||
530 | printk("\n"); | ||
531 | } | ||
532 | |||
533 | /** | ||
534 | * gsm_send - send a control frame | ||
535 | * @gsm: our GSM mux | ||
536 | * @addr: address for control frame | ||
537 | * @cr: command/response bit | ||
538 | * @control: control byte including PF bit | ||
539 | * | ||
540 | * Format up and transmit a control frame. These do not go via the | ||
541 | * queueing logic as they should be transmitted ahead of data when | ||
542 | * they are needed. | ||
543 | * | ||
544 | * FIXME: Lock versus data TX path | ||
545 | */ | ||
546 | |||
547 | static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) | ||
548 | { | ||
549 | int len; | ||
550 | u8 cbuf[10]; | ||
551 | u8 ibuf[3]; | ||
552 | |||
553 | switch (gsm->encoding) { | ||
554 | case 0: | ||
555 | cbuf[0] = GSM0_SOF; | ||
556 | cbuf[1] = (addr << 2) | (cr << 1) | EA; | ||
557 | cbuf[2] = control; | ||
558 | cbuf[3] = EA; /* Length of data = 0 */ | ||
559 | cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3); | ||
560 | cbuf[5] = GSM0_SOF; | ||
561 | len = 6; | ||
562 | break; | ||
563 | case 1: | ||
564 | case 2: | ||
565 | /* Control frame + packing (but not frame stuffing) in mode 1 */ | ||
566 | ibuf[0] = (addr << 2) | (cr << 1) | EA; | ||
567 | ibuf[1] = control; | ||
568 | ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2); | ||
569 | /* Stuffing may double the size worst case */ | ||
570 | len = gsm_stuff_frame(ibuf, cbuf + 1, 3); | ||
571 | /* Now add the SOF markers */ | ||
572 | cbuf[0] = GSM1_SOF; | ||
573 | cbuf[len + 1] = GSM1_SOF; | ||
574 | /* FIXME: we can omit the lead one in many cases */ | ||
575 | len += 2; | ||
576 | break; | ||
577 | default: | ||
578 | WARN_ON(1); | ||
579 | return; | ||
580 | } | ||
581 | gsm->output(gsm, cbuf, len); | ||
582 | gsm_print_packet("-->", addr, cr, control, NULL, 0); | ||
583 | } | ||
584 | |||
585 | /** | ||
586 | * gsm_response - send a control response | ||
587 | * @gsm: our GSM mux | ||
588 | * @addr: address for control frame | ||
589 | * @control: control byte including PF bit | ||
590 | * | ||
591 | * Format up and transmit a link level response frame. | ||
592 | */ | ||
593 | |||
594 | static inline void gsm_response(struct gsm_mux *gsm, int addr, int control) | ||
595 | { | ||
596 | gsm_send(gsm, addr, 0, control); | ||
597 | } | ||
598 | |||
599 | /** | ||
600 | * gsm_command - send a control command | ||
601 | * @gsm: our GSM mux | ||
602 | * @addr: address for control frame | ||
603 | * @control: control byte including PF bit | ||
604 | * | ||
605 | * Format up and transmit a link level command frame. | ||
606 | */ | ||
607 | |||
608 | static inline void gsm_command(struct gsm_mux *gsm, int addr, int control) | ||
609 | { | ||
610 | gsm_send(gsm, addr, 1, control); | ||
611 | } | ||
612 | |||
613 | /* Data transmission */ | ||
614 | |||
615 | #define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */ | ||
616 | |||
617 | /** | ||
618 | * gsm_data_alloc - allocate data frame | ||
619 | * @gsm: GSM mux | ||
620 | * @addr: DLCI address | ||
621 | * @len: length excluding header and FCS | ||
622 | * @ctrl: control byte | ||
623 | * | ||
624 | * Allocate a new data buffer for sending frames with data. Space is left | ||
625 | * at the front for header bytes but that is treated as an implementation | ||
626 | * detail and not for the high level code to use | ||
627 | */ | ||
628 | |||
629 | static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, | ||
630 | u8 ctrl) | ||
631 | { | ||
632 | struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN, | ||
633 | GFP_ATOMIC); | ||
634 | if (m == NULL) | ||
635 | return NULL; | ||
636 | m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */ | ||
637 | m->len = len; | ||
638 | m->addr = addr; | ||
639 | m->ctrl = ctrl; | ||
640 | m->next = NULL; | ||
641 | return m; | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * gsm_data_kick - poke the queue | ||
646 | * @gsm: GSM Mux | ||
647 | * | ||
648 | * The tty device has called us to indicate that room has appeared in | ||
649 | * the transmit queue. Ram more data into the pipe if we have any | ||
650 | * | ||
651 | * FIXME: lock against link layer control transmissions | ||
652 | */ | ||
653 | |||
654 | static void gsm_data_kick(struct gsm_mux *gsm) | ||
655 | { | ||
656 | struct gsm_msg *msg = gsm->tx_head; | ||
657 | int len; | ||
658 | int skip_sof = 0; | ||
659 | |||
660 | /* FIXME: We need to apply this solely to data messages */ | ||
661 | if (gsm->constipated) | ||
662 | return; | ||
663 | |||
664 | while (gsm->tx_head != NULL) { | ||
665 | msg = gsm->tx_head; | ||
666 | if (gsm->encoding != 0) { | ||
667 | gsm->txframe[0] = GSM1_SOF; | ||
668 | len = gsm_stuff_frame(msg->data, | ||
669 | gsm->txframe + 1, msg->len); | ||
670 | gsm->txframe[len + 1] = GSM1_SOF; | ||
671 | len += 2; | ||
672 | } else { | ||
673 | gsm->txframe[0] = GSM0_SOF; | ||
674 | memcpy(gsm->txframe + 1 , msg->data, msg->len); | ||
675 | gsm->txframe[msg->len + 1] = GSM0_SOF; | ||
676 | len = msg->len + 2; | ||
677 | } | ||
678 | |||
679 | if (debug & 4) { | ||
680 | printk("gsm_data_kick: \n"); | ||
681 | hex_packet(gsm->txframe, len); | ||
682 | } | ||
683 | |||
684 | if (gsm->output(gsm, gsm->txframe + skip_sof, | ||
685 | len - skip_sof) < 0) | ||
686 | break; | ||
687 | /* FIXME: Can eliminate one SOF in many more cases */ | ||
688 | gsm->tx_head = msg->next; | ||
689 | if (gsm->tx_head == NULL) | ||
690 | gsm->tx_tail = NULL; | ||
691 | gsm->tx_bytes -= msg->len; | ||
692 | kfree(msg); | ||
693 | /* For a burst of frames skip the extra SOF within the | ||
694 | burst */ | ||
695 | skip_sof = 1; | ||
696 | } | ||
697 | } | ||
698 | |||
699 | /** | ||
700 | * __gsm_data_queue - queue a UI or UIH frame | ||
701 | * @dlci: DLCI sending the data | ||
702 | * @msg: message queued | ||
703 | * | ||
704 | * Add data to the transmit queue and try and get stuff moving | ||
705 | * out of the mux tty if not already doing so. The Caller must hold | ||
706 | * the gsm tx lock. | ||
707 | */ | ||
708 | |||
709 | static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) | ||
710 | { | ||
711 | struct gsm_mux *gsm = dlci->gsm; | ||
712 | u8 *dp = msg->data; | ||
713 | u8 *fcs = dp + msg->len; | ||
714 | |||
715 | /* Fill in the header */ | ||
716 | if (gsm->encoding == 0) { | ||
717 | if (msg->len < 128) | ||
718 | *--dp = (msg->len << 1) | EA; | ||
719 | else { | ||
720 | *--dp = (msg->len >> 6) | EA; | ||
721 | *--dp = (msg->len & 127) << 1; | ||
722 | } | ||
723 | } | ||
724 | |||
725 | *--dp = msg->ctrl; | ||
726 | if (gsm->initiator) | ||
727 | *--dp = (msg->addr << 2) | 2 | EA; | ||
728 | else | ||
729 | *--dp = (msg->addr << 2) | EA; | ||
730 | *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp); | ||
731 | /* Ugly protocol layering violation */ | ||
732 | if (msg->ctrl == UI || msg->ctrl == (UI|PF)) | ||
733 | *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len); | ||
734 | *fcs = 0xFF - *fcs; | ||
735 | |||
736 | gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl, | ||
737 | msg->data, msg->len); | ||
738 | |||
739 | /* Move the header back and adjust the length, also allow for the FCS | ||
740 | now tacked on the end */ | ||
741 | msg->len += (msg->data - dp) + 1; | ||
742 | msg->data = dp; | ||
743 | |||
744 | /* Add to the actual output queue */ | ||
745 | if (gsm->tx_tail) | ||
746 | gsm->tx_tail->next = msg; | ||
747 | else | ||
748 | gsm->tx_head = msg; | ||
749 | gsm->tx_tail = msg; | ||
750 | gsm->tx_bytes += msg->len; | ||
751 | gsm_data_kick(gsm); | ||
752 | } | ||
753 | |||
754 | /** | ||
755 | * gsm_data_queue - queue a UI or UIH frame | ||
756 | * @dlci: DLCI sending the data | ||
757 | * @msg: message queued | ||
758 | * | ||
759 | * Add data to the transmit queue and try and get stuff moving | ||
760 | * out of the mux tty if not already doing so. Take the | ||
761 | * the gsm tx lock and dlci lock. | ||
762 | */ | ||
763 | |||
764 | static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) | ||
765 | { | ||
766 | unsigned long flags; | ||
767 | spin_lock_irqsave(&dlci->gsm->tx_lock, flags); | ||
768 | __gsm_data_queue(dlci, msg); | ||
769 | spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); | ||
770 | } | ||
771 | |||
772 | /** | ||
773 | * gsm_dlci_data_output - try and push data out of a DLCI | ||
774 | * @gsm: mux | ||
775 | * @dlci: the DLCI to pull data from | ||
776 | * | ||
777 | * Pull data from a DLCI and send it into the transmit queue if there | ||
778 | * is data. Keep to the MRU of the mux. This path handles the usual tty | ||
779 | * interface which is a byte stream with optional modem data. | ||
780 | * | ||
781 | * Caller must hold the tx_lock of the mux. | ||
782 | */ | ||
783 | |||
784 | static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) | ||
785 | { | ||
786 | struct gsm_msg *msg; | ||
787 | u8 *dp; | ||
788 | int len, size; | ||
789 | int h = dlci->adaption - 1; | ||
790 | |||
791 | len = kfifo_len(dlci->fifo); | ||
792 | if (len == 0) | ||
793 | return 0; | ||
794 | |||
795 | /* MTU/MRU count only the data bits */ | ||
796 | if (len > gsm->mtu) | ||
797 | len = gsm->mtu; | ||
798 | |||
799 | size = len + h; | ||
800 | |||
801 | msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); | ||
802 | /* FIXME: need a timer or something to kick this so it can't | ||
803 | get stuck with no work outstanding and no buffer free */ | ||
804 | if (msg == NULL) | ||
805 | return -ENOMEM; | ||
806 | dp = msg->data; | ||
807 | switch (dlci->adaption) { | ||
808 | case 1: /* Unstructured */ | ||
809 | break; | ||
810 | case 2: /* Unstructed with modem bits. Always one byte as we never | ||
811 | send inline break data */ | ||
812 | *dp += gsm_encode_modem(dlci); | ||
813 | len--; | ||
814 | break; | ||
815 | } | ||
816 | WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len); | ||
817 | __gsm_data_queue(dlci, msg); | ||
818 | /* Bytes of data we used up */ | ||
819 | return size; | ||
820 | } | ||
821 | |||
822 | /** | ||
823 | * gsm_dlci_data_output_framed - try and push data out of a DLCI | ||
824 | * @gsm: mux | ||
825 | * @dlci: the DLCI to pull data from | ||
826 | * | ||
827 | * Pull data from a DLCI and send it into the transmit queue if there | ||
828 | * is data. Keep to the MRU of the mux. This path handles framed data | ||
829 | * queued as skbuffs to the DLCI. | ||
830 | * | ||
831 | * Caller must hold the tx_lock of the mux. | ||
832 | */ | ||
833 | |||
834 | static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, | ||
835 | struct gsm_dlci *dlci) | ||
836 | { | ||
837 | struct gsm_msg *msg; | ||
838 | u8 *dp; | ||
839 | int len, size; | ||
840 | int last = 0, first = 0; | ||
841 | int overhead = 0; | ||
842 | |||
843 | /* One byte per frame is used for B/F flags */ | ||
844 | if (dlci->adaption == 4) | ||
845 | overhead = 1; | ||
846 | |||
847 | /* dlci->skb is locked by tx_lock */ | ||
848 | if (dlci->skb == NULL) { | ||
849 | dlci->skb = skb_dequeue(&dlci->skb_list); | ||
850 | if (dlci->skb == NULL) | ||
851 | return 0; | ||
852 | first = 1; | ||
853 | } | ||
854 | len = dlci->skb->len + overhead; | ||
855 | |||
856 | /* MTU/MRU count only the data bits */ | ||
857 | if (len > gsm->mtu) { | ||
858 | if (dlci->adaption == 3) { | ||
859 | /* Over long frame, bin it */ | ||
860 | kfree_skb(dlci->skb); | ||
861 | dlci->skb = NULL; | ||
862 | return 0; | ||
863 | } | ||
864 | len = gsm->mtu; | ||
865 | } else | ||
866 | last = 1; | ||
867 | |||
868 | size = len + overhead; | ||
869 | msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); | ||
870 | |||
871 | /* FIXME: need a timer or something to kick this so it can't | ||
872 | get stuck with no work outstanding and no buffer free */ | ||
873 | if (msg == NULL) | ||
874 | return -ENOMEM; | ||
875 | dp = msg->data; | ||
876 | |||
877 | if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */ | ||
878 | /* Flag byte to carry the start/end info */ | ||
879 | *dp++ = last << 7 | first << 6 | 1; /* EA */ | ||
880 | len--; | ||
881 | } | ||
882 | memcpy(dp, skb_pull(dlci->skb, len), len); | ||
883 | __gsm_data_queue(dlci, msg); | ||
884 | if (last) | ||
885 | dlci->skb = NULL; | ||
886 | return size; | ||
887 | } | ||
888 | |||
889 | /** | ||
890 | * gsm_dlci_data_sweep - look for data to send | ||
891 | * @gsm: the GSM mux | ||
892 | * | ||
893 | * Sweep the GSM mux channels in priority order looking for ones with | ||
894 | * data to send. We could do with optimising this scan a bit. We aim | ||
895 | * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit | ||
896 | * TX_THRESH_LO we get called again | ||
897 | * | ||
898 | * FIXME: We should round robin between groups and in theory you can | ||
899 | * renegotiate DLCI priorities with optional stuff. Needs optimising. | ||
900 | */ | ||
901 | |||
902 | static void gsm_dlci_data_sweep(struct gsm_mux *gsm) | ||
903 | { | ||
904 | int len; | ||
905 | /* Priority ordering: We should do priority with RR of the groups */ | ||
906 | int i = 1; | ||
907 | unsigned long flags; | ||
908 | |||
909 | spin_lock_irqsave(&gsm->tx_lock, flags); | ||
910 | while (i < NUM_DLCI) { | ||
911 | struct gsm_dlci *dlci; | ||
912 | |||
913 | if (gsm->tx_bytes > TX_THRESH_HI) | ||
914 | break; | ||
915 | dlci = gsm->dlci[i]; | ||
916 | if (dlci == NULL || dlci->constipated) { | ||
917 | i++; | ||
918 | continue; | ||
919 | } | ||
920 | if (dlci->adaption < 3) | ||
921 | len = gsm_dlci_data_output(gsm, dlci); | ||
922 | else | ||
923 | len = gsm_dlci_data_output_framed(gsm, dlci); | ||
924 | if (len < 0) | ||
925 | return; | ||
926 | /* DLCI empty - try the next */ | ||
927 | if (len == 0) | ||
928 | i++; | ||
929 | } | ||
930 | spin_unlock_irqrestore(&gsm->tx_lock, flags); | ||
931 | } | ||
932 | |||
933 | /** | ||
934 | * gsm_dlci_data_kick - transmit if possible | ||
935 | * @dlci: DLCI to kick | ||
936 | * | ||
937 | * Transmit data from this DLCI if the queue is empty. We can't rely on | ||
938 | * a tty wakeup except when we filled the pipe so we need to fire off | ||
939 | * new data ourselves in other cases. | ||
940 | */ | ||
941 | |||
942 | static void gsm_dlci_data_kick(struct gsm_dlci *dlci) | ||
943 | { | ||
944 | unsigned long flags; | ||
945 | |||
946 | spin_lock_irqsave(&dlci->gsm->tx_lock, flags); | ||
947 | /* If we have nothing running then we need to fire up */ | ||
948 | if (dlci->gsm->tx_bytes == 0) | ||
949 | gsm_dlci_data_output(dlci->gsm, dlci); | ||
950 | else if (dlci->gsm->tx_bytes < TX_THRESH_LO) | ||
951 | gsm_dlci_data_sweep(dlci->gsm); | ||
952 | spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * Control message processing | ||
957 | */ | ||
958 | |||
959 | |||
960 | /** | ||
961 | * gsm_control_reply - send a response frame to a control | ||
962 | * @gsm: gsm channel | ||
963 | * @cmd: the command to use | ||
964 | * @data: data to follow encoded info | ||
965 | * @dlen: length of data | ||
966 | * | ||
967 | * Encode up and queue a UI/UIH frame containing our response. | ||
968 | */ | ||
969 | |||
970 | static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data, | ||
971 | int dlen) | ||
972 | { | ||
973 | struct gsm_msg *msg; | ||
974 | msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); | ||
975 | msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ | ||
976 | msg->data[1] = (dlen << 1) | EA; | ||
977 | memcpy(msg->data + 2, data, dlen); | ||
978 | gsm_data_queue(gsm->dlci[0], msg); | ||
979 | } | ||
980 | |||
981 | /** | ||
982 | * gsm_process_modem - process received modem status | ||
983 | * @tty: virtual tty bound to the DLCI | ||
984 | * @dlci: DLCI to affect | ||
985 | * @modem: modem bits (full EA) | ||
986 | * | ||
987 | * Used when a modem control message or line state inline in adaption | ||
988 | * layer 2 is processed. Sort out the local modem state and throttles | ||
989 | */ | ||
990 | |||
991 | static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, | ||
992 | u32 modem) | ||
993 | { | ||
994 | int mlines = 0; | ||
995 | u8 brk = modem >> 6; | ||
996 | |||
997 | /* Flow control/ready to communicate */ | ||
998 | if (modem & MDM_FC) { | ||
999 | /* Need to throttle our output on this device */ | ||
1000 | dlci->constipated = 1; | ||
1001 | } | ||
1002 | if (modem & MDM_RTC) { | ||
1003 | mlines |= TIOCM_DSR | TIOCM_DTR; | ||
1004 | dlci->constipated = 0; | ||
1005 | gsm_dlci_data_kick(dlci); | ||
1006 | } | ||
1007 | /* Map modem bits */ | ||
1008 | if (modem & MDM_RTR) | ||
1009 | mlines |= TIOCM_RTS | TIOCM_CTS; | ||
1010 | if (modem & MDM_IC) | ||
1011 | mlines |= TIOCM_RI; | ||
1012 | if (modem & MDM_DV) | ||
1013 | mlines |= TIOCM_CD; | ||
1014 | |||
1015 | /* Carrier drop -> hangup */ | ||
1016 | if (tty) { | ||
1017 | if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD)) | ||
1018 | if (!(tty->termios->c_cflag & CLOCAL)) | ||
1019 | tty_hangup(tty); | ||
1020 | if (brk & 0x01) | ||
1021 | tty_insert_flip_char(tty, 0, TTY_BREAK); | ||
1022 | } | ||
1023 | dlci->modem_rx = mlines; | ||
1024 | } | ||
1025 | |||
1026 | /** | ||
1027 | * gsm_control_modem - modem status received | ||
1028 | * @gsm: GSM channel | ||
1029 | * @data: data following command | ||
1030 | * @clen: command length | ||
1031 | * | ||
1032 | * We have received a modem status control message. This is used by | ||
1033 | * the GSM mux protocol to pass virtual modem line status and optionally | ||
1034 | * to indicate break signals. Unpack it, convert to Linux representation | ||
1035 | * and if need be stuff a break message down the tty. | ||
1036 | */ | ||
1037 | |||
1038 | static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen) | ||
1039 | { | ||
1040 | unsigned int addr = 0; | ||
1041 | unsigned int modem = 0; | ||
1042 | struct gsm_dlci *dlci; | ||
1043 | int len = clen; | ||
1044 | u8 *dp = data; | ||
1045 | struct tty_struct *tty; | ||
1046 | |||
1047 | while (gsm_read_ea(&addr, *dp++) == 0) { | ||
1048 | len--; | ||
1049 | if (len == 0) | ||
1050 | return; | ||
1051 | } | ||
1052 | /* Must be at least one byte following the EA */ | ||
1053 | len--; | ||
1054 | if (len <= 0) | ||
1055 | return; | ||
1056 | |||
1057 | addr >>= 1; | ||
1058 | /* Closed port, or invalid ? */ | ||
1059 | if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL) | ||
1060 | return; | ||
1061 | dlci = gsm->dlci[addr]; | ||
1062 | |||
1063 | while (gsm_read_ea(&modem, *dp++) == 0) { | ||
1064 | len--; | ||
1065 | if (len == 0) | ||
1066 | return; | ||
1067 | } | ||
1068 | tty = tty_port_tty_get(&dlci->port); | ||
1069 | gsm_process_modem(tty, dlci, modem); | ||
1070 | if (tty) { | ||
1071 | tty_wakeup(tty); | ||
1072 | tty_kref_put(tty); | ||
1073 | } | ||
1074 | gsm_control_reply(gsm, CMD_MSC, data, clen); | ||
1075 | } | ||
1076 | |||
1077 | /** | ||
1078 | * gsm_control_rls - remote line status | ||
1079 | * @gsm: GSM channel | ||
1080 | * @data: data bytes | ||
1081 | * @clen: data length | ||
1082 | * | ||
1083 | * The modem sends us a two byte message on the control channel whenever | ||
1084 | * it wishes to send us an error state from the virtual link. Stuff | ||
1085 | * this into the uplink tty if present | ||
1086 | */ | ||
1087 | |||
1088 | static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen) | ||
1089 | { | ||
1090 | struct tty_struct *tty; | ||
1091 | unsigned int addr = 0 ; | ||
1092 | u8 bits; | ||
1093 | int len = clen; | ||
1094 | u8 *dp = data; | ||
1095 | |||
1096 | while (gsm_read_ea(&addr, *dp++) == 0) { | ||
1097 | len--; | ||
1098 | if (len == 0) | ||
1099 | return; | ||
1100 | } | ||
1101 | /* Must be at least one byte following ea */ | ||
1102 | len--; | ||
1103 | if (len <= 0) | ||
1104 | return; | ||
1105 | addr >>= 1; | ||
1106 | /* Closed port, or invalid ? */ | ||
1107 | if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL) | ||
1108 | return; | ||
1109 | /* No error ? */ | ||
1110 | bits = *dp; | ||
1111 | if ((bits & 1) == 0) | ||
1112 | return; | ||
1113 | /* See if we have an uplink tty */ | ||
1114 | tty = tty_port_tty_get(&gsm->dlci[addr]->port); | ||
1115 | |||
1116 | if (tty) { | ||
1117 | if (bits & 2) | ||
1118 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | ||
1119 | if (bits & 4) | ||
1120 | tty_insert_flip_char(tty, 0, TTY_PARITY); | ||
1121 | if (bits & 8) | ||
1122 | tty_insert_flip_char(tty, 0, TTY_FRAME); | ||
1123 | tty_flip_buffer_push(tty); | ||
1124 | tty_kref_put(tty); | ||
1125 | } | ||
1126 | gsm_control_reply(gsm, CMD_RLS, data, clen); | ||
1127 | } | ||
1128 | |||
1129 | static void gsm_dlci_begin_close(struct gsm_dlci *dlci); | ||
1130 | |||
1131 | /** | ||
1132 | * gsm_control_message - DLCI 0 control processing | ||
1133 | * @gsm: our GSM mux | ||
1134 | * @command: the command EA | ||
1135 | * @data: data beyond the command/length EAs | ||
1136 | * @clen: length | ||
1137 | * | ||
1138 | * Input processor for control messages from the other end of the link. | ||
1139 | * Processes the incoming request and queues a response frame or an | ||
1140 | * NSC response if not supported | ||
1141 | */ | ||
1142 | |||
1143 | static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, | ||
1144 | u8 *data, int clen) | ||
1145 | { | ||
1146 | u8 buf[1]; | ||
1147 | switch (command) { | ||
1148 | case CMD_CLD: { | ||
1149 | struct gsm_dlci *dlci = gsm->dlci[0]; | ||
1150 | /* Modem wishes to close down */ | ||
1151 | if (dlci) { | ||
1152 | dlci->dead = 1; | ||
1153 | gsm->dead = 1; | ||
1154 | gsm_dlci_begin_close(dlci); | ||
1155 | } | ||
1156 | } | ||
1157 | break; | ||
1158 | case CMD_TEST: | ||
1159 | /* Modem wishes to test, reply with the data */ | ||
1160 | gsm_control_reply(gsm, CMD_TEST, data, clen); | ||
1161 | break; | ||
1162 | case CMD_FCON: | ||
1163 | /* Modem wants us to STFU */ | ||
1164 | gsm->constipated = 1; | ||
1165 | gsm_control_reply(gsm, CMD_FCON, NULL, 0); | ||
1166 | break; | ||
1167 | case CMD_FCOFF: | ||
1168 | /* Modem can accept data again */ | ||
1169 | gsm->constipated = 0; | ||
1170 | gsm_control_reply(gsm, CMD_FCOFF, NULL, 0); | ||
1171 | /* Kick the link in case it is idling */ | ||
1172 | gsm_data_kick(gsm); | ||
1173 | break; | ||
1174 | case CMD_MSC: | ||
1175 | /* Out of band modem line change indicator for a DLCI */ | ||
1176 | gsm_control_modem(gsm, data, clen); | ||
1177 | break; | ||
1178 | case CMD_RLS: | ||
1179 | /* Out of band error reception for a DLCI */ | ||
1180 | gsm_control_rls(gsm, data, clen); | ||
1181 | break; | ||
1182 | case CMD_PSC: | ||
1183 | /* Modem wishes to enter power saving state */ | ||
1184 | gsm_control_reply(gsm, CMD_PSC, NULL, 0); | ||
1185 | break; | ||
1186 | /* Optional unsupported commands */ | ||
1187 | case CMD_PN: /* Parameter negotiation */ | ||
1188 | case CMD_RPN: /* Remote port negotation */ | ||
1189 | case CMD_SNC: /* Service negotation command */ | ||
1190 | default: | ||
1191 | /* Reply to bad commands with an NSC */ | ||
1192 | buf[0] = command; | ||
1193 | gsm_control_reply(gsm, CMD_NSC, buf, 1); | ||
1194 | break; | ||
1195 | } | ||
1196 | } | ||
1197 | |||
1198 | /** | ||
1199 | * gsm_control_response - process a response to our control | ||
1200 | * @gsm: our GSM mux | ||
1201 | * @command: the command (response) EA | ||
1202 | * @data: data beyond the command/length EA | ||
1203 | * @clen: length | ||
1204 | * | ||
1205 | * Process a response to an outstanding command. We only allow a single | ||
1206 | * control message in flight so this is fairly easy. All the clean up | ||
1207 | * is done by the caller, we just update the fields, flag it as done | ||
1208 | * and return | ||
1209 | */ | ||
1210 | |||
1211 | static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, | ||
1212 | u8 *data, int clen) | ||
1213 | { | ||
1214 | struct gsm_control *ctrl; | ||
1215 | unsigned long flags; | ||
1216 | |||
1217 | spin_lock_irqsave(&gsm->control_lock, flags); | ||
1218 | |||
1219 | ctrl = gsm->pending_cmd; | ||
1220 | /* Does the reply match our command */ | ||
1221 | command |= 1; | ||
1222 | if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) { | ||
1223 | /* Our command was replied to, kill the retry timer */ | ||
1224 | del_timer(&gsm->t2_timer); | ||
1225 | gsm->pending_cmd = NULL; | ||
1226 | /* Rejected by the other end */ | ||
1227 | if (command == CMD_NSC) | ||
1228 | ctrl->error = -EOPNOTSUPP; | ||
1229 | ctrl->done = 1; | ||
1230 | wake_up(&gsm->event); | ||
1231 | } | ||
1232 | spin_unlock_irqrestore(&gsm->control_lock, flags); | ||
1233 | } | ||
1234 | |||
1235 | /** | ||
1236 | * gsm_control_transmit - send control packet | ||
1237 | * @gsm: gsm mux | ||
1238 | * @ctrl: frame to send | ||
1239 | * | ||
1240 | * Send out a pending control command (called under control lock) | ||
1241 | */ | ||
1242 | |||
1243 | static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) | ||
1244 | { | ||
1245 | struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, | ||
1246 | gsm->ftype|PF); | ||
1247 | if (msg == NULL) | ||
1248 | return; | ||
1249 | msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */ | ||
1250 | memcpy(msg->data + 1, ctrl->data, ctrl->len); | ||
1251 | gsm_data_queue(gsm->dlci[0], msg); | ||
1252 | } | ||
1253 | |||
1254 | /** | ||
1255 | * gsm_control_retransmit - retransmit a control frame | ||
1256 | * @data: pointer to our gsm object | ||
1257 | * | ||
1258 | * Called off the T2 timer expiry in order to retransmit control frames | ||
1259 | * that have been lost in the system somewhere. The control_lock protects | ||
1260 | * us from colliding with another sender or a receive completion event. | ||
1261 | * In that situation the timer may still occur in a small window but | ||
1262 | * gsm->pending_cmd will be NULL and we just let the timer expire. | ||
1263 | */ | ||
1264 | |||
1265 | static void gsm_control_retransmit(unsigned long data) | ||
1266 | { | ||
1267 | struct gsm_mux *gsm = (struct gsm_mux *)data; | ||
1268 | struct gsm_control *ctrl; | ||
1269 | unsigned long flags; | ||
1270 | spin_lock_irqsave(&gsm->control_lock, flags); | ||
1271 | ctrl = gsm->pending_cmd; | ||
1272 | if (ctrl) { | ||
1273 | gsm->cretries--; | ||
1274 | if (gsm->cretries == 0) { | ||
1275 | gsm->pending_cmd = NULL; | ||
1276 | ctrl->error = -ETIMEDOUT; | ||
1277 | ctrl->done = 1; | ||
1278 | spin_unlock_irqrestore(&gsm->control_lock, flags); | ||
1279 | wake_up(&gsm->event); | ||
1280 | return; | ||
1281 | } | ||
1282 | gsm_control_transmit(gsm, ctrl); | ||
1283 | mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); | ||
1284 | } | ||
1285 | spin_unlock_irqrestore(&gsm->control_lock, flags); | ||
1286 | } | ||
1287 | |||
1288 | /** | ||
1289 | * gsm_control_send - send a control frame on DLCI 0 | ||
1290 | * @gsm: the GSM channel | ||
1291 | * @command: command to send including CR bit | ||
1292 | * @data: bytes of data (must be kmalloced) | ||
1293 | * @len: length of the block to send | ||
1294 | * | ||
1295 | * Queue and dispatch a control command. Only one command can be | ||
1296 | * active at a time. In theory more can be outstanding but the matching | ||
1297 | * gets really complicated so for now stick to one outstanding. | ||
1298 | */ | ||
1299 | |||
1300 | static struct gsm_control *gsm_control_send(struct gsm_mux *gsm, | ||
1301 | unsigned int command, u8 *data, int clen) | ||
1302 | { | ||
1303 | struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control), | ||
1304 | GFP_KERNEL); | ||
1305 | unsigned long flags; | ||
1306 | if (ctrl == NULL) | ||
1307 | return NULL; | ||
1308 | retry: | ||
1309 | wait_event(gsm->event, gsm->pending_cmd == NULL); | ||
1310 | spin_lock_irqsave(&gsm->control_lock, flags); | ||
1311 | if (gsm->pending_cmd != NULL) { | ||
1312 | spin_unlock_irqrestore(&gsm->control_lock, flags); | ||
1313 | goto retry; | ||
1314 | } | ||
1315 | ctrl->cmd = command; | ||
1316 | ctrl->data = data; | ||
1317 | ctrl->len = clen; | ||
1318 | gsm->pending_cmd = ctrl; | ||
1319 | gsm->cretries = gsm->n2; | ||
1320 | mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); | ||
1321 | gsm_control_transmit(gsm, ctrl); | ||
1322 | spin_unlock_irqrestore(&gsm->control_lock, flags); | ||
1323 | return ctrl; | ||
1324 | } | ||
1325 | |||
1326 | /** | ||
1327 | * gsm_control_wait - wait for a control to finish | ||
1328 | * @gsm: GSM mux | ||
1329 | * @control: control we are waiting on | ||
1330 | * | ||
1331 | * Waits for the control to complete or time out. Frees any used | ||
1332 | * resources and returns 0 for success, or an error if the remote | ||
1333 | * rejected or ignored the request. | ||
1334 | */ | ||
1335 | |||
1336 | static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control) | ||
1337 | { | ||
1338 | int err; | ||
1339 | wait_event(gsm->event, control->done == 1); | ||
1340 | err = control->error; | ||
1341 | kfree(control); | ||
1342 | return err; | ||
1343 | } | ||
1344 | |||
1345 | |||
1346 | /* | ||
1347 | * DLCI level handling: Needs krefs | ||
1348 | */ | ||
1349 | |||
1350 | /* | ||
1351 | * State transitions and timers | ||
1352 | */ | ||
1353 | |||
1354 | /** | ||
1355 | * gsm_dlci_close - a DLCI has closed | ||
1356 | * @dlci: DLCI that closed | ||
1357 | * | ||
1358 | * Perform processing when moving a DLCI into closed state. If there | ||
1359 | * is an attached tty this is hung up | ||
1360 | */ | ||
1361 | |||
1362 | static void gsm_dlci_close(struct gsm_dlci *dlci) | ||
1363 | { | ||
1364 | del_timer(&dlci->t1); | ||
1365 | if (debug & 8) | ||
1366 | printk("DLCI %d goes closed.\n", dlci->addr); | ||
1367 | dlci->state = DLCI_CLOSED; | ||
1368 | if (dlci->addr != 0) { | ||
1369 | struct tty_struct *tty = tty_port_tty_get(&dlci->port); | ||
1370 | if (tty) { | ||
1371 | tty_hangup(tty); | ||
1372 | tty_kref_put(tty); | ||
1373 | } | ||
1374 | kfifo_reset(dlci->fifo); | ||
1375 | } else | ||
1376 | dlci->gsm->dead = 1; | ||
1377 | wake_up(&dlci->gsm->event); | ||
1378 | /* A DLCI 0 close is a MUX termination so we need to kick that | ||
1379 | back to userspace somehow */ | ||
1380 | } | ||
1381 | |||
1382 | /** | ||
1383 | * gsm_dlci_open - a DLCI has opened | ||
1384 | * @dlci: DLCI that opened | ||
1385 | * | ||
1386 | * Perform processing when moving a DLCI into open state. | ||
1387 | */ | ||
1388 | |||
1389 | static void gsm_dlci_open(struct gsm_dlci *dlci) | ||
1390 | { | ||
1391 | /* Note that SABM UA .. SABM UA first UA lost can mean that we go | ||
1392 | open -> open */ | ||
1393 | del_timer(&dlci->t1); | ||
1394 | /* This will let a tty open continue */ | ||
1395 | dlci->state = DLCI_OPEN; | ||
1396 | if (debug & 8) | ||
1397 | printk("DLCI %d goes open.\n", dlci->addr); | ||
1398 | wake_up(&dlci->gsm->event); | ||
1399 | } | ||
1400 | |||
1401 | /** | ||
1402 | * gsm_dlci_t1 - T1 timer expiry | ||
1403 | * @dlci: DLCI that opened | ||
1404 | * | ||
1405 | * The T1 timer handles retransmits of control frames (essentially of | ||
1406 | * SABM and DISC). We resend the command until the retry count runs out | ||
1407 | * in which case an opening port goes back to closed and a closing port | ||
1408 | * is simply put into closed state (any further frames from the other | ||
1409 | * end will get a DM response) | ||
1410 | */ | ||
1411 | |||
1412 | static void gsm_dlci_t1(unsigned long data) | ||
1413 | { | ||
1414 | struct gsm_dlci *dlci = (struct gsm_dlci *)data; | ||
1415 | struct gsm_mux *gsm = dlci->gsm; | ||
1416 | |||
1417 | switch (dlci->state) { | ||
1418 | case DLCI_OPENING: | ||
1419 | dlci->retries--; | ||
1420 | if (dlci->retries) { | ||
1421 | gsm_command(dlci->gsm, dlci->addr, SABM|PF); | ||
1422 | mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); | ||
1423 | } else | ||
1424 | gsm_dlci_close(dlci); | ||
1425 | break; | ||
1426 | case DLCI_CLOSING: | ||
1427 | dlci->retries--; | ||
1428 | if (dlci->retries) { | ||
1429 | gsm_command(dlci->gsm, dlci->addr, DISC|PF); | ||
1430 | mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); | ||
1431 | } else | ||
1432 | gsm_dlci_close(dlci); | ||
1433 | break; | ||
1434 | } | ||
1435 | } | ||
1436 | |||
1437 | /** | ||
1438 | * gsm_dlci_begin_open - start channel open procedure | ||
1439 | * @dlci: DLCI to open | ||
1440 | * | ||
1441 | * Commence opening a DLCI from the Linux side. We issue SABM messages | ||
1442 | * to the modem which should then reply with a UA, at which point we | ||
1443 | * will move into open state. Opening is done asynchronously with retry | ||
1444 | * running off timers and the responses. | ||
1445 | */ | ||
1446 | |||
1447 | static void gsm_dlci_begin_open(struct gsm_dlci *dlci) | ||
1448 | { | ||
1449 | struct gsm_mux *gsm = dlci->gsm; | ||
1450 | if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING) | ||
1451 | return; | ||
1452 | dlci->retries = gsm->n2; | ||
1453 | dlci->state = DLCI_OPENING; | ||
1454 | gsm_command(dlci->gsm, dlci->addr, SABM|PF); | ||
1455 | mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); | ||
1456 | } | ||
1457 | |||
1458 | /** | ||
 *	gsm_dlci_begin_close	-	start channel close procedure
 *	@dlci: DLCI to close
1461 | * | ||
1462 | * Commence closing a DLCI from the Linux side. We issue DISC messages | ||
1463 | * to the modem which should then reply with a UA, at which point we | ||
1464 | * will move into closed state. Closing is done asynchronously with retry | ||
1465 | * off timers. We may also receive a DM reply from the other end which | ||
1466 | * indicates the channel was already closed. | ||
1467 | */ | ||
1468 | |||
1469 | static void gsm_dlci_begin_close(struct gsm_dlci *dlci) | ||
1470 | { | ||
1471 | struct gsm_mux *gsm = dlci->gsm; | ||
1472 | if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING) | ||
1473 | return; | ||
1474 | dlci->retries = gsm->n2; | ||
1475 | dlci->state = DLCI_CLOSING; | ||
1476 | gsm_command(dlci->gsm, dlci->addr, DISC|PF); | ||
1477 | mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); | ||
1478 | } | ||
1479 | |||
1480 | /** | ||
1481 | * gsm_dlci_data - data arrived | ||
1482 | * @dlci: channel | ||
1483 | * @data: block of bytes received | ||
1484 | * @len: length of received block | ||
1485 | * | ||
1486 | * A UI or UIH frame has arrived which contains data for a channel | ||
1487 | * other than the control channel. If the relevant virtual tty is | ||
1488 | * open we shovel the bits down it, if not we drop them. | ||
1489 | */ | ||
1490 | |||
1491 | static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len) | ||
1492 | { | ||
1493 | /* krefs .. */ | ||
1494 | struct tty_port *port = &dlci->port; | ||
1495 | struct tty_struct *tty = tty_port_tty_get(port); | ||
1496 | unsigned int modem = 0; | ||
1497 | |||
1498 | if (debug & 16) | ||
1499 | printk("%d bytes for tty %p\n", len, tty); | ||
1500 | if (tty) { | ||
1501 | switch (dlci->adaption) { | ||
1502 | /* Unsupported types */ | ||
1503 | /* Packetised interruptible data */ | ||
1504 | case 4: | ||
1505 | break; | ||
1506 | /* Packetised uininterruptible voice/data */ | ||
1507 | case 3: | ||
1508 | break; | ||
1509 | /* Asynchronous serial with line state in each frame */ | ||
1510 | case 2: | ||
1511 | while (gsm_read_ea(&modem, *data++) == 0) { | ||
1512 | len--; | ||
1513 | if (len == 0) | ||
1514 | return; | ||
1515 | } | ||
1516 | gsm_process_modem(tty, dlci, modem); | ||
1517 | /* Line state will go via DLCI 0 controls only */ | ||
1518 | case 1: | ||
1519 | default: | ||
1520 | tty_insert_flip_string(tty, data, len); | ||
1521 | tty_flip_buffer_push(tty); | ||
1522 | } | ||
1523 | tty_kref_put(tty); | ||
1524 | } | ||
1525 | } | ||
1526 | |||
1527 | /** | ||
 *	gsm_dlci_command	-	data arrived on control channel
1529 | * @dlci: channel | ||
1530 | * @data: block of bytes received | ||
1531 | * @len: length of received block | ||
1532 | * | ||
1533 | * A UI or UIH frame has arrived which contains data for DLCI 0 the | ||
1534 | * control channel. This should contain a command EA followed by | ||
1535 | * control data bytes. The command EA contains a command/response bit | ||
1536 | * and we divide up the work accordingly. | ||
1537 | */ | ||
1538 | |||
1539 | static void gsm_dlci_command(struct gsm_dlci *dlci, u8 *data, int len) | ||
1540 | { | ||
1541 | /* See what command is involved */ | ||
1542 | unsigned int command = 0; | ||
1543 | while (len-- > 0) { | ||
1544 | if (gsm_read_ea(&command, *data++) == 1) { | ||
1545 | int clen = *data++; | ||
1546 | len--; | ||
1547 | /* FIXME: this is properly an EA */ | ||
1548 | clen >>= 1; | ||
1549 | /* Malformed command ? */ | ||
1550 | if (clen > len) | ||
1551 | return; | ||
1552 | if (command & 1) | ||
1553 | gsm_control_message(dlci->gsm, command, | ||
1554 | data, clen); | ||
1555 | else | ||
1556 | gsm_control_response(dlci->gsm, command, | ||
1557 | data, clen); | ||
1558 | return; | ||
1559 | } | ||
1560 | } | ||
1561 | } | ||
1562 | |||
1563 | /* | ||
1564 | * Allocate/Free DLCI channels | ||
1565 | */ | ||
1566 | |||
1567 | /** | ||
1568 | * gsm_dlci_alloc - allocate a DLCI | ||
1569 | * @gsm: GSM mux | ||
1570 | * @addr: address of the DLCI | ||
1571 | * | ||
1572 | * Allocate and install a new DLCI object into the GSM mux. | ||
1573 | * | ||
1574 | * FIXME: review locking races | ||
1575 | */ | ||
1576 | |||
1577 | static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) | ||
1578 | { | ||
1579 | struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC); | ||
1580 | if (dlci == NULL) | ||
1581 | return NULL; | ||
1582 | spin_lock_init(&dlci->lock); | ||
1583 | dlci->fifo = &dlci->_fifo; | ||
1584 | if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) { | ||
1585 | kfree(dlci); | ||
1586 | return NULL; | ||
1587 | } | ||
1588 | |||
1589 | skb_queue_head_init(&dlci->skb_list); | ||
1590 | init_timer(&dlci->t1); | ||
1591 | dlci->t1.function = gsm_dlci_t1; | ||
1592 | dlci->t1.data = (unsigned long)dlci; | ||
1593 | tty_port_init(&dlci->port); | ||
1594 | dlci->port.ops = &gsm_port_ops; | ||
1595 | dlci->gsm = gsm; | ||
1596 | dlci->addr = addr; | ||
1597 | dlci->adaption = gsm->adaption; | ||
1598 | dlci->state = DLCI_CLOSED; | ||
1599 | if (addr) | ||
1600 | dlci->data = gsm_dlci_data; | ||
1601 | else | ||
1602 | dlci->data = gsm_dlci_command; | ||
1603 | gsm->dlci[addr] = dlci; | ||
1604 | return dlci; | ||
1605 | } | ||
1606 | |||
1607 | /** | ||
1608 | * gsm_dlci_free - release DLCI | ||
1609 | * @dlci: DLCI to destroy | ||
1610 | * | ||
1611 | * Free up a DLCI. Currently to keep the lifetime rules sane we only | ||
1612 | * clean up DLCI objects when the MUX closes rather than as the port | ||
1613 | * is closed down on both the tty and mux levels. | ||
1614 | * | ||
1615 | * Can sleep. | ||
1616 | */ | ||
1617 | static void gsm_dlci_free(struct gsm_dlci *dlci) | ||
1618 | { | ||
1619 | struct tty_struct *tty = tty_port_tty_get(&dlci->port); | ||
1620 | if (tty) { | ||
1621 | tty_vhangup(tty); | ||
1622 | tty_kref_put(tty); | ||
1623 | } | ||
1624 | del_timer_sync(&dlci->t1); | ||
1625 | dlci->gsm->dlci[dlci->addr] = NULL; | ||
1626 | kfifo_free(dlci->fifo); | ||
1627 | kfree(dlci); | ||
1628 | } | ||
1629 | |||
1630 | |||
1631 | /* | ||
1632 | * LAPBish link layer logic | ||
1633 | */ | ||
1634 | |||
1635 | /** | ||
1636 | * gsm_queue - a GSM frame is ready to process | ||
1637 | * @gsm: pointer to our gsm mux | ||
1638 | * | ||
1639 | * At this point in time a frame has arrived and been demangled from | ||
1640 | * the line encoding. All the differences between the encodings have | ||
1641 | * been handled below us and the frame is unpacked into the structures. | ||
1642 | * The fcs holds the header FCS but any data FCS must be added here. | ||
1643 | */ | ||
1644 | |||
static void gsm_queue(struct gsm_mux *gsm)
{
	struct gsm_dlci *dlci;
	u8 cr;
	int address;
	/* We have to sneak a look at the packet body to do the FCS.
	   A somewhat layering violation in the spec */

	/* UI frames checksum the payload too; UIH only the header (the
	   header FCS is already folded into gsm->fcs by the framers) */
	if ((gsm->control & ~PF) == UI)
		gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
	if (gsm->fcs != GOOD_FCS) {
		gsm->bad_fcs++;
		if (debug & 4)
			printk("BAD FCS %02x\n", gsm->fcs);
		return;
	}
	/* Address field: DLCI in the high bits, C/R in bit 0 */
	address = gsm->address >> 1;
	if (address >= NUM_DLCI)
		goto invalid;

	cr = gsm->address & 1;		/* C/R bit */

	gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);

	cr ^= 1 - gsm->initiator;	/* Flip so 1 always means command */
	dlci = gsm->dlci[address];

	switch (gsm->control) {
	case SABM|PF:
		/* Open request: must be a command */
		if (cr == 0)
			goto invalid;
		/* First traffic on an unknown DLCI creates it */
		if (dlci == NULL)
			dlci = gsm_dlci_alloc(gsm, address);
		if (dlci == NULL)
			return;
		if (dlci->dead)
			gsm_response(gsm, address, DM);
		else {
			gsm_response(gsm, address, UA);
			gsm_dlci_open(dlci);
		}
		break;
	case DISC|PF:
		/* Close request: must be a command */
		if (cr == 0)
			goto invalid;
		if (dlci == NULL || dlci->state == DLCI_CLOSED) {
			gsm_response(gsm, address, DM);
			return;
		}
		/* Real close complete */
		gsm_response(gsm, address, UA);
		gsm_dlci_close(dlci);
		break;
	case UA:
	case UA|PF:
		/* Acknowledgement of our SABM or DISC */
		if (cr == 0 || dlci == NULL)
			break;
		switch (dlci->state) {
		case DLCI_CLOSING:
			gsm_dlci_close(dlci);
			break;
		case DLCI_OPENING:
			gsm_dlci_open(dlci);
			break;
		}
		break;
	case DM:	/* DM can be valid unsolicited */
	case DM|PF:
		/* Peer says the channel is down: treat as closed */
		if (cr)
			goto invalid;
		if (dlci == NULL)
			return;
		gsm_dlci_close(dlci);
		break;
	case UI:
	case UI|PF:
	case UIH:
	case UIH|PF:
#if 0
		if (cr)
			goto invalid;
#endif
		/* Data for a channel that isn't up gets a DM back */
		if (dlci == NULL || dlci->state != DLCI_OPEN) {
			gsm_command(gsm, address, DM|PF);
			return;
		}
		/* Dispatch via the per-DLCI handler (data or control) */
		dlci->data(dlci, gsm->buf, gsm->len);
		break;
	default:
		goto invalid;
	}
	return;
invalid:
	gsm->malformed++;
	return;
}
1741 | |||
1742 | |||
1743 | /** | ||
1744 | * gsm0_receive - perform processing for non-transparency | ||
1745 | * @gsm: gsm data for this ldisc instance | ||
1746 | * @c: character | ||
1747 | * | ||
1748 | * Receive bytes in gsm mode 0 | ||
1749 | */ | ||
1750 | |||
static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
{
	switch (gsm->state) {
	case GSM_SEARCH:	/* SOF marker */
		/* Hunt for the start-of-frame byte; anything else is noise */
		if (c == GSM0_SOF) {
			gsm->state = GSM_ADDRESS;
			gsm->address = 0;
			gsm->len = 0;
			gsm->fcs = INIT_FCS;
		}
		break;		/* Address EA */
	case GSM_ADDRESS:
		/* EA-extended address field: may span several octets */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		if (gsm_read_ea(&gsm->address, c))
			gsm->state = GSM_CONTROL;
		break;
	case GSM_CONTROL:	/* Control Byte */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		gsm->control = c;
		gsm->state = GSM_LEN;
		break;
	case GSM_LEN:		/* Length EA */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		if (gsm_read_ea(&gsm->len, c)) {
			/* Oversized frame: drop it and hunt for next SOF */
			if (gsm->len > gsm->mru) {
				gsm->bad_size++;
				gsm->state = GSM_SEARCH;
				break;
			}
			/* NOTE(review): a zero-length frame can never satisfy
			   count == len below (count is only compared after a
			   byte is stored) — confirm len == 0 should go
			   straight to GSM_FCS */
			gsm->count = 0;
			gsm->state = GSM_DATA;
		}
		break;
	case GSM_DATA:		/* Data */
		gsm->buf[gsm->count++] = c;
		if (gsm->count == gsm->len)
			gsm->state = GSM_FCS;
		break;
	case GSM_FCS:		/* FCS follows the packet */
		gsm->fcs = c;
		gsm_queue(gsm);
		/* And then back for the next frame */
		gsm->state = GSM_SEARCH;
		break;
	}
}
1797 | |||
/**
 *	gsm1_receive	-	perform processing for advanced option
 *	@gsm: gsm data for this ldisc instance
 *	@c: character
 *
 *	Receive bytes in mode 1 (Advanced option)
 */
1805 | |||
static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
{
	if (c == GSM1_SOF) {
		/* EOF is only valid in frame if we have got to the data state
		   and received at least one byte (the FCS) */
		if (gsm->state == GSM_DATA && gsm->count) {
			/* Extract the FCS: it is the last byte of the body */
			gsm->count--;
			gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
			gsm->len = gsm->count;
			gsm_queue(gsm);
			gsm->state = GSM_START;
			return;
		}
		/* Any partial frame was a runt so go back to start */
		if (gsm->state != GSM_START) {
			gsm->malformed++;
			gsm->state = GSM_START;
		}
		/* A SOF in GSM_START means we are still reading idling or
		   framing bytes */
		return;
	}

	/* Remember the escape; the next byte will be transformed */
	if (c == GSM1_ESCAPE) {
		gsm->escape = 1;
		return;
	}

	/* Only an unescaped SOF gets us out of GSM search */
	if (gsm->state == GSM_SEARCH)
		return;

	/* Undo the transparency transformation on the escaped byte */
	if (gsm->escape) {
		c ^= GSM1_ESCAPE_BITS;
		gsm->escape = 0;
	}
	switch (gsm->state) {
	case GSM_START:		/* First byte after SOF */
		gsm->address = 0;
		gsm->state = GSM_ADDRESS;
		gsm->fcs = INIT_FCS;
		/* Drop through */
	case GSM_ADDRESS:	/* Address continuation */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		if (gsm_read_ea(&gsm->address, c))
			gsm->state = GSM_CONTROL;
		break;
	case GSM_CONTROL:	/* Control Byte */
		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
		gsm->control = c;
		gsm->count = 0;
		gsm->state = GSM_DATA;
		break;
	case GSM_DATA:		/* Data */
		/* mode 1 has no length field: guard the buffer here */
		if (gsm->count > gsm->mru ) {	/* Allow one for the FCS */
			gsm->state = GSM_OVERRUN;
			gsm->bad_size++;
		} else
			gsm->buf[gsm->count++] = c;
		break;
	case GSM_OVERRUN:	/* Over-long - eg a dropped SOF */
		break;
	}
}
1871 | |||
1872 | /** | ||
1873 | * gsm_error - handle tty error | ||
1874 | * @gsm: ldisc data | ||
1875 | * @data: byte received (may be invalid) | ||
1876 | * @flag: error received | ||
1877 | * | ||
1878 | * Handle an error in the receipt of data for a frame. Currently we just | ||
1879 | * go back to hunting for a SOF. | ||
1880 | * | ||
1881 | * FIXME: better diagnostics ? | ||
1882 | */ | ||
1883 | |||
static void gsm_error(struct gsm_mux *gsm,
				unsigned char data, unsigned char flag)
{
	/* Abandon the current frame and hunt for the next SOF */
	gsm->state = GSM_SEARCH;
	gsm->io_error++;
}
1890 | |||
1891 | /** | ||
1892 | * gsm_cleanup_mux - generic GSM protocol cleanup | ||
1893 | * @gsm: our mux | ||
1894 | * | ||
1895 | * Clean up the bits of the mux which are the same for all framing | ||
1896 | * protocols. Remove the mux from the mux table, stop all the timers | ||
1897 | * and then shut down each device hanging up the channels as we go. | ||
1898 | */ | ||
1899 | |||
void gsm_cleanup_mux(struct gsm_mux *gsm)
{
	int i;
	struct gsm_dlci *dlci = gsm->dlci[0];
	struct gsm_msg *txq;

	gsm->dead = 1;	/* Refuse new tty opens while tearing down */

	/* Remove ourselves from the mux table */
	spin_lock(&gsm_mux_lock);
	for (i = 0; i < MAX_MUX; i++) {
		if (gsm_mux[i] == gsm) {
			gsm_mux[i] = NULL;
			break;
		}
	}
	spin_unlock(&gsm_mux_lock);
	WARN_ON(i == MAX_MUX);

	del_timer_sync(&gsm->t2_timer);
	/* Now we are sure T2 has stopped */
	if (dlci) {
		/* Take down the control channel (DLCI 0) first */
		dlci->dead = 1;
		gsm_dlci_begin_close(dlci);
		/* NOTE(review): interruptible wait — a signal lets us fall
		   through before DLCI 0 reaches DLCI_CLOSED; confirm callers
		   tolerate freeing a still-closing channel */
		wait_event_interruptible(gsm->event,
					dlci->state == DLCI_CLOSED);
	}
	/* Free up any link layer users */
	for (i = 0; i < NUM_DLCI; i++)
		if (gsm->dlci[i])
			gsm_dlci_free(gsm->dlci[i]);
	/* Now wipe the queues */
	/* NOTE(review): tx list is walked without tx_lock — confirm nothing
	   can queue concurrently once dead is set and the DLCIs are freed */
	for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
		gsm->tx_head = txq->next;
		kfree(txq);
	}
	gsm->tx_tail = NULL;
}
EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
1938 | |||
1939 | /** | ||
1940 | * gsm_activate_mux - generic GSM setup | ||
1941 | * @gsm: our mux | ||
1942 | * | ||
1943 | * Set up the bits of the mux which are the same for all framing | ||
1944 | * protocols. Add the mux to the mux table so it can be opened and | ||
1945 | * finally kick off connecting to DLCI 0 on the modem. | ||
1946 | */ | ||
1947 | |||
1948 | int gsm_activate_mux(struct gsm_mux *gsm) | ||
1949 | { | ||
1950 | struct gsm_dlci *dlci; | ||
1951 | int i = 0; | ||
1952 | |||
1953 | init_timer(&gsm->t2_timer); | ||
1954 | gsm->t2_timer.function = gsm_control_retransmit; | ||
1955 | gsm->t2_timer.data = (unsigned long)gsm; | ||
1956 | init_waitqueue_head(&gsm->event); | ||
1957 | spin_lock_init(&gsm->control_lock); | ||
1958 | spin_lock_init(&gsm->tx_lock); | ||
1959 | |||
1960 | if (gsm->encoding == 0) | ||
1961 | gsm->receive = gsm0_receive; | ||
1962 | else | ||
1963 | gsm->receive = gsm1_receive; | ||
1964 | gsm->error = gsm_error; | ||
1965 | |||
1966 | spin_lock(&gsm_mux_lock); | ||
1967 | for (i = 0; i < MAX_MUX; i++) { | ||
1968 | if (gsm_mux[i] == NULL) { | ||
1969 | gsm_mux[i] = gsm; | ||
1970 | break; | ||
1971 | } | ||
1972 | } | ||
1973 | spin_unlock(&gsm_mux_lock); | ||
1974 | if (i == MAX_MUX) | ||
1975 | return -EBUSY; | ||
1976 | |||
1977 | dlci = gsm_dlci_alloc(gsm, 0); | ||
1978 | if (dlci == NULL) | ||
1979 | return -ENOMEM; | ||
1980 | gsm->dead = 0; /* Tty opens are now permissible */ | ||
1981 | return 0; | ||
1982 | } | ||
1983 | EXPORT_SYMBOL_GPL(gsm_activate_mux); | ||
1984 | |||
1985 | /** | ||
1986 | * gsm_free_mux - free up a mux | ||
1987 | * @mux: mux to free | ||
1988 | * | ||
1989 | * Dispose of allocated resources for a dead mux. No refcounting | ||
 * at present so the mux must be truly dead.
1991 | */ | ||
1992 | void gsm_free_mux(struct gsm_mux *gsm) | ||
1993 | { | ||
1994 | kfree(gsm->txframe); | ||
1995 | kfree(gsm->buf); | ||
1996 | kfree(gsm); | ||
1997 | } | ||
1998 | EXPORT_SYMBOL_GPL(gsm_free_mux); | ||
1999 | |||
2000 | /** | ||
2001 | * gsm_alloc_mux - allocate a mux | ||
2002 | * | ||
2003 | * Creates a new mux ready for activation. | ||
2004 | */ | ||
2005 | |||
2006 | struct gsm_mux *gsm_alloc_mux(void) | ||
2007 | { | ||
2008 | struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL); | ||
2009 | if (gsm == NULL) | ||
2010 | return NULL; | ||
2011 | gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL); | ||
2012 | if (gsm->buf == NULL) { | ||
2013 | kfree(gsm); | ||
2014 | return NULL; | ||
2015 | } | ||
2016 | gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL); | ||
2017 | if (gsm->txframe == NULL) { | ||
2018 | kfree(gsm->buf); | ||
2019 | kfree(gsm); | ||
2020 | return NULL; | ||
2021 | } | ||
2022 | spin_lock_init(&gsm->lock); | ||
2023 | |||
2024 | gsm->t1 = T1; | ||
2025 | gsm->t2 = T2; | ||
2026 | gsm->n2 = N2; | ||
2027 | gsm->ftype = UIH; | ||
2028 | gsm->initiator = 0; | ||
2029 | gsm->adaption = 1; | ||
2030 | gsm->encoding = 1; | ||
2031 | gsm->mru = 64; /* Default to encoding 1 so these should be 64 */ | ||
2032 | gsm->mtu = 64; | ||
2033 | gsm->dead = 1; /* Avoid early tty opens */ | ||
2034 | |||
2035 | return gsm; | ||
2036 | } | ||
2037 | EXPORT_SYMBOL_GPL(gsm_alloc_mux); | ||
2038 | |||
2039 | |||
2040 | |||
2041 | |||
2042 | /** | ||
2043 | * gsmld_output - write to link | ||
2044 | * @gsm: our mux | ||
2045 | * @data: bytes to output | ||
2046 | * @len: size | ||
2047 | * | ||
2048 | * Write a block of data from the GSM mux to the data channel. This | ||
2049 | * will eventually be serialized from above but at the moment isn't. | ||
2050 | */ | ||
2051 | |||
2052 | static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len) | ||
2053 | { | ||
2054 | if (tty_write_room(gsm->tty) < len) { | ||
2055 | set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags); | ||
2056 | return -ENOSPC; | ||
2057 | } | ||
2058 | if (debug & 4) { | ||
2059 | printk("-->%d bytes out\n", len); | ||
2060 | hex_packet(data, len); | ||
2061 | } | ||
2062 | gsm->tty->ops->write(gsm->tty, data, len); | ||
2063 | return len; | ||
2064 | } | ||
2065 | |||
2066 | /** | ||
2067 | * gsmld_attach_gsm - mode set up | ||
2068 | * @tty: our tty structure | ||
2069 | * @gsm: our mux | ||
2070 | * | ||
2071 | * Set up the MUX for basic mode and commence connecting to the | ||
2072 | * modem. Currently called from the line discipline set up but | ||
2073 | * will need moving to an ioctl path. | ||
2074 | */ | ||
2075 | |||
static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
	int ret;

	/* Hold a tty reference for as long as the mux outputs through it */
	gsm->tty = tty_kref_get(tty);
	gsm->output = gsmld_output;
	ret = gsm_activate_mux(gsm);
	/* Activation failed: drop the reference we just took */
	if (ret != 0)
		tty_kref_put(gsm->tty);
	return ret;
}
2087 | |||
2088 | |||
2089 | /** | ||
2090 | * gsmld_detach_gsm - stop doing 0710 mux | ||
 * @tty: tty attached to the mux
2092 | * @gsm: mux | ||
2093 | * | ||
2094 | * Shutdown and then clean up the resources used by the line discipline | ||
2095 | */ | ||
2096 | |||
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
	/* The mux must only ever be attached to one tty at a time */
	WARN_ON(tty != gsm->tty);
	gsm_cleanup_mux(gsm);
	/* Drop the reference taken in gsmld_attach_gsm() */
	tty_kref_put(gsm->tty);
	gsm->tty = NULL;
}
2104 | |||
2105 | static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp, | ||
2106 | char *fp, int count) | ||
2107 | { | ||
2108 | struct gsm_mux *gsm = tty->disc_data; | ||
2109 | const unsigned char *dp; | ||
2110 | char *f; | ||
2111 | int i; | ||
2112 | char buf[64]; | ||
2113 | char flags; | ||
2114 | |||
2115 | if (debug & 4) { | ||
2116 | printk("Inbytes %dd\n", count); | ||
2117 | hex_packet(cp, count); | ||
2118 | } | ||
2119 | |||
2120 | for (i = count, dp = cp, f = fp; i; i--, dp++) { | ||
2121 | flags = *f++; | ||
2122 | switch (flags) { | ||
2123 | case TTY_NORMAL: | ||
2124 | gsm->receive(gsm, *dp); | ||
2125 | break; | ||
2126 | case TTY_OVERRUN: | ||
2127 | case TTY_BREAK: | ||
2128 | case TTY_PARITY: | ||
2129 | case TTY_FRAME: | ||
2130 | gsm->error(gsm, *dp, flags); | ||
2131 | break; | ||
2132 | default: | ||
2133 | printk(KERN_ERR "%s: unknown flag %d\n", | ||
2134 | tty_name(tty, buf), flags); | ||
2135 | break; | ||
2136 | } | ||
2137 | } | ||
2138 | /* FASYNC if needed ? */ | ||
2139 | /* If clogged call tty_throttle(tty); */ | ||
2140 | } | ||
2141 | |||
2142 | /** | ||
2143 | * gsmld_chars_in_buffer - report available bytes | ||
2144 | * @tty: tty device | ||
2145 | * | ||
2146 | * Report the number of characters buffered to be delivered to user | ||
2147 | * at this instant in time. | ||
2148 | * | ||
2149 | * Locking: gsm lock | ||
2150 | */ | ||
2151 | |||
static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
{
	/* The ldisc never queues data for userspace reads (see gsmld_read),
	   so there is never anything buffered */
	return 0;
}
2156 | |||
2157 | /** | ||
2158 | * gsmld_flush_buffer - clean input queue | ||
2159 | * @tty: terminal device | ||
2160 | * | ||
2161 | * Flush the input buffer. Called when the line discipline is | ||
2162 | * being closed, when the tty layer wants the buffer flushed (eg | ||
2163 | * at hangup). | ||
2164 | */ | ||
2165 | |||
static void gsmld_flush_buffer(struct tty_struct *tty)
{
	/* Nothing buffered at ldisc level, so nothing to flush */
}
2169 | |||
2170 | /** | ||
2171 | * gsmld_close - close the ldisc for this tty | ||
2172 | * @tty: device | ||
2173 | * | ||
2174 | * Called from the terminal layer when this line discipline is | ||
 *	being shut down, either because of a close or because of a
2176 | * discipline change. The function will not be called while other | ||
2177 | * ldisc methods are in progress. | ||
2178 | */ | ||
2179 | |||
static void gsmld_close(struct tty_struct *tty)
{
	struct gsm_mux *gsm = tty->disc_data;

	/* Shut the mux down and detach it from the tty first... */
	gsmld_detach_gsm(tty, gsm);

	gsmld_flush_buffer(tty);
	/* Do other clean up here */
	/* ...then release the mux storage (allocated in gsmld_open) */
	gsm_free_mux(gsm);
}
2190 | |||
2191 | /** | ||
2192 | * gsmld_open - open an ldisc | ||
2193 | * @tty: terminal to open | ||
2194 | * | ||
2195 | * Called when this line discipline is being attached to the | ||
2196 | * terminal device. Can sleep. Called serialized so that no | ||
2197 | * other events will occur in parallel. No further open will occur | ||
2198 | * until a close. | ||
2199 | */ | ||
2200 | |||
2201 | static int gsmld_open(struct tty_struct *tty) | ||
2202 | { | ||
2203 | struct gsm_mux *gsm; | ||
2204 | |||
2205 | if (tty->ops->write == NULL) | ||
2206 | return -EINVAL; | ||
2207 | |||
2208 | /* Attach our ldisc data */ | ||
2209 | gsm = gsm_alloc_mux(); | ||
2210 | if (gsm == NULL) | ||
2211 | return -ENOMEM; | ||
2212 | |||
2213 | tty->disc_data = gsm; | ||
2214 | tty->receive_room = 65536; | ||
2215 | |||
2216 | /* Attach the initial passive connection */ | ||
2217 | gsm->encoding = 1; | ||
2218 | return gsmld_attach_gsm(tty, gsm); | ||
2219 | } | ||
2220 | |||
2221 | /** | ||
2222 | * gsmld_write_wakeup - asynchronous I/O notifier | ||
2223 | * @tty: tty device | ||
2224 | * | ||
2225 | * Required for the ptys, serial driver etc. since processes | ||
2226 | * that attach themselves to the master and rely on ASYNC | ||
2227 | * IO must be woken up | ||
2228 | */ | ||
2229 | |||
static void gsmld_write_wakeup(struct tty_struct *tty)
{
	struct gsm_mux *gsm = tty->disc_data;

	/* Queue poll */
	/* Clear the wakeup request before kicking, so a racing fill-up can
	   re-arm it */
	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	gsm_data_kick(gsm);
	/* Plenty of room again: pull more data out of the DLCI fifos */
	if (gsm->tx_bytes < TX_THRESH_LO)
		gsm_dlci_data_sweep(gsm);
}
2240 | |||
2241 | /** | ||
2242 | * gsmld_read - read function for tty | ||
2243 | * @tty: tty device | ||
2244 | * @file: file object | ||
2245 | * @buf: userspace buffer pointer | ||
2246 | * @nr: size of I/O | ||
2247 | * | ||
2248 | * Perform reads for the line discipline. We are guaranteed that the | ||
2249 | * line discipline will not be closed under us but we may get multiple | ||
2250 | * parallel readers and must handle this ourselves. We may also get | ||
2251 | * a hangup. Always called in user context, may sleep. | ||
2252 | * | ||
2253 | * This code must be sure never to sleep through a hangup. | ||
2254 | */ | ||
2255 | |||
static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	/* Reading the mux control tty directly is not supported; data
	   flows through the virtual gsmtty devices instead */
	return -EOPNOTSUPP;
}
2261 | |||
2262 | /** | ||
2263 | * gsmld_write - write function for tty | ||
2264 | * @tty: tty device | ||
2265 | * @file: file object | ||
2266 | * @buf: userspace buffer pointer | ||
2267 | * @nr: size of I/O | ||
2268 | * | ||
2269 | * Called when the owner of the device wants to send a frame | ||
2270 | * itself (or some other control data). The data is transferred | ||
2271 | * as-is and must be properly framed and checksummed as appropriate | ||
2272 | * by userspace. Frames are either sent whole or not at all as this | ||
2273 | * avoids pain user side. | ||
2274 | */ | ||
2275 | |||
2276 | static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, | ||
2277 | const unsigned char *buf, size_t nr) | ||
2278 | { | ||
2279 | int space = tty_write_room(tty); | ||
2280 | if (space >= nr) | ||
2281 | return tty->ops->write(tty, buf, nr); | ||
2282 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | ||
2283 | return -ENOBUFS; | ||
2284 | } | ||
2285 | |||
2286 | /** | ||
2287 | * gsmld_poll - poll method for N_GSM0710 | ||
2288 | * @tty: terminal device | ||
2289 | * @file: file accessing it | ||
2290 | * @wait: poll table | ||
2291 | * | ||
2292 | * Called when the line discipline is asked to poll() for data or | ||
2293 | * for special events. This code is not serialized with respect to | ||
2294 | * other events save open/close. | ||
2295 | * | ||
2296 | * This code must be sure never to sleep through a hangup. | ||
2297 | * Called without the kernel lock held - fine | ||
2298 | */ | ||
2299 | |||
static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
							poll_table *wait)
{
	unsigned int mask = 0;
	struct gsm_mux *gsm = tty->disc_data;

	poll_wait(file, &tty->read_wait, wait);
	poll_wait(file, &tty->write_wait, wait);
	/* Hangup of the underlying tty ends the mux session */
	if (tty_hung_up_p(file))
		mask |= POLLHUP;
	if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
		mask |= POLLOUT | POLLWRNORM;
	/* A dead mux is also reported as hangup */
	if (gsm->dead)
		mask |= POLLHUP;
	return mask;
}
2316 | |||
/*
 * Validate a configuration from GSMIOC_SETCONF and apply it, restarting
 * the mux and/or reopening the control channel when the change needs it.
 * Returns 0 on success or a negative errno.
 */
static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
							struct gsm_config *c)
{
	int need_close = 0;
	int need_restart = 0;

	/* Stuff we don't support yet - UI or I frame transport, windowing */
	if ((c->adaption !=1 && c->adaption != 2) || c->k)
		return -EOPNOTSUPP;
	/* Check the MRU/MTU range looks sane */
	if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
		return -EINVAL;
	if (c->n2 < 3)
		return -EINVAL;
	if (c->encapsulation > 1)	/* Basic, advanced, no I */
		return -EINVAL;
	if (c->initiator > 1)
		return -EINVAL;
	if (c->i == 0 || c->i > 2)	/* UIH and UI only */
		return -EINVAL;
	/*
	 *	See what is needed for reconfiguration
	 */

	/* Timing fields */
	if (c->t1 != 0 && c->t1 != gsm->t1)
		need_restart = 1;
	if (c->t2 != 0 && c->t2 != gsm->t2)
		need_restart = 1;
	if (c->encapsulation != gsm->encoding)
		need_restart = 1;
	if (c->adaption != gsm->adaption)
		need_restart = 1;
	/* Requires care */
	if (c->initiator != gsm->initiator)
		need_close = 1;
	if (c->mru != gsm->mru)
		need_restart = 1;
	if (c->mtu != gsm->mtu)
		need_restart = 1;

	/*
	 *	Close down what is needed, restart and initiate the new
	 *	configuration
	 */

	if (need_close || need_restart) {
		/* Take down the control channel before reconfiguring */
		gsm_dlci_begin_close(gsm->dlci[0]);
		/* This will timeout if the link is down due to N2 expiring */
		wait_event_interruptible(gsm->event,
				gsm->dlci[0]->state == DLCI_CLOSED);
		if (signal_pending(current))
			return -EINTR;
	}
	if (need_restart)
		gsm_cleanup_mux(gsm);

	gsm->initiator = c->initiator;
	gsm->mru = c->mru;
	gsm->encoding = c->encapsulation;
	gsm->adaption = c->adaption;

	/* Frame type for data transfer: 1 = UIH (default), 2 = UI */
	if (c->i == 1)
		gsm->ftype = UIH;
	else if (c->i == 2)
		gsm->ftype = UI;

	/* Zero timer values mean "keep the current setting" */
	if (c->t1)
		gsm->t1 = c->t1;
	if (c->t2)
		gsm->t2 = c->t2;

	/* FIXME: We need to separate activation/deactivation from adding
	   and removing from the mux array */
	if (need_restart)
		gsm_activate_mux(gsm);
	if (gsm->initiator && need_close)
		gsm_dlci_begin_open(gsm->dlci[0]);
	return 0;
}
2397 | |||
2398 | static int gsmld_ioctl(struct tty_struct *tty, struct file *file, | ||
2399 | unsigned int cmd, unsigned long arg) | ||
2400 | { | ||
2401 | struct gsm_config c; | ||
2402 | struct gsm_mux *gsm = tty->disc_data; | ||
2403 | |||
2404 | switch (cmd) { | ||
2405 | case GSMIOC_GETCONF: | ||
2406 | memset(&c, 0, sizeof(c)); | ||
2407 | c.adaption = gsm->adaption; | ||
2408 | c.encapsulation = gsm->encoding; | ||
2409 | c.initiator = gsm->initiator; | ||
2410 | c.t1 = gsm->t1; | ||
2411 | c.t2 = gsm->t2; | ||
2412 | c.t3 = 0; /* Not supported */ | ||
2413 | c.n2 = gsm->n2; | ||
2414 | if (gsm->ftype == UIH) | ||
2415 | c.i = 1; | ||
2416 | else | ||
2417 | c.i = 2; | ||
2418 | printk("Ftype %d i %d\n", gsm->ftype, c.i); | ||
2419 | c.mru = gsm->mru; | ||
2420 | c.mtu = gsm->mtu; | ||
2421 | c.k = 0; | ||
2422 | if (copy_to_user((void *)arg, &c, sizeof(c))) | ||
2423 | return -EFAULT; | ||
2424 | return 0; | ||
2425 | case GSMIOC_SETCONF: | ||
2426 | if (copy_from_user(&c, (void *)arg, sizeof(c))) | ||
2427 | return -EFAULT; | ||
2428 | return gsmld_config(tty, gsm, &c); | ||
2429 | default: | ||
2430 | return n_tty_ioctl_helper(tty, file, cmd, arg); | ||
2431 | } | ||
2432 | } | ||
2433 | |||
2434 | |||
/* Line discipline for real tty: binds the gsmld_* operations above to
   the N_GSM0710 ldisc slot */
struct tty_ldisc_ops tty_ldisc_packet = {
	.owner		 = THIS_MODULE,
	.magic           = TTY_LDISC_MAGIC,
	.name            = "n_gsm",
	.open            = gsmld_open,
	.close           = gsmld_close,
	.flush_buffer    = gsmld_flush_buffer,
	.chars_in_buffer = gsmld_chars_in_buffer,
	.read            = gsmld_read,
	.write           = gsmld_write,
	.ioctl           = gsmld_ioctl,
	.poll            = gsmld_poll,
	.receive_buf     = gsmld_receive_buf,
	.write_wakeup    = gsmld_write_wakeup
};
2451 | |||
2452 | /* | ||
2453 | * Virtual tty side | ||
2454 | */ | ||
2455 | |||
2456 | #define TX_SIZE 512 | ||
2457 | |||
/*
 * Send a Modem Status Command (MSC) for this DLCI carrying the current
 * modem bits, optionally with a break signal. Blocks until the control
 * exchange completes; returns 0 or a negative errno.
 */
static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
{
	u8 modembits[5];
	struct gsm_control *ctrl;
	int len = 2;

	/* Break adds one extra octet to the value field */
	if (brk)
		len++;

	modembits[0] = len << 1 | EA;		/* Data bytes */
	modembits[1] = dlci->addr << 2 | 3;	/* DLCI, EA, 1 */
	modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
	if (brk)
		modembits[3] = brk << 4 | 2 | EA;	/* Valid, EA */
	ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
	if (ctrl == NULL)
		return -ENOMEM;
	return gsm_control_wait(dlci->gsm, ctrl);
}
2477 | |||
2478 | static int gsm_carrier_raised(struct tty_port *port) | ||
2479 | { | ||
2480 | struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); | ||
2481 | /* Not yet open so no carrier info */ | ||
2482 | if (dlci->state != DLCI_OPEN) | ||
2483 | return 0; | ||
2484 | if (debug & 2) | ||
2485 | return 1; | ||
2486 | return dlci->modem_rx & TIOCM_CD; | ||
2487 | } | ||
2488 | |||
2489 | static void gsm_dtr_rts(struct tty_port *port, int onoff) | ||
2490 | { | ||
2491 | struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); | ||
2492 | unsigned int modem_tx = dlci->modem_tx; | ||
2493 | if (onoff) | ||
2494 | modem_tx |= TIOCM_DTR | TIOCM_RTS; | ||
2495 | else | ||
2496 | modem_tx &= ~(TIOCM_DTR | TIOCM_RTS); | ||
2497 | if (modem_tx != dlci->modem_tx) { | ||
2498 | dlci->modem_tx = modem_tx; | ||
2499 | gsmtty_modem_update(dlci, 0); | ||
2500 | } | ||
2501 | } | ||
2502 | |||
/* tty_port callbacks for the virtual gsmtty devices */
static const struct tty_port_operations gsm_port_ops = {
	.carrier_raised = gsm_carrier_raised,
	.dtr_rts = gsm_dtr_rts,
};
2507 | |||
2508 | |||
/*
 * Open a virtual gsmtty. The minor number encodes the mux index in the
 * high bits and the DLCI address in the low 6 bits. Allocates the DLCI
 * on first open, starts the SABM handshake and blocks for carrier.
 */
static int gsmtty_open(struct tty_struct *tty, struct file *filp)
{
	struct gsm_mux *gsm;
	struct gsm_dlci *dlci;
	struct tty_port *port;
	unsigned int line = tty->index;
	unsigned int mux = line >> 6;

	line = line & 0x3F;

	if (mux >= MAX_MUX)
		return -ENXIO;
	/* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
	if (gsm_mux[mux] == NULL)
		return -EUNATCH;
	if (line == 0 || line > 61)	/* 62/63 reserved */
		return -ECHRNG;
	gsm = gsm_mux[mux];
	if (gsm->dead)
		return -EL2HLT;
	dlci = gsm->dlci[line];
	if (dlci == NULL)
		dlci = gsm_dlci_alloc(gsm, line);
	if (dlci == NULL)
		return -ENOMEM;
	port = &dlci->port;
	port->count++;
	tty->driver_data = dlci;
	tty_port_tty_set(port, tty);

	dlci->modem_rx = 0;
	/* We could in theory open and close before we wait - eg if we get
	   a DM straight back. This is ok as that will have caused a hangup */
	set_bit(ASYNCB_INITIALIZED, &port->flags);
	/* Start sending off SABM messages */
	gsm_dlci_begin_open(dlci);
	/* And wait for virtual carrier */
	return tty_port_block_til_ready(port, tty, filp);
}
2548 | |||
static void gsmtty_close(struct tty_struct *tty, struct file *filp)
{
	struct gsm_dlci *dlci = tty->driver_data;
	if (dlci == NULL)
		return;
	/* Returns 0 while other opens remain; only the final close
	   tears the channel down */
	if (tty_port_close_start(&dlci->port, tty, filp) == 0)
		return;
	gsm_dlci_begin_close(dlci);
	tty_port_close_end(&dlci->port, tty);
	tty_port_tty_set(&dlci->port, NULL);
}
2560 | |||
static void gsmtty_hangup(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Wake blocked openers/waiters, then close the channel itself */
	tty_port_hangup(&dlci->port);
	gsm_dlci_begin_close(dlci);
}
2567 | |||
2568 | static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, | ||
2569 | int len) | ||
2570 | { | ||
2571 | struct gsm_dlci *dlci = tty->driver_data; | ||
2572 | /* Stuff the bytes into the fifo queue */ | ||
2573 | int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock); | ||
2574 | /* Need to kick the channel */ | ||
2575 | gsm_dlci_data_kick(dlci); | ||
2576 | return sent; | ||
2577 | } | ||
2578 | |||
static int gsmtty_write_room(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Room is whatever is left of the fixed-size per-DLCI fifo */
	return TX_SIZE - kfifo_len(dlci->fifo);
}
2584 | |||
static int gsmtty_chars_in_buffer(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Bytes queued in the fifo but not yet handed to the mux */
	return kfifo_len(dlci->fifo);
}
2590 | |||
static void gsmtty_flush_buffer(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Caution needed: If we implement reliable transport classes
	   then the data being transmitted can't simply be junked once
	   it has first hit the stack. Until then we can just blow it
	   away */
	kfifo_reset(dlci->fifo);
	/* Need to unhook this DLCI from the transmit queue logic */
}
2601 | |||
static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
{
	/* The FIFO handles the queue so the kernel will do the right
	   thing waiting on chars_in_buffer before calling us. No work
	   to do here */
}
2608 | |||
static int gsmtty_tiocmget(struct tty_struct *tty, struct file *filp)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* Report the modem bits last received from the peer via MSC */
	return dlci->modem_rx;
}
2614 | |||
2615 | static int gsmtty_tiocmset(struct tty_struct *tty, struct file *filp, | ||
2616 | unsigned int set, unsigned int clear) | ||
2617 | { | ||
2618 | struct gsm_dlci *dlci = tty->driver_data; | ||
2619 | unsigned int modem_tx = dlci->modem_tx; | ||
2620 | |||
2621 | modem_tx &= clear; | ||
2622 | modem_tx |= set; | ||
2623 | |||
2624 | if (modem_tx != dlci->modem_tx) { | ||
2625 | dlci->modem_tx = modem_tx; | ||
2626 | return gsmtty_modem_update(dlci, 0); | ||
2627 | } | ||
2628 | return 0; | ||
2629 | } | ||
2630 | |||
2631 | |||
static int gsmtty_ioctl(struct tty_struct *tty, struct file *filp,
			unsigned int cmd, unsigned long arg)
{
	/* No channel-specific ioctls yet; -ENOIOCTLCMD tells the tty core
	   to fall back to its generic ioctl handling for this command. */
	return -ENOIOCTLCMD;
}
2637 | |||
static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
	/* For the moment it's fixed. In actual fact the speed information
	   for the virtual channel can be propagated in both directions by
	   the RPN control message. This however rapidly gets nasty as we
	   then have to remap modem signals each way according to whether
	   our virtual cable is null modem etc .. */
	tty_termios_copy_hw(tty->termios, old);
}
2647 | |||
static void gsmtty_throttle(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* NOTE(review): for CRTSCTS hardware flow control one would
	   normally drop RTS rather than DTR here - confirm TIOCM_DTR is
	   really the intended signal before changing anything. */
	if (tty->termios->c_cflag & CRTSCTS)
		dlci->modem_tx &= ~TIOCM_DTR;
	dlci->throttled = 1;
	/* Send an MSC with DTR cleared */
	gsmtty_modem_update(dlci, 0);
}
2657 | |||
static void gsmtty_unthrottle(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	/* NOTE(review): mirrors gsmtty_throttle() - CRTSCTS flow control
	   usually gates RTS, not DTR; verify TIOCM_DTR is intentional. */
	if (tty->termios->c_cflag & CRTSCTS)
		dlci->modem_tx |= TIOCM_DTR;
	dlci->throttled = 0;
	/* Send an MSC with DTR set */
	gsmtty_modem_update(dlci, 0);
}
2667 | |||
2668 | static int gsmtty_break_ctl(struct tty_struct *tty, int state) | ||
2669 | { | ||
2670 | struct gsm_dlci *dlci = tty->driver_data; | ||
2671 | int encode = 0; /* Off */ | ||
2672 | |||
2673 | if (state == -1) /* "On indefinitely" - we can't encode this | ||
2674 | properly */ | ||
2675 | encode = 0x0F; | ||
2676 | else if (state > 0) { | ||
2677 | encode = state / 200; /* mS to encoding */ | ||
2678 | if (encode > 0x0F) | ||
2679 | encode = 0x0F; /* Best effort */ | ||
2680 | } | ||
2681 | return gsmtty_modem_update(dlci, encode); | ||
2682 | } | ||
2683 | |||
/* Driver backing the virtual ttys layered on top of the mux; allocated
   and registered in gsm_init() */
static struct tty_driver *gsm_tty_driver;

/* Virtual ttys for the demux */
static const struct tty_operations gsmtty_ops = {
	.open			= gsmtty_open,
	.close			= gsmtty_close,
	.write			= gsmtty_write,
	.write_room		= gsmtty_write_room,
	.chars_in_buffer	= gsmtty_chars_in_buffer,
	.flush_buffer		= gsmtty_flush_buffer,
	.ioctl			= gsmtty_ioctl,
	.throttle		= gsmtty_throttle,
	.unthrottle		= gsmtty_unthrottle,
	.set_termios		= gsmtty_set_termios,
	.hangup			= gsmtty_hangup,
	.wait_until_sent	= gsmtty_wait_until_sent,
	.tiocmget		= gsmtty_tiocmget,
	.tiocmset		= gsmtty_tiocmset,
	.break_ctl		= gsmtty_break_ctl,
};
2704 | |||
2705 | |||
2706 | |||
2707 | static int __init gsm_init(void) | ||
2708 | { | ||
2709 | /* Fill in our line protocol discipline, and register it */ | ||
2710 | int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet); | ||
2711 | if (status != 0) { | ||
2712 | printk(KERN_ERR "n_gsm: can't register line discipline (err = %d)\n", status); | ||
2713 | return status; | ||
2714 | } | ||
2715 | |||
2716 | gsm_tty_driver = alloc_tty_driver(256); | ||
2717 | if (!gsm_tty_driver) { | ||
2718 | tty_unregister_ldisc(N_GSM0710); | ||
2719 | printk(KERN_ERR "gsm_init: tty allocation failed.\n"); | ||
2720 | return -EINVAL; | ||
2721 | } | ||
2722 | gsm_tty_driver->owner = THIS_MODULE; | ||
2723 | gsm_tty_driver->driver_name = "gsmtty"; | ||
2724 | gsm_tty_driver->name = "gsmtty"; | ||
2725 | gsm_tty_driver->major = 0; /* Dynamic */ | ||
2726 | gsm_tty_driver->minor_start = 0; | ||
2727 | gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; | ||
2728 | gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL; | ||
2729 | gsm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | ||
2730 | | TTY_DRIVER_HARDWARE_BREAK; | ||
2731 | gsm_tty_driver->init_termios = tty_std_termios; | ||
2732 | /* Fixme */ | ||
2733 | gsm_tty_driver->init_termios.c_lflag &= ~ECHO; | ||
2734 | tty_set_operations(gsm_tty_driver, &gsmtty_ops); | ||
2735 | |||
2736 | spin_lock_init(&gsm_mux_lock); | ||
2737 | |||
2738 | if (tty_register_driver(gsm_tty_driver)) { | ||
2739 | put_tty_driver(gsm_tty_driver); | ||
2740 | tty_unregister_ldisc(N_GSM0710); | ||
2741 | printk(KERN_ERR "gsm_init: tty registration failed.\n"); | ||
2742 | return -EBUSY; | ||
2743 | } | ||
2744 | printk(KERN_INFO "gsm_init: loaded as %d,%d.\n", gsm_tty_driver->major, gsm_tty_driver->minor_start); | ||
2745 | return 0; | ||
2746 | } | ||
2747 | |||
2748 | static void __exit gsm_exit(void) | ||
2749 | { | ||
2750 | int status = tty_unregister_ldisc(N_GSM0710); | ||
2751 | if (status != 0) | ||
2752 | printk(KERN_ERR "n_gsm: can't unregister line discipline (err = %d)\n", status); | ||
2753 | tty_unregister_driver(gsm_tty_driver); | ||
2754 | put_tty_driver(gsm_tty_driver); | ||
2755 | printk(KERN_INFO "gsm_init: unloaded.\n"); | ||
2756 | } | ||
2757 | |||
/* Bind module entry/exit points and advertise the line discipline number
   so the module can be auto-loaded when N_GSM0710 is requested. */
module_init(gsm_init);
module_exit(gsm_exit);


MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_GSM0710);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index c9bc896d68af..e7956acf2ad6 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c | |||
@@ -106,7 +106,6 @@ static int major; /* major number we get from the kernel */ | |||
106 | 106 | ||
107 | struct cm4000_dev { | 107 | struct cm4000_dev { |
108 | struct pcmcia_device *p_dev; | 108 | struct pcmcia_device *p_dev; |
109 | dev_node_t node; /* OS node (major,minor) */ | ||
110 | 109 | ||
111 | unsigned char atr[MAX_ATR]; | 110 | unsigned char atr[MAX_ATR]; |
112 | unsigned char rbuf[512]; | 111 | unsigned char rbuf[512]; |
@@ -884,8 +883,7 @@ static void monitor_card(unsigned long p) | |||
884 | /* slow down warning, but prompt immediately after insertion */ | 883 | /* slow down warning, but prompt immediately after insertion */ |
885 | if (dev->cwarn == 0 || dev->cwarn == 10) { | 884 | if (dev->cwarn == 0 || dev->cwarn == 10) { |
886 | set_bit(IS_BAD_CARD, &dev->flags); | 885 | set_bit(IS_BAD_CARD, &dev->flags); |
887 | printk(KERN_WARNING MODULE_NAME ": device %s: ", | 886 | dev_warn(&dev->p_dev->dev, MODULE_NAME ": "); |
888 | dev->node.dev_name); | ||
889 | if (test_bit(IS_BAD_CSUM, &dev->flags)) { | 887 | if (test_bit(IS_BAD_CSUM, &dev->flags)) { |
890 | DEBUGP(4, dev, "ATR checksum (0x%.2x, should " | 888 | DEBUGP(4, dev, "ATR checksum (0x%.2x, should " |
891 | "be zero) failed\n", dev->atr_csum); | 889 | "be zero) failed\n", dev->atr_csum); |
@@ -1026,14 +1024,16 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, | |||
1026 | 1024 | ||
1027 | xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ | 1025 | xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ |
1028 | /* last check before exit */ | 1026 | /* last check before exit */ |
1029 | if (!io_detect_cm4000(iobase, dev)) | 1027 | if (!io_detect_cm4000(iobase, dev)) { |
1030 | count = -ENODEV; | 1028 | rc = -ENODEV; |
1029 | goto release_io; | ||
1030 | } | ||
1031 | 1031 | ||
1032 | if (test_bit(IS_INVREV, &dev->flags) && count > 0) | 1032 | if (test_bit(IS_INVREV, &dev->flags) && count > 0) |
1033 | str_invert_revert(dev->rbuf, count); | 1033 | str_invert_revert(dev->rbuf, count); |
1034 | 1034 | ||
1035 | if (copy_to_user(buf, dev->rbuf, count)) | 1035 | if (copy_to_user(buf, dev->rbuf, count)) |
1036 | return -EFAULT; | 1036 | rc = -EFAULT; |
1037 | 1037 | ||
1038 | release_io: | 1038 | release_io: |
1039 | clear_bit(LOCK_IO, &dev->flags); | 1039 | clear_bit(LOCK_IO, &dev->flags); |
@@ -1779,11 +1779,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno) | |||
1779 | goto cs_release; | 1779 | goto cs_release; |
1780 | 1780 | ||
1781 | dev = link->priv; | 1781 | dev = link->priv; |
1782 | sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno); | ||
1783 | dev->node.major = major; | ||
1784 | dev->node.minor = devno; | ||
1785 | dev->node.next = NULL; | ||
1786 | link->dev_node = &dev->node; | ||
1787 | 1782 | ||
1788 | return 0; | 1783 | return 0; |
1789 | 1784 | ||
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index a6a70e476bea..c0775c844e08 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c | |||
@@ -72,7 +72,6 @@ static struct class *cmx_class; | |||
72 | 72 | ||
73 | struct reader_dev { | 73 | struct reader_dev { |
74 | struct pcmcia_device *p_dev; | 74 | struct pcmcia_device *p_dev; |
75 | dev_node_t node; | ||
76 | wait_queue_head_t devq; | 75 | wait_queue_head_t devq; |
77 | wait_queue_head_t poll_wait; | 76 | wait_queue_head_t poll_wait; |
78 | wait_queue_head_t read_wait; | 77 | wait_queue_head_t read_wait; |
@@ -568,10 +567,6 @@ static int reader_config(struct pcmcia_device *link, int devno) | |||
568 | } | 567 | } |
569 | 568 | ||
570 | dev = link->priv; | 569 | dev = link->priv; |
571 | sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno); | ||
572 | dev->node.major = major; | ||
573 | dev->node.minor = devno; | ||
574 | dev->node.next = &dev->node; | ||
575 | 570 | ||
576 | DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno, | 571 | DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno, |
577 | link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1); | 572 | link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1); |
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c index dff24dae1485..63c32e3f23ba 100644 --- a/drivers/char/pcmcia/ipwireless/main.c +++ b/drivers/char/pcmcia/ipwireless/main.c | |||
@@ -195,9 +195,6 @@ static int config_ipwireless(struct ipw_dev *ipw) | |||
195 | link->conf.Attributes = CONF_ENABLE_IRQ; | 195 | link->conf.Attributes = CONF_ENABLE_IRQ; |
196 | link->conf.IntType = INT_MEMORY_AND_IO; | 196 | link->conf.IntType = INT_MEMORY_AND_IO; |
197 | 197 | ||
198 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | ||
199 | link->irq.Handler = ipwireless_interrupt; | ||
200 | |||
201 | INIT_WORK(&ipw->work_reboot, signalled_reboot_work); | 198 | INIT_WORK(&ipw->work_reboot, signalled_reboot_work); |
202 | 199 | ||
203 | ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1, | 200 | ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1, |
@@ -205,8 +202,7 @@ static int config_ipwireless(struct ipw_dev *ipw) | |||
205 | ipw->is_v2_card, signalled_reboot_callback, | 202 | ipw->is_v2_card, signalled_reboot_callback, |
206 | ipw); | 203 | ipw); |
207 | 204 | ||
208 | ret = pcmcia_request_irq(link, &link->irq); | 205 | ret = pcmcia_request_irq(link, ipwireless_interrupt); |
209 | |||
210 | if (ret != 0) | 206 | if (ret != 0) |
211 | goto exit; | 207 | goto exit; |
212 | 208 | ||
@@ -217,7 +213,7 @@ static int config_ipwireless(struct ipw_dev *ipw) | |||
217 | (unsigned int) link->io.BasePort1, | 213 | (unsigned int) link->io.BasePort1, |
218 | (unsigned int) (link->io.BasePort1 + | 214 | (unsigned int) (link->io.BasePort1 + |
219 | link->io.NumPorts1 - 1), | 215 | link->io.NumPorts1 - 1), |
220 | (unsigned int) link->irq.AssignedIRQ); | 216 | (unsigned int) link->irq); |
221 | if (ipw->attr_memory && ipw->common_memory) | 217 | if (ipw->attr_memory && ipw->common_memory) |
222 | printk(KERN_INFO IPWIRELESS_PCCARD_NAME | 218 | printk(KERN_INFO IPWIRELESS_PCCARD_NAME |
223 | ": attr memory 0x%08lx-0x%08lx, common memory 0x%08lx-0x%08lx\n", | 219 | ": attr memory 0x%08lx-0x%08lx, common memory 0x%08lx-0x%08lx\n", |
@@ -232,8 +228,7 @@ static int config_ipwireless(struct ipw_dev *ipw) | |||
232 | if (!ipw->network) | 228 | if (!ipw->network) |
233 | goto exit; | 229 | goto exit; |
234 | 230 | ||
235 | ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network, | 231 | ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network); |
236 | ipw->nodes); | ||
237 | if (!ipw->tty) | 232 | if (!ipw->tty) |
238 | goto exit; | 233 | goto exit; |
239 | 234 | ||
@@ -248,8 +243,6 @@ static int config_ipwireless(struct ipw_dev *ipw) | |||
248 | if (ret != 0) | 243 | if (ret != 0) |
249 | goto exit; | 244 | goto exit; |
250 | 245 | ||
251 | link->dev_node = &ipw->nodes[0]; | ||
252 | |||
253 | return 0; | 246 | return 0; |
254 | 247 | ||
255 | exit: | 248 | exit: |
@@ -271,8 +264,6 @@ exit: | |||
271 | 264 | ||
272 | static void release_ipwireless(struct ipw_dev *ipw) | 265 | static void release_ipwireless(struct ipw_dev *ipw) |
273 | { | 266 | { |
274 | pcmcia_disable_device(ipw->link); | ||
275 | |||
276 | if (ipw->common_memory) { | 267 | if (ipw->common_memory) { |
277 | release_mem_region(ipw->request_common_memory.Base, | 268 | release_mem_region(ipw->request_common_memory.Base, |
278 | ipw->request_common_memory.Size); | 269 | ipw->request_common_memory.Size); |
@@ -288,7 +279,6 @@ static void release_ipwireless(struct ipw_dev *ipw) | |||
288 | if (ipw->attr_memory) | 279 | if (ipw->attr_memory) |
289 | pcmcia_release_window(ipw->link, ipw->handle_attr_memory); | 280 | pcmcia_release_window(ipw->link, ipw->handle_attr_memory); |
290 | 281 | ||
291 | /* Break the link with Card Services */ | ||
292 | pcmcia_disable_device(ipw->link); | 282 | pcmcia_disable_device(ipw->link); |
293 | } | 283 | } |
294 | 284 | ||
@@ -313,9 +303,6 @@ static int ipwireless_attach(struct pcmcia_device *link) | |||
313 | ipw->link = link; | 303 | ipw->link = link; |
314 | link->priv = ipw; | 304 | link->priv = ipw; |
315 | 305 | ||
316 | /* Link this device into our device list. */ | ||
317 | link->dev_node = &ipw->nodes[0]; | ||
318 | |||
319 | ipw->hardware = ipwireless_hardware_create(); | 306 | ipw->hardware = ipwireless_hardware_create(); |
320 | if (!ipw->hardware) { | 307 | if (!ipw->hardware) { |
321 | kfree(ipw); | 308 | kfree(ipw); |
diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h index 0e0363af9ab2..96d0ef31b172 100644 --- a/drivers/char/pcmcia/ipwireless/main.h +++ b/drivers/char/pcmcia/ipwireless/main.h | |||
@@ -54,7 +54,6 @@ struct ipw_dev { | |||
54 | void __iomem *common_memory; | 54 | void __iomem *common_memory; |
55 | win_req_t request_common_memory; | 55 | win_req_t request_common_memory; |
56 | 56 | ||
57 | dev_node_t nodes[2]; | ||
58 | /* Reference to attribute memory, containing CIS data */ | 57 | /* Reference to attribute memory, containing CIS data */ |
59 | void *attribute_memory; | 58 | void *attribute_memory; |
60 | 59 | ||
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c index 2bb7874a6899..1a2c2c3b068f 100644 --- a/drivers/char/pcmcia/ipwireless/tty.c +++ b/drivers/char/pcmcia/ipwireless/tty.c | |||
@@ -487,7 +487,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file, | |||
487 | return tty_mode_ioctl(linux_tty, file, cmd , arg); | 487 | return tty_mode_ioctl(linux_tty, file, cmd , arg); |
488 | } | 488 | } |
489 | 489 | ||
490 | static int add_tty(dev_node_t *nodesp, int j, | 490 | static int add_tty(int j, |
491 | struct ipw_hardware *hardware, | 491 | struct ipw_hardware *hardware, |
492 | struct ipw_network *network, int channel_idx, | 492 | struct ipw_network *network, int channel_idx, |
493 | int secondary_channel_idx, int tty_type) | 493 | int secondary_channel_idx, int tty_type) |
@@ -510,19 +510,13 @@ static int add_tty(dev_node_t *nodesp, int j, | |||
510 | ipwireless_associate_network_tty(network, | 510 | ipwireless_associate_network_tty(network, |
511 | secondary_channel_idx, | 511 | secondary_channel_idx, |
512 | ttys[j]); | 512 | ttys[j]); |
513 | if (nodesp != NULL) { | ||
514 | sprintf(nodesp->dev_name, "ttyIPWp%d", j); | ||
515 | nodesp->major = ipw_tty_driver->major; | ||
516 | nodesp->minor = j + ipw_tty_driver->minor_start; | ||
517 | } | ||
518 | if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j]) | 513 | if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j]) |
519 | report_registering(ttys[j]); | 514 | report_registering(ttys[j]); |
520 | return 0; | 515 | return 0; |
521 | } | 516 | } |
522 | 517 | ||
523 | struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware, | 518 | struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware, |
524 | struct ipw_network *network, | 519 | struct ipw_network *network) |
525 | dev_node_t *nodes) | ||
526 | { | 520 | { |
527 | int i, j; | 521 | int i, j; |
528 | 522 | ||
@@ -539,26 +533,23 @@ struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware, | |||
539 | if (allfree) { | 533 | if (allfree) { |
540 | j = i; | 534 | j = i; |
541 | 535 | ||
542 | if (add_tty(&nodes[0], j, hardware, network, | 536 | if (add_tty(j, hardware, network, |
543 | IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS, | 537 | IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS, |
544 | TTYTYPE_MODEM)) | 538 | TTYTYPE_MODEM)) |
545 | return NULL; | 539 | return NULL; |
546 | 540 | ||
547 | j += IPWIRELESS_PCMCIA_MINOR_RANGE; | 541 | j += IPWIRELESS_PCMCIA_MINOR_RANGE; |
548 | if (add_tty(&nodes[1], j, hardware, network, | 542 | if (add_tty(j, hardware, network, |
549 | IPW_CHANNEL_DIALLER, -1, | 543 | IPW_CHANNEL_DIALLER, -1, |
550 | TTYTYPE_MONITOR)) | 544 | TTYTYPE_MONITOR)) |
551 | return NULL; | 545 | return NULL; |
552 | 546 | ||
553 | j += IPWIRELESS_PCMCIA_MINOR_RANGE; | 547 | j += IPWIRELESS_PCMCIA_MINOR_RANGE; |
554 | if (add_tty(NULL, j, hardware, network, | 548 | if (add_tty(j, hardware, network, |
555 | IPW_CHANNEL_RAS, -1, | 549 | IPW_CHANNEL_RAS, -1, |
556 | TTYTYPE_RAS_RAW)) | 550 | TTYTYPE_RAS_RAW)) |
557 | return NULL; | 551 | return NULL; |
558 | 552 | ||
559 | nodes[0].next = &nodes[1]; | ||
560 | nodes[1].next = NULL; | ||
561 | |||
562 | return ttys[i]; | 553 | return ttys[i]; |
563 | } | 554 | } |
564 | } | 555 | } |
diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h index b0deb9168b6b..4da6c201f727 100644 --- a/drivers/char/pcmcia/ipwireless/tty.h +++ b/drivers/char/pcmcia/ipwireless/tty.h | |||
@@ -34,8 +34,7 @@ int ipwireless_tty_init(void); | |||
34 | void ipwireless_tty_release(void); | 34 | void ipwireless_tty_release(void); |
35 | 35 | ||
36 | struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw, | 36 | struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw, |
37 | struct ipw_network *net, | 37 | struct ipw_network *net); |
38 | dev_node_t *nodes); | ||
39 | void ipwireless_tty_free(struct ipw_tty *tty); | 38 | void ipwireless_tty_free(struct ipw_tty *tty); |
40 | void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, | 39 | void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, |
41 | unsigned int length); | 40 | unsigned int length); |
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index c31a0d913d37..308903ec8bf8 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
@@ -220,7 +220,6 @@ typedef struct _mgslpc_info { | |||
220 | 220 | ||
221 | /* PCMCIA support */ | 221 | /* PCMCIA support */ |
222 | struct pcmcia_device *p_dev; | 222 | struct pcmcia_device *p_dev; |
223 | dev_node_t node; | ||
224 | int stop; | 223 | int stop; |
225 | 224 | ||
226 | /* SPPP/Cisco HDLC device parts */ | 225 | /* SPPP/Cisco HDLC device parts */ |
@@ -552,10 +551,6 @@ static int mgslpc_probe(struct pcmcia_device *link) | |||
552 | 551 | ||
553 | /* Initialize the struct pcmcia_device structure */ | 552 | /* Initialize the struct pcmcia_device structure */ |
554 | 553 | ||
555 | /* Interrupt setup */ | ||
556 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | ||
557 | link->irq.Handler = NULL; | ||
558 | |||
559 | link->conf.Attributes = 0; | 554 | link->conf.Attributes = 0; |
560 | link->conf.IntType = INT_MEMORY_AND_IO; | 555 | link->conf.IntType = INT_MEMORY_AND_IO; |
561 | 556 | ||
@@ -608,9 +603,7 @@ static int mgslpc_config(struct pcmcia_device *link) | |||
608 | link->conf.ConfigIndex = 8; | 603 | link->conf.ConfigIndex = 8; |
609 | link->conf.Present = PRESENT_OPTION; | 604 | link->conf.Present = PRESENT_OPTION; |
610 | 605 | ||
611 | link->irq.Handler = mgslpc_isr; | 606 | ret = pcmcia_request_irq(link, mgslpc_isr); |
612 | |||
613 | ret = pcmcia_request_irq(link, &link->irq); | ||
614 | if (ret) | 607 | if (ret) |
615 | goto failed; | 608 | goto failed; |
616 | ret = pcmcia_request_configuration(link, &link->conf); | 609 | ret = pcmcia_request_configuration(link, &link->conf); |
@@ -618,17 +611,12 @@ static int mgslpc_config(struct pcmcia_device *link) | |||
618 | goto failed; | 611 | goto failed; |
619 | 612 | ||
620 | info->io_base = link->io.BasePort1; | 613 | info->io_base = link->io.BasePort1; |
621 | info->irq_level = link->irq.AssignedIRQ; | 614 | info->irq_level = link->irq; |
622 | |||
623 | /* add to linked list of devices */ | ||
624 | sprintf(info->node.dev_name, "mgslpc0"); | ||
625 | info->node.major = info->node.minor = 0; | ||
626 | link->dev_node = &info->node; | ||
627 | 615 | ||
628 | printk(KERN_INFO "%s: index 0x%02x:", | 616 | dev_info(&link->dev, "index 0x%02x:", |
629 | info->node.dev_name, link->conf.ConfigIndex); | 617 | link->conf.ConfigIndex); |
630 | if (link->conf.Attributes & CONF_ENABLE_IRQ) | 618 | if (link->conf.Attributes & CONF_ENABLE_IRQ) |
631 | printk(", irq %d", link->irq.AssignedIRQ); | 619 | printk(", irq %d", link->irq); |
632 | if (link->io.NumPorts1) | 620 | if (link->io.NumPorts1) |
633 | printk(", io 0x%04x-0x%04x", link->io.BasePort1, | 621 | printk(", io 0x%04x-0x%04x", link->io.BasePort1, |
634 | link->io.BasePort1+link->io.NumPorts1-1); | 622 | link->io.BasePort1+link->io.NumPorts1-1); |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 2fd3d39995d5..8d85587b6d4f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -257,6 +257,7 @@ | |||
257 | #define INPUT_POOL_WORDS 128 | 257 | #define INPUT_POOL_WORDS 128 |
258 | #define OUTPUT_POOL_WORDS 32 | 258 | #define OUTPUT_POOL_WORDS 32 |
259 | #define SEC_XFER_SIZE 512 | 259 | #define SEC_XFER_SIZE 512 |
260 | #define EXTRACT_SIZE 10 | ||
260 | 261 | ||
261 | /* | 262 | /* |
262 | * The minimum number of bits of entropy before we wake up a read on | 263 | * The minimum number of bits of entropy before we wake up a read on |
@@ -414,7 +415,7 @@ struct entropy_store { | |||
414 | unsigned add_ptr; | 415 | unsigned add_ptr; |
415 | int entropy_count; | 416 | int entropy_count; |
416 | int input_rotate; | 417 | int input_rotate; |
417 | __u8 *last_data; | 418 | __u8 last_data[EXTRACT_SIZE]; |
418 | }; | 419 | }; |
419 | 420 | ||
420 | static __u32 input_pool_data[INPUT_POOL_WORDS]; | 421 | static __u32 input_pool_data[INPUT_POOL_WORDS]; |
@@ -714,8 +715,6 @@ void add_disk_randomness(struct gendisk *disk) | |||
714 | } | 715 | } |
715 | #endif | 716 | #endif |
716 | 717 | ||
717 | #define EXTRACT_SIZE 10 | ||
718 | |||
719 | /********************************************************************* | 718 | /********************************************************************* |
720 | * | 719 | * |
721 | * Entropy extraction routines | 720 | * Entropy extraction routines |
@@ -862,7 +861,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, | |||
862 | while (nbytes) { | 861 | while (nbytes) { |
863 | extract_buf(r, tmp); | 862 | extract_buf(r, tmp); |
864 | 863 | ||
865 | if (r->last_data) { | 864 | if (fips_enabled) { |
866 | spin_lock_irqsave(&r->lock, flags); | 865 | spin_lock_irqsave(&r->lock, flags); |
867 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) | 866 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) |
868 | panic("Hardware RNG duplicated output!\n"); | 867 | panic("Hardware RNG duplicated output!\n"); |
@@ -951,9 +950,6 @@ static void init_std_data(struct entropy_store *r) | |||
951 | now = ktime_get_real(); | 950 | now = ktime_get_real(); |
952 | mix_pool_bytes(r, &now, sizeof(now)); | 951 | mix_pool_bytes(r, &now, sizeof(now)); |
953 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); | 952 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); |
954 | /* Enable continuous test in fips mode */ | ||
955 | if (fips_enabled) | ||
956 | r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL); | ||
957 | } | 953 | } |
958 | 954 | ||
959 | static int rand_initialize(void) | 955 | static int rand_initialize(void) |
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c index 0a8d1e56c993..b02332a5412f 100644 --- a/drivers/char/riscom8.c +++ b/drivers/char/riscom8.c | |||
@@ -909,6 +909,7 @@ static int rc_open(struct tty_struct *tty, struct file *filp) | |||
909 | if (error) | 909 | if (error) |
910 | return error; | 910 | return error; |
911 | 911 | ||
912 | tty->driver_data = port; | ||
912 | return tty_port_open(&port->port, tty, filp); | 913 | return tty_port_open(&port->port, tty, filp); |
913 | } | 914 | } |
914 | 915 | ||
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index 8dfd24721a82..ecbe479c7d68 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c | |||
@@ -176,23 +176,6 @@ static void config_setup(struct cyclades_port *); | |||
176 | static void show_status(int); | 176 | static void show_status(int); |
177 | #endif | 177 | #endif |
178 | 178 | ||
179 | #ifdef CONFIG_REMOTE_DEBUG | ||
180 | static void debug_setup(void); | ||
181 | void queueDebugChar(int c); | ||
182 | int getDebugChar(void); | ||
183 | |||
184 | #define DEBUG_PORT 1 | ||
185 | #define DEBUG_LEN 256 | ||
186 | |||
187 | typedef struct { | ||
188 | int in; | ||
189 | int out; | ||
190 | unsigned char buf[DEBUG_LEN]; | ||
191 | } debugq; | ||
192 | |||
193 | debugq debugiq; | ||
194 | #endif | ||
195 | |||
196 | /* | 179 | /* |
197 | * I have my own version of udelay(), as it is needed when initialising | 180 | * I have my own version of udelay(), as it is needed when initialising |
198 | * the chip, before the delay loop has been calibrated. Should probably | 181 | * the chip, before the delay loop has been calibrated. Should probably |
@@ -515,11 +498,6 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id) | |||
515 | /* determine the channel and change to that context */ | 498 | /* determine the channel and change to that context */ |
516 | channel = (u_short) (base_addr[CyLICR] >> 2); | 499 | channel = (u_short) (base_addr[CyLICR] >> 2); |
517 | 500 | ||
518 | #ifdef CONFIG_REMOTE_DEBUG | ||
519 | if (channel == DEBUG_PORT) { | ||
520 | panic("TxInt on debug port!!!"); | ||
521 | } | ||
522 | #endif | ||
523 | /* validate the port number (as configured and open) */ | 501 | /* validate the port number (as configured and open) */ |
524 | if ((channel < 0) || (NR_PORTS <= channel)) { | 502 | if ((channel < 0) || (NR_PORTS <= channel)) { |
525 | base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); | 503 | base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); |
@@ -627,7 +605,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id) | |||
627 | char data; | 605 | char data; |
628 | int char_count; | 606 | int char_count; |
629 | int save_cnt; | 607 | int save_cnt; |
630 | int len; | ||
631 | 608 | ||
632 | /* determine the channel and change to that context */ | 609 | /* determine the channel and change to that context */ |
633 | channel = (u_short) (base_addr[CyLICR] >> 2); | 610 | channel = (u_short) (base_addr[CyLICR] >> 2); |
@@ -635,14 +612,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id) | |||
635 | info->last_active = jiffies; | 612 | info->last_active = jiffies; |
636 | save_cnt = char_count = base_addr[CyRFOC]; | 613 | save_cnt = char_count = base_addr[CyRFOC]; |
637 | 614 | ||
638 | #ifdef CONFIG_REMOTE_DEBUG | ||
639 | if (channel == DEBUG_PORT) { | ||
640 | while (char_count--) { | ||
641 | data = base_addr[CyRDR]; | ||
642 | queueDebugChar(data); | ||
643 | } | ||
644 | } else | ||
645 | #endif | ||
646 | /* if there is nowhere to put the data, discard it */ | 615 | /* if there is nowhere to put the data, discard it */ |
647 | if (info->tty == 0) { | 616 | if (info->tty == 0) { |
648 | while (char_count--) { | 617 | while (char_count--) { |
@@ -1528,7 +1497,6 @@ static int | |||
1528 | cy_ioctl(struct tty_struct *tty, struct file *file, | 1497 | cy_ioctl(struct tty_struct *tty, struct file *file, |
1529 | unsigned int cmd, unsigned long arg) | 1498 | unsigned int cmd, unsigned long arg) |
1530 | { | 1499 | { |
1531 | unsigned long val; | ||
1532 | struct cyclades_port *info = tty->driver_data; | 1500 | struct cyclades_port *info = tty->driver_data; |
1533 | int ret_val = 0; | 1501 | int ret_val = 0; |
1534 | void __user *argp = (void __user *)arg; | 1502 | void __user *argp = (void __user *)arg; |
@@ -2197,9 +2165,7 @@ static int __init serial167_init(void) | |||
2197 | port_num++; | 2165 | port_num++; |
2198 | info++; | 2166 | info++; |
2199 | } | 2167 | } |
2200 | #ifdef CONFIG_REMOTE_DEBUG | 2168 | |
2201 | debug_setup(); | ||
2202 | #endif | ||
2203 | ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0, | 2169 | ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0, |
2204 | "cd2401_errors", cd2401_rxerr_interrupt); | 2170 | "cd2401_errors", cd2401_rxerr_interrupt); |
2205 | if (ret) { | 2171 | if (ret) { |
@@ -2520,193 +2486,4 @@ static int __init serial167_console_init(void) | |||
2520 | 2486 | ||
2521 | console_initcall(serial167_console_init); | 2487 | console_initcall(serial167_console_init); |
2522 | 2488 | ||
2523 | #ifdef CONFIG_REMOTE_DEBUG | ||
2524 | void putDebugChar(int c) | ||
2525 | { | ||
2526 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | ||
2527 | unsigned long flags; | ||
2528 | volatile u_char sink; | ||
2529 | u_char ier; | ||
2530 | int port; | ||
2531 | |||
2532 | local_irq_save(flags); | ||
2533 | |||
2534 | /* Ensure transmitter is enabled! */ | ||
2535 | |||
2536 | port = DEBUG_PORT; | ||
2537 | base_addr[CyCAR] = (u_char) port; | ||
2538 | while (base_addr[CyCCR]) | ||
2539 | ; | ||
2540 | base_addr[CyCCR] = CyENB_XMTR; | ||
2541 | |||
2542 | ier = base_addr[CyIER]; | ||
2543 | base_addr[CyIER] = CyTxMpty; | ||
2544 | |||
2545 | while (1) { | ||
2546 | if (pcc2chip[PccSCCTICR] & 0x20) { | ||
2547 | /* We have a Tx int. Acknowledge it */ | ||
2548 | sink = pcc2chip[PccTPIACKR]; | ||
2549 | if ((base_addr[CyLICR] >> 2) == port) { | ||
2550 | base_addr[CyTDR] = c; | ||
2551 | base_addr[CyTEOIR] = 0; | ||
2552 | break; | ||
2553 | } else | ||
2554 | base_addr[CyTEOIR] = CyNOTRANS; | ||
2555 | } | ||
2556 | } | ||
2557 | |||
2558 | base_addr[CyIER] = ier; | ||
2559 | |||
2560 | local_irq_restore(flags); | ||
2561 | } | ||
2562 | |||
2563 | int getDebugChar() | ||
2564 | { | ||
2565 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | ||
2566 | unsigned long flags; | ||
2567 | volatile u_char sink; | ||
2568 | u_char ier; | ||
2569 | int port; | ||
2570 | int i, c; | ||
2571 | |||
2572 | i = debugiq.out; | ||
2573 | if (i != debugiq.in) { | ||
2574 | c = debugiq.buf[i]; | ||
2575 | if (++i == DEBUG_LEN) | ||
2576 | i = 0; | ||
2577 | debugiq.out = i; | ||
2578 | return c; | ||
2579 | } | ||
2580 | /* OK, nothing in queue, wait in poll loop */ | ||
2581 | |||
2582 | local_irq_save(flags); | ||
2583 | |||
2584 | /* Ensure receiver is enabled! */ | ||
2585 | |||
2586 | port = DEBUG_PORT; | ||
2587 | base_addr[CyCAR] = (u_char) port; | ||
2588 | #if 0 | ||
2589 | while (base_addr[CyCCR]) | ||
2590 | ; | ||
2591 | base_addr[CyCCR] = CyENB_RCVR; | ||
2592 | #endif | ||
2593 | ier = base_addr[CyIER]; | ||
2594 | base_addr[CyIER] = CyRxData; | ||
2595 | |||
2596 | while (1) { | ||
2597 | if (pcc2chip[PccSCCRICR] & 0x20) { | ||
2598 | /* We have a Rx int. Acknowledge it */ | ||
2599 | sink = pcc2chip[PccRPIACKR]; | ||
2600 | if ((base_addr[CyLICR] >> 2) == port) { | ||
2601 | int cnt = base_addr[CyRFOC]; | ||
2602 | while (cnt-- > 0) { | ||
2603 | c = base_addr[CyRDR]; | ||
2604 | if (c == 0) | ||
2605 | printk | ||
2606 | ("!! debug char is null (cnt=%d) !!", | ||
2607 | cnt); | ||
2608 | else | ||
2609 | queueDebugChar(c); | ||
2610 | } | ||
2611 | base_addr[CyREOIR] = 0; | ||
2612 | i = debugiq.out; | ||
2613 | if (i == debugiq.in) | ||
2614 | panic("Debug input queue empty!"); | ||
2615 | c = debugiq.buf[i]; | ||
2616 | if (++i == DEBUG_LEN) | ||
2617 | i = 0; | ||
2618 | debugiq.out = i; | ||
2619 | break; | ||
2620 | } else | ||
2621 | base_addr[CyREOIR] = CyNOTRANS; | ||
2622 | } | ||
2623 | } | ||
2624 | |||
2625 | base_addr[CyIER] = ier; | ||
2626 | |||
2627 | local_irq_restore(flags); | ||
2628 | |||
2629 | return (c); | ||
2630 | } | ||
2631 | |||
2632 | void queueDebugChar(int c) | ||
2633 | { | ||
2634 | int i; | ||
2635 | |||
2636 | i = debugiq.in; | ||
2637 | debugiq.buf[i] = c; | ||
2638 | if (++i == DEBUG_LEN) | ||
2639 | i = 0; | ||
2640 | if (i != debugiq.out) | ||
2641 | debugiq.in = i; | ||
2642 | } | ||
2643 | |||
2644 | static void debug_setup() | ||
2645 | { | ||
2646 | unsigned long flags; | ||
2647 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | ||
2648 | int i, cflag; | ||
2649 | |||
2650 | cflag = B19200; | ||
2651 | |||
2652 | local_irq_save(flags); | ||
2653 | |||
2654 | for (i = 0; i < 4; i++) { | ||
2655 | base_addr[CyCAR] = i; | ||
2656 | base_addr[CyLICR] = i << 2; | ||
2657 | } | ||
2658 | |||
2659 | debugiq.in = debugiq.out = 0; | ||
2660 | |||
2661 | base_addr[CyCAR] = DEBUG_PORT; | ||
2662 | |||
2663 | /* baud rate */ | ||
2664 | i = cflag & CBAUD; | ||
2665 | |||
2666 | base_addr[CyIER] = 0; | ||
2667 | |||
2668 | base_addr[CyCMR] = CyASYNC; | ||
2669 | base_addr[CyLICR] = DEBUG_PORT << 2; | ||
2670 | base_addr[CyLIVR] = 0x5c; | ||
2671 | |||
2672 | /* tx and rx baud rate */ | ||
2673 | |||
2674 | base_addr[CyTCOR] = baud_co[i]; | ||
2675 | base_addr[CyTBPR] = baud_bpr[i]; | ||
2676 | base_addr[CyRCOR] = baud_co[i] >> 5; | ||
2677 | base_addr[CyRBPR] = baud_bpr[i]; | ||
2678 | |||
2679 | /* set line characteristics according configuration */ | ||
2680 | |||
2681 | base_addr[CySCHR1] = 0; | ||
2682 | base_addr[CySCHR2] = 0; | ||
2683 | base_addr[CySCRL] = 0; | ||
2684 | base_addr[CySCRH] = 0; | ||
2685 | base_addr[CyCOR1] = Cy_8_BITS | CyPARITY_NONE; | ||
2686 | base_addr[CyCOR2] = 0; | ||
2687 | base_addr[CyCOR3] = Cy_1_STOP; | ||
2688 | base_addr[CyCOR4] = baud_cor4[i]; | ||
2689 | base_addr[CyCOR5] = 0; | ||
2690 | base_addr[CyCOR6] = 0; | ||
2691 | base_addr[CyCOR7] = 0; | ||
2692 | |||
2693 | write_cy_cmd(base_addr, CyINIT_CHAN); | ||
2694 | write_cy_cmd(base_addr, CyENB_RCVR); | ||
2695 | |||
2696 | base_addr[CyCAR] = DEBUG_PORT; /* !!! Is this needed? */ | ||
2697 | |||
2698 | base_addr[CyRTPRL] = 2; | ||
2699 | base_addr[CyRTPRH] = 0; | ||
2700 | |||
2701 | base_addr[CyMSVR1] = CyRTS; | ||
2702 | base_addr[CyMSVR2] = CyDTR; | ||
2703 | |||
2704 | base_addr[CyIER] = CyRxData; | ||
2705 | |||
2706 | local_irq_restore(flags); | ||
2707 | |||
2708 | } /* debug_setup */ | ||
2709 | |||
2710 | #endif | ||
2711 | |||
2712 | MODULE_LICENSE("GPL"); | 2489 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index 0e511d61f544..6049fd731924 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c | |||
@@ -724,7 +724,6 @@ static int stl_open(struct tty_struct *tty, struct file *filp) | |||
724 | { | 724 | { |
725 | struct stlport *portp; | 725 | struct stlport *portp; |
726 | struct stlbrd *brdp; | 726 | struct stlbrd *brdp; |
727 | struct tty_port *port; | ||
728 | unsigned int minordev, brdnr, panelnr; | 727 | unsigned int minordev, brdnr, panelnr; |
729 | int portnr; | 728 | int portnr; |
730 | 729 | ||
@@ -754,7 +753,8 @@ static int stl_open(struct tty_struct *tty, struct file *filp) | |||
754 | portp = brdp->panels[panelnr]->ports[portnr]; | 753 | portp = brdp->panels[panelnr]->ports[portnr]; |
755 | if (portp == NULL) | 754 | if (portp == NULL) |
756 | return -ENODEV; | 755 | return -ENODEV; |
757 | port = &portp->port; | 756 | |
757 | tty->driver_data = portp; | ||
758 | return tty_port_open(&portp->port, tty, filp); | 758 | return tty_port_open(&portp->port, tty, filp); |
759 | 759 | ||
760 | } | 760 | } |
@@ -841,7 +841,8 @@ static void stl_close(struct tty_struct *tty, struct file *filp) | |||
841 | pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp); | 841 | pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp); |
842 | 842 | ||
843 | portp = tty->driver_data; | 843 | portp = tty->driver_data; |
844 | BUG_ON(portp == NULL); | 844 | if(portp == NULL) |
845 | return; | ||
845 | tty_port_close(&portp->port, tty, filp); | 846 | tty_port_close(&portp->port, tty, filp); |
846 | } | 847 | } |
847 | 848 | ||
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 59de2525d303..5d15630a5830 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* -*- linux-c -*- | 1 | /* |
2 | * | ||
3 | * $Id: sysrq.c,v 1.15 1998/08/23 14:56:41 mj Exp $ | ||
4 | * | ||
5 | * Linux Magic System Request Key Hacks | 2 | * Linux Magic System Request Key Hacks |
6 | * | 3 | * |
7 | * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | 4 | * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> |
@@ -10,8 +7,13 @@ | |||
10 | * (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com> | 7 | * (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com> |
11 | * overhauled to use key registration | 8 | * overhauled to use key registration |
12 | * based upon discusions in irc://irc.openprojects.net/#kernelnewbies | 9 | * based upon discusions in irc://irc.openprojects.net/#kernelnewbies |
10 | * | ||
11 | * Copyright (c) 2010 Dmitry Torokhov | ||
12 | * Input handler conversion | ||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
16 | |||
15 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
16 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
17 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
@@ -39,33 +41,34 @@ | |||
39 | #include <linux/hrtimer.h> | 41 | #include <linux/hrtimer.h> |
40 | #include <linux/oom.h> | 42 | #include <linux/oom.h> |
41 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
44 | #include <linux/input.h> | ||
42 | 45 | ||
43 | #include <asm/ptrace.h> | 46 | #include <asm/ptrace.h> |
44 | #include <asm/irq_regs.h> | 47 | #include <asm/irq_regs.h> |
45 | 48 | ||
46 | /* Whether we react on sysrq keys or just ignore them */ | 49 | /* Whether we react on sysrq keys or just ignore them */ |
47 | int __read_mostly __sysrq_enabled = 1; | 50 | static int __read_mostly sysrq_enabled = 1; |
48 | 51 | static bool __read_mostly sysrq_always_enabled; | |
49 | static int __read_mostly sysrq_always_enabled; | ||
50 | 52 | ||
51 | int sysrq_on(void) | 53 | static bool sysrq_on(void) |
52 | { | 54 | { |
53 | return __sysrq_enabled || sysrq_always_enabled; | 55 | return sysrq_enabled || sysrq_always_enabled; |
54 | } | 56 | } |
55 | 57 | ||
56 | /* | 58 | /* |
57 | * A value of 1 means 'all', other nonzero values are an op mask: | 59 | * A value of 1 means 'all', other nonzero values are an op mask: |
58 | */ | 60 | */ |
59 | static inline int sysrq_on_mask(int mask) | 61 | static bool sysrq_on_mask(int mask) |
60 | { | 62 | { |
61 | return sysrq_always_enabled || __sysrq_enabled == 1 || | 63 | return sysrq_always_enabled || |
62 | (__sysrq_enabled & mask); | 64 | sysrq_enabled == 1 || |
65 | (sysrq_enabled & mask); | ||
63 | } | 66 | } |
64 | 67 | ||
65 | static int __init sysrq_always_enabled_setup(char *str) | 68 | static int __init sysrq_always_enabled_setup(char *str) |
66 | { | 69 | { |
67 | sysrq_always_enabled = 1; | 70 | sysrq_always_enabled = true; |
68 | printk(KERN_INFO "debug: sysrq always enabled.\n"); | 71 | pr_info("sysrq always enabled.\n"); |
69 | 72 | ||
70 | return 1; | 73 | return 1; |
71 | } | 74 | } |
@@ -76,6 +79,7 @@ __setup("sysrq_always_enabled", sysrq_always_enabled_setup); | |||
76 | static void sysrq_handle_loglevel(int key, struct tty_struct *tty) | 79 | static void sysrq_handle_loglevel(int key, struct tty_struct *tty) |
77 | { | 80 | { |
78 | int i; | 81 | int i; |
82 | |||
79 | i = key - '0'; | 83 | i = key - '0'; |
80 | console_loglevel = 7; | 84 | console_loglevel = 7; |
81 | printk("Loglevel set to %d\n", i); | 85 | printk("Loglevel set to %d\n", i); |
@@ -101,7 +105,7 @@ static struct sysrq_key_op sysrq_SAK_op = { | |||
101 | .enable_mask = SYSRQ_ENABLE_KEYBOARD, | 105 | .enable_mask = SYSRQ_ENABLE_KEYBOARD, |
102 | }; | 106 | }; |
103 | #else | 107 | #else |
104 | #define sysrq_SAK_op (*(struct sysrq_key_op *)0) | 108 | #define sysrq_SAK_op (*(struct sysrq_key_op *)NULL) |
105 | #endif | 109 | #endif |
106 | 110 | ||
107 | #ifdef CONFIG_VT | 111 | #ifdef CONFIG_VT |
@@ -119,7 +123,7 @@ static struct sysrq_key_op sysrq_unraw_op = { | |||
119 | .enable_mask = SYSRQ_ENABLE_KEYBOARD, | 123 | .enable_mask = SYSRQ_ENABLE_KEYBOARD, |
120 | }; | 124 | }; |
121 | #else | 125 | #else |
122 | #define sysrq_unraw_op (*(struct sysrq_key_op *)0) | 126 | #define sysrq_unraw_op (*(struct sysrq_key_op *)NULL) |
123 | #endif /* CONFIG_VT */ | 127 | #endif /* CONFIG_VT */ |
124 | 128 | ||
125 | static void sysrq_handle_crash(int key, struct tty_struct *tty) | 129 | static void sysrq_handle_crash(int key, struct tty_struct *tty) |
@@ -195,7 +199,7 @@ static struct sysrq_key_op sysrq_showlocks_op = { | |||
195 | .action_msg = "Show Locks Held", | 199 | .action_msg = "Show Locks Held", |
196 | }; | 200 | }; |
197 | #else | 201 | #else |
198 | #define sysrq_showlocks_op (*(struct sysrq_key_op *)0) | 202 | #define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL) |
199 | #endif | 203 | #endif |
200 | 204 | ||
201 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
@@ -289,7 +293,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = { | |||
289 | 293 | ||
290 | static void sysrq_ftrace_dump(int key, struct tty_struct *tty) | 294 | static void sysrq_ftrace_dump(int key, struct tty_struct *tty) |
291 | { | 295 | { |
292 | ftrace_dump(); | 296 | ftrace_dump(DUMP_ALL); |
293 | } | 297 | } |
294 | static struct sysrq_key_op sysrq_ftrace_dump_op = { | 298 | static struct sysrq_key_op sysrq_ftrace_dump_op = { |
295 | .handler = sysrq_ftrace_dump, | 299 | .handler = sysrq_ftrace_dump, |
@@ -298,7 +302,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = { | |||
298 | .enable_mask = SYSRQ_ENABLE_DUMP, | 302 | .enable_mask = SYSRQ_ENABLE_DUMP, |
299 | }; | 303 | }; |
300 | #else | 304 | #else |
301 | #define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0) | 305 | #define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL) |
302 | #endif | 306 | #endif |
303 | 307 | ||
304 | static void sysrq_handle_showmem(int key, struct tty_struct *tty) | 308 | static void sysrq_handle_showmem(int key, struct tty_struct *tty) |
@@ -477,6 +481,7 @@ struct sysrq_key_op *__sysrq_get_key_op(int key) | |||
477 | i = sysrq_key_table_key2index(key); | 481 | i = sysrq_key_table_key2index(key); |
478 | if (i != -1) | 482 | if (i != -1) |
479 | op_p = sysrq_key_table[i]; | 483 | op_p = sysrq_key_table[i]; |
484 | |||
480 | return op_p; | 485 | return op_p; |
481 | } | 486 | } |
482 | 487 | ||
@@ -488,11 +493,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p) | |||
488 | sysrq_key_table[i] = op_p; | 493 | sysrq_key_table[i] = op_p; |
489 | } | 494 | } |
490 | 495 | ||
491 | /* | 496 | static void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) |
492 | * This is the non-locking version of handle_sysrq. It must/can only be called | ||
493 | * by sysrq key handlers, as they are inside of the lock | ||
494 | */ | ||
495 | void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) | ||
496 | { | 497 | { |
497 | struct sysrq_key_op *op_p; | 498 | struct sysrq_key_op *op_p; |
498 | int orig_log_level; | 499 | int orig_log_level; |
@@ -544,10 +545,6 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) | |||
544 | spin_unlock_irqrestore(&sysrq_key_table_lock, flags); | 545 | spin_unlock_irqrestore(&sysrq_key_table_lock, flags); |
545 | } | 546 | } |
546 | 547 | ||
547 | /* | ||
548 | * This function is called by the keyboard handler when SysRq is pressed | ||
549 | * and any other keycode arrives. | ||
550 | */ | ||
551 | void handle_sysrq(int key, struct tty_struct *tty) | 548 | void handle_sysrq(int key, struct tty_struct *tty) |
552 | { | 549 | { |
553 | if (sysrq_on()) | 550 | if (sysrq_on()) |
@@ -555,10 +552,177 @@ void handle_sysrq(int key, struct tty_struct *tty) | |||
555 | } | 552 | } |
556 | EXPORT_SYMBOL(handle_sysrq); | 553 | EXPORT_SYMBOL(handle_sysrq); |
557 | 554 | ||
555 | #ifdef CONFIG_INPUT | ||
556 | |||
557 | /* Simple translation table for the SysRq keys */ | ||
558 | static const unsigned char sysrq_xlate[KEY_MAX + 1] = | ||
559 | "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */ | ||
560 | "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */ | ||
561 | "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */ | ||
562 | "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */ | ||
563 | "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */ | ||
564 | "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ | ||
565 | "\r\000/"; /* 0x60 - 0x6f */ | ||
566 | |||
567 | static bool sysrq_down; | ||
568 | static int sysrq_alt_use; | ||
569 | static int sysrq_alt; | ||
570 | |||
571 | static bool sysrq_filter(struct input_handle *handle, unsigned int type, | ||
572 | unsigned int code, int value) | ||
573 | { | ||
574 | if (type != EV_KEY) | ||
575 | goto out; | ||
576 | |||
577 | switch (code) { | ||
578 | |||
579 | case KEY_LEFTALT: | ||
580 | case KEY_RIGHTALT: | ||
581 | if (value) | ||
582 | sysrq_alt = code; | ||
583 | else if (sysrq_down && code == sysrq_alt_use) | ||
584 | sysrq_down = false; | ||
585 | break; | ||
586 | |||
587 | case KEY_SYSRQ: | ||
588 | if (value == 1 && sysrq_alt) { | ||
589 | sysrq_down = true; | ||
590 | sysrq_alt_use = sysrq_alt; | ||
591 | } | ||
592 | break; | ||
593 | |||
594 | default: | ||
595 | if (sysrq_down && value && value != 2) | ||
596 | __handle_sysrq(sysrq_xlate[code], NULL, 1); | ||
597 | break; | ||
598 | } | ||
599 | |||
600 | out: | ||
601 | return sysrq_down; | ||
602 | } | ||
603 | |||
604 | static int sysrq_connect(struct input_handler *handler, | ||
605 | struct input_dev *dev, | ||
606 | const struct input_device_id *id) | ||
607 | { | ||
608 | struct input_handle *handle; | ||
609 | int error; | ||
610 | |||
611 | sysrq_down = false; | ||
612 | sysrq_alt = 0; | ||
613 | |||
614 | handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); | ||
615 | if (!handle) | ||
616 | return -ENOMEM; | ||
617 | |||
618 | handle->dev = dev; | ||
619 | handle->handler = handler; | ||
620 | handle->name = "sysrq"; | ||
621 | |||
622 | error = input_register_handle(handle); | ||
623 | if (error) { | ||
624 | pr_err("Failed to register input sysrq handler, error %d\n", | ||
625 | error); | ||
626 | goto err_free; | ||
627 | } | ||
628 | |||
629 | error = input_open_device(handle); | ||
630 | if (error) { | ||
631 | pr_err("Failed to open input device, error %d\n", error); | ||
632 | goto err_unregister; | ||
633 | } | ||
634 | |||
635 | return 0; | ||
636 | |||
637 | err_unregister: | ||
638 | input_unregister_handle(handle); | ||
639 | err_free: | ||
640 | kfree(handle); | ||
641 | return error; | ||
642 | } | ||
643 | |||
644 | static void sysrq_disconnect(struct input_handle *handle) | ||
645 | { | ||
646 | input_close_device(handle); | ||
647 | input_unregister_handle(handle); | ||
648 | kfree(handle); | ||
649 | } | ||
650 | |||
651 | /* | ||
652 | * We are matching on KEY_LEFTALT insteard of KEY_SYSRQ because not all | ||
653 | * keyboards have SysRq ikey predefined and so user may add it to keymap | ||
654 | * later, but we expect all such keyboards to have left alt. | ||
655 | */ | ||
656 | static const struct input_device_id sysrq_ids[] = { | ||
657 | { | ||
658 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | | ||
659 | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
660 | .evbit = { BIT_MASK(EV_KEY) }, | ||
661 | .keybit = { BIT_MASK(KEY_LEFTALT) }, | ||
662 | }, | ||
663 | { }, | ||
664 | }; | ||
665 | |||
666 | static struct input_handler sysrq_handler = { | ||
667 | .filter = sysrq_filter, | ||
668 | .connect = sysrq_connect, | ||
669 | .disconnect = sysrq_disconnect, | ||
670 | .name = "sysrq", | ||
671 | .id_table = sysrq_ids, | ||
672 | }; | ||
673 | |||
674 | static bool sysrq_handler_registered; | ||
675 | |||
676 | static inline void sysrq_register_handler(void) | ||
677 | { | ||
678 | int error; | ||
679 | |||
680 | error = input_register_handler(&sysrq_handler); | ||
681 | if (error) | ||
682 | pr_err("Failed to register input handler, error %d", error); | ||
683 | else | ||
684 | sysrq_handler_registered = true; | ||
685 | } | ||
686 | |||
687 | static inline void sysrq_unregister_handler(void) | ||
688 | { | ||
689 | if (sysrq_handler_registered) { | ||
690 | input_unregister_handler(&sysrq_handler); | ||
691 | sysrq_handler_registered = false; | ||
692 | } | ||
693 | } | ||
694 | |||
695 | #else | ||
696 | |||
697 | static inline void sysrq_register_handler(void) | ||
698 | { | ||
699 | } | ||
700 | |||
701 | static inline void sysrq_unregister_handler(void) | ||
702 | { | ||
703 | } | ||
704 | |||
705 | #endif /* CONFIG_INPUT */ | ||
706 | |||
707 | int sysrq_toggle_support(int enable_mask) | ||
708 | { | ||
709 | bool was_enabled = sysrq_on(); | ||
710 | |||
711 | sysrq_enabled = enable_mask; | ||
712 | |||
713 | if (was_enabled != sysrq_on()) { | ||
714 | if (sysrq_on()) | ||
715 | sysrq_register_handler(); | ||
716 | else | ||
717 | sysrq_unregister_handler(); | ||
718 | } | ||
719 | |||
720 | return 0; | ||
721 | } | ||
722 | |||
558 | static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p, | 723 | static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p, |
559 | struct sysrq_key_op *remove_op_p) | 724 | struct sysrq_key_op *remove_op_p) |
560 | { | 725 | { |
561 | |||
562 | int retval; | 726 | int retval; |
563 | unsigned long flags; | 727 | unsigned long flags; |
564 | 728 | ||
@@ -599,6 +763,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf, | |||
599 | return -EFAULT; | 763 | return -EFAULT; |
600 | __handle_sysrq(c, NULL, 0); | 764 | __handle_sysrq(c, NULL, 0); |
601 | } | 765 | } |
766 | |||
602 | return count; | 767 | return count; |
603 | } | 768 | } |
604 | 769 | ||
@@ -606,10 +771,28 @@ static const struct file_operations proc_sysrq_trigger_operations = { | |||
606 | .write = write_sysrq_trigger, | 771 | .write = write_sysrq_trigger, |
607 | }; | 772 | }; |
608 | 773 | ||
774 | static void sysrq_init_procfs(void) | ||
775 | { | ||
776 | if (!proc_create("sysrq-trigger", S_IWUSR, NULL, | ||
777 | &proc_sysrq_trigger_operations)) | ||
778 | pr_err("Failed to register proc interface\n"); | ||
779 | } | ||
780 | |||
781 | #else | ||
782 | |||
783 | static inline void sysrq_init_procfs(void) | ||
784 | { | ||
785 | } | ||
786 | |||
787 | #endif /* CONFIG_PROC_FS */ | ||
788 | |||
609 | static int __init sysrq_init(void) | 789 | static int __init sysrq_init(void) |
610 | { | 790 | { |
611 | proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations); | 791 | sysrq_init_procfs(); |
792 | |||
793 | if (sysrq_on()) | ||
794 | sysrq_register_handler(); | ||
795 | |||
612 | return 0; | 796 | return 0; |
613 | } | 797 | } |
614 | module_init(sysrq_init); | 798 | module_init(sysrq_init); |
615 | #endif | ||
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index f5fc64f89c5c..4dc338f3d1aa 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig | |||
@@ -17,14 +17,16 @@ menuconfig TCG_TPM | |||
17 | obtained at: <http://sourceforge.net/projects/trousers>. To | 17 | obtained at: <http://sourceforge.net/projects/trousers>. To |
18 | compile this driver as a module, choose M here; the module | 18 | compile this driver as a module, choose M here; the module |
19 | will be called tpm. If unsure, say N. | 19 | will be called tpm. If unsure, say N. |
20 | Note: For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI | 20 | Notes: |
21 | 1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI | ||
21 | and CONFIG_PNPACPI. | 22 | and CONFIG_PNPACPI. |
23 | 2) Without ACPI enabled, the BIOS event log won't be accessible, | ||
24 | which is required to validate the PCR 0-7 values. | ||
22 | 25 | ||
23 | if TCG_TPM | 26 | if TCG_TPM |
24 | 27 | ||
25 | config TCG_TIS | 28 | config TCG_TIS |
26 | tristate "TPM Interface Specification 1.2 Interface" | 29 | tristate "TPM Interface Specification 1.2 Interface" |
27 | depends on PNP | ||
28 | ---help--- | 30 | ---help--- |
29 | If you have a TPM security chip that is compliant with the | 31 | If you have a TPM security chip that is compliant with the |
30 | TCG TIS 1.2 TPM specification say Yes and it will be accessible | 32 | TCG TIS 1.2 TPM specification say Yes and it will be accessible |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 068c816e6942..05ad4a17a28f 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -1068,6 +1068,27 @@ void tpm_remove_hardware(struct device *dev) | |||
1068 | } | 1068 | } |
1069 | EXPORT_SYMBOL_GPL(tpm_remove_hardware); | 1069 | EXPORT_SYMBOL_GPL(tpm_remove_hardware); |
1070 | 1070 | ||
1071 | #define TPM_ORD_SAVESTATE cpu_to_be32(152) | ||
1072 | #define SAVESTATE_RESULT_SIZE 10 | ||
1073 | |||
1074 | static struct tpm_input_header savestate_header = { | ||
1075 | .tag = TPM_TAG_RQU_COMMAND, | ||
1076 | .length = cpu_to_be32(10), | ||
1077 | .ordinal = TPM_ORD_SAVESTATE | ||
1078 | }; | ||
1079 | |||
1080 | /* Bug workaround - some TPM's don't flush the most | ||
1081 | * recently changed pcr on suspend, so force the flush | ||
1082 | * with an extend to the selected _unused_ non-volatile pcr. | ||
1083 | */ | ||
1084 | static int tpm_suspend_pcr; | ||
1085 | static int __init tpm_suspend_setup(char *str) | ||
1086 | { | ||
1087 | get_option(&str, &tpm_suspend_pcr); | ||
1088 | return 1; | ||
1089 | } | ||
1090 | __setup("tpm_suspend_pcr=", tpm_suspend_setup); | ||
1091 | |||
1071 | /* | 1092 | /* |
1072 | * We are about to suspend. Save the TPM state | 1093 | * We are about to suspend. Save the TPM state |
1073 | * so that it can be restored. | 1094 | * so that it can be restored. |
@@ -1075,17 +1096,29 @@ EXPORT_SYMBOL_GPL(tpm_remove_hardware); | |||
1075 | int tpm_pm_suspend(struct device *dev, pm_message_t pm_state) | 1096 | int tpm_pm_suspend(struct device *dev, pm_message_t pm_state) |
1076 | { | 1097 | { |
1077 | struct tpm_chip *chip = dev_get_drvdata(dev); | 1098 | struct tpm_chip *chip = dev_get_drvdata(dev); |
1078 | u8 savestate[] = { | 1099 | struct tpm_cmd_t cmd; |
1079 | 0, 193, /* TPM_TAG_RQU_COMMAND */ | 1100 | int rc; |
1080 | 0, 0, 0, 10, /* blob length (in bytes) */ | 1101 | |
1081 | 0, 0, 0, 152 /* TPM_ORD_SaveState */ | 1102 | u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 }; |
1082 | }; | ||
1083 | 1103 | ||
1084 | if (chip == NULL) | 1104 | if (chip == NULL) |
1085 | return -ENODEV; | 1105 | return -ENODEV; |
1086 | 1106 | ||
1087 | tpm_transmit(chip, savestate, sizeof(savestate)); | 1107 | /* for buggy tpm, flush pcrs with extend to selected dummy */ |
1088 | return 0; | 1108 | if (tpm_suspend_pcr) { |
1109 | cmd.header.in = pcrextend_header; | ||
1110 | cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr); | ||
1111 | memcpy(cmd.params.pcrextend_in.hash, dummy_hash, | ||
1112 | TPM_DIGEST_SIZE); | ||
1113 | rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, | ||
1114 | "extending dummy pcr before suspend"); | ||
1115 | } | ||
1116 | |||
1117 | /* now do the actual savestate */ | ||
1118 | cmd.header.in = savestate_header; | ||
1119 | rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, | ||
1120 | "sending savestate before suspend"); | ||
1121 | return rc; | ||
1089 | } | 1122 | } |
1090 | EXPORT_SYMBOL_GPL(tpm_pm_suspend); | 1123 | EXPORT_SYMBOL_GPL(tpm_pm_suspend); |
1091 | 1124 | ||
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 94345994f8a6..24314a9cffe8 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -598,7 +598,7 @@ out_err: | |||
598 | tpm_remove_hardware(chip->dev); | 598 | tpm_remove_hardware(chip->dev); |
599 | return rc; | 599 | return rc; |
600 | } | 600 | } |
601 | 601 | #ifdef CONFIG_PNP | |
602 | static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, | 602 | static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, |
603 | const struct pnp_device_id *pnp_id) | 603 | const struct pnp_device_id *pnp_id) |
604 | { | 604 | { |
@@ -663,7 +663,7 @@ static struct pnp_driver tis_pnp_driver = { | |||
663 | module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, | 663 | module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, |
664 | sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); | 664 | sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); |
665 | MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); | 665 | MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); |
666 | 666 | #endif | |
667 | static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg) | 667 | static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg) |
668 | { | 668 | { |
669 | return tpm_pm_suspend(&dev->dev, msg); | 669 | return tpm_pm_suspend(&dev->dev, msg); |
@@ -690,21 +690,21 @@ MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); | |||
690 | static int __init init_tis(void) | 690 | static int __init init_tis(void) |
691 | { | 691 | { |
692 | int rc; | 692 | int rc; |
693 | #ifdef CONFIG_PNP | ||
694 | if (!force) | ||
695 | return pnp_register_driver(&tis_pnp_driver); | ||
696 | #endif | ||
693 | 697 | ||
694 | if (force) { | 698 | rc = platform_driver_register(&tis_drv); |
695 | rc = platform_driver_register(&tis_drv); | 699 | if (rc < 0) |
696 | if (rc < 0) | ||
697 | return rc; | ||
698 | if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0))) | ||
699 | return PTR_ERR(pdev); | ||
700 | if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) { | ||
701 | platform_device_unregister(pdev); | ||
702 | platform_driver_unregister(&tis_drv); | ||
703 | } | ||
704 | return rc; | 700 | return rc; |
701 | if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0))) | ||
702 | return PTR_ERR(pdev); | ||
703 | if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) { | ||
704 | platform_device_unregister(pdev); | ||
705 | platform_driver_unregister(&tis_drv); | ||
705 | } | 706 | } |
706 | 707 | return rc; | |
707 | return pnp_register_driver(&tis_pnp_driver); | ||
708 | } | 708 | } |
709 | 709 | ||
710 | static void __exit cleanup_tis(void) | 710 | static void __exit cleanup_tis(void) |
@@ -728,12 +728,14 @@ static void __exit cleanup_tis(void) | |||
728 | list_del(&i->list); | 728 | list_del(&i->list); |
729 | } | 729 | } |
730 | spin_unlock(&tis_lock); | 730 | spin_unlock(&tis_lock); |
731 | 731 | #ifdef CONFIG_PNP | |
732 | if (force) { | 732 | if (!force) { |
733 | platform_device_unregister(pdev); | ||
734 | platform_driver_unregister(&tis_drv); | ||
735 | } else | ||
736 | pnp_unregister_driver(&tis_pnp_driver); | 733 | pnp_unregister_driver(&tis_pnp_driver); |
734 | return; | ||
735 | } | ||
736 | #endif | ||
737 | platform_device_unregister(pdev); | ||
738 | platform_driver_unregister(&tis_drv); | ||
737 | } | 739 | } |
738 | 740 | ||
739 | module_init(init_tis); | 741 | module_init(init_tis); |
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index 7ee52164d474..cc1e9850d655 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c | |||
@@ -238,7 +238,7 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room); | |||
238 | * @size: size | 238 | * @size: size |
239 | * | 239 | * |
240 | * Queue a series of bytes to the tty buffering. All the characters | 240 | * Queue a series of bytes to the tty buffering. All the characters |
241 | * passed are marked as without error. Returns the number added. | 241 | * passed are marked with the supplied flag. Returns the number added. |
242 | * | 242 | * |
243 | * Locking: Called functions may take tty->buf.lock | 243 | * Locking: Called functions may take tty->buf.lock |
244 | */ | 244 | */ |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 6da962c9b21c..d71f0fc34b46 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -1875,6 +1875,7 @@ got_driver: | |||
1875 | */ | 1875 | */ |
1876 | if (filp->f_op == &hung_up_tty_fops) | 1876 | if (filp->f_op == &hung_up_tty_fops) |
1877 | filp->f_op = &tty_fops; | 1877 | filp->f_op = &tty_fops; |
1878 | unlock_kernel(); | ||
1878 | goto retry_open; | 1879 | goto retry_open; |
1879 | } | 1880 | } |
1880 | unlock_kernel(); | 1881 | unlock_kernel(); |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 196428c2287a..8c99bf1b5e9f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -33,35 +33,6 @@ | |||
33 | #include <linux/workqueue.h> | 33 | #include <linux/workqueue.h> |
34 | #include "hvc_console.h" | 34 | #include "hvc_console.h" |
35 | 35 | ||
36 | /* Moved here from .h file in order to disable MULTIPORT. */ | ||
37 | #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ | ||
38 | |||
39 | struct virtio_console_multiport_conf { | ||
40 | struct virtio_console_config config; | ||
41 | /* max. number of ports this device can hold */ | ||
42 | __u32 max_nr_ports; | ||
43 | /* number of ports added so far */ | ||
44 | __u32 nr_ports; | ||
45 | } __attribute__((packed)); | ||
46 | |||
47 | /* | ||
48 | * A message that's passed between the Host and the Guest for a | ||
49 | * particular port. | ||
50 | */ | ||
51 | struct virtio_console_control { | ||
52 | __u32 id; /* Port number */ | ||
53 | __u16 event; /* The kind of control event (see below) */ | ||
54 | __u16 value; /* Extra information for the key */ | ||
55 | }; | ||
56 | |||
57 | /* Some events for control messages */ | ||
58 | #define VIRTIO_CONSOLE_PORT_READY 0 | ||
59 | #define VIRTIO_CONSOLE_CONSOLE_PORT 1 | ||
60 | #define VIRTIO_CONSOLE_RESIZE 2 | ||
61 | #define VIRTIO_CONSOLE_PORT_OPEN 3 | ||
62 | #define VIRTIO_CONSOLE_PORT_NAME 4 | ||
63 | #define VIRTIO_CONSOLE_PORT_REMOVE 5 | ||
64 | |||
65 | /* | 36 | /* |
66 | * This is a global struct for storing common data for all the devices | 37 | * This is a global struct for storing common data for all the devices |
67 | * this driver handles. | 38 | * this driver handles. |
@@ -107,6 +78,9 @@ struct console { | |||
107 | /* The hvc device associated with this console port */ | 78 | /* The hvc device associated with this console port */ |
108 | struct hvc_struct *hvc; | 79 | struct hvc_struct *hvc; |
109 | 80 | ||
81 | /* The size of the console */ | ||
82 | struct winsize ws; | ||
83 | |||
110 | /* | 84 | /* |
111 | * This number identifies the number that we used to register | 85 | * This number identifies the number that we used to register |
112 | * with hvc in hvc_instantiate() and hvc_alloc(); this is the | 86 | * with hvc in hvc_instantiate() and hvc_alloc(); this is the |
@@ -139,7 +113,6 @@ struct ports_device { | |||
139 | * notification | 113 | * notification |
140 | */ | 114 | */ |
141 | struct work_struct control_work; | 115 | struct work_struct control_work; |
142 | struct work_struct config_work; | ||
143 | 116 | ||
144 | struct list_head ports; | 117 | struct list_head ports; |
145 | 118 | ||
@@ -150,7 +123,7 @@ struct ports_device { | |||
150 | spinlock_t cvq_lock; | 123 | spinlock_t cvq_lock; |
151 | 124 | ||
152 | /* The current config space is stored here */ | 125 | /* The current config space is stored here */ |
153 | struct virtio_console_multiport_conf config; | 126 | struct virtio_console_config config; |
154 | 127 | ||
155 | /* The virtio device we're associated with */ | 128 | /* The virtio device we're associated with */ |
156 | struct virtio_device *vdev; | 129 | struct virtio_device *vdev; |
@@ -189,6 +162,9 @@ struct port { | |||
189 | */ | 162 | */ |
190 | spinlock_t inbuf_lock; | 163 | spinlock_t inbuf_lock; |
191 | 164 | ||
165 | /* Protect the operations on the out_vq. */ | ||
166 | spinlock_t outvq_lock; | ||
167 | |||
192 | /* The IO vqs for this port */ | 168 | /* The IO vqs for this port */ |
193 | struct virtqueue *in_vq, *out_vq; | 169 | struct virtqueue *in_vq, *out_vq; |
194 | 170 | ||
@@ -214,6 +190,8 @@ struct port { | |||
214 | /* The 'id' to identify the port with the Host */ | 190 | /* The 'id' to identify the port with the Host */ |
215 | u32 id; | 191 | u32 id; |
216 | 192 | ||
193 | bool outvq_full; | ||
194 | |||
217 | /* Is the host device open */ | 195 | /* Is the host device open */ |
218 | bool host_connected; | 196 | bool host_connected; |
219 | 197 | ||
@@ -328,7 +306,7 @@ static void *get_inbuf(struct port *port) | |||
328 | unsigned int len; | 306 | unsigned int len; |
329 | 307 | ||
330 | vq = port->in_vq; | 308 | vq = port->in_vq; |
331 | buf = vq->vq_ops->get_buf(vq, &len); | 309 | buf = virtqueue_get_buf(vq, &len); |
332 | if (buf) { | 310 | if (buf) { |
333 | buf->len = len; | 311 | buf->len = len; |
334 | buf->offset = 0; | 312 | buf->offset = 0; |
@@ -349,8 +327,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) | |||
349 | 327 | ||
350 | sg_init_one(sg, buf->buf, buf->size); | 328 | sg_init_one(sg, buf->buf, buf->size); |
351 | 329 | ||
352 | ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); | 330 | ret = virtqueue_add_buf(vq, sg, 0, 1, buf); |
353 | vq->vq_ops->kick(vq); | 331 | virtqueue_kick(vq); |
354 | return ret; | 332 | return ret; |
355 | } | 333 | } |
356 | 334 | ||
@@ -366,7 +344,7 @@ static void discard_port_data(struct port *port) | |||
366 | if (port->inbuf) | 344 | if (port->inbuf) |
367 | buf = port->inbuf; | 345 | buf = port->inbuf; |
368 | else | 346 | else |
369 | buf = vq->vq_ops->get_buf(vq, &len); | 347 | buf = virtqueue_get_buf(vq, &len); |
370 | 348 | ||
371 | ret = 0; | 349 | ret = 0; |
372 | while (buf) { | 350 | while (buf) { |
@@ -374,7 +352,7 @@ static void discard_port_data(struct port *port) | |||
374 | ret++; | 352 | ret++; |
375 | free_buf(buf); | 353 | free_buf(buf); |
376 | } | 354 | } |
377 | buf = vq->vq_ops->get_buf(vq, &len); | 355 | buf = virtqueue_get_buf(vq, &len); |
378 | } | 356 | } |
379 | port->inbuf = NULL; | 357 | port->inbuf = NULL; |
380 | if (ret) | 358 | if (ret) |
@@ -403,57 +381,96 @@ out: | |||
403 | return ret; | 381 | return ret; |
404 | } | 382 | } |
405 | 383 | ||
406 | static ssize_t send_control_msg(struct port *port, unsigned int event, | 384 | static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, |
407 | unsigned int value) | 385 | unsigned int event, unsigned int value) |
408 | { | 386 | { |
409 | struct scatterlist sg[1]; | 387 | struct scatterlist sg[1]; |
410 | struct virtio_console_control cpkt; | 388 | struct virtio_console_control cpkt; |
411 | struct virtqueue *vq; | 389 | struct virtqueue *vq; |
412 | unsigned int len; | 390 | unsigned int len; |
413 | 391 | ||
414 | if (!use_multiport(port->portdev)) | 392 | if (!use_multiport(portdev)) |
415 | return 0; | 393 | return 0; |
416 | 394 | ||
417 | cpkt.id = port->id; | 395 | cpkt.id = port_id; |
418 | cpkt.event = event; | 396 | cpkt.event = event; |
419 | cpkt.value = value; | 397 | cpkt.value = value; |
420 | 398 | ||
421 | vq = port->portdev->c_ovq; | 399 | vq = portdev->c_ovq; |
422 | 400 | ||
423 | sg_init_one(sg, &cpkt, sizeof(cpkt)); | 401 | sg_init_one(sg, &cpkt, sizeof(cpkt)); |
424 | if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { | 402 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) { |
425 | vq->vq_ops->kick(vq); | 403 | virtqueue_kick(vq); |
426 | while (!vq->vq_ops->get_buf(vq, &len)) | 404 | while (!virtqueue_get_buf(vq, &len)) |
427 | cpu_relax(); | 405 | cpu_relax(); |
428 | } | 406 | } |
429 | return 0; | 407 | return 0; |
430 | } | 408 | } |
431 | 409 | ||
432 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) | 410 | static ssize_t send_control_msg(struct port *port, unsigned int event, |
411 | unsigned int value) | ||
412 | { | ||
413 | return __send_control_msg(port->portdev, port->id, event, value); | ||
414 | } | ||
415 | |||
416 | /* Callers must take the port->outvq_lock */ | ||
417 | static void reclaim_consumed_buffers(struct port *port) | ||
418 | { | ||
419 | void *buf; | ||
420 | unsigned int len; | ||
421 | |||
422 | while ((buf = virtqueue_get_buf(port->out_vq, &len))) { | ||
423 | kfree(buf); | ||
424 | port->outvq_full = false; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, | ||
429 | bool nonblock) | ||
433 | { | 430 | { |
434 | struct scatterlist sg[1]; | 431 | struct scatterlist sg[1]; |
435 | struct virtqueue *out_vq; | 432 | struct virtqueue *out_vq; |
436 | ssize_t ret; | 433 | ssize_t ret; |
434 | unsigned long flags; | ||
437 | unsigned int len; | 435 | unsigned int len; |
438 | 436 | ||
439 | out_vq = port->out_vq; | 437 | out_vq = port->out_vq; |
440 | 438 | ||
439 | spin_lock_irqsave(&port->outvq_lock, flags); | ||
440 | |||
441 | reclaim_consumed_buffers(port); | ||
442 | |||
441 | sg_init_one(sg, in_buf, in_count); | 443 | sg_init_one(sg, in_buf, in_count); |
442 | ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); | 444 | ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf); |
443 | 445 | ||
444 | /* Tell Host to go! */ | 446 | /* Tell Host to go! */ |
445 | out_vq->vq_ops->kick(out_vq); | 447 | virtqueue_kick(out_vq); |
446 | 448 | ||
447 | if (ret < 0) { | 449 | if (ret < 0) { |
448 | in_count = 0; | 450 | in_count = 0; |
449 | goto fail; | 451 | goto done; |
450 | } | 452 | } |
451 | 453 | ||
452 | /* Wait till the host acknowledges it pushed out the data we sent. */ | 454 | if (ret == 0) |
453 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) | 455 | port->outvq_full = true; |
456 | |||
457 | if (nonblock) | ||
458 | goto done; | ||
459 | |||
460 | /* | ||
461 | * Wait till the host acknowledges it pushed out the data we | ||
462 | * sent. This is done for ports in blocking mode or for data | ||
463 | * from the hvc_console; the tty operations are performed with | ||
464 | * spinlocks held so we can't sleep here. | ||
465 | */ | ||
466 | while (!virtqueue_get_buf(out_vq, &len)) | ||
454 | cpu_relax(); | 467 | cpu_relax(); |
455 | fail: | 468 | done: |
456 | /* We're expected to return the amount of data we wrote */ | 469 | spin_unlock_irqrestore(&port->outvq_lock, flags); |
470 | /* | ||
471 | * We're expected to return the amount of data we wrote -- all | ||
472 | * of it | ||
473 | */ | ||
457 | return in_count; | 474 | return in_count; |
458 | } | 475 | } |
459 | 476 | ||
@@ -503,9 +520,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, | |||
503 | } | 520 | } |
504 | 521 | ||
505 | /* The condition that must be true for polling to end */ | 522 | /* The condition that must be true for polling to end */ |
506 | static bool wait_is_over(struct port *port) | 523 | static bool will_read_block(struct port *port) |
524 | { | ||
525 | return !port_has_data(port) && port->host_connected; | ||
526 | } | ||
527 | |||
528 | static bool will_write_block(struct port *port) | ||
507 | { | 529 | { |
508 | return port_has_data(port) || !port->host_connected; | 530 | bool ret; |
531 | |||
532 | if (!port->host_connected) | ||
533 | return true; | ||
534 | |||
535 | spin_lock_irq(&port->outvq_lock); | ||
536 | /* | ||
537 | * Check if the Host has consumed any buffers since we last | ||
538 | * sent data (this is only applicable for nonblocking ports). | ||
539 | */ | ||
540 | reclaim_consumed_buffers(port); | ||
541 | ret = port->outvq_full; | ||
542 | spin_unlock_irq(&port->outvq_lock); | ||
543 | |||
544 | return ret; | ||
509 | } | 545 | } |
510 | 546 | ||
511 | static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | 547 | static ssize_t port_fops_read(struct file *filp, char __user *ubuf, |
@@ -528,7 +564,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | |||
528 | return -EAGAIN; | 564 | return -EAGAIN; |
529 | 565 | ||
530 | ret = wait_event_interruptible(port->waitqueue, | 566 | ret = wait_event_interruptible(port->waitqueue, |
531 | wait_is_over(port)); | 567 | !will_read_block(port)); |
532 | if (ret < 0) | 568 | if (ret < 0) |
533 | return ret; | 569 | return ret; |
534 | } | 570 | } |
@@ -554,9 +590,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
554 | struct port *port; | 590 | struct port *port; |
555 | char *buf; | 591 | char *buf; |
556 | ssize_t ret; | 592 | ssize_t ret; |
593 | bool nonblock; | ||
557 | 594 | ||
558 | port = filp->private_data; | 595 | port = filp->private_data; |
559 | 596 | ||
597 | nonblock = filp->f_flags & O_NONBLOCK; | ||
598 | |||
599 | if (will_write_block(port)) { | ||
600 | if (nonblock) | ||
601 | return -EAGAIN; | ||
602 | |||
603 | ret = wait_event_interruptible(port->waitqueue, | ||
604 | !will_write_block(port)); | ||
605 | if (ret < 0) | ||
606 | return ret; | ||
607 | } | ||
608 | |||
560 | count = min((size_t)(32 * 1024), count); | 609 | count = min((size_t)(32 * 1024), count); |
561 | 610 | ||
562 | buf = kmalloc(count, GFP_KERNEL); | 611 | buf = kmalloc(count, GFP_KERNEL); |
@@ -569,9 +618,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
569 | goto free_buf; | 618 | goto free_buf; |
570 | } | 619 | } |
571 | 620 | ||
572 | ret = send_buf(port, buf, count); | 621 | ret = send_buf(port, buf, count, nonblock); |
622 | |||
623 | if (nonblock && ret > 0) | ||
624 | goto out; | ||
625 | |||
573 | free_buf: | 626 | free_buf: |
574 | kfree(buf); | 627 | kfree(buf); |
628 | out: | ||
575 | return ret; | 629 | return ret; |
576 | } | 630 | } |
577 | 631 | ||
@@ -586,7 +640,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | |||
586 | ret = 0; | 640 | ret = 0; |
587 | if (port->inbuf) | 641 | if (port->inbuf) |
588 | ret |= POLLIN | POLLRDNORM; | 642 | ret |= POLLIN | POLLRDNORM; |
589 | if (port->host_connected) | 643 | if (!will_write_block(port)) |
590 | ret |= POLLOUT; | 644 | ret |= POLLOUT; |
591 | if (!port->host_connected) | 645 | if (!port->host_connected) |
592 | ret |= POLLHUP; | 646 | ret |= POLLHUP; |
@@ -610,6 +664,10 @@ static int port_fops_release(struct inode *inode, struct file *filp) | |||
610 | 664 | ||
611 | spin_unlock_irq(&port->inbuf_lock); | 665 | spin_unlock_irq(&port->inbuf_lock); |
612 | 666 | ||
667 | spin_lock_irq(&port->outvq_lock); | ||
668 | reclaim_consumed_buffers(port); | ||
669 | spin_unlock_irq(&port->outvq_lock); | ||
670 | |||
613 | return 0; | 671 | return 0; |
614 | } | 672 | } |
615 | 673 | ||
@@ -638,6 +696,15 @@ static int port_fops_open(struct inode *inode, struct file *filp) | |||
638 | port->guest_connected = true; | 696 | port->guest_connected = true; |
639 | spin_unlock_irq(&port->inbuf_lock); | 697 | spin_unlock_irq(&port->inbuf_lock); |
640 | 698 | ||
699 | spin_lock_irq(&port->outvq_lock); | ||
700 | /* | ||
701 | * There might be a chance that we missed reclaiming a few | ||
702 | * buffers in the window of the port getting previously closed | ||
703 | * and opening now. | ||
704 | */ | ||
705 | reclaim_consumed_buffers(port); | ||
706 | spin_unlock_irq(&port->outvq_lock); | ||
707 | |||
641 | /* Notify host of port being opened */ | 708 | /* Notify host of port being opened */ |
642 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); | 709 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); |
643 | 710 | ||
@@ -676,9 +743,9 @@ static int put_chars(u32 vtermno, const char *buf, int count) | |||
676 | 743 | ||
677 | port = find_port_by_vtermno(vtermno); | 744 | port = find_port_by_vtermno(vtermno); |
678 | if (!port) | 745 | if (!port) |
679 | return 0; | 746 | return -EPIPE; |
680 | 747 | ||
681 | return send_buf(port, (void *)buf, count); | 748 | return send_buf(port, (void *)buf, count, false); |
682 | } | 749 | } |
683 | 750 | ||
684 | /* | 751 | /* |
@@ -692,9 +759,13 @@ static int get_chars(u32 vtermno, char *buf, int count) | |||
692 | { | 759 | { |
693 | struct port *port; | 760 | struct port *port; |
694 | 761 | ||
762 | /* If we've not set up the port yet, we have no input to give. */ | ||
763 | if (unlikely(early_put_chars)) | ||
764 | return 0; | ||
765 | |||
695 | port = find_port_by_vtermno(vtermno); | 766 | port = find_port_by_vtermno(vtermno); |
696 | if (!port) | 767 | if (!port) |
697 | return 0; | 768 | return -EPIPE; |
698 | 769 | ||
699 | /* If we don't have an input queue yet, we can't get input. */ | 770 | /* If we don't have an input queue yet, we can't get input. */ |
700 | BUG_ON(!port->in_vq); | 771 | BUG_ON(!port->in_vq); |
@@ -705,22 +776,14 @@ static int get_chars(u32 vtermno, char *buf, int count) | |||
705 | static void resize_console(struct port *port) | 776 | static void resize_console(struct port *port) |
706 | { | 777 | { |
707 | struct virtio_device *vdev; | 778 | struct virtio_device *vdev; |
708 | struct winsize ws; | ||
709 | 779 | ||
710 | /* The port could have been hot-unplugged */ | 780 | /* The port could have been hot-unplugged */ |
711 | if (!port) | 781 | if (!port || !is_console_port(port)) |
712 | return; | 782 | return; |
713 | 783 | ||
714 | vdev = port->portdev->vdev; | 784 | vdev = port->portdev->vdev; |
715 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { | 785 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) |
716 | vdev->config->get(vdev, | 786 | hvc_resize(port->cons.hvc, port->cons.ws); |
717 | offsetof(struct virtio_console_config, cols), | ||
718 | &ws.ws_col, sizeof(u16)); | ||
719 | vdev->config->get(vdev, | ||
720 | offsetof(struct virtio_console_config, rows), | ||
721 | &ws.ws_row, sizeof(u16)); | ||
722 | hvc_resize(port->cons.hvc, ws); | ||
723 | } | ||
724 | } | 787 | } |
725 | 788 | ||
726 | /* We set the configuration at this point, since we now have a tty */ | 789 | /* We set the configuration at this point, since we now have a tty */ |
@@ -804,6 +867,13 @@ int init_port_console(struct port *port) | |||
804 | spin_unlock_irq(&pdrvdata_lock); | 867 | spin_unlock_irq(&pdrvdata_lock); |
805 | port->guest_connected = true; | 868 | port->guest_connected = true; |
806 | 869 | ||
870 | /* | ||
871 | * Start using the new console output if this is the first | ||
872 | * console to come up. | ||
873 | */ | ||
874 | if (early_put_chars) | ||
875 | early_put_chars = NULL; | ||
876 | |||
807 | /* Notify host of port being opened */ | 877 | /* Notify host of port being opened */ |
808 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); | 878 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); |
809 | 879 | ||
@@ -859,6 +929,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, | |||
859 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 929 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
860 | "host_connected: %d\n", port->host_connected); | 930 | "host_connected: %d\n", port->host_connected); |
861 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 931 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
932 | "outvq_full: %d\n", port->outvq_full); | ||
933 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
862 | "is_console: %s\n", | 934 | "is_console: %s\n", |
863 | is_console_port(port) ? "yes" : "no"); | 935 | is_console_port(port) ? "yes" : "no"); |
864 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 936 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
@@ -875,6 +947,153 @@ static const struct file_operations port_debugfs_ops = { | |||
875 | .read = debugfs_read, | 947 | .read = debugfs_read, |
876 | }; | 948 | }; |
877 | 949 | ||
950 | static void set_console_size(struct port *port, u16 rows, u16 cols) | ||
951 | { | ||
952 | if (!port || !is_console_port(port)) | ||
953 | return; | ||
954 | |||
955 | port->cons.ws.ws_row = rows; | ||
956 | port->cons.ws.ws_col = cols; | ||
957 | } | ||
958 | |||
959 | static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | ||
960 | { | ||
961 | struct port_buffer *buf; | ||
962 | unsigned int nr_added_bufs; | ||
963 | int ret; | ||
964 | |||
965 | nr_added_bufs = 0; | ||
966 | do { | ||
967 | buf = alloc_buf(PAGE_SIZE); | ||
968 | if (!buf) | ||
969 | break; | ||
970 | |||
971 | spin_lock_irq(lock); | ||
972 | ret = add_inbuf(vq, buf); | ||
973 | if (ret < 0) { | ||
974 | spin_unlock_irq(lock); | ||
975 | free_buf(buf); | ||
976 | break; | ||
977 | } | ||
978 | nr_added_bufs++; | ||
979 | spin_unlock_irq(lock); | ||
980 | } while (ret > 0); | ||
981 | |||
982 | return nr_added_bufs; | ||
983 | } | ||
984 | |||
985 | static int add_port(struct ports_device *portdev, u32 id) | ||
986 | { | ||
987 | char debugfs_name[16]; | ||
988 | struct port *port; | ||
989 | struct port_buffer *buf; | ||
990 | dev_t devt; | ||
991 | unsigned int nr_added_bufs; | ||
992 | int err; | ||
993 | |||
994 | port = kmalloc(sizeof(*port), GFP_KERNEL); | ||
995 | if (!port) { | ||
996 | err = -ENOMEM; | ||
997 | goto fail; | ||
998 | } | ||
999 | |||
1000 | port->portdev = portdev; | ||
1001 | port->id = id; | ||
1002 | |||
1003 | port->name = NULL; | ||
1004 | port->inbuf = NULL; | ||
1005 | port->cons.hvc = NULL; | ||
1006 | |||
1007 | port->cons.ws.ws_row = port->cons.ws.ws_col = 0; | ||
1008 | |||
1009 | port->host_connected = port->guest_connected = false; | ||
1010 | |||
1011 | port->outvq_full = false; | ||
1012 | |||
1013 | port->in_vq = portdev->in_vqs[port->id]; | ||
1014 | port->out_vq = portdev->out_vqs[port->id]; | ||
1015 | |||
1016 | cdev_init(&port->cdev, &port_fops); | ||
1017 | |||
1018 | devt = MKDEV(portdev->chr_major, id); | ||
1019 | err = cdev_add(&port->cdev, devt, 1); | ||
1020 | if (err < 0) { | ||
1021 | dev_err(&port->portdev->vdev->dev, | ||
1022 | "Error %d adding cdev for port %u\n", err, id); | ||
1023 | goto free_port; | ||
1024 | } | ||
1025 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, | ||
1026 | devt, port, "vport%up%u", | ||
1027 | port->portdev->drv_index, id); | ||
1028 | if (IS_ERR(port->dev)) { | ||
1029 | err = PTR_ERR(port->dev); | ||
1030 | dev_err(&port->portdev->vdev->dev, | ||
1031 | "Error %d creating device for port %u\n", | ||
1032 | err, id); | ||
1033 | goto free_cdev; | ||
1034 | } | ||
1035 | |||
1036 | spin_lock_init(&port->inbuf_lock); | ||
1037 | spin_lock_init(&port->outvq_lock); | ||
1038 | init_waitqueue_head(&port->waitqueue); | ||
1039 | |||
1040 | /* Fill the in_vq with buffers so the host can send us data. */ | ||
1041 | nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); | ||
1042 | if (!nr_added_bufs) { | ||
1043 | dev_err(port->dev, "Error allocating inbufs\n"); | ||
1044 | err = -ENOMEM; | ||
1045 | goto free_device; | ||
1046 | } | ||
1047 | |||
1048 | /* | ||
1049 | * If we're not using multiport support, this has to be a console port | ||
1050 | */ | ||
1051 | if (!use_multiport(port->portdev)) { | ||
1052 | err = init_port_console(port); | ||
1053 | if (err) | ||
1054 | goto free_inbufs; | ||
1055 | } | ||
1056 | |||
1057 | spin_lock_irq(&portdev->ports_lock); | ||
1058 | list_add_tail(&port->list, &port->portdev->ports); | ||
1059 | spin_unlock_irq(&portdev->ports_lock); | ||
1060 | |||
1061 | /* | ||
1062 | * Tell the Host we're set so that it can send us various | ||
1063 | * configuration parameters for this port (eg, port name, | ||
1064 | * caching, whether this is a console port, etc.) | ||
1065 | */ | ||
1066 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1067 | |||
1068 | if (pdrvdata.debugfs_dir) { | ||
1069 | /* | ||
1070 | * Finally, create the debugfs file that we can use to | ||
1071 | * inspect a port's state at any time | ||
1072 | */ | ||
1073 | sprintf(debugfs_name, "vport%up%u", | ||
1074 | port->portdev->drv_index, id); | ||
1075 | port->debugfs_file = debugfs_create_file(debugfs_name, 0444, | ||
1076 | pdrvdata.debugfs_dir, | ||
1077 | port, | ||
1078 | &port_debugfs_ops); | ||
1079 | } | ||
1080 | return 0; | ||
1081 | |||
1082 | free_inbufs: | ||
1083 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) | ||
1084 | free_buf(buf); | ||
1085 | free_device: | ||
1086 | device_destroy(pdrvdata.class, port->dev->devt); | ||
1087 | free_cdev: | ||
1088 | cdev_del(&port->cdev); | ||
1089 | free_port: | ||
1090 | kfree(port); | ||
1091 | fail: | ||
1092 | /* The host might want to notify management sw about port add failure */ | ||
1093 | __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); | ||
1094 | return err; | ||
1095 | } | ||
1096 | |||
878 | /* Remove all port-specific data. */ | 1097 | /* Remove all port-specific data. */ |
879 | static int remove_port(struct port *port) | 1098 | static int remove_port(struct port *port) |
880 | { | 1099 | { |
@@ -888,7 +1107,18 @@ static int remove_port(struct port *port) | |||
888 | spin_lock_irq(&pdrvdata_lock); | 1107 | spin_lock_irq(&pdrvdata_lock); |
889 | list_del(&port->cons.list); | 1108 | list_del(&port->cons.list); |
890 | spin_unlock_irq(&pdrvdata_lock); | 1109 | spin_unlock_irq(&pdrvdata_lock); |
1110 | #if 0 | ||
1111 | /* | ||
1112 | * hvc_remove() not called as removing one hvc port | ||
1113 | * results in other hvc ports getting frozen. | ||
1114 | * | ||
1115 | * Once this is resolved in hvc, this functionality | ||
1116 | * will be enabled. Till that is done, the -EPIPE | ||
1117 | * return from get_chars() above will help | ||
1118 | * hvc_console.c to clean up on ports we remove here. | ||
1119 | */ | ||
891 | hvc_remove(port->cons.hvc); | 1120 | hvc_remove(port->cons.hvc); |
1121 | #endif | ||
892 | } | 1122 | } |
893 | if (port->guest_connected) | 1123 | if (port->guest_connected) |
894 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); | 1124 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); |
@@ -900,8 +1130,10 @@ static int remove_port(struct port *port) | |||
900 | /* Remove unused data this port might have received. */ | 1130 | /* Remove unused data this port might have received. */ |
901 | discard_port_data(port); | 1131 | discard_port_data(port); |
902 | 1132 | ||
1133 | reclaim_consumed_buffers(port); | ||
1134 | |||
903 | /* Remove buffers we queued up for the Host to send us data in. */ | 1135 | /* Remove buffers we queued up for the Host to send us data in. */ |
904 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) | 1136 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) |
905 | free_buf(buf); | 1137 | free_buf(buf); |
906 | 1138 | ||
907 | kfree(port->name); | 1139 | kfree(port->name); |
@@ -924,7 +1156,7 @@ static void handle_control_message(struct ports_device *portdev, | |||
924 | cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); | 1156 | cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); |
925 | 1157 | ||
926 | port = find_port_by_id(portdev, cpkt->id); | 1158 | port = find_port_by_id(portdev, cpkt->id); |
927 | if (!port) { | 1159 | if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { |
928 | /* No valid header at start of buffer. Drop it. */ | 1160 | /* No valid header at start of buffer. Drop it. */ |
929 | dev_dbg(&portdev->vdev->dev, | 1161 | dev_dbg(&portdev->vdev->dev, |
930 | "Invalid index %u in control packet\n", cpkt->id); | 1162 | "Invalid index %u in control packet\n", cpkt->id); |
@@ -932,6 +1164,24 @@ static void handle_control_message(struct ports_device *portdev, | |||
932 | } | 1164 | } |
933 | 1165 | ||
934 | switch (cpkt->event) { | 1166 | switch (cpkt->event) { |
1167 | case VIRTIO_CONSOLE_PORT_ADD: | ||
1168 | if (port) { | ||
1169 | dev_dbg(&portdev->vdev->dev, | ||
1170 | "Port %u already added\n", port->id); | ||
1171 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1172 | break; | ||
1173 | } | ||
1174 | if (cpkt->id >= portdev->config.max_nr_ports) { | ||
1175 | dev_warn(&portdev->vdev->dev, | ||
1176 | "Request for adding port with out-of-bound id %u, max. supported id: %u\n", | ||
1177 | cpkt->id, portdev->config.max_nr_ports - 1); | ||
1178 | break; | ||
1179 | } | ||
1180 | add_port(portdev, cpkt->id); | ||
1181 | break; | ||
1182 | case VIRTIO_CONSOLE_PORT_REMOVE: | ||
1183 | remove_port(port); | ||
1184 | break; | ||
935 | case VIRTIO_CONSOLE_CONSOLE_PORT: | 1185 | case VIRTIO_CONSOLE_CONSOLE_PORT: |
936 | if (!cpkt->value) | 1186 | if (!cpkt->value) |
937 | break; | 1187 | break; |
@@ -944,15 +1194,34 @@ static void handle_control_message(struct ports_device *portdev, | |||
944 | * have to notify the host first. | 1194 | * have to notify the host first. |
945 | */ | 1195 | */ |
946 | break; | 1196 | break; |
947 | case VIRTIO_CONSOLE_RESIZE: | 1197 | case VIRTIO_CONSOLE_RESIZE: { |
1198 | struct { | ||
1199 | __u16 rows; | ||
1200 | __u16 cols; | ||
1201 | } size; | ||
1202 | |||
948 | if (!is_console_port(port)) | 1203 | if (!is_console_port(port)) |
949 | break; | 1204 | break; |
1205 | |||
1206 | memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), | ||
1207 | sizeof(size)); | ||
1208 | set_console_size(port, size.rows, size.cols); | ||
1209 | |||
950 | port->cons.hvc->irq_requested = 1; | 1210 | port->cons.hvc->irq_requested = 1; |
951 | resize_console(port); | 1211 | resize_console(port); |
952 | break; | 1212 | break; |
1213 | } | ||
953 | case VIRTIO_CONSOLE_PORT_OPEN: | 1214 | case VIRTIO_CONSOLE_PORT_OPEN: |
954 | port->host_connected = cpkt->value; | 1215 | port->host_connected = cpkt->value; |
955 | wake_up_interruptible(&port->waitqueue); | 1216 | wake_up_interruptible(&port->waitqueue); |
1217 | /* | ||
1218 | * If the host port got closed and the host had any | ||
1219 | * unconsumed buffers, we'll be able to reclaim them | ||
1220 | * now. | ||
1221 | */ | ||
1222 | spin_lock_irq(&port->outvq_lock); | ||
1223 | reclaim_consumed_buffers(port); | ||
1224 | spin_unlock_irq(&port->outvq_lock); | ||
956 | break; | 1225 | break; |
957 | case VIRTIO_CONSOLE_PORT_NAME: | 1226 | case VIRTIO_CONSOLE_PORT_NAME: |
958 | /* | 1227 | /* |
@@ -990,32 +1259,6 @@ static void handle_control_message(struct ports_device *portdev, | |||
990 | kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); | 1259 | kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); |
991 | } | 1260 | } |
992 | break; | 1261 | break; |
993 | case VIRTIO_CONSOLE_PORT_REMOVE: | ||
994 | /* | ||
995 | * Hot unplug the port. We don't decrement nr_ports | ||
996 | * since we don't want to deal with extra complexities | ||
997 | * of using the lowest-available port id: We can just | ||
998 | * pick up the nr_ports number as the id and not have | ||
999 | * userspace send it to us. This helps us in two | ||
1000 | * ways: | ||
1001 | * | ||
1002 | * - We don't need to have a 'port_id' field in the | ||
1003 | * config space when a port is hot-added. This is a | ||
1004 | * good thing as we might queue up multiple hotplug | ||
1005 | * requests issued in our workqueue. | ||
1006 | * | ||
1007 | * - Another way to deal with this would have been to | ||
1008 | * use a bitmap of the active ports and select the | ||
1009 | * lowest non-active port from that map. That | ||
1010 | * bloats the already tight config space and we | ||
1011 | * would end up artificially limiting the | ||
1012 | * max. number of ports to sizeof(bitmap). Right | ||
1013 | * now we can support 2^32 ports (as the port id is | ||
1014 | * stored in a u32 type). | ||
1015 | * | ||
1016 | */ | ||
1017 | remove_port(port); | ||
1018 | break; | ||
1019 | } | 1262 | } |
1020 | } | 1263 | } |
1021 | 1264 | ||
@@ -1030,7 +1273,7 @@ static void control_work_handler(struct work_struct *work) | |||
1030 | vq = portdev->c_ivq; | 1273 | vq = portdev->c_ivq; |
1031 | 1274 | ||
1032 | spin_lock(&portdev->cvq_lock); | 1275 | spin_lock(&portdev->cvq_lock); |
1033 | while ((buf = vq->vq_ops->get_buf(vq, &len))) { | 1276 | while ((buf = virtqueue_get_buf(vq, &len))) { |
1034 | spin_unlock(&portdev->cvq_lock); | 1277 | spin_unlock(&portdev->cvq_lock); |
1035 | 1278 | ||
1036 | buf->len = len; | 1279 | buf->len = len; |
@@ -1092,204 +1335,29 @@ static void config_intr(struct virtio_device *vdev) | |||
1092 | struct ports_device *portdev; | 1335 | struct ports_device *portdev; |
1093 | 1336 | ||
1094 | portdev = vdev->priv; | 1337 | portdev = vdev->priv; |
1095 | if (use_multiport(portdev)) { | ||
1096 | /* Handle port hot-add */ | ||
1097 | schedule_work(&portdev->config_work); | ||
1098 | } | ||
1099 | /* | ||
1100 | * We'll use this way of resizing only for legacy support. | ||
1101 | * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use | ||
1102 | * control messages to indicate console size changes so that | ||
1103 | * it can be done per-port | ||
1104 | */ | ||
1105 | resize_console(find_port_by_id(portdev, 0)); | ||
1106 | } | ||
1107 | |||
1108 | static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | ||
1109 | { | ||
1110 | struct port_buffer *buf; | ||
1111 | unsigned int nr_added_bufs; | ||
1112 | int ret; | ||
1113 | |||
1114 | nr_added_bufs = 0; | ||
1115 | do { | ||
1116 | buf = alloc_buf(PAGE_SIZE); | ||
1117 | if (!buf) | ||
1118 | break; | ||
1119 | |||
1120 | spin_lock_irq(lock); | ||
1121 | ret = add_inbuf(vq, buf); | ||
1122 | if (ret < 0) { | ||
1123 | spin_unlock_irq(lock); | ||
1124 | free_buf(buf); | ||
1125 | break; | ||
1126 | } | ||
1127 | nr_added_bufs++; | ||
1128 | spin_unlock_irq(lock); | ||
1129 | } while (ret > 0); | ||
1130 | |||
1131 | return nr_added_bufs; | ||
1132 | } | ||
1133 | |||
1134 | static int add_port(struct ports_device *portdev, u32 id) | ||
1135 | { | ||
1136 | char debugfs_name[16]; | ||
1137 | struct port *port; | ||
1138 | struct port_buffer *buf; | ||
1139 | dev_t devt; | ||
1140 | unsigned int nr_added_bufs; | ||
1141 | int err; | ||
1142 | |||
1143 | port = kmalloc(sizeof(*port), GFP_KERNEL); | ||
1144 | if (!port) { | ||
1145 | err = -ENOMEM; | ||
1146 | goto fail; | ||
1147 | } | ||
1148 | |||
1149 | port->portdev = portdev; | ||
1150 | port->id = id; | ||
1151 | |||
1152 | port->name = NULL; | ||
1153 | port->inbuf = NULL; | ||
1154 | port->cons.hvc = NULL; | ||
1155 | |||
1156 | port->host_connected = port->guest_connected = false; | ||
1157 | |||
1158 | port->in_vq = portdev->in_vqs[port->id]; | ||
1159 | port->out_vq = portdev->out_vqs[port->id]; | ||
1160 | |||
1161 | cdev_init(&port->cdev, &port_fops); | ||
1162 | |||
1163 | devt = MKDEV(portdev->chr_major, id); | ||
1164 | err = cdev_add(&port->cdev, devt, 1); | ||
1165 | if (err < 0) { | ||
1166 | dev_err(&port->portdev->vdev->dev, | ||
1167 | "Error %d adding cdev for port %u\n", err, id); | ||
1168 | goto free_port; | ||
1169 | } | ||
1170 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, | ||
1171 | devt, port, "vport%up%u", | ||
1172 | port->portdev->drv_index, id); | ||
1173 | if (IS_ERR(port->dev)) { | ||
1174 | err = PTR_ERR(port->dev); | ||
1175 | dev_err(&port->portdev->vdev->dev, | ||
1176 | "Error %d creating device for port %u\n", | ||
1177 | err, id); | ||
1178 | goto free_cdev; | ||
1179 | } | ||
1180 | |||
1181 | spin_lock_init(&port->inbuf_lock); | ||
1182 | init_waitqueue_head(&port->waitqueue); | ||
1183 | |||
1184 | /* Fill the in_vq with buffers so the host can send us data. */ | ||
1185 | nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); | ||
1186 | if (!nr_added_bufs) { | ||
1187 | dev_err(port->dev, "Error allocating inbufs\n"); | ||
1188 | err = -ENOMEM; | ||
1189 | goto free_device; | ||
1190 | } | ||
1191 | |||
1192 | /* | ||
1193 | * If we're not using multiport support, this has to be a console port | ||
1194 | */ | ||
1195 | if (!use_multiport(port->portdev)) { | ||
1196 | err = init_port_console(port); | ||
1197 | if (err) | ||
1198 | goto free_inbufs; | ||
1199 | } | ||
1200 | |||
1201 | spin_lock_irq(&portdev->ports_lock); | ||
1202 | list_add_tail(&port->list, &port->portdev->ports); | ||
1203 | spin_unlock_irq(&portdev->ports_lock); | ||
1204 | |||
1205 | /* | ||
1206 | * Tell the Host we're set so that it can send us various | ||
1207 | * configuration parameters for this port (eg, port name, | ||
1208 | * caching, whether this is a console port, etc.) | ||
1209 | */ | ||
1210 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1211 | |||
1212 | if (pdrvdata.debugfs_dir) { | ||
1213 | /* | ||
1214 | * Finally, create the debugfs file that we can use to | ||
1215 | * inspect a port's state at any time | ||
1216 | */ | ||
1217 | sprintf(debugfs_name, "vport%up%u", | ||
1218 | port->portdev->drv_index, id); | ||
1219 | port->debugfs_file = debugfs_create_file(debugfs_name, 0444, | ||
1220 | pdrvdata.debugfs_dir, | ||
1221 | port, | ||
1222 | &port_debugfs_ops); | ||
1223 | } | ||
1224 | return 0; | ||
1225 | |||
1226 | free_inbufs: | ||
1227 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) | ||
1228 | free_buf(buf); | ||
1229 | free_device: | ||
1230 | device_destroy(pdrvdata.class, port->dev->devt); | ||
1231 | free_cdev: | ||
1232 | cdev_del(&port->cdev); | ||
1233 | free_port: | ||
1234 | kfree(port); | ||
1235 | fail: | ||
1236 | return err; | ||
1237 | } | ||
1238 | 1338 | ||
1239 | /* | 1339 | if (!use_multiport(portdev)) { |
1240 | * The workhandler for config-space updates. | 1340 | struct port *port; |
1241 | * | 1341 | u16 rows, cols; |
1242 | * This is called when ports are hot-added. | ||
1243 | */ | ||
1244 | static void config_work_handler(struct work_struct *work) | ||
1245 | { | ||
1246 | struct virtio_console_multiport_conf virtconconf; | ||
1247 | struct ports_device *portdev; | ||
1248 | struct virtio_device *vdev; | ||
1249 | int err; | ||
1250 | 1342 | ||
1251 | portdev = container_of(work, struct ports_device, config_work); | 1343 | vdev->config->get(vdev, |
1344 | offsetof(struct virtio_console_config, cols), | ||
1345 | &cols, sizeof(u16)); | ||
1346 | vdev->config->get(vdev, | ||
1347 | offsetof(struct virtio_console_config, rows), | ||
1348 | &rows, sizeof(u16)); | ||
1252 | 1349 | ||
1253 | vdev = portdev->vdev; | 1350 | port = find_port_by_id(portdev, 0); |
1254 | vdev->config->get(vdev, | 1351 | set_console_size(port, rows, cols); |
1255 | offsetof(struct virtio_console_multiport_conf, | ||
1256 | nr_ports), | ||
1257 | &virtconconf.nr_ports, | ||
1258 | sizeof(virtconconf.nr_ports)); | ||
1259 | 1352 | ||
1260 | if (portdev->config.nr_ports == virtconconf.nr_ports) { | ||
1261 | /* | 1353 | /* |
1262 | * Port 0 got hot-added. Since we already did all the | 1354 | * We'll use this way of resizing only for legacy |
1263 | * other initialisation for it, just tell the Host | 1355 | * support. For newer userspace |
1264 | * that the port is ready if we find the port. In | 1356 | * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages |
1265 | * case the port was hot-removed earlier, we call | 1357 | * to indicate console size changes so that it can be |
1266 | * add_port to add the port. | 1358 | * done per-port. |
1267 | */ | 1359 | */ |
1268 | struct port *port; | 1360 | resize_console(port); |
1269 | |||
1270 | port = find_port_by_id(portdev, 0); | ||
1271 | if (!port) | ||
1272 | add_port(portdev, 0); | ||
1273 | else | ||
1274 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1275 | return; | ||
1276 | } | ||
1277 | if (virtconconf.nr_ports > portdev->config.max_nr_ports) { | ||
1278 | dev_warn(&vdev->dev, | ||
1279 | "More ports specified (%u) than allowed (%u)", | ||
1280 | portdev->config.nr_ports + 1, | ||
1281 | portdev->config.max_nr_ports); | ||
1282 | return; | ||
1283 | } | ||
1284 | if (virtconconf.nr_ports < portdev->config.nr_ports) | ||
1285 | return; | ||
1286 | |||
1287 | /* Hot-add ports */ | ||
1288 | while (virtconconf.nr_ports - portdev->config.nr_ports) { | ||
1289 | err = add_port(portdev, portdev->config.nr_ports); | ||
1290 | if (err) | ||
1291 | break; | ||
1292 | portdev->config.nr_ports++; | ||
1293 | } | 1361 | } |
1294 | } | 1362 | } |
1295 | 1363 | ||
@@ -1414,7 +1482,6 @@ static const struct file_operations portdev_fops = { | |||
1414 | static int __devinit virtcons_probe(struct virtio_device *vdev) | 1482 | static int __devinit virtcons_probe(struct virtio_device *vdev) |
1415 | { | 1483 | { |
1416 | struct ports_device *portdev; | 1484 | struct ports_device *portdev; |
1417 | u32 i; | ||
1418 | int err; | 1485 | int err; |
1419 | bool multiport; | 1486 | bool multiport; |
1420 | 1487 | ||
@@ -1443,37 +1510,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
1443 | } | 1510 | } |
1444 | 1511 | ||
1445 | multiport = false; | 1512 | multiport = false; |
1446 | portdev->config.nr_ports = 1; | ||
1447 | portdev->config.max_nr_ports = 1; | 1513 | portdev->config.max_nr_ports = 1; |
1448 | #if 0 /* Multiport is not quite ready yet --RR */ | ||
1449 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { | 1514 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { |
1450 | multiport = true; | 1515 | multiport = true; |
1451 | vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; | 1516 | vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; |
1452 | 1517 | ||
1453 | vdev->config->get(vdev, | 1518 | vdev->config->get(vdev, offsetof(struct virtio_console_config, |
1454 | offsetof(struct virtio_console_multiport_conf, | 1519 | max_nr_ports), |
1455 | nr_ports), | ||
1456 | &portdev->config.nr_ports, | ||
1457 | sizeof(portdev->config.nr_ports)); | ||
1458 | vdev->config->get(vdev, | ||
1459 | offsetof(struct virtio_console_multiport_conf, | ||
1460 | max_nr_ports), | ||
1461 | &portdev->config.max_nr_ports, | 1520 | &portdev->config.max_nr_ports, |
1462 | sizeof(portdev->config.max_nr_ports)); | 1521 | sizeof(portdev->config.max_nr_ports)); |
1463 | if (portdev->config.nr_ports > portdev->config.max_nr_ports) { | ||
1464 | dev_warn(&vdev->dev, | ||
1465 | "More ports (%u) specified than allowed (%u). Will init %u ports.", | ||
1466 | portdev->config.nr_ports, | ||
1467 | portdev->config.max_nr_ports, | ||
1468 | portdev->config.max_nr_ports); | ||
1469 | |||
1470 | portdev->config.nr_ports = portdev->config.max_nr_ports; | ||
1471 | } | ||
1472 | } | 1522 | } |
1473 | 1523 | ||
1474 | /* Let the Host know we support multiple ports.*/ | 1524 | /* Let the Host know we support multiple ports.*/ |
1475 | vdev->config->finalize_features(vdev); | 1525 | vdev->config->finalize_features(vdev); |
1476 | #endif | ||
1477 | 1526 | ||
1478 | err = init_vqs(portdev); | 1527 | err = init_vqs(portdev); |
1479 | if (err < 0) { | 1528 | if (err < 0) { |
@@ -1489,7 +1538,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
1489 | 1538 | ||
1490 | spin_lock_init(&portdev->cvq_lock); | 1539 | spin_lock_init(&portdev->cvq_lock); |
1491 | INIT_WORK(&portdev->control_work, &control_work_handler); | 1540 | INIT_WORK(&portdev->control_work, &control_work_handler); |
1492 | INIT_WORK(&portdev->config_work, &config_work_handler); | ||
1493 | 1541 | ||
1494 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 1542 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); |
1495 | if (!nr_added_bufs) { | 1543 | if (!nr_added_bufs) { |
@@ -1498,16 +1546,22 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
1498 | err = -ENOMEM; | 1546 | err = -ENOMEM; |
1499 | goto free_vqs; | 1547 | goto free_vqs; |
1500 | } | 1548 | } |
1549 | } else { | ||
1550 | /* | ||
1551 | * For backward compatibility: Create a console port | ||
1552 | * if we're running on older host. | ||
1553 | */ | ||
1554 | add_port(portdev, 0); | ||
1501 | } | 1555 | } |
1502 | 1556 | ||
1503 | for (i = 0; i < portdev->config.nr_ports; i++) | 1557 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, |
1504 | add_port(portdev, i); | 1558 | VIRTIO_CONSOLE_DEVICE_READY, 1); |
1505 | |||
1506 | /* Start using the new console output. */ | ||
1507 | early_put_chars = NULL; | ||
1508 | return 0; | 1559 | return 0; |
1509 | 1560 | ||
1510 | free_vqs: | 1561 | free_vqs: |
1562 | /* The host might want to notify mgmt sw about device add failure */ | ||
1563 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, | ||
1564 | VIRTIO_CONSOLE_DEVICE_READY, 0); | ||
1511 | vdev->config->del_vqs(vdev); | 1565 | vdev->config->del_vqs(vdev); |
1512 | kfree(portdev->in_vqs); | 1566 | kfree(portdev->in_vqs); |
1513 | kfree(portdev->out_vqs); | 1567 | kfree(portdev->out_vqs); |
@@ -1529,17 +1583,16 @@ static void virtcons_remove(struct virtio_device *vdev) | |||
1529 | portdev = vdev->priv; | 1583 | portdev = vdev->priv; |
1530 | 1584 | ||
1531 | cancel_work_sync(&portdev->control_work); | 1585 | cancel_work_sync(&portdev->control_work); |
1532 | cancel_work_sync(&portdev->config_work); | ||
1533 | 1586 | ||
1534 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | 1587 | list_for_each_entry_safe(port, port2, &portdev->ports, list) |
1535 | remove_port(port); | 1588 | remove_port(port); |
1536 | 1589 | ||
1537 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | 1590 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); |
1538 | 1591 | ||
1539 | while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) | 1592 | while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) |
1540 | free_buf(buf); | 1593 | free_buf(buf); |
1541 | 1594 | ||
1542 | while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) | 1595 | while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) |
1543 | free_buf(buf); | 1596 | free_buf(buf); |
1544 | 1597 | ||
1545 | vdev->config->del_vqs(vdev); | 1598 | vdev->config->del_vqs(vdev); |
@@ -1556,6 +1609,7 @@ static struct virtio_device_id id_table[] = { | |||
1556 | 1609 | ||
1557 | static unsigned int features[] = { | 1610 | static unsigned int features[] = { |
1558 | VIRTIO_CONSOLE_F_SIZE, | 1611 | VIRTIO_CONSOLE_F_SIZE, |
1612 | VIRTIO_CONSOLE_F_MULTIPORT, | ||
1559 | }; | 1613 | }; |
1560 | 1614 | ||
1561 | static struct virtio_driver virtio_console = { | 1615 | static struct virtio_driver virtio_console = { |