author    | Paul Mundt <lethal@linux-sh.org> | 2009-09-24 23:15:15 -0400
committer | Paul Mundt <lethal@linux-sh.org> | 2009-09-24 23:15:15 -0400
commit    | c373ba999103fa794f041eab5bd490714d2dee88 (patch)
tree      | 8f2b445b1e0af2491c83527967dbcda76054a486 /drivers
parent    | 6f3529f00a0a9ac06413d18d3926adf099cb59af (diff)
parent    | 851b147e4411df6a1e7e90e2a609773c277eefd2 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers')
140 files changed, 11835 insertions, 4099 deletions
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d295bdccc09c..9335b87c5174 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -115,6 +115,9 @@ static const struct file_operations acpi_button_state_fops = { | |||
115 | .release = single_release, | 115 | .release = single_release, |
116 | }; | 116 | }; |
117 | 117 | ||
118 | static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); | ||
119 | static struct acpi_device *lid_device; | ||
120 | |||
118 | /* -------------------------------------------------------------------------- | 121 | /* -------------------------------------------------------------------------- |
119 | FS Interface (/proc) | 122 | FS Interface (/proc) |
120 | -------------------------------------------------------------------------- */ | 123 | -------------------------------------------------------------------------- */ |
@@ -231,11 +234,38 @@ static int acpi_button_remove_fs(struct acpi_device *device) | |||
231 | /* -------------------------------------------------------------------------- | 234 | /* -------------------------------------------------------------------------- |
232 | Driver Interface | 235 | Driver Interface |
233 | -------------------------------------------------------------------------- */ | 236 | -------------------------------------------------------------------------- */ |
237 | int acpi_lid_notifier_register(struct notifier_block *nb) | ||
238 | { | ||
239 | return blocking_notifier_chain_register(&acpi_lid_notifier, nb); | ||
240 | } | ||
241 | EXPORT_SYMBOL(acpi_lid_notifier_register); | ||
242 | |||
243 | int acpi_lid_notifier_unregister(struct notifier_block *nb) | ||
244 | { | ||
245 | return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb); | ||
246 | } | ||
247 | EXPORT_SYMBOL(acpi_lid_notifier_unregister); | ||
248 | |||
249 | int acpi_lid_open(void) | ||
250 | { | ||
251 | acpi_status status; | ||
252 | unsigned long long state; | ||
253 | |||
254 | status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL, | ||
255 | &state); | ||
256 | if (ACPI_FAILURE(status)) | ||
257 | return -ENODEV; | ||
258 | |||
259 | return !!state; | ||
260 | } | ||
261 | EXPORT_SYMBOL(acpi_lid_open); | ||
262 | |||
234 | static int acpi_lid_send_state(struct acpi_device *device) | 263 | static int acpi_lid_send_state(struct acpi_device *device) |
235 | { | 264 | { |
236 | struct acpi_button *button = acpi_driver_data(device); | 265 | struct acpi_button *button = acpi_driver_data(device); |
237 | unsigned long long state; | 266 | unsigned long long state; |
238 | acpi_status status; | 267 | acpi_status status; |
268 | int ret; | ||
239 | 269 | ||
240 | status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); | 270 | status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); |
241 | if (ACPI_FAILURE(status)) | 271 | if (ACPI_FAILURE(status)) |
@@ -244,7 +274,12 @@ static int acpi_lid_send_state(struct acpi_device *device) | |||
244 | /* input layer checks if event is redundant */ | 274 | /* input layer checks if event is redundant */ |
245 | input_report_switch(button->input, SW_LID, !state); | 275 | input_report_switch(button->input, SW_LID, !state); |
246 | input_sync(button->input); | 276 | input_sync(button->input); |
247 | return 0; | 277 | |
278 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); | ||
279 | if (ret == NOTIFY_DONE) | ||
280 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, | ||
281 | device); | ||
282 | return ret; | ||
248 | } | 283 | } |
249 | 284 | ||
250 | static void acpi_button_notify(struct acpi_device *device, u32 event) | 285 | static void acpi_button_notify(struct acpi_device *device, u32 event) |
@@ -366,8 +401,14 @@ static int acpi_button_add(struct acpi_device *device) | |||
366 | error = input_register_device(input); | 401 | error = input_register_device(input); |
367 | if (error) | 402 | if (error) |
368 | goto err_remove_fs; | 403 | goto err_remove_fs; |
369 | if (button->type == ACPI_BUTTON_TYPE_LID) | 404 | if (button->type == ACPI_BUTTON_TYPE_LID) { |
370 | acpi_lid_send_state(device); | 405 | acpi_lid_send_state(device); |
406 | /* | ||
407 | * This assumes there's only one lid device, or if there are | ||
408 | * more we only care about the last one... | ||
409 | */ | ||
410 | lid_device = device; | ||
411 | } | ||
371 | 412 | ||
372 | if (device->wakeup.flags.valid) { | 413 | if (device->wakeup.flags.valid) { |
373 | /* Button's GPE is run-wake GPE */ | 414 | /* Button's GPE is run-wake GPE */ |
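The button.c hunks above export a small lid-state API: acpi_lid_notifier_register(), acpi_lid_notifier_unregister() and acpi_lid_open(). A minimal consumer sketch follows (not part of the commit; it assumes the declarations are exposed through <acpi/button.h>):

```c
#include <linux/module.h>
#include <linux/notifier.h>
#include <acpi/button.h>	/* assumed home of the new declarations */

/* Called through acpi_lid_notifier each time button.c re-reads _LID. */
static int example_lid_notify(struct notifier_block *nb,
			      unsigned long state, void *data)
{
	/* 'state' is the raw _LID value: 0 = closed, non-zero = open */
	pr_info("lid %s\n", state ? "opened" : "closed");
	return NOTIFY_OK;
}

static struct notifier_block example_lid_nb = {
	.notifier_call = example_lid_notify,
};

static int __init example_init(void)
{
	int err = acpi_lid_notifier_register(&example_lid_nb);

	if (err)
		return err;
	/* acpi_lid_open() polls _LID directly: 1 open, 0 closed, <0 error */
	pr_info("lid currently %d\n", acpi_lid_open());
	return 0;
}

static void __exit example_exit(void)
{
	acpi_lid_notifier_unregister(&example_lid_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```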
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 71d1b9bab70b..614da5b8613a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3412,7 +3412,7 @@ static int cdrom_print_info(const char *header, int val, char *info, | |||
3412 | return 0; | 3412 | return 0; |
3413 | } | 3413 | } |
3414 | 3414 | ||
3415 | static int cdrom_sysctl_info(ctl_table *ctl, int write, struct file * filp, | 3415 | static int cdrom_sysctl_info(ctl_table *ctl, int write, |
3416 | void __user *buffer, size_t *lenp, loff_t *ppos) | 3416 | void __user *buffer, size_t *lenp, loff_t *ppos) |
3417 | { | 3417 | { |
3418 | int pos; | 3418 | int pos; |
@@ -3489,7 +3489,7 @@ static int cdrom_sysctl_info(ctl_table *ctl, int write, struct file * filp, | |||
3489 | goto done; | 3489 | goto done; |
3490 | doit: | 3490 | doit: |
3491 | mutex_unlock(&cdrom_mutex); | 3491 | mutex_unlock(&cdrom_mutex); |
3492 | return proc_dostring(ctl, write, filp, buffer, lenp, ppos); | 3492 | return proc_dostring(ctl, write, buffer, lenp, ppos); |
3493 | done: | 3493 | done: |
3494 | printk(KERN_INFO "cdrom: info buffer too small\n"); | 3494 | printk(KERN_INFO "cdrom: info buffer too small\n"); |
3495 | goto doit; | 3495 | goto doit; |
@@ -3525,12 +3525,12 @@ static void cdrom_update_settings(void) | |||
3525 | mutex_unlock(&cdrom_mutex); | 3525 | mutex_unlock(&cdrom_mutex); |
3526 | } | 3526 | } |
3527 | 3527 | ||
3528 | static int cdrom_sysctl_handler(ctl_table *ctl, int write, struct file * filp, | 3528 | static int cdrom_sysctl_handler(ctl_table *ctl, int write, |
3529 | void __user *buffer, size_t *lenp, loff_t *ppos) | 3529 | void __user *buffer, size_t *lenp, loff_t *ppos) |
3530 | { | 3530 | { |
3531 | int ret; | 3531 | int ret; |
3532 | 3532 | ||
3533 | ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | 3533 | ret = proc_dointvec(ctl, write, buffer, lenp, ppos); |
3534 | 3534 | ||
3535 | if (write) { | 3535 | if (write) { |
3536 | 3536 | ||
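The cdrom.c hunks follow a tree-wide sysctl API change: ctl_table proc handlers, and the proc_dostring()/proc_dointvec() helpers they call, no longer take a struct file * argument. A hedged sketch of a driver handler under the new signature (names are illustrative):

```c
#include <linux/sysctl.h>

/* New-style handler: note the missing 'struct file *filp' parameter. */
static int example_sysctl_handler(ctl_table *ctl, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		/* react to the freshly written value here */
	}
	return ret;
}
```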
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 6a06913b01d3..08a6f50ae791 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1087,6 +1087,14 @@ config MMTIMER | |||
1087 | The mmtimer device allows direct userspace access to the | 1087 | The mmtimer device allows direct userspace access to the |
1088 | Altix system timer. | 1088 | Altix system timer. |
1089 | 1089 | ||
1090 | config UV_MMTIMER | ||
1091 | tristate "UV_MMTIMER Memory mapped RTC for SGI UV" | ||
1092 | depends on X86_UV | ||
1093 | default m | ||
1094 | help | ||
1095 | The uv_mmtimer device allows direct userspace access to the | ||
1096 | UV system timer. | ||
1097 | |||
1090 | source "drivers/char/tpm/Kconfig" | 1098 | source "drivers/char/tpm/Kconfig" |
1091 | 1099 | ||
1092 | config TELCLOCK | 1100 | config TELCLOCK |
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 66f779ad4f4c..19a79dd79eee 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_RAW_DRIVER) += raw.o | |||
58 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o | 58 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o |
59 | obj-$(CONFIG_MSPEC) += mspec.o | 59 | obj-$(CONFIG_MSPEC) += mspec.o |
60 | obj-$(CONFIG_MMTIMER) += mmtimer.o | 60 | obj-$(CONFIG_MMTIMER) += mmtimer.o |
61 | obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o | ||
61 | obj-$(CONFIG_VIOTAPE) += viotape.o | 62 | obj-$(CONFIG_VIOTAPE) += viotape.o |
62 | obj-$(CONFIG_HVCS) += hvcs.o | 63 | obj-$(CONFIG_HVCS) += hvcs.o |
63 | obj-$(CONFIG_IBM_BSR) += bsr.o | 64 | obj-$(CONFIG_IBM_BSR) += bsr.o |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 1540e693d91e..4068467ce7b9 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -46,6 +46,8 @@ | |||
46 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | 46 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 |
47 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | 47 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 |
48 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | 48 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 |
49 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 | ||
50 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | ||
49 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | 51 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 |
50 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | 52 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 |
51 | #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 | 53 | #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 |
@@ -91,6 +93,7 @@ | |||
91 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | 93 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ |
92 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | 94 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ |
93 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | 95 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ |
96 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ | ||
94 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ | 97 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ |
95 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \ | 98 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \ |
96 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB) | 99 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB) |
@@ -804,23 +807,39 @@ static void intel_i830_setup_flush(void) | |||
804 | if (!intel_private.i8xx_page) | 807 | if (!intel_private.i8xx_page) |
805 | return; | 808 | return; |
806 | 809 | ||
807 | /* make page uncached */ | ||
808 | map_page_into_agp(intel_private.i8xx_page); | ||
809 | |||
810 | intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); | 810 | intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); |
811 | if (!intel_private.i8xx_flush_page) | 811 | if (!intel_private.i8xx_flush_page) |
812 | intel_i830_fini_flush(); | 812 | intel_i830_fini_flush(); |
813 | } | 813 | } |
814 | 814 | ||
815 | static void | ||
816 | do_wbinvd(void *null) | ||
817 | { | ||
818 | wbinvd(); | ||
819 | } | ||
820 | |||
821 | /* The chipset_flush interface needs to get data that has already been | ||
822 | * flushed out of the CPU all the way out to main memory, because the GPU | ||
823 | * doesn't snoop those buffers. | ||
824 | * | ||
825 | * The 8xx series doesn't have the same lovely interface for flushing the | ||
826 | * chipset write buffers that the later chips do. According to the 865 | ||
827 | * specs, it's 64 octwords, or 1KB. So, to get those previous things in | ||
828 | * that buffer out, we just fill 1KB and clflush it out, on the assumption | ||
829 | * that it'll push whatever was in there out. It appears to work. | ||
830 | */ | ||
815 | static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) | 831 | static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) |
816 | { | 832 | { |
817 | unsigned int *pg = intel_private.i8xx_flush_page; | 833 | unsigned int *pg = intel_private.i8xx_flush_page; |
818 | int i; | ||
819 | 834 | ||
820 | for (i = 0; i < 256; i += 2) | 835 | memset(pg, 0, 1024); |
821 | *(pg + i) = i; | ||
822 | 836 | ||
823 | wmb(); | 837 | if (cpu_has_clflush) { |
838 | clflush_cache_range(pg, 1024); | ||
839 | } else { | ||
840 | if (on_each_cpu(do_wbinvd, NULL, 1) != 0) | ||
841 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); | ||
842 | } | ||
824 | } | 843 | } |
825 | 844 | ||
826 | /* The intel i830 automatically initializes the agp aperture during POST. | 845 | /* The intel i830 automatically initializes the agp aperture during POST. |
@@ -1341,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | |||
1341 | case PCI_DEVICE_ID_INTEL_Q45_HB: | 1360 | case PCI_DEVICE_ID_INTEL_Q45_HB: |
1342 | case PCI_DEVICE_ID_INTEL_G45_HB: | 1361 | case PCI_DEVICE_ID_INTEL_G45_HB: |
1343 | case PCI_DEVICE_ID_INTEL_G41_HB: | 1362 | case PCI_DEVICE_ID_INTEL_G41_HB: |
1363 | case PCI_DEVICE_ID_INTEL_B43_HB: | ||
1344 | case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: | 1364 | case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: |
1345 | case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: | 1365 | case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: |
1346 | case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB: | 1366 | case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB: |
@@ -2335,6 +2355,8 @@ static const struct intel_driver_description { | |||
2335 | "Q45/Q43", NULL, &intel_i965_driver }, | 2355 | "Q45/Q43", NULL, &intel_i965_driver }, |
2336 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, | 2356 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, |
2337 | "G45/G43", NULL, &intel_i965_driver }, | 2357 | "G45/G43", NULL, &intel_i965_driver }, |
2358 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, | ||
2359 | "B43", NULL, &intel_i965_driver }, | ||
2338 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, | 2360 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, |
2339 | "G41", NULL, &intel_i965_driver }, | 2361 | "G41", NULL, &intel_i965_driver }, |
2340 | { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0, | 2362 | { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0, |
@@ -2535,6 +2557,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
2535 | ID(PCI_DEVICE_ID_INTEL_Q45_HB), | 2557 | ID(PCI_DEVICE_ID_INTEL_Q45_HB), |
2536 | ID(PCI_DEVICE_ID_INTEL_G45_HB), | 2558 | ID(PCI_DEVICE_ID_INTEL_G45_HB), |
2537 | ID(PCI_DEVICE_ID_INTEL_G41_HB), | 2559 | ID(PCI_DEVICE_ID_INTEL_G41_HB), |
2560 | ID(PCI_DEVICE_ID_INTEL_B43_HB), | ||
2538 | ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), | 2561 | ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), |
2539 | ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), | 2562 | ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), |
2540 | ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB), | 2563 | ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB), |
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index 0a01329451e4..e3dd24bff514 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -1,8 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Blackfin On-Chip OTP Memory Interface | 2 | * Blackfin On-Chip OTP Memory Interface |
3 | * Supports BF52x/BF54x | ||
4 | * | 3 | * |
5 | * Copyright 2007-2008 Analog Devices Inc. | 4 | * Copyright 2007-2009 Analog Devices Inc. |
6 | * | 5 | * |
7 | * Enter bugs at http://blackfin.uclinux.org/ | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
8 | * | 7 | * |
@@ -17,8 +16,10 @@ | |||
17 | #include <linux/module.h> | 16 | #include <linux/module.h> |
18 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
19 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <mtd/mtd-abi.h> | ||
20 | 20 | ||
21 | #include <asm/blackfin.h> | 21 | #include <asm/blackfin.h> |
22 | #include <asm/bfrom.h> | ||
22 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
23 | 24 | ||
24 | #define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) | 25 | #define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) |
@@ -30,39 +31,6 @@ | |||
30 | 31 | ||
31 | static DEFINE_MUTEX(bfin_otp_lock); | 32 | static DEFINE_MUTEX(bfin_otp_lock); |
32 | 33 | ||
33 | /* OTP Boot ROM functions */ | ||
34 | #define _BOOTROM_OTP_COMMAND 0xEF000018 | ||
35 | #define _BOOTROM_OTP_READ 0xEF00001A | ||
36 | #define _BOOTROM_OTP_WRITE 0xEF00001C | ||
37 | |||
38 | static u32 (* const otp_command)(u32 command, u32 value) = (void *)_BOOTROM_OTP_COMMAND; | ||
39 | static u32 (* const otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)_BOOTROM_OTP_READ; | ||
40 | static u32 (* const otp_write)(u32 page, u32 flags, u64 *page_content) = (void *)_BOOTROM_OTP_WRITE; | ||
41 | |||
42 | /* otp_command(): defines for "command" */ | ||
43 | #define OTP_INIT 0x00000001 | ||
44 | #define OTP_CLOSE 0x00000002 | ||
45 | |||
46 | /* otp_{read,write}(): defines for "flags" */ | ||
47 | #define OTP_LOWER_HALF 0x00000000 /* select upper/lower 64-bit half (bit 0) */ | ||
48 | #define OTP_UPPER_HALF 0x00000001 | ||
49 | #define OTP_NO_ECC 0x00000010 /* do not use ECC */ | ||
50 | #define OTP_LOCK 0x00000020 /* sets page protection bit for page */ | ||
51 | #define OTP_ACCESS_READ 0x00001000 | ||
52 | #define OTP_ACCESS_READWRITE 0x00002000 | ||
53 | |||
54 | /* Return values for all functions */ | ||
55 | #define OTP_SUCCESS 0x00000000 | ||
56 | #define OTP_MASTER_ERROR 0x001 | ||
57 | #define OTP_WRITE_ERROR 0x003 | ||
58 | #define OTP_READ_ERROR 0x005 | ||
59 | #define OTP_ACC_VIO_ERROR 0x009 | ||
60 | #define OTP_DATA_MULT_ERROR 0x011 | ||
61 | #define OTP_ECC_MULT_ERROR 0x021 | ||
62 | #define OTP_PREV_WR_ERROR 0x041 | ||
63 | #define OTP_DATA_SB_WARN 0x100 | ||
64 | #define OTP_ECC_SB_WARN 0x200 | ||
65 | |||
66 | /** | 34 | /** |
67 | * bfin_otp_read - Read OTP pages | 35 | * bfin_otp_read - Read OTP pages |
68 | * | 36 | * |
@@ -86,9 +54,11 @@ static ssize_t bfin_otp_read(struct file *file, char __user *buff, size_t count, | |||
86 | page = *pos / (sizeof(u64) * 2); | 54 | page = *pos / (sizeof(u64) * 2); |
87 | while (bytes_done < count) { | 55 | while (bytes_done < count) { |
88 | flags = (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF); | 56 | flags = (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF); |
89 | stamp("processing page %i (%s)", page, (flags == OTP_UPPER_HALF ? "upper" : "lower")); | 57 | stamp("processing page %i (0x%x:%s)", page, flags, |
90 | ret = otp_read(page, flags, &content); | 58 | (flags & OTP_UPPER_HALF ? "upper" : "lower")); |
59 | ret = bfrom_OtpRead(page, flags, &content); | ||
91 | if (ret & OTP_MASTER_ERROR) { | 60 | if (ret & OTP_MASTER_ERROR) { |
61 | stamp("error from otp: 0x%x", ret); | ||
92 | bytes_done = -EIO; | 62 | bytes_done = -EIO; |
93 | break; | 63 | break; |
94 | } | 64 | } |
@@ -96,7 +66,7 @@ static ssize_t bfin_otp_read(struct file *file, char __user *buff, size_t count, | |||
96 | bytes_done = -EFAULT; | 66 | bytes_done = -EFAULT; |
97 | break; | 67 | break; |
98 | } | 68 | } |
99 | if (flags == OTP_UPPER_HALF) | 69 | if (flags & OTP_UPPER_HALF) |
100 | ++page; | 70 | ++page; |
101 | bytes_done += sizeof(content); | 71 | bytes_done += sizeof(content); |
102 | *pos += sizeof(content); | 72 | *pos += sizeof(content); |
@@ -108,14 +78,53 @@ static ssize_t bfin_otp_read(struct file *file, char __user *buff, size_t count, | |||
108 | } | 78 | } |
109 | 79 | ||
110 | #ifdef CONFIG_BFIN_OTP_WRITE_ENABLE | 80 | #ifdef CONFIG_BFIN_OTP_WRITE_ENABLE |
81 | static bool allow_writes; | ||
82 | |||
83 | /** | ||
84 | * bfin_otp_init_timing - setup OTP timing parameters | ||
85 | * | ||
86 | * Required before doing any write operation. Algorithms from HRM. | ||
87 | */ | ||
88 | static u32 bfin_otp_init_timing(void) | ||
89 | { | ||
90 | u32 tp1, tp2, tp3, timing; | ||
91 | |||
92 | tp1 = get_sclk() / 1000000; | ||
93 | tp2 = (2 * get_sclk() / 10000000) << 8; | ||
94 | tp3 = (0x1401) << 15; | ||
95 | timing = tp1 | tp2 | tp3; | ||
96 | if (bfrom_OtpCommand(OTP_INIT, timing)) | ||
97 | return 0; | ||
98 | |||
99 | return timing; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * bfin_otp_deinit_timing - set timings to only allow reads | ||
104 | * | ||
105 | * Should be called after all writes are done. | ||
106 | */ | ||
107 | static void bfin_otp_deinit_timing(u32 timing) | ||
108 | { | ||
109 | /* mask bits [31:15] so that any attempts to write fail */ | ||
110 | bfrom_OtpCommand(OTP_CLOSE, 0); | ||
111 | bfrom_OtpCommand(OTP_INIT, timing & ~(-1 << 15)); | ||
112 | bfrom_OtpCommand(OTP_CLOSE, 0); | ||
113 | } | ||
114 | |||
111 | /** | 115 | /** |
112 | * bfin_otp_write - Write OTP pages | 116 | * bfin_otp_write - write OTP pages |
113 | * | 117 | * |
114 | * All writes must be in half page chunks (half page == 64 bits). | 118 | * All writes must be in half page chunks (half page == 64 bits). |
115 | */ | 119 | */ |
116 | static ssize_t bfin_otp_write(struct file *filp, const char __user *buff, size_t count, loff_t *pos) | 120 | static ssize_t bfin_otp_write(struct file *filp, const char __user *buff, size_t count, loff_t *pos) |
117 | { | 121 | { |
118 | stampit(); | 122 | ssize_t bytes_done; |
123 | u32 timing, page, base_flags, flags, ret; | ||
124 | u64 content; | ||
125 | |||
126 | if (!allow_writes) | ||
127 | return -EACCES; | ||
119 | 128 | ||
120 | if (count % sizeof(u64)) | 129 | if (count % sizeof(u64)) |
121 | return -EMSGSIZE; | 130 | return -EMSGSIZE; |
@@ -123,20 +132,96 @@ static ssize_t bfin_otp_write(struct file *filp, const char __user *buff, size_t | |||
123 | if (mutex_lock_interruptible(&bfin_otp_lock)) | 132 | if (mutex_lock_interruptible(&bfin_otp_lock)) |
124 | return -ERESTARTSYS; | 133 | return -ERESTARTSYS; |
125 | 134 | ||
126 | /* need otp_init() documentation before this can be implemented */ | 135 | stampit(); |
136 | |||
137 | timing = bfin_otp_init_timing(); | ||
138 | if (timing == 0) { | ||
139 | mutex_unlock(&bfin_otp_lock); | ||
140 | return -EIO; | ||
141 | } | ||
142 | |||
143 | base_flags = OTP_CHECK_FOR_PREV_WRITE; | ||
144 | |||
145 | bytes_done = 0; | ||
146 | page = *pos / (sizeof(u64) * 2); | ||
147 | while (bytes_done < count) { | ||
148 | flags = base_flags | (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF); | ||
149 | stamp("processing page %i (0x%x:%s) from %p", page, flags, | ||
150 | (flags & OTP_UPPER_HALF ? "upper" : "lower"), buff + bytes_done); | ||
151 | if (copy_from_user(&content, buff + bytes_done, sizeof(content))) { | ||
152 | bytes_done = -EFAULT; | ||
153 | break; | ||
154 | } | ||
155 | ret = bfrom_OtpWrite(page, flags, &content); | ||
156 | if (ret & OTP_MASTER_ERROR) { | ||
157 | stamp("error from otp: 0x%x", ret); | ||
158 | bytes_done = -EIO; | ||
159 | break; | ||
160 | } | ||
161 | if (flags & OTP_UPPER_HALF) | ||
162 | ++page; | ||
163 | bytes_done += sizeof(content); | ||
164 | *pos += sizeof(content); | ||
165 | } | ||
166 | |||
167 | bfin_otp_deinit_timing(timing); | ||
127 | 168 | ||
128 | mutex_unlock(&bfin_otp_lock); | 169 | mutex_unlock(&bfin_otp_lock); |
129 | 170 | ||
171 | return bytes_done; | ||
172 | } | ||
173 | |||
174 | static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg) | ||
175 | { | ||
176 | stampit(); | ||
177 | |||
178 | switch (cmd) { | ||
179 | case OTPLOCK: { | ||
180 | u32 timing; | ||
181 | int ret = -EIO; | ||
182 | |||
183 | if (!allow_writes) | ||
184 | return -EACCES; | ||
185 | |||
186 | if (mutex_lock_interruptible(&bfin_otp_lock)) | ||
187 | return -ERESTARTSYS; | ||
188 | |||
189 | timing = bfin_otp_init_timing(); | ||
190 | if (timing) { | ||
191 | u32 otp_result = bfrom_OtpWrite(arg, OTP_LOCK, NULL); | ||
192 | stamp("locking page %lu resulted in 0x%x", arg, otp_result); | ||
193 | if (!(otp_result & OTP_MASTER_ERROR)) | ||
194 | ret = 0; | ||
195 | |||
196 | bfin_otp_deinit_timing(timing); | ||
197 | } | ||
198 | |||
199 | mutex_unlock(&bfin_otp_lock); | ||
200 | |||
201 | return ret; | ||
202 | } | ||
203 | |||
204 | case MEMLOCK: | ||
205 | allow_writes = false; | ||
206 | return 0; | ||
207 | |||
208 | case MEMUNLOCK: | ||
209 | allow_writes = true; | ||
210 | return 0; | ||
211 | } | ||
212 | |||
130 | return -EINVAL; | 213 | return -EINVAL; |
131 | } | 214 | } |
132 | #else | 215 | #else |
133 | # define bfin_otp_write NULL | 216 | # define bfin_otp_write NULL |
217 | # define bfin_otp_ioctl NULL | ||
134 | #endif | 218 | #endif |
135 | 219 | ||
136 | static struct file_operations bfin_otp_fops = { | 220 | static struct file_operations bfin_otp_fops = { |
137 | .owner = THIS_MODULE, | 221 | .owner = THIS_MODULE, |
138 | .read = bfin_otp_read, | 222 | .unlocked_ioctl = bfin_otp_ioctl, |
139 | .write = bfin_otp_write, | 223 | .read = bfin_otp_read, |
224 | .write = bfin_otp_write, | ||
140 | }; | 225 | }; |
141 | 226 | ||
142 | static struct miscdevice bfin_otp_misc_device = { | 227 | static struct miscdevice bfin_otp_misc_device = { |
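With the Boot ROM helpers moved behind bfrom_Otp*() and the new ioctl hook, the OTP character device now accepts writes and lock requests. A userspace sketch (not from the commit; the device node name is an assumption, and the ioctl argument usage simply mirrors how the driver above interprets it: OTPLOCK takes a page number, MEMLOCK/MEMUNLOCK ignore their argument):

```c
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-abi.h>	/* OTPLOCK, MEMLOCK, MEMUNLOCK */

/* Write one 64-bit half page at byte offset 'pos', then lock its page. */
int example_otp_write(const char *dev, off_t pos, uint64_t value)
{
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	ioctl(fd, MEMUNLOCK);			/* writes fail with EACCES until this */
	lseek(fd, pos, SEEK_SET);
	write(fd, &value, sizeof(value));	/* must be a multiple of 8 bytes */
	ioctl(fd, OTPLOCK, pos / 16);		/* pages are 2 x 64 bits = 16 bytes */
	ioctl(fd, MEMLOCK);			/* drop write permission again */
	close(fd);
	return 0;
}
```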
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4a9f3492b921..70a770ac0138 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -166,9 +166,8 @@ static irqreturn_t hpet_interrupt(int irq, void *data) | |||
166 | unsigned long m, t; | 166 | unsigned long m, t; |
167 | 167 | ||
168 | t = devp->hd_ireqfreq; | 168 | t = devp->hd_ireqfreq; |
169 | m = read_counter(&devp->hd_hpet->hpet_mc); | 169 | m = read_counter(&devp->hd_timer->hpet_compare); |
170 | write_counter(t + m + devp->hd_hpets->hp_delta, | 170 | write_counter(t + m, &devp->hd_timer->hpet_compare); |
171 | &devp->hd_timer->hpet_compare); | ||
172 | } | 171 | } |
173 | 172 | ||
174 | if (devp->hd_flags & HPET_SHARED_IRQ) | 173 | if (devp->hd_flags & HPET_SHARED_IRQ) |
@@ -504,21 +503,25 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp) | |||
504 | g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK; | 503 | g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK; |
505 | 504 | ||
506 | if (devp->hd_flags & HPET_PERIODIC) { | 505 | if (devp->hd_flags & HPET_PERIODIC) { |
507 | write_counter(t, &timer->hpet_compare); | ||
508 | g |= Tn_TYPE_CNF_MASK; | 506 | g |= Tn_TYPE_CNF_MASK; |
509 | v |= Tn_TYPE_CNF_MASK; | 507 | v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK; |
510 | writeq(v, &timer->hpet_config); | ||
511 | v |= Tn_VAL_SET_CNF_MASK; | ||
512 | writeq(v, &timer->hpet_config); | 508 | writeq(v, &timer->hpet_config); |
513 | local_irq_save(flags); | 509 | local_irq_save(flags); |
514 | 510 | ||
515 | /* NOTE: what we modify here is a hidden accumulator | 511 | /* |
512 | * NOTE: First we modify the hidden accumulator | ||
516 | * register supported by periodic-capable comparators. | 513 | * register supported by periodic-capable comparators. |
517 | * We never want to modify the (single) counter; that | 514 | * We never want to modify the (single) counter; that |
518 | * would affect all the comparators. | 515 | * would affect all the comparators. The value written |
516 | * is the counter value when the first interrupt is due. | ||
519 | */ | 517 | */ |
520 | m = read_counter(&hpet->hpet_mc); | 518 | m = read_counter(&hpet->hpet_mc); |
521 | write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); | 519 | write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); |
520 | /* | ||
521 | * Then we modify the comparator, indicating the period | ||
522 | * for subsequent interrupt. | ||
523 | */ | ||
524 | write_counter(t, &timer->hpet_compare); | ||
522 | } else { | 525 | } else { |
523 | local_irq_save(flags); | 526 | local_irq_save(flags); |
524 | m = read_counter(&hpet->hpet_mc); | 527 | m = read_counter(&hpet->hpet_mc); |
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 25ce15bb1c08..a632f25f144a 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -678,7 +678,7 @@ int hvc_poll(struct hvc_struct *hp) | |||
678 | EXPORT_SYMBOL_GPL(hvc_poll); | 678 | EXPORT_SYMBOL_GPL(hvc_poll); |
679 | 679 | ||
680 | /** | 680 | /** |
681 | * hvc_resize() - Update terminal window size information. | 681 | * __hvc_resize() - Update terminal window size information. |
682 | * @hp: HVC console pointer | 682 | * @hp: HVC console pointer |
683 | * @ws: Terminal window size structure | 683 | * @ws: Terminal window size structure |
684 | * | 684 | * |
@@ -687,12 +687,12 @@ EXPORT_SYMBOL_GPL(hvc_poll); | |||
687 | * | 687 | * |
688 | * Locking: Locking free; the function MUST be called holding hp->lock | 688 | * Locking: Locking free; the function MUST be called holding hp->lock |
689 | */ | 689 | */ |
690 | void hvc_resize(struct hvc_struct *hp, struct winsize ws) | 690 | void __hvc_resize(struct hvc_struct *hp, struct winsize ws) |
691 | { | 691 | { |
692 | hp->ws = ws; | 692 | hp->ws = ws; |
693 | schedule_work(&hp->tty_resize); | 693 | schedule_work(&hp->tty_resize); |
694 | } | 694 | } |
695 | EXPORT_SYMBOL_GPL(hvc_resize); | 695 | EXPORT_SYMBOL_GPL(__hvc_resize); |
696 | 696 | ||
697 | /* | 697 | /* |
698 | * This kthread is either polling or interrupt driven. This is determined by | 698 | * This kthread is either polling or interrupt driven. This is determined by |
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 3c85d78c975c..10950ca706d8 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -28,6 +28,7 @@ | |||
28 | #define HVC_CONSOLE_H | 28 | #define HVC_CONSOLE_H |
29 | #include <linux/kref.h> | 29 | #include <linux/kref.h> |
30 | #include <linux/tty.h> | 30 | #include <linux/tty.h> |
31 | #include <linux/spinlock.h> | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * This is the max number of console adapters that can/will be found as | 34 | * This is the max number of console adapters that can/will be found as |
@@ -88,7 +89,16 @@ int hvc_poll(struct hvc_struct *hp); | |||
88 | void hvc_kick(void); | 89 | void hvc_kick(void); |
89 | 90 | ||
90 | /* Resize hvc tty terminal window */ | 91 | /* Resize hvc tty terminal window */ |
91 | extern void hvc_resize(struct hvc_struct *hp, struct winsize ws); | 92 | extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws); |
93 | |||
94 | static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws) | ||
95 | { | ||
96 | unsigned long flags; | ||
97 | |||
98 | spin_lock_irqsave(&hp->lock, flags); | ||
99 | __hvc_resize(hp, ws); | ||
100 | spin_unlock_irqrestore(&hp->lock, flags); | ||
101 | } | ||
92 | 102 | ||
93 | /* default notifier for irq based notification */ | 103 | /* default notifier for irq based notification */ |
94 | extern int notifier_add_irq(struct hvc_struct *hp, int data); | 104 | extern int notifier_add_irq(struct hvc_struct *hp, int data); |
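The header change splits window-size updates into a locked and a lockless path: __hvc_resize() requires the caller to hold hp->lock, while the new inline hvc_resize() wrapper takes the lock itself. A short sketch of the resulting calling convention (illustrative):

```c
/* From code that does not hold hp->lock: the wrapper locks for you. */
hvc_resize(hp, ws);

/* From a path that already runs under hp->lock (e.g. a get_chars()
 * callback, as in hvc_iucv below): call the lockless variant directly. */
__hvc_resize(hp, ws);
```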
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 0ecac7e532f6..b8a5d654d3d0 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -273,7 +273,9 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv, | |||
273 | case MSG_TYPE_WINSIZE: | 273 | case MSG_TYPE_WINSIZE: |
274 | if (rb->mbuf->datalen != sizeof(struct winsize)) | 274 | if (rb->mbuf->datalen != sizeof(struct winsize)) |
275 | break; | 275 | break; |
276 | hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data)); | 276 | /* The caller must ensure that the hvc is locked, which |
277 | * is the case when called from hvc_iucv_get_chars() */ | ||
278 | __hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data)); | ||
277 | break; | 279 | break; |
278 | 280 | ||
279 | case MSG_TYPE_ERROR: /* ignored ... */ | 281 | case MSG_TYPE_ERROR: /* ignored ... */ |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 0aede1d6a9ea..6c8b65d069e5 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -690,7 +690,7 @@ static ssize_t read_zero(struct file * file, char __user * buf, | |||
690 | 690 | ||
691 | if (chunk > PAGE_SIZE) | 691 | if (chunk > PAGE_SIZE) |
692 | chunk = PAGE_SIZE; /* Just for latency reasons */ | 692 | chunk = PAGE_SIZE; /* Just for latency reasons */ |
693 | unwritten = clear_user(buf, chunk); | 693 | unwritten = __clear_user(buf, chunk); |
694 | written += chunk - unwritten; | 694 | written += chunk - unwritten; |
695 | if (unwritten) | 695 | if (unwritten) |
696 | break; | 696 | break; |
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 94ad2c3bfc4a..a4ec50c95072 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -281,12 +281,6 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | |||
281 | case IOCTL_MW_REGISTER_IPC: { | 281 | case IOCTL_MW_REGISTER_IPC: { |
282 | unsigned int ipcnum = (unsigned int) ioarg; | 282 | unsigned int ipcnum = (unsigned int) ioarg; |
283 | 283 | ||
284 | PRINTK_3(TRACE_MWAVE, | ||
285 | "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" | ||
286 | " ipcnum %x entry usIntCount %x\n", | ||
287 | ipcnum, | ||
288 | pDrvData->IPCs[ipcnum].usIntCount); | ||
289 | |||
290 | if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { | 284 | if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { |
291 | PRINTK_ERROR(KERN_ERR_MWAVE | 285 | PRINTK_ERROR(KERN_ERR_MWAVE |
292 | "mwavedd::mwave_ioctl:" | 286 | "mwavedd::mwave_ioctl:" |
@@ -295,6 +289,12 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | |||
295 | ipcnum); | 289 | ipcnum); |
296 | return -EINVAL; | 290 | return -EINVAL; |
297 | } | 291 | } |
292 | PRINTK_3(TRACE_MWAVE, | ||
293 | "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" | ||
294 | " ipcnum %x entry usIntCount %x\n", | ||
295 | ipcnum, | ||
296 | pDrvData->IPCs[ipcnum].usIntCount); | ||
297 | |||
298 | lock_kernel(); | 298 | lock_kernel(); |
299 | pDrvData->IPCs[ipcnum].bIsHere = FALSE; | 299 | pDrvData->IPCs[ipcnum].bIsHere = FALSE; |
300 | pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; | 300 | pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; |
@@ -310,11 +310,6 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | |||
310 | case IOCTL_MW_GET_IPC: { | 310 | case IOCTL_MW_GET_IPC: { |
311 | unsigned int ipcnum = (unsigned int) ioarg; | 311 | unsigned int ipcnum = (unsigned int) ioarg; |
312 | 312 | ||
313 | PRINTK_3(TRACE_MWAVE, | ||
314 | "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" | ||
315 | " ipcnum %x, usIntCount %x\n", | ||
316 | ipcnum, | ||
317 | pDrvData->IPCs[ipcnum].usIntCount); | ||
318 | if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { | 313 | if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { |
319 | PRINTK_ERROR(KERN_ERR_MWAVE | 314 | PRINTK_ERROR(KERN_ERR_MWAVE |
320 | "mwavedd::mwave_ioctl:" | 315 | "mwavedd::mwave_ioctl:" |
@@ -322,6 +317,11 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | |||
322 | " Invalid ipcnum %x\n", ipcnum); | 317 | " Invalid ipcnum %x\n", ipcnum); |
323 | return -EINVAL; | 318 | return -EINVAL; |
324 | } | 319 | } |
320 | PRINTK_3(TRACE_MWAVE, | ||
321 | "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" | ||
322 | " ipcnum %x, usIntCount %x\n", | ||
323 | ipcnum, | ||
324 | pDrvData->IPCs[ipcnum].usIntCount); | ||
325 | 325 | ||
326 | lock_kernel(); | 326 | lock_kernel(); |
327 | if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { | 327 | if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { |
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d8a9255e1a3f..04b505e5a5e2 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1231,7 +1231,7 @@ static char sysctl_bootid[16]; | |||
1231 | * as an ASCII string in the standard UUID format. If accesses via the | 1231 | * as an ASCII string in the standard UUID format. If accesses via the |
1232 | * sysctl system call, it is returned as 16 bytes of binary data. | 1232 | * sysctl system call, it is returned as 16 bytes of binary data. |
1233 | */ | 1233 | */ |
1234 | static int proc_do_uuid(ctl_table *table, int write, struct file *filp, | 1234 | static int proc_do_uuid(ctl_table *table, int write, |
1235 | void __user *buffer, size_t *lenp, loff_t *ppos) | 1235 | void __user *buffer, size_t *lenp, loff_t *ppos) |
1236 | { | 1236 | { |
1237 | ctl_table fake_table; | 1237 | ctl_table fake_table; |
@@ -1254,7 +1254,7 @@ static int proc_do_uuid(ctl_table *table, int write, struct file *filp, | |||
1254 | fake_table.data = buf; | 1254 | fake_table.data = buf; |
1255 | fake_table.maxlen = sizeof(buf); | 1255 | fake_table.maxlen = sizeof(buf); |
1256 | 1256 | ||
1257 | return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos); | 1257 | return proc_dostring(&fake_table, write, buffer, lenp, ppos); |
1258 | } | 1258 | } |
1259 | 1259 | ||
1260 | static int uuid_strategy(ctl_table *table, | 1260 | static int uuid_strategy(ctl_table *table, |
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c
index eecee0f576d2..74339559f0b9 100644
--- a/drivers/char/rio/rioctrl.c
+++ b/drivers/char/rio/rioctrl.c
@@ -873,7 +873,7 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su | |||
873 | /* | 873 | /* |
874 | ** It is important that the product code is an unsigned object! | 874 | ** It is important that the product code is an unsigned object! |
875 | */ | 875 | */ |
876 | if (DownLoad.ProductCode > MAX_PRODUCT) { | 876 | if (DownLoad.ProductCode >= MAX_PRODUCT) { |
877 | rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Bad product code %d passed\n", DownLoad.ProductCode); | 877 | rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Bad product code %d passed\n", DownLoad.ProductCode); |
878 | p->RIOError.Error = NO_SUCH_PRODUCT; | 878 | p->RIOError.Error = NO_SUCH_PRODUCT; |
879 | return -ENXIO; | 879 | return -ENXIO; |
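The rioctrl.c fix is a classic off-by-one in a bounds check: the product code presumably indexes a table of MAX_PRODUCT entries, so MAX_PRODUCT itself is already out of range and `>=` is the correct comparison. A tiny illustration (table and use() are hypothetical names):

```c
/* Last valid index is MAX_PRODUCT - 1, so reject anything >= MAX_PRODUCT. */
if (DownLoad.ProductCode >= MAX_PRODUCT)
	return -ENXIO;

use(table[DownLoad.ProductCode]);	/* now provably in bounds */
```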
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
new file mode 100644
index 000000000000..867b67be9f0a
--- /dev/null
+++ b/drivers/char/uv_mmtimer.c
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | * Timer device implementation for SGI UV platform. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (c) 2009 Silicon Graphics, Inc. All rights reserved. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/ioctl.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/fs.h> | ||
20 | #include <linux/mmtimer.h> | ||
21 | #include <linux/miscdevice.h> | ||
22 | #include <linux/posix-timers.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/time.h> | ||
25 | #include <linux/math64.h> | ||
26 | #include <linux/smp_lock.h> | ||
27 | |||
28 | #include <asm/genapic.h> | ||
29 | #include <asm/uv/uv_hub.h> | ||
30 | #include <asm/uv/bios.h> | ||
31 | #include <asm/uv/uv.h> | ||
32 | |||
33 | MODULE_AUTHOR("Dimitri Sivanich <sivanich@sgi.com>"); | ||
34 | MODULE_DESCRIPTION("SGI UV Memory Mapped RTC Timer"); | ||
35 | MODULE_LICENSE("GPL"); | ||
36 | |||
37 | /* name of the device, usually in /dev */ | ||
38 | #define UV_MMTIMER_NAME "mmtimer" | ||
39 | #define UV_MMTIMER_DESC "SGI UV Memory Mapped RTC Timer" | ||
40 | #define UV_MMTIMER_VERSION "1.0" | ||
41 | |||
42 | static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, | ||
43 | unsigned long arg); | ||
44 | static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma); | ||
45 | |||
46 | /* | ||
47 | * Period in femtoseconds (10^-15 s) | ||
48 | */ | ||
49 | static unsigned long uv_mmtimer_femtoperiod; | ||
50 | |||
51 | static const struct file_operations uv_mmtimer_fops = { | ||
52 | .owner = THIS_MODULE, | ||
53 | .mmap = uv_mmtimer_mmap, | ||
54 | .unlocked_ioctl = uv_mmtimer_ioctl, | ||
55 | }; | ||
56 | |||
57 | /** | ||
58 | * uv_mmtimer_ioctl - ioctl interface for /dev/uv_mmtimer | ||
59 | * @file: file structure for the device | ||
60 | * @cmd: command to execute | ||
61 | * @arg: optional argument to command | ||
62 | * | ||
63 | * Executes the command specified by @cmd. Returns 0 for success, < 0 for | ||
64 | * failure. | ||
65 | * | ||
66 | * Valid commands: | ||
67 | * | ||
68 | * %MMTIMER_GETOFFSET - Should return the offset (relative to the start | ||
69 | * of the page where the registers are mapped) for the counter in question. | ||
70 | * | ||
71 | * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15) | ||
72 | * seconds | ||
73 | * | ||
74 | * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address | ||
75 | * specified by @arg | ||
76 | * | ||
77 | * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter | ||
78 | * | ||
79 | * %MMTIMER_MMAPAVAIL - Returns 1 if registers can be mmap'd into userspace | ||
80 | * | ||
81 | * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it | ||
82 | * in the address specified by @arg. | ||
83 | */ | ||
84 | static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, | ||
85 | unsigned long arg) | ||
86 | { | ||
87 | int ret = 0; | ||
88 | |||
89 | switch (cmd) { | ||
90 | case MMTIMER_GETOFFSET: /* offset of the counter */ | ||
91 | /* | ||
92 | * UV RTC register is on its own page | ||
93 | */ | ||
94 | if (PAGE_SIZE <= (1 << 16)) | ||
95 | ret = ((UV_LOCAL_MMR_BASE | UVH_RTC) & (PAGE_SIZE-1)) | ||
96 | / 8; | ||
97 | else | ||
98 | ret = -ENOSYS; | ||
99 | break; | ||
100 | |||
101 | case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ | ||
102 | if (copy_to_user((unsigned long __user *)arg, | ||
103 | &uv_mmtimer_femtoperiod, sizeof(unsigned long))) | ||
104 | ret = -EFAULT; | ||
105 | break; | ||
106 | |||
107 | case MMTIMER_GETFREQ: /* frequency in Hz */ | ||
108 | if (copy_to_user((unsigned long __user *)arg, | ||
109 | &sn_rtc_cycles_per_second, | ||
110 | sizeof(unsigned long))) | ||
111 | ret = -EFAULT; | ||
112 | break; | ||
113 | |||
114 | case MMTIMER_GETBITS: /* number of bits in the clock */ | ||
115 | ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK); | ||
116 | break; | ||
117 | |||
118 | case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */ | ||
119 | ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0; | ||
120 | break; | ||
121 | |||
122 | case MMTIMER_GETCOUNTER: | ||
123 | if (copy_to_user((unsigned long __user *)arg, | ||
124 | (unsigned long *)uv_local_mmr_address(UVH_RTC), | ||
125 | sizeof(unsigned long))) | ||
126 | ret = -EFAULT; | ||
127 | break; | ||
128 | default: | ||
129 | ret = -ENOTTY; | ||
130 | break; | ||
131 | } | ||
132 | return ret; | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * uv_mmtimer_mmap - maps the clock's registers into userspace | ||
137 | * @file: file structure for the device | ||
138 | * @vma: VMA to map the registers into | ||
139 | * | ||
140 | * Calls remap_pfn_range() to map the clock's registers into | ||
141 | * the calling process' address space. | ||
142 | */ | ||
143 | static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) | ||
144 | { | ||
145 | unsigned long uv_mmtimer_addr; | ||
146 | |||
147 | if (vma->vm_end - vma->vm_start != PAGE_SIZE) | ||
148 | return -EINVAL; | ||
149 | |||
150 | if (vma->vm_flags & VM_WRITE) | ||
151 | return -EPERM; | ||
152 | |||
153 | if (PAGE_SIZE > (1 << 16)) | ||
154 | return -ENOSYS; | ||
155 | |||
156 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
157 | |||
158 | uv_mmtimer_addr = UV_LOCAL_MMR_BASE | UVH_RTC; | ||
159 | uv_mmtimer_addr &= ~(PAGE_SIZE - 1); | ||
160 | uv_mmtimer_addr &= 0xfffffffffffffffUL; | ||
161 | |||
162 | if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, | ||
163 | PAGE_SIZE, vma->vm_page_prot)) { | ||
164 | printk(KERN_ERR "remap_pfn_range failed in uv_mmtimer_mmap\n"); | ||
165 | return -EAGAIN; | ||
166 | } | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static struct miscdevice uv_mmtimer_miscdev = { | ||
172 | MISC_DYNAMIC_MINOR, | ||
173 | UV_MMTIMER_NAME, | ||
174 | &uv_mmtimer_fops | ||
175 | }; | ||
176 | |||
177 | |||
178 | /** | ||
179 | * uv_mmtimer_init - device initialization routine | ||
180 | * | ||
181 | * Does initial setup for the uv_mmtimer device. | ||
182 | */ | ||
183 | static int __init uv_mmtimer_init(void) | ||
184 | { | ||
185 | if (!is_uv_system()) { | ||
186 | printk(KERN_ERR "%s: Hardware unsupported\n", UV_MMTIMER_NAME); | ||
187 | return -1; | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Sanity check the cycles/sec variable | ||
192 | */ | ||
193 | if (sn_rtc_cycles_per_second < 100000) { | ||
194 | printk(KERN_ERR "%s: unable to determine clock frequency\n", | ||
195 | UV_MMTIMER_NAME); | ||
196 | return -1; | ||
197 | } | ||
198 | |||
199 | uv_mmtimer_femtoperiod = ((unsigned long)1E15 + | ||
200 | sn_rtc_cycles_per_second / 2) / | ||
201 | sn_rtc_cycles_per_second; | ||
202 | |||
203 | if (misc_register(&uv_mmtimer_miscdev)) { | ||
204 | printk(KERN_ERR "%s: failed to register device\n", | ||
205 | UV_MMTIMER_NAME); | ||
206 | return -1; | ||
207 | } | ||
208 | |||
209 | printk(KERN_INFO "%s: v%s, %ld MHz\n", UV_MMTIMER_DESC, | ||
210 | UV_MMTIMER_VERSION, | ||
211 | sn_rtc_cycles_per_second/(unsigned long)1E6); | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | module_init(uv_mmtimer_init); | ||
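The new uv_mmtimer driver exposes the UV RTC through the mmtimer ioctl set documented in its kerneldoc above, plus a read-only mmap of the register page. A userspace sketch (not part of the commit; MMTIMER_* come from <linux/mmtimer.h>, and the device node is assumed to be /dev/mmtimer per UV_MMTIMER_NAME):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/mmtimer.h>

int main(void)
{
	unsigned long freq = 0;
	long offset;
	volatile unsigned long *page;
	int fd = open("/dev/mmtimer", O_RDONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, MMTIMER_GETFREQ, &freq);	  /* RTC frequency in Hz */
	offset = ioctl(fd, MMTIMER_GETOFFSET, 0); /* counter offset, in words */

	if (offset >= 0 && ioctl(fd, MMTIMER_MMAPAVAIL, 0) == 1) {
		page = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
		if (page != MAP_FAILED)
			printf("freq=%lu Hz counter=%lu\n", freq, page[offset]);
	}

	close(fd);
	return 0;
}
```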
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 25b743abfb59..52e6bb70a490 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | #include <linux/dca.h> | 29 | #include <linux/dca.h> |
30 | 30 | ||
31 | #define DCA_VERSION "1.8" | 31 | #define DCA_VERSION "1.12.1" |
32 | 32 | ||
33 | MODULE_VERSION(DCA_VERSION); | 33 | MODULE_VERSION(DCA_VERSION); |
34 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
@@ -36,20 +36,92 @@ MODULE_AUTHOR("Intel Corporation"); | |||
36 | 36 | ||
37 | static DEFINE_SPINLOCK(dca_lock); | 37 | static DEFINE_SPINLOCK(dca_lock); |
38 | 38 | ||
39 | static LIST_HEAD(dca_providers); | 39 | static LIST_HEAD(dca_domains); |
40 | 40 | ||
41 | static struct dca_provider *dca_find_provider_by_dev(struct device *dev) | 41 | static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) |
42 | { | 42 | { |
43 | struct dca_provider *dca, *ret = NULL; | 43 | struct pci_dev *pdev = to_pci_dev(dev); |
44 | struct pci_bus *bus = pdev->bus; | ||
44 | 45 | ||
45 | list_for_each_entry(dca, &dca_providers, node) { | 46 | while (bus->parent) |
46 | if ((!dev) || (dca->ops->dev_managed(dca, dev))) { | 47 | bus = bus->parent; |
47 | ret = dca; | 48 | |
48 | break; | 49 | return bus; |
49 | } | 50 | } |
51 | |||
52 | static struct dca_domain *dca_allocate_domain(struct pci_bus *rc) | ||
53 | { | ||
54 | struct dca_domain *domain; | ||
55 | |||
56 | domain = kzalloc(sizeof(*domain), GFP_NOWAIT); | ||
57 | if (!domain) | ||
58 | return NULL; | ||
59 | |||
60 | INIT_LIST_HEAD(&domain->dca_providers); | ||
61 | domain->pci_rc = rc; | ||
62 | |||
63 | return domain; | ||
64 | } | ||
65 | |||
66 | static void dca_free_domain(struct dca_domain *domain) | ||
67 | { | ||
68 | list_del(&domain->node); | ||
69 | kfree(domain); | ||
70 | } | ||
71 | |||
72 | static struct dca_domain *dca_find_domain(struct pci_bus *rc) | ||
73 | { | ||
74 | struct dca_domain *domain; | ||
75 | |||
76 | list_for_each_entry(domain, &dca_domains, node) | ||
77 | if (domain->pci_rc == rc) | ||
78 | return domain; | ||
79 | |||
80 | return NULL; | ||
81 | } | ||
82 | |||
83 | static struct dca_domain *dca_get_domain(struct device *dev) | ||
84 | { | ||
85 | struct pci_bus *rc; | ||
86 | struct dca_domain *domain; | ||
87 | |||
88 | rc = dca_pci_rc_from_dev(dev); | ||
89 | domain = dca_find_domain(rc); | ||
90 | |||
91 | if (!domain) { | ||
92 | domain = dca_allocate_domain(rc); | ||
93 | if (domain) | ||
94 | list_add(&domain->node, &dca_domains); | ||
95 | } | ||
96 | |||
97 | return domain; | ||
98 | } | ||
99 | |||
100 | static struct dca_provider *dca_find_provider_by_dev(struct device *dev) | ||
101 | { | ||
102 | struct dca_provider *dca; | ||
103 | struct pci_bus *rc; | ||
104 | struct dca_domain *domain; | ||
105 | |||
106 | if (dev) { | ||
107 | rc = dca_pci_rc_from_dev(dev); | ||
108 | domain = dca_find_domain(rc); | ||
109 | if (!domain) | ||
110 | return NULL; | ||
111 | } else { | ||
112 | if (!list_empty(&dca_domains)) | ||
113 | domain = list_first_entry(&dca_domains, | ||
114 | struct dca_domain, | ||
115 | node); | ||
116 | else | ||
117 | return NULL; | ||
50 | } | 118 | } |
51 | 119 | ||
52 | return ret; | 120 | list_for_each_entry(dca, &domain->dca_providers, node) |
121 | if ((!dev) || (dca->ops->dev_managed(dca, dev))) | ||
122 | return dca; | ||
123 | |||
124 | return NULL; | ||
53 | } | 125 | } |
54 | 126 | ||
55 | /** | 127 | /** |
@@ -61,6 +133,8 @@ int dca_add_requester(struct device *dev) | |||
61 | struct dca_provider *dca; | 133 | struct dca_provider *dca; |
62 | int err, slot = -ENODEV; | 134 | int err, slot = -ENODEV; |
63 | unsigned long flags; | 135 | unsigned long flags; |
136 | struct pci_bus *pci_rc; | ||
137 | struct dca_domain *domain; | ||
64 | 138 | ||
65 | if (!dev) | 139 | if (!dev) |
66 | return -EFAULT; | 140 | return -EFAULT; |
@@ -74,7 +148,14 @@ int dca_add_requester(struct device *dev) | |||
74 | return -EEXIST; | 148 | return -EEXIST; |
75 | } | 149 | } |
76 | 150 | ||
77 | list_for_each_entry(dca, &dca_providers, node) { | 151 | pci_rc = dca_pci_rc_from_dev(dev); |
152 | domain = dca_find_domain(pci_rc); | ||
153 | if (!domain) { | ||
154 | spin_unlock_irqrestore(&dca_lock, flags); | ||
155 | return -ENODEV; | ||
156 | } | ||
157 | |||
158 | list_for_each_entry(dca, &domain->dca_providers, node) { | ||
78 | slot = dca->ops->add_requester(dca, dev); | 159 | slot = dca->ops->add_requester(dca, dev); |
79 | if (slot >= 0) | 160 | if (slot >= 0) |
80 | break; | 161 | break; |
@@ -222,13 +303,19 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) | |||
222 | { | 303 | { |
223 | int err; | 304 | int err; |
224 | unsigned long flags; | 305 | unsigned long flags; |
306 | struct dca_domain *domain; | ||
225 | 307 | ||
226 | err = dca_sysfs_add_provider(dca, dev); | 308 | err = dca_sysfs_add_provider(dca, dev); |
227 | if (err) | 309 | if (err) |
228 | return err; | 310 | return err; |
229 | 311 | ||
230 | spin_lock_irqsave(&dca_lock, flags); | 312 | spin_lock_irqsave(&dca_lock, flags); |
231 | list_add(&dca->node, &dca_providers); | 313 | domain = dca_get_domain(dev); |
314 | if (!domain) { | ||
315 | spin_unlock_irqrestore(&dca_lock, flags); | ||
316 | return -ENODEV; | ||
317 | } | ||
318 | list_add(&dca->node, &domain->dca_providers); | ||
232 | spin_unlock_irqrestore(&dca_lock, flags); | 319 | spin_unlock_irqrestore(&dca_lock, flags); |
233 | 320 | ||
234 | blocking_notifier_call_chain(&dca_provider_chain, | 321 | blocking_notifier_call_chain(&dca_provider_chain, |
@@ -241,15 +328,24 @@ EXPORT_SYMBOL_GPL(register_dca_provider); | |||
241 | * unregister_dca_provider - remove a dca provider | 328 | * unregister_dca_provider - remove a dca provider |
242 | * @dca - struct created by alloc_dca_provider() | 329 | * @dca - struct created by alloc_dca_provider() |
243 | */ | 330 | */ |
244 | void unregister_dca_provider(struct dca_provider *dca) | 331 | void unregister_dca_provider(struct dca_provider *dca, struct device *dev) |
245 | { | 332 | { |
246 | unsigned long flags; | 333 | unsigned long flags; |
334 | struct pci_bus *pci_rc; | ||
335 | struct dca_domain *domain; | ||
247 | 336 | ||
248 | blocking_notifier_call_chain(&dca_provider_chain, | 337 | blocking_notifier_call_chain(&dca_provider_chain, |
249 | DCA_PROVIDER_REMOVE, NULL); | 338 | DCA_PROVIDER_REMOVE, NULL); |
250 | 339 | ||
251 | spin_lock_irqsave(&dca_lock, flags); | 340 | spin_lock_irqsave(&dca_lock, flags); |
341 | |||
252 | list_del(&dca->node); | 342 | list_del(&dca->node); |
343 | |||
344 | pci_rc = dca_pci_rc_from_dev(dev); | ||
345 | domain = dca_find_domain(pci_rc); | ||
346 | if (list_empty(&domain->dca_providers)) | ||
347 | dca_free_domain(domain); | ||
348 | |||
253 | spin_unlock_irqrestore(&dca_lock, flags); | 349 | spin_unlock_irqrestore(&dca_lock, flags); |
254 | 350 | ||
255 | dca_sysfs_remove_provider(dca); | 351 | dca_sysfs_remove_provider(dca); |
@@ -276,7 +372,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify); | |||
276 | 372 | ||
277 | static int __init dca_init(void) | 373 | static int __init dca_init(void) |
278 | { | 374 | { |
279 | printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION); | 375 | pr_info("dca service started, version %s\n", DCA_VERSION); |
280 | return dca_sysfs_init(); | 376 | return dca_sysfs_init(); |
281 | } | 377 | } |
282 | 378 | ||
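The dca-core changes group providers into per-PCI-root-complex domains, which also changes the unregister path: unregister_dca_provider() now takes the device so the core can locate (and possibly free) the matching domain. Caller-side impact, sketched with hypothetical names:

```c
/* before this change */
unregister_dca_provider(dca);

/* after this change: pass the device the provider was registered with */
unregister_dca_provider(dca, &pdev->dev);
```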
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 81e1020fb514..5903a88351bf 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -17,11 +17,15 @@ if DMADEVICES | |||
17 | 17 | ||
18 | comment "DMA Devices" | 18 | comment "DMA Devices" |
19 | 19 | ||
20 | config ASYNC_TX_DISABLE_CHANNEL_SWITCH | ||
21 | bool | ||
22 | |||
20 | config INTEL_IOATDMA | 23 | config INTEL_IOATDMA |
21 | tristate "Intel I/OAT DMA support" | 24 | tristate "Intel I/OAT DMA support" |
22 | depends on PCI && X86 | 25 | depends on PCI && X86 |
23 | select DMA_ENGINE | 26 | select DMA_ENGINE |
24 | select DCA | 27 | select DCA |
28 | select ASYNC_TX_DISABLE_CHANNEL_SWITCH | ||
25 | help | 29 | help |
26 | Enable support for the Intel(R) I/OAT DMA engine present | 30 | Enable support for the Intel(R) I/OAT DMA engine present |
27 | in recent Intel Xeon chipsets. | 31 | in recent Intel Xeon chipsets. |
@@ -97,6 +101,14 @@ config TXX9_DMAC | |||
97 | Support the TXx9 SoC internal DMA controller. This can be | 101 | Support the TXx9 SoC internal DMA controller. This can be |
98 | integrated in chips such as the Toshiba TX4927/38/39. | 102 | integrated in chips such as the Toshiba TX4927/38/39. |
99 | 103 | ||
104 | config SH_DMAE | ||
105 | tristate "Renesas SuperH DMAC support" | ||
106 | depends on SUPERH && SH_DMA | ||
107 | depends on !SH_DMA_API | ||
108 | select DMA_ENGINE | ||
109 | help | ||
110 | Enable support for the Renesas SuperH DMA controllers. | ||
111 | |||
100 | config DMA_ENGINE | 112 | config DMA_ENGINE |
101 | bool | 113 | bool |
102 | 114 | ||
@@ -116,7 +128,7 @@ config NET_DMA | |||
116 | 128 | ||
117 | config ASYNC_TX_DMA | 129 | config ASYNC_TX_DMA |
118 | bool "Async_tx: Offload support for the async_tx api" | 130 | bool "Async_tx: Offload support for the async_tx api" |
119 | depends on DMA_ENGINE && !HIGHMEM64G | 131 | depends on DMA_ENGINE |
120 | help | 132 | help |
121 | This allows the async_tx api to take advantage of offload engines for | 133 | This allows the async_tx api to take advantage of offload engines for |
122 | memcpy, memset, xor, and raid6 p+q operations. If your platform has | 134 | memcpy, memset, xor, and raid6 p+q operations. If your platform has |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 40e1e0083571..eca71ba78ae9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,8 +1,7 @@ | |||
1 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o | 1 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o |
2 | obj-$(CONFIG_NET_DMA) += iovlock.o | 2 | obj-$(CONFIG_NET_DMA) += iovlock.o |
3 | obj-$(CONFIG_DMATEST) += dmatest.o | 3 | obj-$(CONFIG_DMATEST) += dmatest.o |
4 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 4 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ |
5 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o | ||
6 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
7 | obj-$(CONFIG_FSL_DMA) += fsldma.o | 6 | obj-$(CONFIG_FSL_DMA) += fsldma.o |
8 | obj-$(CONFIG_MV_XOR) += mv_xor.o | 7 | obj-$(CONFIG_MV_XOR) += mv_xor.o |
@@ -10,3 +9,4 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o | |||
10 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o | 9 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o |
11 | obj-$(CONFIG_MX3_IPU) += ipu/ | 10 | obj-$(CONFIG_MX3_IPU) += ipu/ |
12 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 11 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
12 | obj-$(CONFIG_SH_DMAE) += shdma.o | ||
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c8522e6f1ad2..7585c4164bd5 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -87,6 +87,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, | |||
87 | desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); | 87 | desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); |
88 | if (desc) { | 88 | if (desc) { |
89 | memset(desc, 0, sizeof(struct at_desc)); | 89 | memset(desc, 0, sizeof(struct at_desc)); |
90 | INIT_LIST_HEAD(&desc->tx_list); | ||
90 | dma_async_tx_descriptor_init(&desc->txd, chan); | 91 | dma_async_tx_descriptor_init(&desc->txd, chan); |
91 | /* txd.flags will be overwritten in prep functions */ | 92 | /* txd.flags will be overwritten in prep functions */ |
92 | desc->txd.flags = DMA_CTRL_ACK; | 93 | desc->txd.flags = DMA_CTRL_ACK; |
@@ -150,11 +151,11 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) | |||
150 | struct at_desc *child; | 151 | struct at_desc *child; |
151 | 152 | ||
152 | spin_lock_bh(&atchan->lock); | 153 | spin_lock_bh(&atchan->lock); |
153 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 154 | list_for_each_entry(child, &desc->tx_list, desc_node) |
154 | dev_vdbg(chan2dev(&atchan->chan_common), | 155 | dev_vdbg(chan2dev(&atchan->chan_common), |
155 | "moving child desc %p to freelist\n", | 156 | "moving child desc %p to freelist\n", |
156 | child); | 157 | child); |
157 | list_splice_init(&desc->txd.tx_list, &atchan->free_list); | 158 | list_splice_init(&desc->tx_list, &atchan->free_list); |
158 | dev_vdbg(chan2dev(&atchan->chan_common), | 159 | dev_vdbg(chan2dev(&atchan->chan_common), |
159 | "moving desc %p to freelist\n", desc); | 160 | "moving desc %p to freelist\n", desc); |
160 | list_add(&desc->desc_node, &atchan->free_list); | 161 | list_add(&desc->desc_node, &atchan->free_list); |
@@ -247,30 +248,33 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
247 | param = txd->callback_param; | 248 | param = txd->callback_param; |
248 | 249 | ||
249 | /* move children to free_list */ | 250 | /* move children to free_list */ |
250 | list_splice_init(&txd->tx_list, &atchan->free_list); | 251 | list_splice_init(&desc->tx_list, &atchan->free_list); |
251 | /* move myself to free_list */ | 252 | /* move myself to free_list */ |
252 | list_move(&desc->desc_node, &atchan->free_list); | 253 | list_move(&desc->desc_node, &atchan->free_list); |
253 | 254 | ||
254 | /* unmap dma addresses */ | 255 | /* unmap dma addresses */ |
255 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 256 | if (!atchan->chan_common.private) { |
256 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | 257 | struct device *parent = chan2parent(&atchan->chan_common); |
257 | dma_unmap_single(chan2parent(&atchan->chan_common), | 258 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
258 | desc->lli.daddr, | 259 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
259 | desc->len, DMA_FROM_DEVICE); | 260 | dma_unmap_single(parent, |
260 | else | 261 | desc->lli.daddr, |
261 | dma_unmap_page(chan2parent(&atchan->chan_common), | 262 | desc->len, DMA_FROM_DEVICE); |
262 | desc->lli.daddr, | 263 | else |
263 | desc->len, DMA_FROM_DEVICE); | 264 | dma_unmap_page(parent, |
264 | } | 265 | desc->lli.daddr, |
265 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | 266 | desc->len, DMA_FROM_DEVICE); |
266 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | 267 | } |
267 | dma_unmap_single(chan2parent(&atchan->chan_common), | 268 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
268 | desc->lli.saddr, | 269 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
269 | desc->len, DMA_TO_DEVICE); | 270 | dma_unmap_single(parent, |
270 | else | 271 | desc->lli.saddr, |
271 | dma_unmap_page(chan2parent(&atchan->chan_common), | 272 | desc->len, DMA_TO_DEVICE); |
272 | desc->lli.saddr, | 273 | else |
273 | desc->len, DMA_TO_DEVICE); | 274 | dma_unmap_page(parent, |
275 | desc->lli.saddr, | ||
276 | desc->len, DMA_TO_DEVICE); | ||
277 | } | ||
274 | } | 278 | } |
275 | 279 | ||
276 | /* | 280 | /* |
@@ -334,7 +338,7 @@ static void atc_cleanup_descriptors(struct at_dma_chan *atchan) | |||
334 | /* This one is currently in progress */ | 338 | /* This one is currently in progress */ |
335 | return; | 339 | return; |
336 | 340 | ||
337 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 341 | list_for_each_entry(child, &desc->tx_list, desc_node) |
338 | if (!(child->lli.ctrla & ATC_DONE)) | 342 | if (!(child->lli.ctrla & ATC_DONE)) |
339 | /* Currently in progress */ | 343 | /* Currently in progress */ |
340 | return; | 344 | return; |
@@ -407,7 +411,7 @@ static void atc_handle_error(struct at_dma_chan *atchan) | |||
407 | dev_crit(chan2dev(&atchan->chan_common), | 411 | dev_crit(chan2dev(&atchan->chan_common), |
408 | " cookie: %d\n", bad_desc->txd.cookie); | 412 | " cookie: %d\n", bad_desc->txd.cookie); |
409 | atc_dump_lli(atchan, &bad_desc->lli); | 413 | atc_dump_lli(atchan, &bad_desc->lli); |
410 | list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) | 414 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
411 | atc_dump_lli(atchan, &child->lli); | 415 | atc_dump_lli(atchan, &child->lli); |
412 | 416 | ||
413 | /* Pretend the descriptor completed successfully */ | 417 | /* Pretend the descriptor completed successfully */ |
@@ -587,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
587 | prev->lli.dscr = desc->txd.phys; | 591 | prev->lli.dscr = desc->txd.phys; |
588 | /* insert the link descriptor to the LD ring */ | 592 | /* insert the link descriptor to the LD ring */ |
589 | list_add_tail(&desc->desc_node, | 593 | list_add_tail(&desc->desc_node, |
590 | &first->txd.tx_list); | 594 | &first->tx_list); |
591 | } | 595 | } |
592 | prev = desc; | 596 | prev = desc; |
593 | } | 597 | } |
@@ -646,8 +650,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
646 | 650 | ||
647 | reg_width = atslave->reg_width; | 651 | reg_width = atslave->reg_width; |
648 | 652 | ||
649 | sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction); | ||
650 | |||
651 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; | 653 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; |
652 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; | 654 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; |
653 | 655 | ||
@@ -687,7 +689,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
687 | prev->lli.dscr = desc->txd.phys; | 689 | prev->lli.dscr = desc->txd.phys; |
688 | /* insert the link descriptor to the LD ring */ | 690 | /* insert the link descriptor to the LD ring */ |
689 | list_add_tail(&desc->desc_node, | 691 | list_add_tail(&desc->desc_node, |
690 | &first->txd.tx_list); | 692 | &first->tx_list); |
691 | } | 693 | } |
692 | prev = desc; | 694 | prev = desc; |
693 | total_len += len; | 695 | total_len += len; |
@@ -729,7 +731,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
729 | prev->lli.dscr = desc->txd.phys; | 731 | prev->lli.dscr = desc->txd.phys; |
730 | /* insert the link descriptor to the LD ring */ | 732 | /* insert the link descriptor to the LD ring */ |
731 | list_add_tail(&desc->desc_node, | 733 | list_add_tail(&desc->desc_node, |
732 | &first->txd.tx_list); | 734 | &first->tx_list); |
733 | } | 735 | } |
734 | prev = desc; | 736 | prev = desc; |
735 | total_len += len; | 737 | total_len += len; |
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 4c972afc49ec..495457e3dc4b 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -165,6 +165,7 @@ struct at_desc { | |||
165 | struct at_lli lli; | 165 | struct at_lli lli; |
166 | 166 | ||
167 | /* THEN values for driver housekeeping */ | 167 | /* THEN values for driver housekeeping */ |
168 | struct list_head tx_list; | ||
168 | struct dma_async_tx_descriptor txd; | 169 | struct dma_async_tx_descriptor txd; |
169 | struct list_head desc_node; | 170 | struct list_head desc_node; |
170 | size_t len; | 171 | size_t len; |
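The at_hdmac changes above move the list of chained child descriptors out of the generic dma_async_tx_descriptor (the txd.tx_list field, which this patch removes from the core) and into a driver-private tx_list. A minimal sketch of that pattern follows; struct my_desc and the helper names are hypothetical, only the dmaengine and list primitives are taken from the code above.

#include <linux/list.h>
#include <linux/dmaengine.h>

struct my_desc {
	struct list_head		tx_list;   /* children of a multi-segment transfer */
	struct dma_async_tx_descriptor	txd;       /* generic descriptor handed to clients */
	struct list_head		desc_node; /* linkage on the channel free/active lists */
};

static void my_desc_init(struct my_desc *desc, struct dma_chan *chan)
{
	/* the driver now owns the child list; the core descriptor no longer carries one */
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, chan);
}

static void my_desc_chain(struct my_desc *first, struct my_desc *child)
{
	/* children hang off the first descriptor of the chain, as in at_hdmac above */
	list_add_tail(&child->desc_node, &first->tx_list);
}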
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 5a87384ea4ff..bd0b248de2cf 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -608,6 +608,40 @@ void dmaengine_put(void) | |||
608 | } | 608 | } |
609 | EXPORT_SYMBOL(dmaengine_put); | 609 | EXPORT_SYMBOL(dmaengine_put); |
610 | 610 | ||
611 | static bool device_has_all_tx_types(struct dma_device *device) | ||
612 | { | ||
613 | /* A device that satisfies this test has channels that will never cause | ||
614 | * an async_tx channel switch event as all possible operation types can | ||
615 | * be handled. | ||
616 | */ | ||
617 | #ifdef CONFIG_ASYNC_TX_DMA | ||
618 | if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) | ||
619 | return false; | ||
620 | #endif | ||
621 | |||
622 | #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE) | ||
623 | if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) | ||
624 | return false; | ||
625 | #endif | ||
626 | |||
627 | #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE) | ||
628 | if (!dma_has_cap(DMA_MEMSET, device->cap_mask)) | ||
629 | return false; | ||
630 | #endif | ||
631 | |||
632 | #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) | ||
633 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) | ||
634 | return false; | ||
635 | #endif | ||
636 | |||
637 | #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE) | ||
638 | if (!dma_has_cap(DMA_PQ, device->cap_mask)) | ||
639 | return false; | ||
640 | #endif | ||
641 | |||
642 | return true; | ||
643 | } | ||
644 | |||
611 | static int get_dma_id(struct dma_device *device) | 645 | static int get_dma_id(struct dma_device *device) |
612 | { | 646 | { |
613 | int rc; | 647 | int rc; |
@@ -644,8 +678,12 @@ int dma_async_device_register(struct dma_device *device) | |||
644 | !device->device_prep_dma_memcpy); | 678 | !device->device_prep_dma_memcpy); |
645 | BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && | 679 | BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && |
646 | !device->device_prep_dma_xor); | 680 | !device->device_prep_dma_xor); |
647 | BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) && | 681 | BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && |
648 | !device->device_prep_dma_zero_sum); | 682 | !device->device_prep_dma_xor_val); |
683 | BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && | ||
684 | !device->device_prep_dma_pq); | ||
685 | BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && | ||
686 | !device->device_prep_dma_pq_val); | ||
649 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && | 687 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && |
650 | !device->device_prep_dma_memset); | 688 | !device->device_prep_dma_memset); |
651 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && | 689 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && |
@@ -661,6 +699,12 @@ int dma_async_device_register(struct dma_device *device) | |||
661 | BUG_ON(!device->device_issue_pending); | 699 | BUG_ON(!device->device_issue_pending); |
662 | BUG_ON(!device->dev); | 700 | BUG_ON(!device->dev); |
663 | 701 | ||
702 | /* note: this only matters in the | ||
703 | * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case | ||
704 | */ | ||
705 | if (device_has_all_tx_types(device)) | ||
706 | dma_cap_set(DMA_ASYNC_TX, device->cap_mask); | ||
707 | |||
664 | idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); | 708 | idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); |
665 | if (!idr_ref) | 709 | if (!idr_ref) |
666 | return -ENOMEM; | 710 | return -ENOMEM; |
@@ -933,55 +977,29 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |||
933 | { | 977 | { |
934 | tx->chan = chan; | 978 | tx->chan = chan; |
935 | spin_lock_init(&tx->lock); | 979 | spin_lock_init(&tx->lock); |
936 | INIT_LIST_HEAD(&tx->tx_list); | ||
937 | } | 980 | } |
938 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); | 981 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); |
939 | 982 | ||
940 | /* dma_wait_for_async_tx - spin wait for a transaction to complete | 983 | /* dma_wait_for_async_tx - spin wait for a transaction to complete |
941 | * @tx: in-flight transaction to wait on | 984 | * @tx: in-flight transaction to wait on |
942 | * | ||
943 | * This routine assumes that tx was obtained from a call to async_memcpy, | ||
944 | * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped | ||
945 | * and submitted). Walking the parent chain is only meant to cover for DMA | ||
946 | * drivers that do not implement the DMA_INTERRUPT capability and may race with | ||
947 | * the driver's descriptor cleanup routine. | ||
948 | */ | 985 | */ |
949 | enum dma_status | 986 | enum dma_status |
950 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | 987 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
951 | { | 988 | { |
952 | enum dma_status status; | 989 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); |
953 | struct dma_async_tx_descriptor *iter; | ||
954 | struct dma_async_tx_descriptor *parent; | ||
955 | 990 | ||
956 | if (!tx) | 991 | if (!tx) |
957 | return DMA_SUCCESS; | 992 | return DMA_SUCCESS; |
958 | 993 | ||
959 | WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for" | 994 | while (tx->cookie == -EBUSY) { |
960 | " %s\n", __func__, dma_chan_name(tx->chan)); | 995 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
961 | 996 | pr_err("%s timeout waiting for descriptor submission\n", | |
962 | /* poll through the dependency chain, return when tx is complete */ | 997 | __func__); |
963 | do { | 998 | return DMA_ERROR; |
964 | iter = tx; | 999 | } |
965 | 1000 | cpu_relax(); | |
966 | /* find the root of the unsubmitted dependency chain */ | 1001 | } |
967 | do { | 1002 | return dma_sync_wait(tx->chan, tx->cookie); |
968 | parent = iter->parent; | ||
969 | if (!parent) | ||
970 | break; | ||
971 | else | ||
972 | iter = parent; | ||
973 | } while (parent); | ||
974 | |||
975 | /* there is a small window for ->parent == NULL and | ||
976 | * ->cookie == -EBUSY | ||
977 | */ | ||
978 | while (iter->cookie == -EBUSY) | ||
979 | cpu_relax(); | ||
980 | |||
981 | status = dma_sync_wait(iter->chan, iter->cookie); | ||
982 | } while (status == DMA_IN_PROGRESS || (iter != tx)); | ||
983 | |||
984 | return status; | ||
985 | } | 1003 | } |
986 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); | 1004 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); |
987 | 1005 | ||
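The reworked dma_wait_for_async_tx() above no longer walks the parent chain: it spins only until the descriptor has been assigned a cookie (with a five second submission timeout) and then defers to dma_sync_wait() on that cookie. A hedged sketch of how a caller might use it; my_wait_for_copy() and its error values are illustrative, the dmaengine calls are from the API shown above.

#include <linux/dmaengine.h>

/* Submit a previously prepared descriptor and block until it completes. */
static int my_wait_for_copy(struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	/* the descriptor must be submitted before waiting on it */
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EBUSY;

	/* no parent walking: poll this channel until the cookie completes */
	if (dma_wait_for_async_tx(tx) != DMA_SUCCESS)
		return -EIO;

	return 0;
}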
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d93017fc7872..a32a4cf7b1e0 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -48,6 +48,11 @@ module_param(xor_sources, uint, S_IRUGO); | |||
48 | MODULE_PARM_DESC(xor_sources, | 48 | MODULE_PARM_DESC(xor_sources, |
49 | "Number of xor source buffers (default: 3)"); | 49 | "Number of xor source buffers (default: 3)"); |
50 | 50 | ||
51 | static unsigned int pq_sources = 3; | ||
52 | module_param(pq_sources, uint, S_IRUGO); | ||
53 | MODULE_PARM_DESC(pq_sources, | ||
54 | "Number of p+q source buffers (default: 3)"); | ||
55 | |||
51 | /* | 56 | /* |
52 | * Initialization patterns. All bytes in the source buffer has bit 7 | 57 | * Initialization patterns. All bytes in the source buffer has bit 7 |
53 | * set, all bytes in the destination buffer has bit 7 cleared. | 58 | * set, all bytes in the destination buffer has bit 7 cleared. |
@@ -232,6 +237,7 @@ static int dmatest_func(void *data) | |||
232 | dma_cookie_t cookie; | 237 | dma_cookie_t cookie; |
233 | enum dma_status status; | 238 | enum dma_status status; |
234 | enum dma_ctrl_flags flags; | 239 | enum dma_ctrl_flags flags; |
240 | u8 pq_coefs[pq_sources]; | ||
235 | int ret; | 241 | int ret; |
236 | int src_cnt; | 242 | int src_cnt; |
237 | int dst_cnt; | 243 | int dst_cnt; |
@@ -248,6 +254,11 @@ static int dmatest_func(void *data) | |||
248 | else if (thread->type == DMA_XOR) { | 254 | else if (thread->type == DMA_XOR) { |
249 | src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ | 255 | src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ |
250 | dst_cnt = 1; | 256 | dst_cnt = 1; |
257 | } else if (thread->type == DMA_PQ) { | ||
258 | src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ | ||
259 | dst_cnt = 2; | ||
260 | for (i = 0; i < pq_sources; i++) | ||
261 | pq_coefs[i] = 1; | ||
251 | } else | 262 | } else |
252 | goto err_srcs; | 263 | goto err_srcs; |
253 | 264 | ||
@@ -283,6 +294,7 @@ static int dmatest_func(void *data) | |||
283 | dma_addr_t dma_dsts[dst_cnt]; | 294 | dma_addr_t dma_dsts[dst_cnt]; |
284 | struct completion cmp; | 295 | struct completion cmp; |
285 | unsigned long tmo = msecs_to_jiffies(3000); | 296 | unsigned long tmo = msecs_to_jiffies(3000); |
297 | u8 align = 0; | ||
286 | 298 | ||
287 | total_tests++; | 299 | total_tests++; |
288 | 300 | ||
@@ -290,6 +302,18 @@ static int dmatest_func(void *data) | |||
290 | src_off = dmatest_random() % (test_buf_size - len + 1); | 302 | src_off = dmatest_random() % (test_buf_size - len + 1); |
291 | dst_off = dmatest_random() % (test_buf_size - len + 1); | 303 | dst_off = dmatest_random() % (test_buf_size - len + 1); |
292 | 304 | ||
305 | /* honor alignment restrictions */ | ||
306 | if (thread->type == DMA_MEMCPY) | ||
307 | align = dev->copy_align; | ||
308 | else if (thread->type == DMA_XOR) | ||
309 | align = dev->xor_align; | ||
310 | else if (thread->type == DMA_PQ) | ||
311 | align = dev->pq_align; | ||
312 | |||
313 | len = (len >> align) << align; | ||
314 | src_off = (src_off >> align) << align; | ||
315 | dst_off = (dst_off >> align) << align; | ||
316 | |||
293 | dmatest_init_srcs(thread->srcs, src_off, len); | 317 | dmatest_init_srcs(thread->srcs, src_off, len); |
294 | dmatest_init_dsts(thread->dsts, dst_off, len); | 318 | dmatest_init_dsts(thread->dsts, dst_off, len); |
295 | 319 | ||
@@ -306,6 +330,7 @@ static int dmatest_func(void *data) | |||
306 | DMA_BIDIRECTIONAL); | 330 | DMA_BIDIRECTIONAL); |
307 | } | 331 | } |
308 | 332 | ||
333 | |||
309 | if (thread->type == DMA_MEMCPY) | 334 | if (thread->type == DMA_MEMCPY) |
310 | tx = dev->device_prep_dma_memcpy(chan, | 335 | tx = dev->device_prep_dma_memcpy(chan, |
311 | dma_dsts[0] + dst_off, | 336 | dma_dsts[0] + dst_off, |
@@ -316,6 +341,15 @@ static int dmatest_func(void *data) | |||
316 | dma_dsts[0] + dst_off, | 341 | dma_dsts[0] + dst_off, |
317 | dma_srcs, xor_sources, | 342 | dma_srcs, xor_sources, |
318 | len, flags); | 343 | len, flags); |
344 | else if (thread->type == DMA_PQ) { | ||
345 | dma_addr_t dma_pq[dst_cnt]; | ||
346 | |||
347 | for (i = 0; i < dst_cnt; i++) | ||
348 | dma_pq[i] = dma_dsts[i] + dst_off; | ||
349 | tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, | ||
350 | pq_sources, pq_coefs, | ||
351 | len, flags); | ||
352 | } | ||
319 | 353 | ||
320 | if (!tx) { | 354 | if (!tx) { |
321 | for (i = 0; i < src_cnt; i++) | 355 | for (i = 0; i < src_cnt; i++) |
@@ -459,6 +493,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty | |||
459 | op = "copy"; | 493 | op = "copy"; |
460 | else if (type == DMA_XOR) | 494 | else if (type == DMA_XOR) |
461 | op = "xor"; | 495 | op = "xor"; |
496 | else if (type == DMA_PQ) | ||
497 | op = "pq"; | ||
462 | else | 498 | else |
463 | return -EINVAL; | 499 | return -EINVAL; |
464 | 500 | ||
@@ -514,6 +550,10 @@ static int dmatest_add_channel(struct dma_chan *chan) | |||
514 | cnt = dmatest_add_threads(dtc, DMA_XOR); | 550 | cnt = dmatest_add_threads(dtc, DMA_XOR); |
515 | thread_count += cnt > 0 ? cnt : 0; | 551 | thread_count += cnt > 0 ? cnt : 0; |
516 | } | 552 | } |
553 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { | ||
554 | cnt = dmatest_add_threads(dtc, DMA_PQ); | ||
555 | thread_count += cnt > 0 ?: 0; | ||
556 | } | ||
517 | 557 | ||
518 | pr_info("dmatest: Started %u threads using %s\n", | 558 | pr_info("dmatest: Started %u threads using %s\n", |
519 | thread_count, dma_chan_name(chan)); | 559 | thread_count, dma_chan_name(chan)); |
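dmatest now clips the random length and offsets to the engine's alignment before preparing each operation; copy_align, xor_align and pq_align are log2 shift counts, so the rounding is a shift down and back up rather than a mask. A small illustrative helper (the name is made up) showing the same arithmetic:

/* Round a value down to a 2^shift boundary, as dmatest does with
 * dev->copy_align / xor_align / pq_align (which are shift counts). */
static unsigned int round_down_to_shift(unsigned int val, unsigned int shift)
{
	return (val >> shift) << shift;
}

/* round_down_to_shift(4099, 2) == 4096, i.e. clipped to a 4-byte aligned length */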
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 933c143b6a74..2eea823516a7 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -116,7 +116,7 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
116 | { | 116 | { |
117 | struct dw_desc *child; | 117 | struct dw_desc *child; |
118 | 118 | ||
119 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 119 | list_for_each_entry(child, &desc->tx_list, desc_node) |
120 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), | 120 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
121 | child->txd.phys, sizeof(child->lli), | 121 | child->txd.phys, sizeof(child->lli), |
122 | DMA_TO_DEVICE); | 122 | DMA_TO_DEVICE); |
@@ -137,11 +137,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
137 | dwc_sync_desc_for_cpu(dwc, desc); | 137 | dwc_sync_desc_for_cpu(dwc, desc); |
138 | 138 | ||
139 | spin_lock_bh(&dwc->lock); | 139 | spin_lock_bh(&dwc->lock); |
140 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 140 | list_for_each_entry(child, &desc->tx_list, desc_node) |
141 | dev_vdbg(chan2dev(&dwc->chan), | 141 | dev_vdbg(chan2dev(&dwc->chan), |
142 | "moving child desc %p to freelist\n", | 142 | "moving child desc %p to freelist\n", |
143 | child); | 143 | child); |
144 | list_splice_init(&desc->txd.tx_list, &dwc->free_list); | 144 | list_splice_init(&desc->tx_list, &dwc->free_list); |
145 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); | 145 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
146 | list_add(&desc->desc_node, &dwc->free_list); | 146 | list_add(&desc->desc_node, &dwc->free_list); |
147 | spin_unlock_bh(&dwc->lock); | 147 | spin_unlock_bh(&dwc->lock); |
@@ -209,19 +209,28 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
209 | param = txd->callback_param; | 209 | param = txd->callback_param; |
210 | 210 | ||
211 | dwc_sync_desc_for_cpu(dwc, desc); | 211 | dwc_sync_desc_for_cpu(dwc, desc); |
212 | list_splice_init(&txd->tx_list, &dwc->free_list); | 212 | list_splice_init(&desc->tx_list, &dwc->free_list); |
213 | list_move(&desc->desc_node, &dwc->free_list); | 213 | list_move(&desc->desc_node, &dwc->free_list); |
214 | 214 | ||
215 | /* | 215 | if (!dwc->chan.private) { |
216 | * We use dma_unmap_page() regardless of how the buffers were | 216 | struct device *parent = chan2parent(&dwc->chan); |
217 | * mapped before they were submitted... | 217 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
218 | */ | 218 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
219 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) | 219 | dma_unmap_single(parent, desc->lli.dar, |
220 | dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar, | 220 | desc->len, DMA_FROM_DEVICE); |
221 | desc->len, DMA_FROM_DEVICE); | 221 | else |
222 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) | 222 | dma_unmap_page(parent, desc->lli.dar, |
223 | dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar, | 223 | desc->len, DMA_FROM_DEVICE); |
224 | desc->len, DMA_TO_DEVICE); | 224 | } |
225 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
226 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
227 | dma_unmap_single(parent, desc->lli.sar, | ||
228 | desc->len, DMA_TO_DEVICE); | ||
229 | else | ||
230 | dma_unmap_page(parent, desc->lli.sar, | ||
231 | desc->len, DMA_TO_DEVICE); | ||
232 | } | ||
233 | } | ||
225 | 234 | ||
226 | /* | 235 | /* |
227 | * The API requires that no submissions are done from a | 236 | * The API requires that no submissions are done from a |
@@ -289,7 +298,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
289 | /* This one is currently in progress */ | 298 | /* This one is currently in progress */ |
290 | return; | 299 | return; |
291 | 300 | ||
292 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 301 | list_for_each_entry(child, &desc->tx_list, desc_node) |
293 | if (child->lli.llp == llp) | 302 | if (child->lli.llp == llp) |
294 | /* Currently in progress */ | 303 | /* Currently in progress */ |
295 | return; | 304 | return; |
@@ -356,7 +365,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
356 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | 365 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
357 | " cookie: %d\n", bad_desc->txd.cookie); | 366 | " cookie: %d\n", bad_desc->txd.cookie); |
358 | dwc_dump_lli(dwc, &bad_desc->lli); | 367 | dwc_dump_lli(dwc, &bad_desc->lli); |
359 | list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) | 368 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
360 | dwc_dump_lli(dwc, &child->lli); | 369 | dwc_dump_lli(dwc, &child->lli); |
361 | 370 | ||
362 | /* Pretend the descriptor completed successfully */ | 371 | /* Pretend the descriptor completed successfully */ |
@@ -608,7 +617,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
608 | prev->txd.phys, sizeof(prev->lli), | 617 | prev->txd.phys, sizeof(prev->lli), |
609 | DMA_TO_DEVICE); | 618 | DMA_TO_DEVICE); |
610 | list_add_tail(&desc->desc_node, | 619 | list_add_tail(&desc->desc_node, |
611 | &first->txd.tx_list); | 620 | &first->tx_list); |
612 | } | 621 | } |
613 | prev = desc; | 622 | prev = desc; |
614 | } | 623 | } |
@@ -658,8 +667,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
658 | reg_width = dws->reg_width; | 667 | reg_width = dws->reg_width; |
659 | prev = first = NULL; | 668 | prev = first = NULL; |
660 | 669 | ||
661 | sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction); | ||
662 | |||
663 | switch (direction) { | 670 | switch (direction) { |
664 | case DMA_TO_DEVICE: | 671 | case DMA_TO_DEVICE: |
665 | ctllo = (DWC_DEFAULT_CTLLO | 672 | ctllo = (DWC_DEFAULT_CTLLO |
@@ -700,7 +707,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
700 | sizeof(prev->lli), | 707 | sizeof(prev->lli), |
701 | DMA_TO_DEVICE); | 708 | DMA_TO_DEVICE); |
702 | list_add_tail(&desc->desc_node, | 709 | list_add_tail(&desc->desc_node, |
703 | &first->txd.tx_list); | 710 | &first->tx_list); |
704 | } | 711 | } |
705 | prev = desc; | 712 | prev = desc; |
706 | total_len += len; | 713 | total_len += len; |
@@ -746,7 +753,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
746 | sizeof(prev->lli), | 753 | sizeof(prev->lli), |
747 | DMA_TO_DEVICE); | 754 | DMA_TO_DEVICE); |
748 | list_add_tail(&desc->desc_node, | 755 | list_add_tail(&desc->desc_node, |
749 | &first->txd.tx_list); | 756 | &first->tx_list); |
750 | } | 757 | } |
751 | prev = desc; | 758 | prev = desc; |
752 | total_len += len; | 759 | total_len += len; |
@@ -902,6 +909,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
902 | break; | 909 | break; |
903 | } | 910 | } |
904 | 911 | ||
912 | INIT_LIST_HEAD(&desc->tx_list); | ||
905 | dma_async_tx_descriptor_init(&desc->txd, chan); | 913 | dma_async_tx_descriptor_init(&desc->txd, chan); |
906 | desc->txd.tx_submit = dwc_tx_submit; | 914 | desc->txd.tx_submit = dwc_tx_submit; |
907 | desc->txd.flags = DMA_CTRL_ACK; | 915 | desc->txd.flags = DMA_CTRL_ACK; |
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 13a580767031..d9a939f67f46 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -217,6 +217,7 @@ struct dw_desc { | |||
217 | 217 | ||
218 | /* THEN values for driver housekeeping */ | 218 | /* THEN values for driver housekeeping */ |
219 | struct list_head desc_node; | 219 | struct list_head desc_node; |
220 | struct list_head tx_list; | ||
220 | struct dma_async_tx_descriptor txd; | 221 | struct dma_async_tx_descriptor txd; |
221 | size_t len; | 222 | size_t len; |
222 | }; | 223 | }; |
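In both at_hdmac and dw_dmac the completion path above now skips DMA-API unmapping entirely when the channel is in slave mode (chan->private is set) and otherwise honors the DMA_COMPL_* flags, pairing dma_unmap_single() or dma_unmap_page() with however the client mapped the buffer. A condensed sketch of that pattern; the helper name and the dar/len parameters are placeholders for the driver's own descriptor fields, and chan->device->dev stands in for the device the drivers reach through their chan2parent() helpers.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static void my_unmap_dst(struct dma_chan *chan, struct dma_async_tx_descriptor *txd,
			 dma_addr_t dar, size_t len)
{
	struct device *parent = chan->device->dev;

	/* slave clients own their mappings; only unmap for memcpy offload */
	if (chan->private)
		return;

	if (txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)
		return;

	/* match the unmap call to how the buffer was originally mapped */
	if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
		dma_unmap_single(parent, dar, len, DMA_FROM_DEVICE);
	else
		dma_unmap_page(parent, dar, len, DMA_FROM_DEVICE);
}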
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index ef87a8984145..296f9e747fac 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/dmapool.h> | 34 | #include <linux/dmapool.h> |
35 | #include <linux/of_platform.h> | 35 | #include <linux/of_platform.h> |
36 | 36 | ||
37 | #include <asm/fsldma.h> | ||
37 | #include "fsldma.h" | 38 | #include "fsldma.h" |
38 | 39 | ||
39 | static void dma_init(struct fsl_dma_chan *fsl_chan) | 40 | static void dma_init(struct fsl_dma_chan *fsl_chan) |
@@ -280,28 +281,40 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) | |||
280 | } | 281 | } |
281 | 282 | ||
282 | /** | 283 | /** |
283 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status | 284 | * fsl_chan_set_request_count - Set DMA Request Count for external control |
284 | * @fsl_chan : Freescale DMA channel | 285 | * @fsl_chan : Freescale DMA channel |
285 | * @size : Pause control size, 0 for disable external pause control. | 286 | * @size : Number of bytes to transfer in a single request |
286 | * The maximum is 1024. | 287 | * |
288 | * The Freescale DMA channel can be controlled by the external signal DREQ#. | ||
289 | * The DMA request count is how many bytes are allowed to transfer before | ||
290 | * pausing the channel, after which a new assertion of DREQ# resumes channel | ||
291 | * operation. | ||
287 | * | 292 | * |
288 | * The Freescale DMA channel can be controlled by the external | 293 | * A size of 0 disables external pause control. The maximum size is 1024. |
289 | * signal DREQ#. The pause control size is how many bytes are allowed | ||
290 | * to transfer before pausing the channel, after which a new assertion | ||
291 | * of DREQ# resumes channel operation. | ||
292 | */ | 294 | */ |
293 | static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) | 295 | static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) |
294 | { | 296 | { |
295 | if (size > 1024) | 297 | BUG_ON(size > 1024); |
296 | return; | 298 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, |
299 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
300 | | ((__ilog2(size) << 24) & 0x0f000000), | ||
301 | 32); | ||
302 | } | ||
297 | 303 | ||
298 | if (size) { | 304 | /** |
299 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | 305 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status |
300 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | 306 | * @fsl_chan : Freescale DMA channel |
301 | | ((__ilog2(size) << 24) & 0x0f000000), | 307 | * @enable : 0 is disabled, 1 is enabled. |
302 | 32); | 308 | * |
309 | * The Freescale DMA channel can be controlled by the external signal DREQ#. | ||
310 | * The DMA Request Count feature should be used in addition to this feature | ||
311 | * to set the number of bytes to transfer before pausing the channel. | ||
312 | */ | ||
313 | static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) | ||
314 | { | ||
315 | if (enable) | ||
303 | fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | 316 | fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; |
304 | } else | 317 | else |
305 | fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | 318 | fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; |
306 | } | 319 | } |
307 | 320 | ||
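fsl_chan_set_request_count() above encodes the byte count as a log2 value in bits 27:24 of the channel mode register, so the size is expected to be a power of two no larger than 1024. A stand-alone sketch of the same encoding; the helper name is illustrative and ilog2() (from linux/log2.h) stands in for the __ilog2() used above.

/* Mirror of (__ilog2(size) << 24) & 0x0f000000 from fsl_chan_set_request_count(). */
static u32 encode_request_count(u32 mr, unsigned int size)
{
	return mr | ((ilog2(size) << 24) & 0x0f000000);
}

/* encode_request_count(0, 1024) sets the field to 10, since 1 << 10 == 1024 */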
@@ -326,7 +339,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) | |||
326 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | 339 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
327 | { | 340 | { |
328 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); | 341 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); |
329 | struct fsl_desc_sw *desc; | 342 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); |
343 | struct fsl_desc_sw *child; | ||
330 | unsigned long flags; | 344 | unsigned long flags; |
331 | dma_cookie_t cookie; | 345 | dma_cookie_t cookie; |
332 | 346 | ||
@@ -334,7 +348,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
334 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | 348 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); |
335 | 349 | ||
336 | cookie = fsl_chan->common.cookie; | 350 | cookie = fsl_chan->common.cookie; |
337 | list_for_each_entry(desc, &tx->tx_list, node) { | 351 | list_for_each_entry(child, &desc->tx_list, node) { |
338 | cookie++; | 352 | cookie++; |
339 | if (cookie < 0) | 353 | if (cookie < 0) |
340 | cookie = 1; | 354 | cookie = 1; |
@@ -343,8 +357,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
343 | } | 357 | } |
344 | 358 | ||
345 | fsl_chan->common.cookie = cookie; | 359 | fsl_chan->common.cookie = cookie; |
346 | append_ld_queue(fsl_chan, tx_to_fsl_desc(tx)); | 360 | append_ld_queue(fsl_chan, desc); |
347 | list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev); | 361 | list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); |
348 | 362 | ||
349 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | 363 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); |
350 | 364 | ||
@@ -366,6 +380,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | |||
366 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | 380 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); |
367 | if (desc_sw) { | 381 | if (desc_sw) { |
368 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); | 382 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); |
383 | INIT_LIST_HEAD(&desc_sw->tx_list); | ||
369 | dma_async_tx_descriptor_init(&desc_sw->async_tx, | 384 | dma_async_tx_descriptor_init(&desc_sw->async_tx, |
370 | &fsl_chan->common); | 385 | &fsl_chan->common); |
371 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; | 386 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; |
@@ -455,7 +470,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) | |||
455 | new->async_tx.flags = flags; | 470 | new->async_tx.flags = flags; |
456 | 471 | ||
457 | /* Insert the link descriptor to the LD ring */ | 472 | /* Insert the link descriptor to the LD ring */ |
458 | list_add_tail(&new->node, &new->async_tx.tx_list); | 473 | list_add_tail(&new->node, &new->tx_list); |
459 | 474 | ||
460 | /* Set End-of-link to the last link descriptor of new list*/ | 475 | /* Set End-of-link to the last link descriptor of new list*/ |
461 | set_ld_eol(fsl_chan, new); | 476 | set_ld_eol(fsl_chan, new); |
@@ -513,7 +528,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
513 | dma_dest += copy; | 528 | dma_dest += copy; |
514 | 529 | ||
515 | /* Insert the link descriptor to the LD ring */ | 530 | /* Insert the link descriptor to the LD ring */ |
516 | list_add_tail(&new->node, &first->async_tx.tx_list); | 531 | list_add_tail(&new->node, &first->tx_list); |
517 | } while (len); | 532 | } while (len); |
518 | 533 | ||
519 | new->async_tx.flags = flags; /* client is in control of this ack */ | 534 | new->async_tx.flags = flags; /* client is in control of this ack */ |
@@ -528,7 +543,7 @@ fail: | |||
528 | if (!first) | 543 | if (!first) |
529 | return NULL; | 544 | return NULL; |
530 | 545 | ||
531 | list = &first->async_tx.tx_list; | 546 | list = &first->tx_list; |
532 | list_for_each_entry_safe_reverse(new, prev, list, node) { | 547 | list_for_each_entry_safe_reverse(new, prev, list, node) { |
533 | list_del(&new->node); | 548 | list_del(&new->node); |
534 | dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); | 549 | dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); |
@@ -538,6 +553,229 @@ fail: | |||
538 | } | 553 | } |
539 | 554 | ||
540 | /** | 555 | /** |
556 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | ||
557 | * @chan: DMA channel | ||
558 | * @sgl: scatterlist to transfer to/from | ||
559 | * @sg_len: number of entries in @scatterlist | ||
560 | * @direction: DMA direction | ||
561 | * @flags: DMAEngine flags | ||
562 | * | ||
563 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | ||
564 | * DMA_SLAVE API, this gets the device-specific information from the | ||
565 | * chan->private variable. | ||
566 | */ | ||
567 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | ||
568 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
569 | enum dma_data_direction direction, unsigned long flags) | ||
570 | { | ||
571 | struct fsl_dma_chan *fsl_chan; | ||
572 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; | ||
573 | struct fsl_dma_slave *slave; | ||
574 | struct list_head *tx_list; | ||
575 | size_t copy; | ||
576 | |||
577 | int i; | ||
578 | struct scatterlist *sg; | ||
579 | size_t sg_used; | ||
580 | size_t hw_used; | ||
581 | struct fsl_dma_hw_addr *hw; | ||
582 | dma_addr_t dma_dst, dma_src; | ||
583 | |||
584 | if (!chan) | ||
585 | return NULL; | ||
586 | |||
587 | if (!chan->private) | ||
588 | return NULL; | ||
589 | |||
590 | fsl_chan = to_fsl_chan(chan); | ||
591 | slave = chan->private; | ||
592 | |||
593 | if (list_empty(&slave->addresses)) | ||
594 | return NULL; | ||
595 | |||
596 | hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); | ||
597 | hw_used = 0; | ||
598 | |||
599 | /* | ||
600 | * Build the hardware transaction to copy from the scatterlist to | ||
601 | * the hardware, or from the hardware to the scatterlist | ||
602 | * | ||
603 | * If you are copying from the hardware to the scatterlist and it | ||
604 | * takes two hardware entries to fill an entire page, then both | ||
605 | * hardware entries will be coalesced into the same page | ||
606 | * | ||
607 | * If you are copying from the scatterlist to the hardware and a | ||
608 | * single page can fill two hardware entries, then the data will | ||
609 | * be read out of the page into the first hardware entry, and so on | ||
610 | */ | ||
611 | for_each_sg(sgl, sg, sg_len, i) { | ||
612 | sg_used = 0; | ||
613 | |||
614 | /* Loop until the entire scatterlist entry is used */ | ||
615 | while (sg_used < sg_dma_len(sg)) { | ||
616 | |||
617 | /* | ||
618 | * If we've used up the current hardware address/length | ||
619 | * pair, we need to load a new one | ||
620 | * | ||
621 | * This is done in a while loop so that descriptors with | ||
622 | * length == 0 will be skipped | ||
623 | */ | ||
624 | while (hw_used >= hw->length) { | ||
625 | |||
626 | /* | ||
627 | * If the current hardware entry is the last | ||
628 | * entry in the list, we're finished | ||
629 | */ | ||
630 | if (list_is_last(&hw->entry, &slave->addresses)) | ||
631 | goto finished; | ||
632 | |||
633 | /* Get the next hardware address/length pair */ | ||
634 | hw = list_entry(hw->entry.next, | ||
635 | struct fsl_dma_hw_addr, entry); | ||
636 | hw_used = 0; | ||
637 | } | ||
638 | |||
639 | /* Allocate the link descriptor from DMA pool */ | ||
640 | new = fsl_dma_alloc_descriptor(fsl_chan); | ||
641 | if (!new) { | ||
642 | dev_err(fsl_chan->dev, "No free memory for " | ||
643 | "link descriptor\n"); | ||
644 | goto fail; | ||
645 | } | ||
646 | #ifdef FSL_DMA_LD_DEBUG | ||
647 | dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | ||
648 | #endif | ||
649 | |||
650 | /* | ||
651 | * Calculate the maximum number of bytes to transfer, | ||
652 | * making sure it is less than the DMA controller limit | ||
653 | */ | ||
654 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, | ||
655 | hw->length - hw_used); | ||
656 | copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT); | ||
657 | |||
658 | /* | ||
659 | * DMA_FROM_DEVICE | ||
660 | * from the hardware to the scatterlist | ||
661 | * | ||
662 | * DMA_TO_DEVICE | ||
663 | * from the scatterlist to the hardware | ||
664 | */ | ||
665 | if (direction == DMA_FROM_DEVICE) { | ||
666 | dma_src = hw->address + hw_used; | ||
667 | dma_dst = sg_dma_address(sg) + sg_used; | ||
668 | } else { | ||
669 | dma_src = sg_dma_address(sg) + sg_used; | ||
670 | dma_dst = hw->address + hw_used; | ||
671 | } | ||
672 | |||
673 | /* Fill in the descriptor */ | ||
674 | set_desc_cnt(fsl_chan, &new->hw, copy); | ||
675 | set_desc_src(fsl_chan, &new->hw, dma_src); | ||
676 | set_desc_dest(fsl_chan, &new->hw, dma_dst); | ||
677 | |||
678 | /* | ||
679 | * If this is not the first descriptor, chain the | ||
680 | * current descriptor after the previous descriptor | ||
681 | */ | ||
682 | if (!first) { | ||
683 | first = new; | ||
684 | } else { | ||
685 | set_desc_next(fsl_chan, &prev->hw, | ||
686 | new->async_tx.phys); | ||
687 | } | ||
688 | |||
689 | new->async_tx.cookie = 0; | ||
690 | async_tx_ack(&new->async_tx); | ||
691 | |||
692 | prev = new; | ||
693 | sg_used += copy; | ||
694 | hw_used += copy; | ||
695 | |||
696 | /* Insert the link descriptor into the LD ring */ | ||
697 | list_add_tail(&new->node, &first->tx_list); | ||
698 | } | ||
699 | } | ||
700 | |||
701 | finished: | ||
702 | |||
703 | /* All of the hardware address/length pairs had length == 0 */ | ||
704 | if (!first || !new) | ||
705 | return NULL; | ||
706 | |||
707 | new->async_tx.flags = flags; | ||
708 | new->async_tx.cookie = -EBUSY; | ||
709 | |||
710 | /* Set End-of-link to the last link descriptor of new list */ | ||
711 | set_ld_eol(fsl_chan, new); | ||
712 | |||
713 | /* Enable extra controller features */ | ||
714 | if (fsl_chan->set_src_loop_size) | ||
715 | fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); | ||
716 | |||
717 | if (fsl_chan->set_dest_loop_size) | ||
718 | fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size); | ||
719 | |||
720 | if (fsl_chan->toggle_ext_start) | ||
721 | fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); | ||
722 | |||
723 | if (fsl_chan->toggle_ext_pause) | ||
724 | fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause); | ||
725 | |||
726 | if (fsl_chan->set_request_count) | ||
727 | fsl_chan->set_request_count(fsl_chan, slave->request_count); | ||
728 | |||
729 | return &first->async_tx; | ||
730 | |||
731 | fail: | ||
732 | /* If first was not set, then we failed to allocate the very first | ||
733 | * descriptor, and we're done */ | ||
734 | if (!first) | ||
735 | return NULL; | ||
736 | |||
737 | /* | ||
738 | * First is set, so all of the descriptors we allocated have been added | ||
739 | * to first->tx_list, INCLUDING "first" itself. Therefore we | ||
740 | * must traverse the list backwards freeing each descriptor in turn | ||
741 | * | ||
742 | * We're re-using variables for the loop, oh well | ||
743 | */ | ||
744 | tx_list = &first->tx_list; | ||
745 | list_for_each_entry_safe_reverse(new, prev, tx_list, node) { | ||
746 | list_del_init(&new->node); | ||
747 | dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); | ||
748 | } | ||
749 | |||
750 | return NULL; | ||
751 | } | ||
752 | |||
753 | static void fsl_dma_device_terminate_all(struct dma_chan *chan) | ||
754 | { | ||
755 | struct fsl_dma_chan *fsl_chan; | ||
756 | struct fsl_desc_sw *desc, *tmp; | ||
757 | unsigned long flags; | ||
758 | |||
759 | if (!chan) | ||
760 | return; | ||
761 | |||
762 | fsl_chan = to_fsl_chan(chan); | ||
763 | |||
764 | /* Halt the DMA engine */ | ||
765 | dma_halt(fsl_chan); | ||
766 | |||
767 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
768 | |||
769 | /* Remove and free all of the descriptors in the LD queue */ | ||
770 | list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) { | ||
771 | list_del(&desc->node); | ||
772 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
773 | } | ||
774 | |||
775 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
776 | } | ||
777 | |||
778 | /** | ||
541 | * fsl_dma_update_completed_cookie - Update the completed cookie. | 779 | * fsl_dma_update_completed_cookie - Update the completed cookie. |
542 | * @fsl_chan : Freescale DMA channel | 780 | * @fsl_chan : Freescale DMA channel |
543 | */ | 781 | */ |
@@ -883,6 +1121,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, | |||
883 | new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; | 1121 | new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; |
884 | new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; | 1122 | new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; |
885 | new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; | 1123 | new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; |
1124 | new_fsl_chan->set_request_count = fsl_chan_set_request_count; | ||
886 | } | 1125 | } |
887 | 1126 | ||
888 | spin_lock_init(&new_fsl_chan->desc_lock); | 1127 | spin_lock_init(&new_fsl_chan->desc_lock); |
@@ -962,12 +1201,15 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, | |||
962 | 1201 | ||
963 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | 1202 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); |
964 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | 1203 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); |
1204 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); | ||
965 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | 1205 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; |
966 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | 1206 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; |
967 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; | 1207 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; |
968 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | 1208 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; |
969 | fdev->common.device_is_tx_complete = fsl_dma_is_complete; | 1209 | fdev->common.device_is_tx_complete = fsl_dma_is_complete; |
970 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | 1210 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; |
1211 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; | ||
1212 | fdev->common.device_terminate_all = fsl_dma_device_terminate_all; | ||
971 | fdev->common.dev = &dev->dev; | 1213 | fdev->common.dev = &dev->dev; |
972 | 1214 | ||
973 | fdev->irq = irq_of_parse_and_map(dev->node, 0); | 1215 | fdev->irq = irq_of_parse_and_map(dev->node, 0); |
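The new fsl_dma_prep_slave_sg() pulls its device-side address/length pairs from chan->private (the struct fsl_dma_slave referenced above, from asm/fsldma.h) and reads the scatterlist with sg_dma_address()/sg_dma_len(), so the caller must have DMA-mapped it first. A hedged outline of the client-side flow under those assumptions; the function name and error handling are illustrative, and chan->device->dev stands in for whatever device the client maps against.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_start_slave_tx(struct dma_chan *chan, struct fsl_dma_slave *slave,
			     struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *tx;
	int nents;

	/* per the DMA_SLAVE convention above, device details ride in chan->private */
	chan->private = slave;

	/* the driver expects an already-mapped scatterlist */
	nents = dma_map_sg(chan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	tx = chan->device->device_prep_slave_sg(chan, sgl, nents,
						DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->tx_submit(tx);
	chan->device->device_issue_pending(chan);
	return 0;
}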
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index dc7f26865797..0df14cbb8ca3 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -90,6 +90,7 @@ struct fsl_dma_ld_hw { | |||
90 | struct fsl_desc_sw { | 90 | struct fsl_desc_sw { |
91 | struct fsl_dma_ld_hw hw; | 91 | struct fsl_dma_ld_hw hw; |
92 | struct list_head node; | 92 | struct list_head node; |
93 | struct list_head tx_list; | ||
93 | struct dma_async_tx_descriptor async_tx; | 94 | struct dma_async_tx_descriptor async_tx; |
94 | struct list_head *ld; | 95 | struct list_head *ld; |
95 | void *priv; | 96 | void *priv; |
@@ -143,10 +144,11 @@ struct fsl_dma_chan { | |||
143 | struct tasklet_struct tasklet; | 144 | struct tasklet_struct tasklet; |
144 | u32 feature; | 145 | u32 feature; |
145 | 146 | ||
146 | void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); | 147 | void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable); |
147 | void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); | 148 | void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); |
148 | void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | 149 | void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); |
149 | void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | 150 | void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); |
151 | void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size); | ||
150 | }; | 152 | }; |
151 | 153 | ||
152 | #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) | 154 | #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) |
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c deleted file mode 100644 index 2225bb6ba3d1..000000000000 --- a/drivers/dma/ioat.c +++ /dev/null | |||
@@ -1,202 +0,0 @@ | |||
1 | /* | ||
2 | * Intel I/OAT DMA Linux driver | ||
3 | * Copyright(c) 2007 - 2009 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in | ||
19 | * the file called "COPYING". | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This driver supports an Intel I/OAT DMA engine, which does asynchronous | ||
25 | * copy operations. | ||
26 | */ | ||
27 | |||
28 | #include <linux/init.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/pci.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/dca.h> | ||
33 | #include "ioatdma.h" | ||
34 | #include "ioatdma_registers.h" | ||
35 | #include "ioatdma_hw.h" | ||
36 | |||
37 | MODULE_VERSION(IOAT_DMA_VERSION); | ||
38 | MODULE_LICENSE("GPL"); | ||
39 | MODULE_AUTHOR("Intel Corporation"); | ||
40 | |||
41 | static struct pci_device_id ioat_pci_tbl[] = { | ||
42 | /* I/OAT v1 platforms */ | ||
43 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, | ||
44 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) }, | ||
45 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) }, | ||
46 | { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, | ||
47 | |||
48 | /* I/OAT v2 platforms */ | ||
49 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, | ||
50 | |||
51 | /* I/OAT v3 platforms */ | ||
52 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, | ||
53 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, | ||
54 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, | ||
55 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, | ||
56 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, | ||
57 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, | ||
58 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, | ||
59 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, | ||
60 | { 0, } | ||
61 | }; | ||
62 | |||
63 | struct ioat_device { | ||
64 | struct pci_dev *pdev; | ||
65 | void __iomem *iobase; | ||
66 | struct ioatdma_device *dma; | ||
67 | struct dca_provider *dca; | ||
68 | }; | ||
69 | |||
70 | static int __devinit ioat_probe(struct pci_dev *pdev, | ||
71 | const struct pci_device_id *id); | ||
72 | static void __devexit ioat_remove(struct pci_dev *pdev); | ||
73 | |||
74 | static int ioat_dca_enabled = 1; | ||
75 | module_param(ioat_dca_enabled, int, 0644); | ||
76 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | ||
77 | |||
78 | static struct pci_driver ioat_pci_driver = { | ||
79 | .name = "ioatdma", | ||
80 | .id_table = ioat_pci_tbl, | ||
81 | .probe = ioat_probe, | ||
82 | .remove = __devexit_p(ioat_remove), | ||
83 | }; | ||
84 | |||
85 | static int __devinit ioat_probe(struct pci_dev *pdev, | ||
86 | const struct pci_device_id *id) | ||
87 | { | ||
88 | void __iomem *iobase; | ||
89 | struct ioat_device *device; | ||
90 | unsigned long mmio_start, mmio_len; | ||
91 | int err; | ||
92 | |||
93 | err = pci_enable_device(pdev); | ||
94 | if (err) | ||
95 | goto err_enable_device; | ||
96 | |||
97 | err = pci_request_regions(pdev, ioat_pci_driver.name); | ||
98 | if (err) | ||
99 | goto err_request_regions; | ||
100 | |||
101 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
102 | if (err) | ||
103 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
104 | if (err) | ||
105 | goto err_set_dma_mask; | ||
106 | |||
107 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
108 | if (err) | ||
109 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
110 | if (err) | ||
111 | goto err_set_dma_mask; | ||
112 | |||
113 | mmio_start = pci_resource_start(pdev, 0); | ||
114 | mmio_len = pci_resource_len(pdev, 0); | ||
115 | iobase = ioremap(mmio_start, mmio_len); | ||
116 | if (!iobase) { | ||
117 | err = -ENOMEM; | ||
118 | goto err_ioremap; | ||
119 | } | ||
120 | |||
121 | device = kzalloc(sizeof(*device), GFP_KERNEL); | ||
122 | if (!device) { | ||
123 | err = -ENOMEM; | ||
124 | goto err_kzalloc; | ||
125 | } | ||
126 | device->pdev = pdev; | ||
127 | pci_set_drvdata(pdev, device); | ||
128 | device->iobase = iobase; | ||
129 | |||
130 | pci_set_master(pdev); | ||
131 | |||
132 | switch (readb(iobase + IOAT_VER_OFFSET)) { | ||
133 | case IOAT_VER_1_2: | ||
134 | device->dma = ioat_dma_probe(pdev, iobase); | ||
135 | if (device->dma && ioat_dca_enabled) | ||
136 | device->dca = ioat_dca_init(pdev, iobase); | ||
137 | break; | ||
138 | case IOAT_VER_2_0: | ||
139 | device->dma = ioat_dma_probe(pdev, iobase); | ||
140 | if (device->dma && ioat_dca_enabled) | ||
141 | device->dca = ioat2_dca_init(pdev, iobase); | ||
142 | break; | ||
143 | case IOAT_VER_3_0: | ||
144 | device->dma = ioat_dma_probe(pdev, iobase); | ||
145 | if (device->dma && ioat_dca_enabled) | ||
146 | device->dca = ioat3_dca_init(pdev, iobase); | ||
147 | break; | ||
148 | default: | ||
149 | err = -ENODEV; | ||
150 | break; | ||
151 | } | ||
152 | if (!device->dma) | ||
153 | err = -ENODEV; | ||
154 | |||
155 | if (err) | ||
156 | goto err_version; | ||
157 | |||
158 | return 0; | ||
159 | |||
160 | err_version: | ||
161 | kfree(device); | ||
162 | err_kzalloc: | ||
163 | iounmap(iobase); | ||
164 | err_ioremap: | ||
165 | err_set_dma_mask: | ||
166 | pci_release_regions(pdev); | ||
167 | pci_disable_device(pdev); | ||
168 | err_request_regions: | ||
169 | err_enable_device: | ||
170 | return err; | ||
171 | } | ||
172 | |||
173 | static void __devexit ioat_remove(struct pci_dev *pdev) | ||
174 | { | ||
175 | struct ioat_device *device = pci_get_drvdata(pdev); | ||
176 | |||
177 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | ||
178 | if (device->dca) { | ||
179 | unregister_dca_provider(device->dca); | ||
180 | free_dca_provider(device->dca); | ||
181 | device->dca = NULL; | ||
182 | } | ||
183 | |||
184 | if (device->dma) { | ||
185 | ioat_dma_remove(device->dma); | ||
186 | device->dma = NULL; | ||
187 | } | ||
188 | |||
189 | kfree(device); | ||
190 | } | ||
191 | |||
192 | static int __init ioat_init_module(void) | ||
193 | { | ||
194 | return pci_register_driver(&ioat_pci_driver); | ||
195 | } | ||
196 | module_init(ioat_init_module); | ||
197 | |||
198 | static void __exit ioat_exit_module(void) | ||
199 | { | ||
200 | pci_unregister_driver(&ioat_pci_driver); | ||
201 | } | ||
202 | module_exit(ioat_exit_module); | ||
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile new file mode 100644 index 000000000000..8997d3fb9051 --- /dev/null +++ b/drivers/dma/ioat/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | ||
2 | ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o | ||
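The monolithic drivers/dma/ioat.c is removed here and the ioatdma module is rebuilt from the new drivers/dma/ioat/ directory (pci.o, dma.o, dma_v2.o, dma_v3.o, dca.o). The deleted probe above also shows the usual 64-bit-then-32-bit DMA mask fallback; a condensed sketch of that idiom, with a hypothetical helper name:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_set_dma_masks(struct pci_dev *pdev)
{
	int err;

	/* prefer 64-bit addressing, fall back to 32-bit if the platform refuses */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	/* same fallback for the coherent (consistent) mask */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));

	return err;
}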
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat/dca.c index c012a1e15043..69d02615c4d6 100644 --- a/drivers/dma/ioat_dca.c +++ b/drivers/dma/ioat/dca.c | |||
@@ -33,8 +33,8 @@ | |||
33 | #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) | 33 | #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | #include "ioatdma.h" | 36 | #include "dma.h" |
37 | #include "ioatdma_registers.h" | 37 | #include "registers.h" |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 | 40 | * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 |
@@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = { | |||
242 | }; | 242 | }; |
243 | 243 | ||
244 | 244 | ||
245 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 245 | struct dca_provider * __devinit |
246 | ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
246 | { | 247 | { |
247 | struct dca_provider *dca; | 248 | struct dca_provider *dca; |
248 | struct ioat_dca_priv *ioatdca; | 249 | struct ioat_dca_priv *ioatdca; |
@@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) | |||
407 | return slots; | 408 | return slots; |
408 | } | 409 | } |
409 | 410 | ||
410 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 411 | struct dca_provider * __devinit |
412 | ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
411 | { | 413 | { |
412 | struct dca_provider *dca; | 414 | struct dca_provider *dca; |
413 | struct ioat_dca_priv *ioatdca; | 415 | struct ioat_dca_priv *ioatdca; |
@@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) | |||
602 | return slots; | 604 | return slots; |
603 | } | 605 | } |
604 | 606 | ||
605 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 607 | struct dca_provider * __devinit |
608 | ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
606 | { | 609 | { |
607 | struct dca_provider *dca; | 610 | struct dca_provider *dca; |
608 | struct ioat_dca_priv *ioatdca; | 611 | struct ioat_dca_priv *ioatdca; |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c new file mode 100644 index 000000000000..c524d36d3c2e --- /dev/null +++ b/drivers/dma/ioat/dma.c | |||
@@ -0,0 +1,1238 @@ | |||
1 | /* | ||
2 | * Intel I/OAT DMA Linux driver | ||
3 | * Copyright(c) 2004 - 2009 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in | ||
19 | * the file called "COPYING". | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This driver supports an Intel I/OAT DMA engine, which does asynchronous | ||
25 | * copy operations. | ||
26 | */ | ||
27 | |||
28 | #include <linux/init.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/pci.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/dmaengine.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/workqueue.h> | ||
36 | #include <linux/i7300_idle.h> | ||
37 | #include "dma.h" | ||
38 | #include "registers.h" | ||
39 | #include "hw.h" | ||
40 | |||
41 | int ioat_pending_level = 4; | ||
42 | module_param(ioat_pending_level, int, 0644); | ||
43 | MODULE_PARM_DESC(ioat_pending_level, | ||
44 | "high-water mark for pushing ioat descriptors (default: 4)"); | ||
45 | |||
46 | /* internal functions */ | ||
47 | static void ioat1_cleanup(struct ioat_dma_chan *ioat); | ||
48 | static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat); | ||
49 | |||
50 | /** | ||
51 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode | ||
52 | * @irq: interrupt id | ||
53 | * @data: interrupt data | ||
54 | */ | ||
55 | static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) | ||
56 | { | ||
57 | struct ioatdma_device *instance = data; | ||
58 | struct ioat_chan_common *chan; | ||
59 | unsigned long attnstatus; | ||
60 | int bit; | ||
61 | u8 intrctrl; | ||
62 | |||
63 | intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
64 | |||
65 | if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) | ||
66 | return IRQ_NONE; | ||
67 | |||
68 | if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { | ||
69 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
70 | return IRQ_NONE; | ||
71 | } | ||
72 | |||
73 | attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); | ||
74 | for_each_bit(bit, &attnstatus, BITS_PER_LONG) { | ||
75 | chan = ioat_chan_by_index(instance, bit); | ||
76 | tasklet_schedule(&chan->cleanup_task); | ||
77 | } | ||
78 | |||
79 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
80 | return IRQ_HANDLED; | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode | ||
85 | * @irq: interrupt id | ||
86 | * @data: interrupt data | ||
87 | */ | ||
88 | static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | ||
89 | { | ||
90 | struct ioat_chan_common *chan = data; | ||
91 | |||
92 | tasklet_schedule(&chan->cleanup_task); | ||
93 | |||
94 | return IRQ_HANDLED; | ||
95 | } | ||
96 | |||
97 | static void ioat1_cleanup_tasklet(unsigned long data); | ||
98 | |||
99 | /* common channel initialization */ | ||
100 | void ioat_init_channel(struct ioatdma_device *device, | ||
101 | struct ioat_chan_common *chan, int idx, | ||
102 | void (*timer_fn)(unsigned long), | ||
103 | void (*tasklet)(unsigned long), | ||
104 | unsigned long ioat) | ||
105 | { | ||
106 | struct dma_device *dma = &device->common; | ||
107 | |||
108 | chan->device = device; | ||
109 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); | ||
110 | spin_lock_init(&chan->cleanup_lock); | ||
111 | chan->common.device = dma; | ||
112 | list_add_tail(&chan->common.device_node, &dma->channels); | ||
113 | device->idx[idx] = chan; | ||
114 | init_timer(&chan->timer); | ||
115 | chan->timer.function = timer_fn; | ||
116 | chan->timer.data = ioat; | ||
117 | tasklet_init(&chan->cleanup_task, tasklet, ioat); | ||
118 | tasklet_disable(&chan->cleanup_task); | ||
119 | } | ||
120 | |||
121 | static void ioat1_timer_event(unsigned long data); | ||
122 | |||
123 | /** | ||
124 | * ioat1_enumerate_channels - find and initialize the device's channels | ||
125 | * @device: the device to be enumerated | ||
126 | */ | ||
127 | static int ioat1_enumerate_channels(struct ioatdma_device *device) | ||
128 | { | ||
129 | u8 xfercap_scale; | ||
130 | u32 xfercap; | ||
131 | int i; | ||
132 | struct ioat_dma_chan *ioat; | ||
133 | struct device *dev = &device->pdev->dev; | ||
134 | struct dma_device *dma = &device->common; | ||
135 | |||
136 | INIT_LIST_HEAD(&dma->channels); | ||
137 | dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); | ||
138 | dma->chancnt &= 0x1f; /* bits [4:0] valid */ | ||
139 | if (dma->chancnt > ARRAY_SIZE(device->idx)) { | ||
140 | dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", | ||
141 | dma->chancnt, ARRAY_SIZE(device->idx)); | ||
142 | dma->chancnt = ARRAY_SIZE(device->idx); | ||
143 | } | ||
144 | xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | ||
145 | xfercap_scale &= 0x1f; /* bits [4:0] valid */ | ||
146 | xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); | ||
147 | dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap); | ||
148 | |||
149 | #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL | ||
150 | if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) | ||
151 | dma->chancnt--; | ||
152 | #endif | ||
153 | for (i = 0; i < dma->chancnt; i++) { | ||
154 | ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); | ||
155 | if (!ioat) | ||
156 | break; | ||
157 | |||
158 | ioat_init_channel(device, &ioat->base, i, | ||
159 | ioat1_timer_event, | ||
160 | ioat1_cleanup_tasklet, | ||
161 | (unsigned long) ioat); | ||
162 | ioat->xfercap = xfercap; | ||
163 | spin_lock_init(&ioat->desc_lock); | ||
164 | INIT_LIST_HEAD(&ioat->free_desc); | ||
165 | INIT_LIST_HEAD(&ioat->used_desc); | ||
166 | } | ||
167 | dma->chancnt = i; | ||
168 | return i; | ||
169 | } | ||
170 | |||
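The enumeration above decodes two registers: CHANCNT, whose low five bits carry the channel count, and XFERCAP, whose low five bits carry a power-of-two per-descriptor transfer cap (a scale of zero meaning "no limit"). A minimal sketch of that decoding, for illustration only; decode_xfercap is not a driver function, and the types come from the kernel headers already included in this file:

	/* illustration only: decode the XFERCAP register value */
	static size_t decode_xfercap(u8 raw)
	{
		u8 scale = raw & 0x1f;	/* bits [4:0] are valid */

		/* a scale of 0 means "no per-descriptor limit" */
		return scale ? (size_t)1 << scale : (size_t)-1;
	}
	/* e.g. a scale of 17 gives a 128 KiB cap per descriptor */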
171 | /** | ||
172 | * ioat1_dma_memcpy_issue_pending - push appended descriptors that the | ||
173 | * hardware may not have seen yet | ||
174 | * @chan: DMA channel handle | ||
175 | */ | ||
176 | static inline void | ||
177 | __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) | ||
178 | { | ||
179 | void __iomem *reg_base = ioat->base.reg_base; | ||
180 | |||
181 | dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n", | ||
182 | __func__, ioat->pending); | ||
183 | ioat->pending = 0; | ||
184 | writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET); | ||
185 | } | ||
186 | |||
187 | static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
188 | { | ||
189 | struct ioat_dma_chan *ioat = to_ioat_chan(chan); | ||
190 | |||
191 | if (ioat->pending > 0) { | ||
192 | spin_lock_bh(&ioat->desc_lock); | ||
193 | __ioat1_dma_memcpy_issue_pending(ioat); | ||
194 | spin_unlock_bh(&ioat->desc_lock); | ||
195 | } | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * ioat1_reset_channel - restart a channel | ||
200 | * @ioat: IOAT DMA channel handle | ||
201 | */ | ||
202 | static void ioat1_reset_channel(struct ioat_dma_chan *ioat) | ||
203 | { | ||
204 | struct ioat_chan_common *chan = &ioat->base; | ||
205 | void __iomem *reg_base = chan->reg_base; | ||
206 | u32 chansts, chanerr; | ||
207 | |||
208 | dev_warn(to_dev(chan), "reset\n"); | ||
209 | chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); | ||
210 | chansts = *chan->completion & IOAT_CHANSTS_STATUS; | ||
211 | if (chanerr) { | ||
212 | dev_err(to_dev(chan), | ||
213 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", | ||
214 | chan_num(chan), chansts, chanerr); | ||
215 | writel(chanerr, reg_base + IOAT_CHANERR_OFFSET); | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * whack it upside the head with a reset | ||
220 | * and wait for things to settle out. | ||
221 | * force the pending count to a really big negative | ||
222 | * to make sure no one forces an issue_pending | ||
223 | * while we're waiting. | ||
224 | */ | ||
225 | |||
226 | ioat->pending = INT_MIN; | ||
227 | writeb(IOAT_CHANCMD_RESET, | ||
228 | reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
229 | set_bit(IOAT_RESET_PENDING, &chan->state); | ||
230 | mod_timer(&chan->timer, jiffies + RESET_DELAY); | ||
231 | } | ||
232 | |||
233 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | ||
234 | { | ||
235 | struct dma_chan *c = tx->chan; | ||
236 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
237 | struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); | ||
238 | struct ioat_chan_common *chan = &ioat->base; | ||
239 | struct ioat_desc_sw *first; | ||
240 | struct ioat_desc_sw *chain_tail; | ||
241 | dma_cookie_t cookie; | ||
242 | |||
243 | spin_lock_bh(&ioat->desc_lock); | ||
244 | /* cookie incr and addition to used_list must be atomic */ | ||
245 | cookie = c->cookie; | ||
246 | cookie++; | ||
247 | if (cookie < 0) | ||
248 | cookie = 1; | ||
249 | c->cookie = cookie; | ||
250 | tx->cookie = cookie; | ||
251 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | ||
252 | |||
253 | /* write address into NextDescriptor field of last desc in chain */ | ||
254 | first = to_ioat_desc(desc->tx_list.next); | ||
255 | chain_tail = to_ioat_desc(ioat->used_desc.prev); | ||
256 | /* make descriptor updates globally visible before chaining */ | ||
257 | wmb(); | ||
258 | chain_tail->hw->next = first->txd.phys; | ||
259 | list_splice_tail_init(&desc->tx_list, &ioat->used_desc); | ||
260 | dump_desc_dbg(ioat, chain_tail); | ||
261 | dump_desc_dbg(ioat, first); | ||
262 | |||
263 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | ||
264 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
265 | |||
266 | ioat->active += desc->hw->tx_cnt; | ||
267 | ioat->pending += desc->hw->tx_cnt; | ||
268 | if (ioat->pending >= ioat_pending_level) | ||
269 | __ioat1_dma_memcpy_issue_pending(ioat); | ||
270 | spin_unlock_bh(&ioat->desc_lock); | ||
271 | |||
272 | return cookie; | ||
273 | } | ||
274 | |||
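Note how the submit path advances the channel cookie: dmaengine reserves negative cookies for error codes, so when the signed counter overflows it is wrapped back to 1 rather than allowed to go negative. The same rule in isolation, as a hedged sketch (next_cookie is illustrative, not a helper in this driver):

	/* illustration only: advance a dmaengine cookie while skipping
	 * the negative range reserved for error returns
	 */
	static dma_cookie_t next_cookie(dma_cookie_t last)
	{
		dma_cookie_t c = last + 1;

		return c < 0 ? 1 : c;
	}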
275 | /** | ||
276 | * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair | ||
277 | * @ioat: the channel supplying the memory pool for the descriptors | ||
278 | * @flags: allocation flags | ||
279 | */ | ||
280 | static struct ioat_desc_sw * | ||
281 | ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) | ||
282 | { | ||
283 | struct ioat_dma_descriptor *desc; | ||
284 | struct ioat_desc_sw *desc_sw; | ||
285 | struct ioatdma_device *ioatdma_device; | ||
286 | dma_addr_t phys; | ||
287 | |||
288 | ioatdma_device = ioat->base.device; | ||
289 | desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); | ||
290 | if (unlikely(!desc)) | ||
291 | return NULL; | ||
292 | |||
293 | desc_sw = kzalloc(sizeof(*desc_sw), flags); | ||
294 | if (unlikely(!desc_sw)) { | ||
295 | pci_pool_free(ioatdma_device->dma_pool, desc, phys); | ||
296 | return NULL; | ||
297 | } | ||
298 | |||
299 | memset(desc, 0, sizeof(*desc)); | ||
300 | |||
301 | INIT_LIST_HEAD(&desc_sw->tx_list); | ||
302 | dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); | ||
303 | desc_sw->txd.tx_submit = ioat1_tx_submit; | ||
304 | desc_sw->hw = desc; | ||
305 | desc_sw->txd.phys = phys; | ||
306 | set_desc_id(desc_sw, -1); | ||
307 | |||
308 | return desc_sw; | ||
309 | } | ||
310 | |||
311 | static int ioat_initial_desc_count = 256; | ||
312 | module_param(ioat_initial_desc_count, int, 0644); | ||
313 | MODULE_PARM_DESC(ioat_initial_desc_count, | ||
314 | "ioat1: initial descriptors per channel (default: 256)"); | ||
315 | /** | ||
316 | * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors | ||
317 | * @c: the channel to be filled out | ||
318 | */ | ||
319 | static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) | ||
320 | { | ||
321 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
322 | struct ioat_chan_common *chan = &ioat->base; | ||
323 | struct ioat_desc_sw *desc; | ||
324 | u32 chanerr; | ||
325 | int i; | ||
326 | LIST_HEAD(tmp_list); | ||
327 | |||
328 | /* have we already been set up? */ | ||
329 | if (!list_empty(&ioat->free_desc)) | ||
330 | return ioat->desccount; | ||
331 | |||
332 | /* Setup register to interrupt and write completion status on error */ | ||
333 | writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
334 | |||
335 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
336 | if (chanerr) { | ||
337 | dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); | ||
338 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
339 | } | ||
340 | |||
341 | /* Allocate descriptors */ | ||
342 | for (i = 0; i < ioat_initial_desc_count; i++) { | ||
343 | desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL); | ||
344 | if (!desc) { | ||
345 | dev_err(to_dev(chan), "Only %d initial descriptors\n", i); | ||
346 | break; | ||
347 | } | ||
348 | set_desc_id(desc, i); | ||
349 | list_add_tail(&desc->node, &tmp_list); | ||
350 | } | ||
351 | spin_lock_bh(&ioat->desc_lock); | ||
352 | ioat->desccount = i; | ||
353 | list_splice(&tmp_list, &ioat->free_desc); | ||
354 | spin_unlock_bh(&ioat->desc_lock); | ||
355 | |||
356 | /* allocate a completion writeback area */ | ||
357 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | ||
358 | chan->completion = pci_pool_alloc(chan->device->completion_pool, | ||
359 | GFP_KERNEL, &chan->completion_dma); | ||
360 | memset(chan->completion, 0, sizeof(*chan->completion)); | ||
361 | writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, | ||
362 | chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | ||
363 | writel(((u64) chan->completion_dma) >> 32, | ||
364 | chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | ||
365 | |||
366 | tasklet_enable(&chan->cleanup_task); | ||
367 | ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ | ||
368 | dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", | ||
369 | __func__, ioat->desccount); | ||
370 | return ioat->desccount; | ||
371 | } | ||
372 | |||
373 | /** | ||
374 | * ioat1_dma_free_chan_resources - release all the descriptors | ||
375 | * @chan: the channel to be cleaned | ||
376 | */ | ||
377 | static void ioat1_dma_free_chan_resources(struct dma_chan *c) | ||
378 | { | ||
379 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
380 | struct ioat_chan_common *chan = &ioat->base; | ||
381 | struct ioatdma_device *ioatdma_device = chan->device; | ||
382 | struct ioat_desc_sw *desc, *_desc; | ||
383 | int in_use_descs = 0; | ||
384 | |||
385 | /* Before freeing channel resources first check | ||
386 | * if they have been previously allocated for this channel. | ||
387 | */ | ||
388 | if (ioat->desccount == 0) | ||
389 | return; | ||
390 | |||
391 | tasklet_disable(&chan->cleanup_task); | ||
392 | del_timer_sync(&chan->timer); | ||
393 | ioat1_cleanup(ioat); | ||
394 | |||
395 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | ||
396 | * before removing DMA descriptor resources. | ||
397 | */ | ||
398 | writeb(IOAT_CHANCMD_RESET, | ||
399 | chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
400 | mdelay(100); | ||
401 | |||
402 | spin_lock_bh(&ioat->desc_lock); | ||
403 | list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { | ||
404 | dev_dbg(to_dev(chan), "%s: freeing %d from used list\n", | ||
405 | __func__, desc_id(desc)); | ||
406 | dump_desc_dbg(ioat, desc); | ||
407 | in_use_descs++; | ||
408 | list_del(&desc->node); | ||
409 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
410 | desc->txd.phys); | ||
411 | kfree(desc); | ||
412 | } | ||
413 | list_for_each_entry_safe(desc, _desc, | ||
414 | &ioat->free_desc, node) { | ||
415 | list_del(&desc->node); | ||
416 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
417 | desc->txd.phys); | ||
418 | kfree(desc); | ||
419 | } | ||
420 | spin_unlock_bh(&ioat->desc_lock); | ||
421 | |||
422 | pci_pool_free(ioatdma_device->completion_pool, | ||
423 | chan->completion, | ||
424 | chan->completion_dma); | ||
425 | |||
426 | /* one is ok since we left it on there on purpose */ | ||
427 | if (in_use_descs > 1) | ||
428 | dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", | ||
429 | in_use_descs - 1); | ||
430 | |||
431 | chan->last_completion = 0; | ||
432 | chan->completion_dma = 0; | ||
433 | ioat->pending = 0; | ||
434 | ioat->desccount = 0; | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * ioat1_dma_get_next_descriptor - return the next available descriptor | ||
439 | * @ioat: IOAT DMA channel handle | ||
440 | * | ||
441 | * Gets the next descriptor from the chain, and must be called with the | ||
442 | * channel's desc_lock held. Allocates more descriptors if the channel | ||
443 | * has run out. | ||
444 | */ | ||
445 | static struct ioat_desc_sw * | ||
446 | ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) | ||
447 | { | ||
448 | struct ioat_desc_sw *new; | ||
449 | |||
450 | if (!list_empty(&ioat->free_desc)) { | ||
451 | new = to_ioat_desc(ioat->free_desc.next); | ||
452 | list_del(&new->node); | ||
453 | } else { | ||
454 | /* try to get another desc */ | ||
455 | new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); | ||
456 | if (!new) { | ||
457 | dev_err(to_dev(&ioat->base), "alloc failed\n"); | ||
458 | return NULL; | ||
459 | } | ||
460 | } | ||
461 | dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n", | ||
462 | __func__, desc_id(new)); | ||
463 | prefetch(new->hw); | ||
464 | return new; | ||
465 | } | ||
466 | |||
467 | static struct dma_async_tx_descriptor * | ||
468 | ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, | ||
469 | dma_addr_t dma_src, size_t len, unsigned long flags) | ||
470 | { | ||
471 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
472 | struct ioat_desc_sw *desc; | ||
473 | size_t copy; | ||
474 | LIST_HEAD(chain); | ||
475 | dma_addr_t src = dma_src; | ||
476 | dma_addr_t dest = dma_dest; | ||
477 | size_t total_len = len; | ||
478 | struct ioat_dma_descriptor *hw = NULL; | ||
479 | int tx_cnt = 0; | ||
480 | |||
481 | spin_lock_bh(&ioat->desc_lock); | ||
482 | desc = ioat1_dma_get_next_descriptor(ioat); | ||
483 | do { | ||
484 | if (!desc) | ||
485 | break; | ||
486 | |||
487 | tx_cnt++; | ||
488 | copy = min_t(size_t, len, ioat->xfercap); | ||
489 | |||
490 | hw = desc->hw; | ||
491 | hw->size = copy; | ||
492 | hw->ctl = 0; | ||
493 | hw->src_addr = src; | ||
494 | hw->dst_addr = dest; | ||
495 | |||
496 | list_add_tail(&desc->node, &chain); | ||
497 | |||
498 | len -= copy; | ||
499 | dest += copy; | ||
500 | src += copy; | ||
501 | if (len) { | ||
502 | struct ioat_desc_sw *next; | ||
503 | |||
504 | async_tx_ack(&desc->txd); | ||
505 | next = ioat1_dma_get_next_descriptor(ioat); | ||
506 | hw->next = next ? next->txd.phys : 0; | ||
507 | dump_desc_dbg(ioat, desc); | ||
508 | desc = next; | ||
509 | } else | ||
510 | hw->next = 0; | ||
511 | } while (len); | ||
512 | |||
513 | if (!desc) { | ||
514 | struct ioat_chan_common *chan = &ioat->base; | ||
515 | |||
516 | dev_err(to_dev(chan), | ||
517 | "chan%d - get_next_desc failed\n", chan_num(chan)); | ||
518 | list_splice(&chain, &ioat->free_desc); | ||
519 | spin_unlock_bh(&ioat->desc_lock); | ||
520 | return NULL; | ||
521 | } | ||
522 | spin_unlock_bh(&ioat->desc_lock); | ||
523 | |||
524 | desc->txd.flags = flags; | ||
525 | desc->len = total_len; | ||
526 | list_splice(&chain, &desc->tx_list); | ||
527 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
528 | hw->ctl_f.compl_write = 1; | ||
529 | hw->tx_cnt = tx_cnt; | ||
530 | dump_desc_dbg(ioat, desc); | ||
531 | |||
532 | return &desc->txd; | ||
533 | } | ||
534 | |||
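The prep routine above splits one logical copy into as many hardware descriptors as the channel's transfer cap requires, linking them through hw->next and requesting an interrupt/completion write only on the final descriptor. As a rough worked example, assuming a 128 KiB cap, a 300 KiB copy occupies three descriptors (128 KiB + 128 KiB + 44 KiB). The helper below is purely illustrative; DIV_ROUND_UP comes from linux/kernel.h:

	/* illustration only: descriptors needed for a 'len'-byte copy on a
	 * channel whose per-descriptor cap is 'xfercap' bytes
	 */
	static int descs_needed(size_t len, size_t xfercap)
	{
		return DIV_ROUND_UP(len, xfercap);
	}
	/* descs_needed(300 << 10, 128 << 10) == 3 */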
535 | static void ioat1_cleanup_tasklet(unsigned long data) | ||
536 | { | ||
537 | struct ioat_dma_chan *chan = (void *)data; | ||
538 | |||
539 | ioat1_cleanup(chan); | ||
540 | writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); | ||
541 | } | ||
542 | |||
543 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | ||
544 | size_t len, struct ioat_dma_descriptor *hw) | ||
545 | { | ||
546 | struct pci_dev *pdev = chan->device->pdev; | ||
547 | size_t offset = len - hw->size; | ||
548 | |||
549 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
550 | ioat_unmap(pdev, hw->dst_addr - offset, len, | ||
551 | PCI_DMA_FROMDEVICE, flags, 1); | ||
552 | |||
553 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) | ||
554 | ioat_unmap(pdev, hw->src_addr - offset, len, | ||
555 | PCI_DMA_TODEVICE, flags, 0); | ||
556 | } | ||
557 | |||
558 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) | ||
559 | { | ||
560 | unsigned long phys_complete; | ||
561 | u64 completion; | ||
562 | |||
563 | completion = *chan->completion; | ||
564 | phys_complete = ioat_chansts_to_addr(completion); | ||
565 | |||
566 | dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, | ||
567 | (unsigned long long) phys_complete); | ||
568 | |||
569 | if (is_ioat_halted(completion)) { | ||
570 | u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
571 | dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", | ||
572 | chanerr); | ||
573 | |||
574 | /* TODO do something to salvage the situation */ | ||
575 | } | ||
576 | |||
577 | return phys_complete; | ||
578 | } | ||
579 | |||
580 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | ||
581 | unsigned long *phys_complete) | ||
582 | { | ||
583 | *phys_complete = ioat_get_current_completion(chan); | ||
584 | if (*phys_complete == chan->last_completion) | ||
585 | return false; | ||
586 | clear_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
587 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
588 | |||
589 | return true; | ||
590 | } | ||
591 | |||
592 | static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) | ||
593 | { | ||
594 | struct ioat_chan_common *chan = &ioat->base; | ||
595 | struct list_head *_desc, *n; | ||
596 | struct dma_async_tx_descriptor *tx; | ||
597 | |||
598 | dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", | ||
599 | __func__, phys_complete); | ||
600 | list_for_each_safe(_desc, n, &ioat->used_desc) { | ||
601 | struct ioat_desc_sw *desc; | ||
602 | |||
603 | prefetch(n); | ||
604 | desc = list_entry(_desc, typeof(*desc), node); | ||
605 | tx = &desc->txd; | ||
606 | /* | ||
607 | * Incoming DMA requests may use multiple descriptors, | ||
608 | * due to exceeding xfercap, perhaps. If so, only the | ||
609 | * last one will have a cookie, and require unmapping. | ||
610 | */ | ||
611 | dump_desc_dbg(ioat, desc); | ||
612 | if (tx->cookie) { | ||
613 | chan->completed_cookie = tx->cookie; | ||
614 | tx->cookie = 0; | ||
615 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | ||
616 | ioat->active -= desc->hw->tx_cnt; | ||
617 | if (tx->callback) { | ||
618 | tx->callback(tx->callback_param); | ||
619 | tx->callback = NULL; | ||
620 | } | ||
621 | } | ||
622 | |||
623 | if (tx->phys != phys_complete) { | ||
624 | /* | ||
625 | * a completed entry, but not the last, so clean | ||
626 | * up if the client is done with the descriptor | ||
627 | */ | ||
628 | if (async_tx_test_ack(tx)) | ||
629 | list_move_tail(&desc->node, &ioat->free_desc); | ||
630 | } else { | ||
631 | /* | ||
632 | * last used desc. Do not remove, so we can | ||
633 | * append from it. | ||
634 | */ | ||
635 | |||
636 | /* if nothing else is pending, cancel the | ||
637 | * completion timeout | ||
638 | */ | ||
639 | if (n == &ioat->used_desc) { | ||
640 | dev_dbg(to_dev(chan), | ||
641 | "%s cancel completion timeout\n", | ||
642 | __func__); | ||
643 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
644 | } | ||
645 | |||
646 | /* TODO check status bits? */ | ||
647 | break; | ||
648 | } | ||
649 | } | ||
650 | |||
651 | chan->last_completion = phys_complete; | ||
652 | } | ||
653 | |||
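In the walk above, every descriptor whose bus address precedes the completion write-back value has finished: its cookie (when present) becomes the channel's completed_cookie, its buffers are unmapped, and the entry returns to the free list once the client has acked it. The descriptor whose address matches phys_complete is deliberately left on used_desc so that new work can keep being appended behind it.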
654 | /** | ||
655 | * ioat1_cleanup - clean up finished descriptors | ||
656 | * @ioat: ioat channel to be cleaned up | ||
657 | * | ||
658 | * To prevent lock contention we defer cleanup when the locks are | ||
659 | * contended with a terminal timeout that forces cleanup and catches | ||
660 | * completion notification errors. | ||
661 | */ | ||
662 | static void ioat1_cleanup(struct ioat_dma_chan *ioat) | ||
663 | { | ||
664 | struct ioat_chan_common *chan = &ioat->base; | ||
665 | unsigned long phys_complete; | ||
666 | |||
667 | prefetch(chan->completion); | ||
668 | |||
669 | if (!spin_trylock_bh(&chan->cleanup_lock)) | ||
670 | return; | ||
671 | |||
672 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | ||
673 | spin_unlock_bh(&chan->cleanup_lock); | ||
674 | return; | ||
675 | } | ||
676 | |||
677 | if (!spin_trylock_bh(&ioat->desc_lock)) { | ||
678 | spin_unlock_bh(&chan->cleanup_lock); | ||
679 | return; | ||
680 | } | ||
681 | |||
682 | __cleanup(ioat, phys_complete); | ||
683 | |||
684 | spin_unlock_bh(&ioat->desc_lock); | ||
685 | spin_unlock_bh(&chan->cleanup_lock); | ||
686 | } | ||
687 | |||
688 | static void ioat1_timer_event(unsigned long data) | ||
689 | { | ||
690 | struct ioat_dma_chan *ioat = (void *) data; | ||
691 | struct ioat_chan_common *chan = &ioat->base; | ||
692 | |||
693 | dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); | ||
694 | |||
695 | spin_lock_bh(&chan->cleanup_lock); | ||
696 | if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { | ||
697 | struct ioat_desc_sw *desc; | ||
698 | |||
699 | spin_lock_bh(&ioat->desc_lock); | ||
700 | |||
701 | /* restart active descriptors */ | ||
702 | desc = to_ioat_desc(ioat->used_desc.prev); | ||
703 | ioat_set_chainaddr(ioat, desc->txd.phys); | ||
704 | ioat_start(chan); | ||
705 | |||
706 | ioat->pending = 0; | ||
707 | set_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
708 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
709 | spin_unlock_bh(&ioat->desc_lock); | ||
710 | } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | ||
711 | unsigned long phys_complete; | ||
712 | |||
713 | spin_lock_bh(&ioat->desc_lock); | ||
714 | /* if we haven't made progress and we have already | ||
715 | * acknowledged a pending completion once, then be more | ||
716 | * forceful with a restart | ||
717 | */ | ||
718 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
719 | __cleanup(ioat, phys_complete); | ||
720 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) | ||
721 | ioat1_reset_channel(ioat); | ||
722 | else { | ||
723 | u64 status = ioat_chansts(chan); | ||
724 | |||
725 | /* manually update the last completion address */ | ||
726 | if (ioat_chansts_to_addr(status) != 0) | ||
727 | *chan->completion = status; | ||
728 | |||
729 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
730 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
731 | } | ||
732 | spin_unlock_bh(&ioat->desc_lock); | ||
733 | } | ||
734 | spin_unlock_bh(&chan->cleanup_lock); | ||
735 | } | ||
736 | |||
737 | static enum dma_status | ||
738 | ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
739 | dma_cookie_t *done, dma_cookie_t *used) | ||
740 | { | ||
741 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
742 | |||
743 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | ||
744 | return DMA_SUCCESS; | ||
745 | |||
746 | ioat1_cleanup(ioat); | ||
747 | |||
748 | return ioat_is_complete(c, cookie, done, used); | ||
749 | } | ||
750 | |||
751 | static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) | ||
752 | { | ||
753 | struct ioat_chan_common *chan = &ioat->base; | ||
754 | struct ioat_desc_sw *desc; | ||
755 | struct ioat_dma_descriptor *hw; | ||
756 | |||
757 | spin_lock_bh(&ioat->desc_lock); | ||
758 | |||
759 | desc = ioat1_dma_get_next_descriptor(ioat); | ||
760 | |||
761 | if (!desc) { | ||
762 | dev_err(to_dev(chan), | ||
763 | "Unable to start null desc - get next desc failed\n"); | ||
764 | spin_unlock_bh(&ioat->desc_lock); | ||
765 | return; | ||
766 | } | ||
767 | |||
768 | hw = desc->hw; | ||
769 | hw->ctl = 0; | ||
770 | hw->ctl_f.null = 1; | ||
771 | hw->ctl_f.int_en = 1; | ||
772 | hw->ctl_f.compl_write = 1; | ||
773 | /* set size to non-zero value (channel returns error when size is 0) */ | ||
774 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
775 | hw->src_addr = 0; | ||
776 | hw->dst_addr = 0; | ||
777 | async_tx_ack(&desc->txd); | ||
778 | hw->next = 0; | ||
779 | list_add_tail(&desc->node, &ioat->used_desc); | ||
780 | dump_desc_dbg(ioat, desc); | ||
781 | |||
782 | ioat_set_chainaddr(ioat, desc->txd.phys); | ||
783 | ioat_start(chan); | ||
784 | spin_unlock_bh(&ioat->desc_lock); | ||
785 | } | ||
786 | |||
787 | /* | ||
788 | * Perform an IOAT transaction to verify the HW works. | ||
789 | */ | ||
790 | #define IOAT_TEST_SIZE 2000 | ||
791 | |||
792 | static void __devinit ioat_dma_test_callback(void *dma_async_param) | ||
793 | { | ||
794 | struct completion *cmp = dma_async_param; | ||
795 | |||
796 | complete(cmp); | ||
797 | } | ||
798 | |||
799 | /** | ||
800 | * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works. | ||
801 | * @device: device to be tested | ||
802 | */ | ||
803 | int __devinit ioat_dma_self_test(struct ioatdma_device *device) | ||
804 | { | ||
805 | int i; | ||
806 | u8 *src; | ||
807 | u8 *dest; | ||
808 | struct dma_device *dma = &device->common; | ||
809 | struct device *dev = &device->pdev->dev; | ||
810 | struct dma_chan *dma_chan; | ||
811 | struct dma_async_tx_descriptor *tx; | ||
812 | dma_addr_t dma_dest, dma_src; | ||
813 | dma_cookie_t cookie; | ||
814 | int err = 0; | ||
815 | struct completion cmp; | ||
816 | unsigned long tmo; | ||
817 | unsigned long flags; | ||
818 | |||
819 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
820 | if (!src) | ||
821 | return -ENOMEM; | ||
822 | dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
823 | if (!dest) { | ||
824 | kfree(src); | ||
825 | return -ENOMEM; | ||
826 | } | ||
827 | |||
828 | /* Fill in src buffer */ | ||
829 | for (i = 0; i < IOAT_TEST_SIZE; i++) | ||
830 | src[i] = (u8)i; | ||
831 | |||
832 | /* Start copy, using first DMA channel */ | ||
833 | dma_chan = container_of(dma->channels.next, struct dma_chan, | ||
834 | device_node); | ||
835 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | ||
836 | dev_err(dev, "selftest cannot allocate chan resource\n"); | ||
837 | err = -ENODEV; | ||
838 | goto out; | ||
839 | } | ||
840 | |||
841 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
842 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | ||
843 | flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE | | ||
844 | DMA_PREP_INTERRUPT; | ||
845 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | ||
846 | IOAT_TEST_SIZE, flags); | ||
847 | if (!tx) { | ||
848 | dev_err(dev, "Self-test prep failed, disabling\n"); | ||
849 | err = -ENODEV; | ||
850 | goto free_resources; | ||
851 | } | ||
852 | |||
853 | async_tx_ack(tx); | ||
854 | init_completion(&cmp); | ||
855 | tx->callback = ioat_dma_test_callback; | ||
856 | tx->callback_param = &cmp; | ||
857 | cookie = tx->tx_submit(tx); | ||
858 | if (cookie < 0) { | ||
859 | dev_err(dev, "Self-test setup failed, disabling\n"); | ||
860 | err = -ENODEV; | ||
861 | goto free_resources; | ||
862 | } | ||
863 | dma->device_issue_pending(dma_chan); | ||
864 | |||
865 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
866 | |||
867 | if (tmo == 0 || | ||
868 | dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) | ||
869 | != DMA_SUCCESS) { | ||
870 | dev_err(dev, "Self-test copy timed out, disabling\n"); | ||
871 | err = -ENODEV; | ||
872 | goto free_resources; | ||
873 | } | ||
874 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | ||
875 | dev_err(dev, "Self-test copy failed compare, disabling\n"); | ||
876 | err = -ENODEV; | ||
877 | goto free_resources; | ||
878 | } | ||
879 | |||
880 | free_resources: | ||
881 | dma->device_free_chan_resources(dma_chan); | ||
882 | out: | ||
883 | kfree(src); | ||
884 | kfree(dest); | ||
885 | return err; | ||
886 | } | ||
887 | |||
888 | static char ioat_interrupt_style[32] = "msix"; | ||
889 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | ||
890 | sizeof(ioat_interrupt_style), 0644); | ||
891 | MODULE_PARM_DESC(ioat_interrupt_style, | ||
892 | "set ioat interrupt style: msix (default), " | ||
893 | "msix-single-vector, msi, intx)"); | ||
894 | |||
895 | /** | ||
896 | * ioat_dma_setup_interrupts - setup interrupt handler | ||
897 | * @device: ioat device | ||
898 | */ | ||
899 | static int ioat_dma_setup_interrupts(struct ioatdma_device *device) | ||
900 | { | ||
901 | struct ioat_chan_common *chan; | ||
902 | struct pci_dev *pdev = device->pdev; | ||
903 | struct device *dev = &pdev->dev; | ||
904 | struct msix_entry *msix; | ||
905 | int i, j, msixcnt; | ||
906 | int err = -EINVAL; | ||
907 | u8 intrctrl = 0; | ||
908 | |||
909 | if (!strcmp(ioat_interrupt_style, "msix")) | ||
910 | goto msix; | ||
911 | if (!strcmp(ioat_interrupt_style, "msix-single-vector")) | ||
912 | goto msix_single_vector; | ||
913 | if (!strcmp(ioat_interrupt_style, "msi")) | ||
914 | goto msi; | ||
915 | if (!strcmp(ioat_interrupt_style, "intx")) | ||
916 | goto intx; | ||
917 | dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); | ||
918 | goto err_no_irq; | ||
919 | |||
920 | msix: | ||
921 | /* The number of MSI-X vectors should equal the number of channels */ | ||
922 | msixcnt = device->common.chancnt; | ||
923 | for (i = 0; i < msixcnt; i++) | ||
924 | device->msix_entries[i].entry = i; | ||
925 | |||
926 | err = pci_enable_msix(pdev, device->msix_entries, msixcnt); | ||
927 | if (err < 0) | ||
928 | goto msi; | ||
929 | if (err > 0) | ||
930 | goto msix_single_vector; | ||
931 | |||
932 | for (i = 0; i < msixcnt; i++) { | ||
933 | msix = &device->msix_entries[i]; | ||
934 | chan = ioat_chan_by_index(device, i); | ||
935 | err = devm_request_irq(dev, msix->vector, | ||
936 | ioat_dma_do_interrupt_msix, 0, | ||
937 | "ioat-msix", chan); | ||
938 | if (err) { | ||
939 | for (j = 0; j < i; j++) { | ||
940 | msix = &device->msix_entries[j]; | ||
941 | chan = ioat_chan_by_index(device, j); | ||
942 | devm_free_irq(dev, msix->vector, chan); | ||
943 | } | ||
944 | goto msix_single_vector; | ||
945 | } | ||
946 | } | ||
947 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | ||
948 | goto done; | ||
949 | |||
950 | msix_single_vector: | ||
951 | msix = &device->msix_entries[0]; | ||
952 | msix->entry = 0; | ||
953 | err = pci_enable_msix(pdev, device->msix_entries, 1); | ||
954 | if (err) | ||
955 | goto msi; | ||
956 | |||
957 | err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, | ||
958 | "ioat-msix", device); | ||
959 | if (err) { | ||
960 | pci_disable_msix(pdev); | ||
961 | goto msi; | ||
962 | } | ||
963 | goto done; | ||
964 | |||
965 | msi: | ||
966 | err = pci_enable_msi(pdev); | ||
967 | if (err) | ||
968 | goto intx; | ||
969 | |||
970 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, | ||
971 | "ioat-msi", device); | ||
972 | if (err) { | ||
973 | pci_disable_msi(pdev); | ||
974 | goto intx; | ||
975 | } | ||
976 | goto done; | ||
977 | |||
978 | intx: | ||
979 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, | ||
980 | IRQF_SHARED, "ioat-intx", device); | ||
981 | if (err) | ||
982 | goto err_no_irq; | ||
983 | |||
984 | done: | ||
985 | if (device->intr_quirk) | ||
986 | device->intr_quirk(device); | ||
987 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; | ||
988 | writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
989 | return 0; | ||
990 | |||
991 | err_no_irq: | ||
992 | /* Disable all interrupt generation */ | ||
993 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
994 | dev_err(dev, "no usable interrupts\n"); | ||
995 | return err; | ||
996 | } | ||
997 | |||
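The fall-back order here is fixed: full MSI-X (one vector per channel), then a single MSI-X vector, then MSI, then legacy INTx. The ioat_interrupt_style parameter only chooses where in that chain to start; for example, loading the module with ioat_interrupt_style=msi skips straight to MSI. Any mode that fails to set up simply drops through to the next, less capable one.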
998 | static void ioat_disable_interrupts(struct ioatdma_device *device) | ||
999 | { | ||
1000 | /* Disable all interrupt generation */ | ||
1001 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
1002 | } | ||
1003 | |||
1004 | int __devinit ioat_probe(struct ioatdma_device *device) | ||
1005 | { | ||
1006 | int err = -ENODEV; | ||
1007 | struct dma_device *dma = &device->common; | ||
1008 | struct pci_dev *pdev = device->pdev; | ||
1009 | struct device *dev = &pdev->dev; | ||
1010 | |||
1011 | /* DMA coherent memory pool for DMA descriptor allocations */ | ||
1012 | device->dma_pool = pci_pool_create("dma_desc_pool", pdev, | ||
1013 | sizeof(struct ioat_dma_descriptor), | ||
1014 | 64, 0); | ||
1015 | if (!device->dma_pool) { | ||
1016 | err = -ENOMEM; | ||
1017 | goto err_dma_pool; | ||
1018 | } | ||
1019 | |||
1020 | device->completion_pool = pci_pool_create("completion_pool", pdev, | ||
1021 | sizeof(u64), SMP_CACHE_BYTES, | ||
1022 | SMP_CACHE_BYTES); | ||
1023 | |||
1024 | if (!device->completion_pool) { | ||
1025 | err = -ENOMEM; | ||
1026 | goto err_completion_pool; | ||
1027 | } | ||
1028 | |||
1029 | device->enumerate_channels(device); | ||
1030 | |||
1031 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); | ||
1032 | dma->dev = &pdev->dev; | ||
1033 | |||
1034 | if (!dma->chancnt) { | ||
1035 | dev_err(dev, "zero channels detected\n"); | ||
1036 | goto err_setup_interrupts; | ||
1037 | } | ||
1038 | |||
1039 | err = ioat_dma_setup_interrupts(device); | ||
1040 | if (err) | ||
1041 | goto err_setup_interrupts; | ||
1042 | |||
1043 | err = device->self_test(device); | ||
1044 | if (err) | ||
1045 | goto err_self_test; | ||
1046 | |||
1047 | return 0; | ||
1048 | |||
1049 | err_self_test: | ||
1050 | ioat_disable_interrupts(device); | ||
1051 | err_setup_interrupts: | ||
1052 | pci_pool_destroy(device->completion_pool); | ||
1053 | err_completion_pool: | ||
1054 | pci_pool_destroy(device->dma_pool); | ||
1055 | err_dma_pool: | ||
1056 | return err; | ||
1057 | } | ||
1058 | |||
1059 | int __devinit ioat_register(struct ioatdma_device *device) | ||
1060 | { | ||
1061 | int err = dma_async_device_register(&device->common); | ||
1062 | |||
1063 | if (err) { | ||
1064 | ioat_disable_interrupts(device); | ||
1065 | pci_pool_destroy(device->completion_pool); | ||
1066 | pci_pool_destroy(device->dma_pool); | ||
1067 | } | ||
1068 | |||
1069 | return err; | ||
1070 | } | ||
1071 | |||
1072 | /* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */ | ||
1073 | static void ioat1_intr_quirk(struct ioatdma_device *device) | ||
1074 | { | ||
1075 | struct pci_dev *pdev = device->pdev; | ||
1076 | u32 dmactrl; | ||
1077 | |||
1078 | pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); | ||
1079 | if (pdev->msi_enabled) | ||
1080 | dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; | ||
1081 | else | ||
1082 | dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN; | ||
1083 | pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); | ||
1084 | } | ||
1085 | |||
1086 | static ssize_t ring_size_show(struct dma_chan *c, char *page) | ||
1087 | { | ||
1088 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
1089 | |||
1090 | return sprintf(page, "%d\n", ioat->desccount); | ||
1091 | } | ||
1092 | static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); | ||
1093 | |||
1094 | static ssize_t ring_active_show(struct dma_chan *c, char *page) | ||
1095 | { | ||
1096 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
1097 | |||
1098 | return sprintf(page, "%d\n", ioat->active); | ||
1099 | } | ||
1100 | static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); | ||
1101 | |||
1102 | static ssize_t cap_show(struct dma_chan *c, char *page) | ||
1103 | { | ||
1104 | struct dma_device *dma = c->device; | ||
1105 | |||
1106 | return sprintf(page, "copy%s%s%s%s%s%s\n", | ||
1107 | dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", | ||
1108 | dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", | ||
1109 | dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", | ||
1110 | dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", | ||
1111 | dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "", | ||
1112 | dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : ""); | ||
1113 | |||
1114 | } | ||
1115 | struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap); | ||
1116 | |||
1117 | static ssize_t version_show(struct dma_chan *c, char *page) | ||
1118 | { | ||
1119 | struct dma_device *dma = c->device; | ||
1120 | struct ioatdma_device *device = to_ioatdma_device(dma); | ||
1121 | |||
1122 | return sprintf(page, "%d.%d\n", | ||
1123 | device->version >> 4, device->version & 0xf); | ||
1124 | } | ||
1125 | struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version); | ||
1126 | |||
1127 | static struct attribute *ioat1_attrs[] = { | ||
1128 | &ring_size_attr.attr, | ||
1129 | &ring_active_attr.attr, | ||
1130 | &ioat_cap_attr.attr, | ||
1131 | &ioat_version_attr.attr, | ||
1132 | NULL, | ||
1133 | }; | ||
1134 | |||
1135 | static ssize_t | ||
1136 | ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
1137 | { | ||
1138 | struct ioat_sysfs_entry *entry; | ||
1139 | struct ioat_chan_common *chan; | ||
1140 | |||
1141 | entry = container_of(attr, struct ioat_sysfs_entry, attr); | ||
1142 | chan = container_of(kobj, struct ioat_chan_common, kobj); | ||
1143 | |||
1144 | if (!entry->show) | ||
1145 | return -EIO; | ||
1146 | return entry->show(&chan->common, page); | ||
1147 | } | ||
1148 | |||
1149 | struct sysfs_ops ioat_sysfs_ops = { | ||
1150 | .show = ioat_attr_show, | ||
1151 | }; | ||
1152 | |||
1153 | static struct kobj_type ioat1_ktype = { | ||
1154 | .sysfs_ops = &ioat_sysfs_ops, | ||
1155 | .default_attrs = ioat1_attrs, | ||
1156 | }; | ||
1157 | |||
1158 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type) | ||
1159 | { | ||
1160 | struct dma_device *dma = &device->common; | ||
1161 | struct dma_chan *c; | ||
1162 | |||
1163 | list_for_each_entry(c, &dma->channels, device_node) { | ||
1164 | struct ioat_chan_common *chan = to_chan_common(c); | ||
1165 | struct kobject *parent = &c->dev->device.kobj; | ||
1166 | int err; | ||
1167 | |||
1168 | err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata"); | ||
1169 | if (err) { | ||
1170 | dev_warn(to_dev(chan), | ||
1171 | "sysfs init error (%d), continuing...\n", err); | ||
1172 | kobject_put(&chan->kobj); | ||
1173 | set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state); | ||
1174 | } | ||
1175 | } | ||
1176 | } | ||
1177 | |||
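Each channel thus gains a "quickdata" directory beneath its dmaengine device in sysfs, exposing the read-only ring_size, ring_active, cap and version attributes defined above. With the usual dmaengine device naming this ends up at a path of the form /sys/class/dma/dma0chan0/quickdata/ring_size (the path is shown for illustration; the exact device name depends on the system).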
1178 | void ioat_kobject_del(struct ioatdma_device *device) | ||
1179 | { | ||
1180 | struct dma_device *dma = &device->common; | ||
1181 | struct dma_chan *c; | ||
1182 | |||
1183 | list_for_each_entry(c, &dma->channels, device_node) { | ||
1184 | struct ioat_chan_common *chan = to_chan_common(c); | ||
1185 | |||
1186 | if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) { | ||
1187 | kobject_del(&chan->kobj); | ||
1188 | kobject_put(&chan->kobj); | ||
1189 | } | ||
1190 | } | ||
1191 | } | ||
1192 | |||
1193 | int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) | ||
1194 | { | ||
1195 | struct pci_dev *pdev = device->pdev; | ||
1196 | struct dma_device *dma; | ||
1197 | int err; | ||
1198 | |||
1199 | device->intr_quirk = ioat1_intr_quirk; | ||
1200 | device->enumerate_channels = ioat1_enumerate_channels; | ||
1201 | device->self_test = ioat_dma_self_test; | ||
1202 | dma = &device->common; | ||
1203 | dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; | ||
1204 | dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; | ||
1205 | dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; | ||
1206 | dma->device_free_chan_resources = ioat1_dma_free_chan_resources; | ||
1207 | dma->device_is_tx_complete = ioat1_dma_is_complete; | ||
1208 | |||
1209 | err = ioat_probe(device); | ||
1210 | if (err) | ||
1211 | return err; | ||
1212 | ioat_set_tcp_copy_break(4096); | ||
1213 | err = ioat_register(device); | ||
1214 | if (err) | ||
1215 | return err; | ||
1216 | ioat_kobject_add(device, &ioat1_ktype); | ||
1217 | |||
1218 | if (dca) | ||
1219 | device->dca = ioat_dca_init(pdev, device->reg_base); | ||
1220 | |||
1221 | return err; | ||
1222 | } | ||
1223 | |||
1224 | void __devexit ioat_dma_remove(struct ioatdma_device *device) | ||
1225 | { | ||
1226 | struct dma_device *dma = &device->common; | ||
1227 | |||
1228 | ioat_disable_interrupts(device); | ||
1229 | |||
1230 | ioat_kobject_del(device); | ||
1231 | |||
1232 | dma_async_device_unregister(dma); | ||
1233 | |||
1234 | pci_pool_destroy(device->dma_pool); | ||
1235 | pci_pool_destroy(device->completion_pool); | ||
1236 | |||
1237 | INIT_LIST_HEAD(&dma->channels); | ||
1238 | } | ||
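Taken together, the prep/submit/issue entry points wired up in ioat1_dma_probe() above are driven through the generic dmaengine interface rather than called directly. A hedged sketch of the calls a client of this vintage would make for a single copy, mirroring the self-test earlier in this file (example_copy is hypothetical and omits all error handling and unmapping):

	/* illustration only: one memcpy through the dmaengine API */
	static dma_cookie_t example_copy(struct dma_chan *chan, struct device *dev,
					 void *dst, void *src, size_t len)
	{
		struct dma_device *dma = chan->device;
		struct dma_async_tx_descriptor *tx;
		dma_addr_t dma_src, dma_dest;
		dma_cookie_t cookie;

		dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
		dma_dest = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

		/* build the descriptor chain and hand it to the channel */
		tx = dma->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
						 DMA_PREP_INTERRUPT);
		cookie = tx->tx_submit(tx);
		dma->device_issue_pending(chan);

		/* completion can later be polled via dma->device_is_tx_complete() */
		return cookie;
	}

In practice a client would also set tx->callback before submitting, exactly as the self-test does, to be notified without polling.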
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h new file mode 100644 index 000000000000..c14fdfeb7f33 --- /dev/null +++ b/drivers/dma/ioat/dma.h | |||
@@ -0,0 +1,337 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the Free | ||
6 | * Software Foundation; either version 2 of the License, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called COPYING. | ||
20 | */ | ||
21 | #ifndef IOATDMA_H | ||
22 | #define IOATDMA_H | ||
23 | |||
24 | #include <linux/dmaengine.h> | ||
25 | #include "hw.h" | ||
26 | #include "registers.h" | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/dmapool.h> | ||
29 | #include <linux/cache.h> | ||
30 | #include <linux/pci_ids.h> | ||
31 | #include <net/tcp.h> | ||
32 | |||
33 | #define IOAT_DMA_VERSION "4.00" | ||
34 | |||
35 | #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 | ||
36 | #define IOAT_DMA_DCA_ANY_CPU ~0 | ||
37 | |||
38 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) | ||
39 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) | ||
40 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) | ||
41 | #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) | ||
42 | |||
43 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) | ||
44 | |||
45 | /* | ||
46 | * workaround for IOAT ver.3.0 null descriptor issue | ||
47 | * (channel returns error when size is 0) | ||
48 | */ | ||
49 | #define NULL_DESC_BUFFER_SIZE 1 | ||
50 | |||
51 | /** | ||
52 | * struct ioatdma_device - internal representation of a IOAT device | ||
53 | * @pdev: PCI-Express device | ||
54 | * @reg_base: MMIO register space base address | ||
55 | * @dma_pool: for allocating DMA descriptors | ||
56 | * @common: embedded struct dma_device | ||
57 | * @version: version of ioatdma device | ||
58 | * @msix_entries: irq handlers | ||
59 | * @idx: per channel data | ||
60 | * @dca: direct cache access context | ||
61 | * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) | ||
62 | * @enumerate_channels: hw version specific channel enumeration | ||
63 | * @cleanup_tasklet: select between the v2 and v3 cleanup routines | ||
64 | * @timer_fn: select between the v2 and v3 timer watchdog routines | ||
65 | * @self_test: hardware version specific self test for each supported op type | ||
66 | * | ||
67 | * Note: the v3 cleanup routine supports raid operations | ||
68 | */ | ||
69 | struct ioatdma_device { | ||
70 | struct pci_dev *pdev; | ||
71 | void __iomem *reg_base; | ||
72 | struct pci_pool *dma_pool; | ||
73 | struct pci_pool *completion_pool; | ||
74 | struct dma_device common; | ||
75 | u8 version; | ||
76 | struct msix_entry msix_entries[4]; | ||
77 | struct ioat_chan_common *idx[4]; | ||
78 | struct dca_provider *dca; | ||
79 | void (*intr_quirk)(struct ioatdma_device *device); | ||
80 | int (*enumerate_channels)(struct ioatdma_device *device); | ||
81 | void (*cleanup_tasklet)(unsigned long data); | ||
82 | void (*timer_fn)(unsigned long data); | ||
83 | int (*self_test)(struct ioatdma_device *device); | ||
84 | }; | ||
85 | |||
86 | struct ioat_chan_common { | ||
87 | struct dma_chan common; | ||
88 | void __iomem *reg_base; | ||
89 | unsigned long last_completion; | ||
90 | spinlock_t cleanup_lock; | ||
91 | dma_cookie_t completed_cookie; | ||
92 | unsigned long state; | ||
93 | #define IOAT_COMPLETION_PENDING 0 | ||
94 | #define IOAT_COMPLETION_ACK 1 | ||
95 | #define IOAT_RESET_PENDING 2 | ||
96 | #define IOAT_KOBJ_INIT_FAIL 3 | ||
97 | struct timer_list timer; | ||
98 | #define COMPLETION_TIMEOUT msecs_to_jiffies(100) | ||
99 | #define IDLE_TIMEOUT msecs_to_jiffies(2000) | ||
100 | #define RESET_DELAY msecs_to_jiffies(100) | ||
101 | struct ioatdma_device *device; | ||
102 | dma_addr_t completion_dma; | ||
103 | u64 *completion; | ||
104 | struct tasklet_struct cleanup_task; | ||
105 | struct kobject kobj; | ||
106 | }; | ||
107 | |||
108 | struct ioat_sysfs_entry { | ||
109 | struct attribute attr; | ||
110 | ssize_t (*show)(struct dma_chan *, char *); | ||
111 | }; | ||
112 | |||
113 | /** | ||
114 | * struct ioat_dma_chan - internal representation of a DMA channel | ||
115 | */ | ||
116 | struct ioat_dma_chan { | ||
117 | struct ioat_chan_common base; | ||
118 | |||
119 | size_t xfercap; /* XFERCAP register value expanded out */ | ||
120 | |||
121 | spinlock_t desc_lock; | ||
122 | struct list_head free_desc; | ||
123 | struct list_head used_desc; | ||
124 | |||
125 | int pending; | ||
126 | u16 desccount; | ||
127 | u16 active; | ||
128 | }; | ||
129 | |||
130 | static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) | ||
131 | { | ||
132 | return container_of(c, struct ioat_chan_common, common); | ||
133 | } | ||
134 | |||
135 | static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) | ||
136 | { | ||
137 | struct ioat_chan_common *chan = to_chan_common(c); | ||
138 | |||
139 | return container_of(chan, struct ioat_dma_chan, base); | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * ioat_is_complete - poll the status of an ioat transaction | ||
144 | * @c: channel handle | ||
145 | * @cookie: transaction identifier | ||
146 | * @done: if set, updated with last completed transaction | ||
147 | * @used: if set, updated with last used transaction | ||
148 | */ | ||
149 | static inline enum dma_status | ||
150 | ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
151 | dma_cookie_t *done, dma_cookie_t *used) | ||
152 | { | ||
153 | struct ioat_chan_common *chan = to_chan_common(c); | ||
154 | dma_cookie_t last_used; | ||
155 | dma_cookie_t last_complete; | ||
156 | |||
157 | last_used = c->cookie; | ||
158 | last_complete = chan->completed_cookie; | ||
159 | |||
160 | if (done) | ||
161 | *done = last_complete; | ||
162 | if (used) | ||
163 | *used = last_used; | ||
164 | |||
165 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
166 | } | ||
167 | |||
168 | /* wrapper around hardware descriptor format + additional software fields */ | ||
169 | |||
170 | /** | ||
171 | * struct ioat_desc_sw - wrapper around hardware descriptor | ||
172 | * @hw: hardware DMA descriptor (for memcpy) | ||
173 | * @node: this descriptor will either be on the free list, | ||
174 | * or attached to a transaction list (tx_list) | ||
175 | * @txd: the generic software descriptor for all engines | ||
176 | * @id: identifier for debug | ||
177 | */ | ||
178 | struct ioat_desc_sw { | ||
179 | struct ioat_dma_descriptor *hw; | ||
180 | struct list_head node; | ||
181 | size_t len; | ||
182 | struct list_head tx_list; | ||
183 | struct dma_async_tx_descriptor txd; | ||
184 | #ifdef DEBUG | ||
185 | int id; | ||
186 | #endif | ||
187 | }; | ||
188 | |||
189 | #ifdef DEBUG | ||
190 | #define set_desc_id(desc, i) ((desc)->id = (i)) | ||
191 | #define desc_id(desc) ((desc)->id) | ||
192 | #else | ||
193 | #define set_desc_id(desc, i) | ||
194 | #define desc_id(desc) (0) | ||
195 | #endif | ||
196 | |||
197 | static inline void | ||
198 | __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, | ||
199 | struct dma_async_tx_descriptor *tx, int id) | ||
200 | { | ||
201 | struct device *dev = to_dev(chan); | ||
202 | |||
203 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" | ||
204 | " ctl: %#x (op: %d int_en: %d compl: %d)\n", id, | ||
205 | (unsigned long long) tx->phys, | ||
206 | (unsigned long long) hw->next, tx->cookie, tx->flags, | ||
207 | hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); | ||
208 | } | ||
209 | |||
210 | #define dump_desc_dbg(c, d) \ | ||
211 | ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; }) | ||
212 | |||
213 | static inline void ioat_set_tcp_copy_break(unsigned long copybreak) | ||
214 | { | ||
215 | #ifdef CONFIG_NET_DMA | ||
216 | sysctl_tcp_dma_copybreak = copybreak; | ||
217 | #endif | ||
218 | } | ||
219 | |||
220 | static inline struct ioat_chan_common * | ||
221 | ioat_chan_by_index(struct ioatdma_device *device, int index) | ||
222 | { | ||
223 | return device->idx[index]; | ||
224 | } | ||
225 | |||
226 | static inline u64 ioat_chansts(struct ioat_chan_common *chan) | ||
227 | { | ||
228 | u8 ver = chan->device->version; | ||
229 | u64 status; | ||
230 | u32 status_lo; | ||
231 | |||
232 | /* We need to read the low address first as this causes the | ||
233 | * chipset to latch the upper bits for the subsequent read | ||
234 | */ | ||
235 | status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); | ||
236 | status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); | ||
237 | status <<= 32; | ||
238 | status |= status_lo; | ||
239 | |||
240 | return status; | ||
241 | } | ||
242 | |||
243 | static inline void ioat_start(struct ioat_chan_common *chan) | ||
244 | { | ||
245 | u8 ver = chan->device->version; | ||
246 | |||
247 | writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
248 | } | ||
249 | |||
250 | static inline u64 ioat_chansts_to_addr(u64 status) | ||
251 | { | ||
252 | return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
253 | } | ||
254 | |||
255 | static inline u32 ioat_chanerr(struct ioat_chan_common *chan) | ||
256 | { | ||
257 | return readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
258 | } | ||
259 | |||
260 | static inline void ioat_suspend(struct ioat_chan_common *chan) | ||
261 | { | ||
262 | u8 ver = chan->device->version; | ||
263 | |||
264 | writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
265 | } | ||
266 | |||
267 | static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) | ||
268 | { | ||
269 | struct ioat_chan_common *chan = &ioat->base; | ||
270 | |||
271 | writel(addr & 0x00000000FFFFFFFF, | ||
272 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
273 | writel(addr >> 32, | ||
274 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
275 | } | ||
276 | |||
277 | static inline bool is_ioat_active(unsigned long status) | ||
278 | { | ||
279 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); | ||
280 | } | ||
281 | |||
282 | static inline bool is_ioat_idle(unsigned long status) | ||
283 | { | ||
284 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE); | ||
285 | } | ||
286 | |||
287 | static inline bool is_ioat_halted(unsigned long status) | ||
288 | { | ||
289 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); | ||
290 | } | ||
291 | |||
292 | static inline bool is_ioat_suspended(unsigned long status) | ||
293 | { | ||
294 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); | ||
295 | } | ||
296 | |||
297 | /* channel was fatally programmed */ | ||
298 | static inline bool is_ioat_bug(unsigned long err) | ||
299 | { | ||
300 | return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR| | ||
301 | IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR| | ||
302 | IOAT_CHANERR_LENGTH_ERR)); | ||
303 | } | ||
304 | |||
305 | static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, | ||
306 | int direction, enum dma_ctrl_flags flags, bool dst) | ||
307 | { | ||
308 | if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || | ||
309 | (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) | ||
310 | pci_unmap_single(pdev, addr, len, direction); | ||
311 | else | ||
312 | pci_unmap_page(pdev, addr, len, direction); | ||
313 | } | ||
314 | |||
315 | int __devinit ioat_probe(struct ioatdma_device *device); | ||
316 | int __devinit ioat_register(struct ioatdma_device *device); | ||
317 | int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); | ||
318 | int __devinit ioat_dma_self_test(struct ioatdma_device *device); | ||
319 | void __devexit ioat_dma_remove(struct ioatdma_device *device); | ||
320 | struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, | ||
321 | void __iomem *iobase); | ||
322 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); | ||
323 | void ioat_init_channel(struct ioatdma_device *device, | ||
324 | struct ioat_chan_common *chan, int idx, | ||
325 | void (*timer_fn)(unsigned long), | ||
326 | void (*tasklet)(unsigned long), | ||
327 | unsigned long ioat); | ||
328 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | ||
329 | size_t len, struct ioat_dma_descriptor *hw); | ||
330 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | ||
331 | unsigned long *phys_complete); | ||
332 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); | ||
333 | void ioat_kobject_del(struct ioatdma_device *device); | ||
334 | extern struct sysfs_ops ioat_sysfs_ops; | ||
335 | extern struct ioat_sysfs_entry ioat_version_attr; | ||
336 | extern struct ioat_sysfs_entry ioat_cap_attr; | ||
337 | #endif /* IOATDMA_H */ | ||
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c new file mode 100644 index 000000000000..96ffab7d37a7 --- /dev/null +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -0,0 +1,871 @@ | |||
1 | /* | ||
2 | * Intel I/OAT DMA Linux driver | ||
3 | * Copyright(c) 2004 - 2009 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in | ||
19 | * the file called "COPYING". | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This driver supports an Intel I/OAT DMA engine (versions >= 2), which | ||
25 | * does asynchronous data movement and checksumming operations. | ||
26 | */ | ||
27 | |||
28 | #include <linux/init.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/pci.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/dmaengine.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/workqueue.h> | ||
36 | #include <linux/i7300_idle.h> | ||
37 | #include "dma.h" | ||
38 | #include "dma_v2.h" | ||
39 | #include "registers.h" | ||
40 | #include "hw.h" | ||
41 | |||
42 | int ioat_ring_alloc_order = 8; | ||
43 | module_param(ioat_ring_alloc_order, int, 0644); | ||
44 | MODULE_PARM_DESC(ioat_ring_alloc_order, | ||
45 | "ioat2+: allocate 2^n descriptors per channel" | ||
46 | " (default: 8 max: 16)"); | ||
47 | static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; | ||
48 | module_param(ioat_ring_max_alloc_order, int, 0644); | ||
49 | MODULE_PARM_DESC(ioat_ring_max_alloc_order, | ||
50 | "ioat2+: upper limit for ring size (default: 16)"); | ||
51 | |||
52 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) | ||
53 | { | ||
54 | void __iomem *reg_base = ioat->base.reg_base; | ||
55 | |||
56 | ioat->pending = 0; | ||
57 | ioat->dmacount += ioat2_ring_pending(ioat); | ||
58 | ioat->issued = ioat->head; | ||
59 | /* make descriptor updates globally visible before notifying channel */ | ||
60 | wmb(); | ||
61 | writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | ||
62 | dev_dbg(to_dev(&ioat->base), | ||
63 | "%s: head: %#x tail: %#x issued: %#x count: %#x\n", | ||
64 | __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); | ||
65 | } | ||
66 | |||
67 | void ioat2_issue_pending(struct dma_chan *chan) | ||
68 | { | ||
69 | struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); | ||
70 | |||
71 | spin_lock_bh(&ioat->ring_lock); | ||
72 | if (ioat->pending == 1) | ||
73 | __ioat2_issue_pending(ioat); | ||
74 | spin_unlock_bh(&ioat->ring_lock); | ||
75 | } | ||
76 | |||
77 | /** | ||
78 | * ioat2_update_pending - log pending descriptors | ||
79 | * @ioat: ioat2+ channel | ||
80 | * | ||
81 | * Set pending to '1' unless pending is already set to '2'; pending == 2 | ||
82 | * indicates that submission is temporarily blocked due to an in-flight | ||
83 | * reset. If we are already above the ioat_pending_level threshold, then | ||
84 | * just issue pending. | ||
85 | * | ||
86 | * called with ring_lock held | ||
87 | */ | ||
88 | static void ioat2_update_pending(struct ioat2_dma_chan *ioat) | ||
89 | { | ||
90 | if (unlikely(ioat->pending == 2)) | ||
91 | return; | ||
92 | else if (ioat2_ring_pending(ioat) > ioat_pending_level) | ||
93 | __ioat2_issue_pending(ioat); | ||
94 | else | ||
95 | ioat->pending = 1; | ||
96 | } | ||
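Taken together, __ioat2_issue_pending(), ioat2_issue_pending() and ioat2_update_pending() form a small batching state machine: pending == 0 means nothing is waiting, pending == 1 means descriptors are queued but the DMACOUNT doorbell has not been written, and pending == 2 holds submissions back while a reset is in flight; once the backlog exceeds ioat_pending_level the doorbell is written immediately. A rough user-space sketch of that decision logic, with hypothetical names (pending_level, issue, backlog) standing in for the driver's fields:

#include <stdio.h>

enum { PENDING_NONE = 0, PENDING_QUEUED = 1, PENDING_BLOCKED = 2 };

static int pending;                   /* mirrors ioat->pending */
static int backlog;                   /* mirrors ioat2_ring_pending() */
static const int pending_level = 4;   /* stand-in for ioat_pending_level */

static void issue(void)               /* stand-in for __ioat2_issue_pending() */
{
	printf("doorbell: flushing %d descriptors\n", backlog);
	backlog = 0;
	pending = PENDING_NONE;
}

static void update_pending(void)      /* mirrors ioat2_update_pending() */
{
	if (pending == PENDING_BLOCKED)
		return;                       /* reset in flight, hold submissions */
	else if (backlog > pending_level)
		issue();                      /* past the threshold, kick hardware now */
	else
		pending = PENDING_QUEUED;     /* defer until issue_pending() is called */
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		backlog++;
		update_pending();
	}
	if (pending == PENDING_QUEUED)
		issue();                      /* what ioat2_issue_pending() would do */
	return 0;
}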
97 | |||
98 | static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) | ||
99 | { | ||
100 | struct ioat_ring_ent *desc; | ||
101 | struct ioat_dma_descriptor *hw; | ||
102 | int idx; | ||
103 | |||
104 | if (ioat2_ring_space(ioat) < 1) { | ||
105 | dev_err(to_dev(&ioat->base), | ||
106 | "Unable to start null desc - ring full\n"); | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", | ||
111 | __func__, ioat->head, ioat->tail, ioat->issued); | ||
112 | idx = ioat2_desc_alloc(ioat, 1); | ||
113 | desc = ioat2_get_ring_ent(ioat, idx); | ||
114 | |||
115 | hw = desc->hw; | ||
116 | hw->ctl = 0; | ||
117 | hw->ctl_f.null = 1; | ||
118 | hw->ctl_f.int_en = 1; | ||
119 | hw->ctl_f.compl_write = 1; | ||
120 | /* set size to non-zero value (channel returns error when size is 0) */ | ||
121 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
122 | hw->src_addr = 0; | ||
123 | hw->dst_addr = 0; | ||
124 | async_tx_ack(&desc->txd); | ||
125 | ioat2_set_chainaddr(ioat, desc->txd.phys); | ||
126 | dump_desc_dbg(ioat, desc); | ||
127 | __ioat2_issue_pending(ioat); | ||
128 | } | ||
129 | |||
130 | static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) | ||
131 | { | ||
132 | spin_lock_bh(&ioat->ring_lock); | ||
133 | __ioat2_start_null_desc(ioat); | ||
134 | spin_unlock_bh(&ioat->ring_lock); | ||
135 | } | ||
136 | |||
137 | static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) | ||
138 | { | ||
139 | struct ioat_chan_common *chan = &ioat->base; | ||
140 | struct dma_async_tx_descriptor *tx; | ||
141 | struct ioat_ring_ent *desc; | ||
142 | bool seen_current = false; | ||
143 | u16 active; | ||
144 | int i; | ||
145 | |||
146 | dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", | ||
147 | __func__, ioat->head, ioat->tail, ioat->issued); | ||
148 | |||
149 | active = ioat2_ring_active(ioat); | ||
150 | for (i = 0; i < active && !seen_current; i++) { | ||
151 | prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); | ||
152 | desc = ioat2_get_ring_ent(ioat, ioat->tail + i); | ||
153 | tx = &desc->txd; | ||
154 | dump_desc_dbg(ioat, desc); | ||
155 | if (tx->cookie) { | ||
156 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | ||
157 | chan->completed_cookie = tx->cookie; | ||
158 | tx->cookie = 0; | ||
159 | if (tx->callback) { | ||
160 | tx->callback(tx->callback_param); | ||
161 | tx->callback = NULL; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | if (tx->phys == phys_complete) | ||
166 | seen_current = true; | ||
167 | } | ||
168 | ioat->tail += i; | ||
169 | BUG_ON(!seen_current); /* no active descs have written a completion? */ | ||
170 | |||
171 | chan->last_completion = phys_complete; | ||
172 | if (ioat->head == ioat->tail) { | ||
173 | dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", | ||
174 | __func__); | ||
175 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
176 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
177 | } | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * ioat2_cleanup - clean finished descriptors (advance tail pointer) | ||
182 | * @ioat: ioat channel to be cleaned up | ||
183 | */ | ||
184 | static void ioat2_cleanup(struct ioat2_dma_chan *ioat) | ||
185 | { | ||
186 | struct ioat_chan_common *chan = &ioat->base; | ||
187 | unsigned long phys_complete; | ||
188 | |||
189 | prefetch(chan->completion); | ||
190 | |||
191 | if (!spin_trylock_bh(&chan->cleanup_lock)) | ||
192 | return; | ||
193 | |||
194 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | ||
195 | spin_unlock_bh(&chan->cleanup_lock); | ||
196 | return; | ||
197 | } | ||
198 | |||
199 | if (!spin_trylock_bh(&ioat->ring_lock)) { | ||
200 | spin_unlock_bh(&chan->cleanup_lock); | ||
201 | return; | ||
202 | } | ||
203 | |||
204 | __cleanup(ioat, phys_complete); | ||
205 | |||
206 | spin_unlock_bh(&ioat->ring_lock); | ||
207 | spin_unlock_bh(&chan->cleanup_lock); | ||
208 | } | ||
209 | |||
210 | void ioat2_cleanup_tasklet(unsigned long data) | ||
211 | { | ||
212 | struct ioat2_dma_chan *ioat = (void *) data; | ||
213 | |||
214 | ioat2_cleanup(ioat); | ||
215 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); | ||
216 | } | ||
217 | |||
218 | void __ioat2_restart_chan(struct ioat2_dma_chan *ioat) | ||
219 | { | ||
220 | struct ioat_chan_common *chan = &ioat->base; | ||
221 | |||
222 | /* set the tail to be re-issued */ | ||
223 | ioat->issued = ioat->tail; | ||
224 | ioat->dmacount = 0; | ||
225 | set_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
226 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
227 | |||
228 | dev_dbg(to_dev(chan), | ||
229 | "%s: head: %#x tail: %#x issued: %#x count: %#x\n", | ||
230 | __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); | ||
231 | |||
232 | if (ioat2_ring_pending(ioat)) { | ||
233 | struct ioat_ring_ent *desc; | ||
234 | |||
235 | desc = ioat2_get_ring_ent(ioat, ioat->tail); | ||
236 | ioat2_set_chainaddr(ioat, desc->txd.phys); | ||
237 | __ioat2_issue_pending(ioat); | ||
238 | } else | ||
239 | __ioat2_start_null_desc(ioat); | ||
240 | } | ||
241 | |||
242 | static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | ||
243 | { | ||
244 | struct ioat_chan_common *chan = &ioat->base; | ||
245 | unsigned long phys_complete; | ||
246 | u32 status; | ||
247 | |||
248 | status = ioat_chansts(chan); | ||
249 | if (is_ioat_active(status) || is_ioat_idle(status)) | ||
250 | ioat_suspend(chan); | ||
251 | while (is_ioat_active(status) || is_ioat_idle(status)) { | ||
252 | status = ioat_chansts(chan); | ||
253 | cpu_relax(); | ||
254 | } | ||
255 | |||
256 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
257 | __cleanup(ioat, phys_complete); | ||
258 | |||
259 | __ioat2_restart_chan(ioat); | ||
260 | } | ||
261 | |||
262 | void ioat2_timer_event(unsigned long data) | ||
263 | { | ||
264 | struct ioat2_dma_chan *ioat = (void *) data; | ||
265 | struct ioat_chan_common *chan = &ioat->base; | ||
266 | |||
267 | spin_lock_bh(&chan->cleanup_lock); | ||
268 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | ||
269 | unsigned long phys_complete; | ||
270 | u64 status; | ||
271 | |||
272 | spin_lock_bh(&ioat->ring_lock); | ||
273 | status = ioat_chansts(chan); | ||
274 | |||
275 | /* when halted due to errors check for channel | ||
276 | * programming errors before advancing the completion state | ||
277 | */ | ||
278 | if (is_ioat_halted(status)) { | ||
279 | u32 chanerr; | ||
280 | |||
281 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
282 | BUG_ON(is_ioat_bug(chanerr)); | ||
283 | } | ||
284 | |||
285 | /* if we haven't made progress and we have already | ||
286 | * acknowledged a pending completion once, then be more | ||
287 | * forceful with a restart | ||
288 | */ | ||
289 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
290 | __cleanup(ioat, phys_complete); | ||
291 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) | ||
292 | ioat2_restart_channel(ioat); | ||
293 | else { | ||
294 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
295 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
296 | } | ||
297 | spin_unlock_bh(&ioat->ring_lock); | ||
298 | } else { | ||
299 | u16 active; | ||
300 | |||
301 | /* if the ring is idle, empty, and oversized, try to step | ||
302 | * down the size | ||
303 | */ | ||
304 | spin_lock_bh(&ioat->ring_lock); | ||
305 | active = ioat2_ring_active(ioat); | ||
306 | if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) | ||
307 | reshape_ring(ioat, ioat->alloc_order-1); | ||
308 | spin_unlock_bh(&ioat->ring_lock); | ||
309 | |||
310 | /* keep shrinking until we get back to our minimum | ||
311 | * default size | ||
312 | */ | ||
313 | if (ioat->alloc_order > ioat_get_alloc_order()) | ||
314 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
315 | } | ||
316 | spin_unlock_bh(&chan->cleanup_lock); | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * ioat2_enumerate_channels - find and initialize the device's channels | ||
321 | * @device: the device to be enumerated | ||
322 | */ | ||
323 | int ioat2_enumerate_channels(struct ioatdma_device *device) | ||
324 | { | ||
325 | struct ioat2_dma_chan *ioat; | ||
326 | struct device *dev = &device->pdev->dev; | ||
327 | struct dma_device *dma = &device->common; | ||
328 | u8 xfercap_log; | ||
329 | int i; | ||
330 | |||
331 | INIT_LIST_HEAD(&dma->channels); | ||
332 | dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); | ||
333 | dma->chancnt &= 0x1f; /* bits [4:0] valid */ | ||
334 | if (dma->chancnt > ARRAY_SIZE(device->idx)) { | ||
335 | dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", | ||
336 | dma->chancnt, ARRAY_SIZE(device->idx)); | ||
337 | dma->chancnt = ARRAY_SIZE(device->idx); | ||
338 | } | ||
339 | xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | ||
340 | xfercap_log &= 0x1f; /* bits [4:0] valid */ | ||
341 | if (xfercap_log == 0) | ||
342 | return 0; | ||
343 | dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); | ||
344 | |||
345 | /* FIXME which i/oat version is i7300? */ | ||
346 | #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL | ||
347 | if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) | ||
348 | dma->chancnt--; | ||
349 | #endif | ||
350 | for (i = 0; i < dma->chancnt; i++) { | ||
351 | ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); | ||
352 | if (!ioat) | ||
353 | break; | ||
354 | |||
355 | ioat_init_channel(device, &ioat->base, i, | ||
356 | device->timer_fn, | ||
357 | device->cleanup_tasklet, | ||
358 | (unsigned long) ioat); | ||
359 | ioat->xfercap_log = xfercap_log; | ||
360 | spin_lock_init(&ioat->ring_lock); | ||
361 | } | ||
362 | dma->chancnt = i; | ||
363 | return i; | ||
364 | } | ||
365 | |||
366 | static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) | ||
367 | { | ||
368 | struct dma_chan *c = tx->chan; | ||
369 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
370 | struct ioat_chan_common *chan = &ioat->base; | ||
371 | dma_cookie_t cookie = c->cookie; | ||
372 | |||
373 | cookie++; | ||
374 | if (cookie < 0) | ||
375 | cookie = 1; | ||
376 | tx->cookie = cookie; | ||
377 | c->cookie = cookie; | ||
378 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | ||
379 | |||
380 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | ||
381 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
382 | ioat2_update_pending(ioat); | ||
383 | spin_unlock_bh(&ioat->ring_lock); | ||
384 | |||
385 | return cookie; | ||
386 | } | ||
387 | |||
388 | static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) | ||
389 | { | ||
390 | struct ioat_dma_descriptor *hw; | ||
391 | struct ioat_ring_ent *desc; | ||
392 | struct ioatdma_device *dma; | ||
393 | dma_addr_t phys; | ||
394 | |||
395 | dma = to_ioatdma_device(chan->device); | ||
396 | hw = pci_pool_alloc(dma->dma_pool, flags, &phys); | ||
397 | if (!hw) | ||
398 | return NULL; | ||
399 | memset(hw, 0, sizeof(*hw)); | ||
400 | |||
401 | desc = kmem_cache_alloc(ioat2_cache, flags); | ||
402 | if (!desc) { | ||
403 | pci_pool_free(dma->dma_pool, hw, phys); | ||
404 | return NULL; | ||
405 | } | ||
406 | memset(desc, 0, sizeof(*desc)); | ||
407 | |||
408 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
409 | desc->txd.tx_submit = ioat2_tx_submit_unlock; | ||
410 | desc->hw = hw; | ||
411 | desc->txd.phys = phys; | ||
412 | return desc; | ||
413 | } | ||
414 | |||
415 | static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) | ||
416 | { | ||
417 | struct ioatdma_device *dma; | ||
418 | |||
419 | dma = to_ioatdma_device(chan->device); | ||
420 | pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); | ||
421 | kmem_cache_free(ioat2_cache, desc); | ||
422 | } | ||
423 | |||
424 | static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | ||
425 | { | ||
426 | struct ioat_ring_ent **ring; | ||
427 | int descs = 1 << order; | ||
428 | int i; | ||
429 | |||
430 | if (order > ioat_get_max_alloc_order()) | ||
431 | return NULL; | ||
432 | |||
433 | /* allocate the array to hold the software ring */ | ||
434 | ring = kcalloc(descs, sizeof(*ring), flags); | ||
435 | if (!ring) | ||
436 | return NULL; | ||
437 | for (i = 0; i < descs; i++) { | ||
438 | ring[i] = ioat2_alloc_ring_ent(c, flags); | ||
439 | if (!ring[i]) { | ||
440 | while (i--) | ||
441 | ioat2_free_ring_ent(ring[i], c); | ||
442 | kfree(ring); | ||
443 | return NULL; | ||
444 | } | ||
445 | set_desc_id(ring[i], i); | ||
446 | } | ||
447 | |||
448 | /* link descs */ | ||
449 | for (i = 0; i < descs-1; i++) { | ||
450 | struct ioat_ring_ent *next = ring[i+1]; | ||
451 | struct ioat_dma_descriptor *hw = ring[i]->hw; | ||
452 | |||
453 | hw->next = next->txd.phys; | ||
454 | } | ||
455 | ring[i]->hw->next = ring[0]->txd.phys; | ||
456 | |||
457 | return ring; | ||
458 | } | ||
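The final assignment above closes the hardware chain into a circle: descriptor i points at descriptor i+1 and the last one points back at the first, so the engine can walk the ring indefinitely while software only advances its head/tail indices. A minimal user-space model of that linking step, with array indices standing in for the real DMA addresses:

#include <stdio.h>

#define RING_ORDER 3                  /* 2^3 = 8 entries, like a small alloc_order */
#define RING_SIZE  (1 << RING_ORDER)

struct fake_desc {
	int next;                     /* stands in for hw->next (a dma_addr_t) */
};

int main(void)
{
	struct fake_desc ring[RING_SIZE];
	int i;

	/* link descs: i -> i+1, last -> first, as in ioat2_alloc_ring() */
	for (i = 0; i < RING_SIZE - 1; i++)
		ring[i].next = i + 1;
	ring[i].next = 0;

	for (i = 0; i < RING_SIZE; i++)
		printf("desc[%d] -> desc[%d]\n", i, ring[i].next);
	return 0;
}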
459 | |||
460 | /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring | ||
461 | * @chan: channel to be initialized | ||
462 | */ | ||
463 | int ioat2_alloc_chan_resources(struct dma_chan *c) | ||
464 | { | ||
465 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
466 | struct ioat_chan_common *chan = &ioat->base; | ||
467 | struct ioat_ring_ent **ring; | ||
468 | u32 chanerr; | ||
469 | int order; | ||
470 | |||
471 | /* have we already been set up? */ | ||
472 | if (ioat->ring) | ||
473 | return 1 << ioat->alloc_order; | ||
474 | |||
475 | /* Setup register to interrupt and write completion status on error */ | ||
476 | writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
477 | |||
478 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
479 | if (chanerr) { | ||
480 | dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); | ||
481 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
482 | } | ||
483 | |||
484 | /* allocate a completion writeback area */ | ||
485 | /* doing two 32-bit writes to MMIO since one 64-bit write doesn't work */ | ||
486 | chan->completion = pci_pool_alloc(chan->device->completion_pool, | ||
487 | GFP_KERNEL, &chan->completion_dma); | ||
488 | if (!chan->completion) | ||
489 | return -ENOMEM; | ||
490 | |||
491 | memset(chan->completion, 0, sizeof(*chan->completion)); | ||
492 | writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, | ||
493 | chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | ||
494 | writel(((u64) chan->completion_dma) >> 32, | ||
495 | chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | ||
496 | |||
497 | order = ioat_get_alloc_order(); | ||
498 | ring = ioat2_alloc_ring(c, order, GFP_KERNEL); | ||
499 | if (!ring) | ||
500 | return -ENOMEM; | ||
501 | |||
502 | spin_lock_bh(&ioat->ring_lock); | ||
503 | ioat->ring = ring; | ||
504 | ioat->head = 0; | ||
505 | ioat->issued = 0; | ||
506 | ioat->tail = 0; | ||
507 | ioat->pending = 0; | ||
508 | ioat->alloc_order = order; | ||
509 | spin_unlock_bh(&ioat->ring_lock); | ||
510 | |||
511 | tasklet_enable(&chan->cleanup_task); | ||
512 | ioat2_start_null_desc(ioat); | ||
513 | |||
514 | return 1 << ioat->alloc_order; | ||
515 | } | ||
516 | |||
517 | bool reshape_ring(struct ioat2_dma_chan *ioat, int order) | ||
518 | { | ||
519 | /* reshape differs from normal ring allocation in that we want | ||
520 | * to allocate a new software ring while only | ||
521 | * extending/truncating the hardware ring | ||
522 | */ | ||
523 | struct ioat_chan_common *chan = &ioat->base; | ||
524 | struct dma_chan *c = &chan->common; | ||
525 | const u16 curr_size = ioat2_ring_mask(ioat) + 1; | ||
526 | const u16 active = ioat2_ring_active(ioat); | ||
527 | const u16 new_size = 1 << order; | ||
528 | struct ioat_ring_ent **ring; | ||
529 | u16 i; | ||
530 | |||
531 | if (order > ioat_get_max_alloc_order()) | ||
532 | return false; | ||
533 | |||
534 | /* double check that we have at least 1 free descriptor */ | ||
535 | if (active == curr_size) | ||
536 | return false; | ||
537 | |||
538 | /* when shrinking, verify that we can hold the current active | ||
539 | * set in the new ring | ||
540 | */ | ||
541 | if (active >= new_size) | ||
542 | return false; | ||
543 | |||
544 | /* allocate the array to hold the software ring */ | ||
545 | ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT); | ||
546 | if (!ring) | ||
547 | return false; | ||
548 | |||
549 | /* allocate/trim descriptors as needed */ | ||
550 | if (new_size > curr_size) { | ||
551 | /* copy current descriptors to the new ring */ | ||
552 | for (i = 0; i < curr_size; i++) { | ||
553 | u16 curr_idx = (ioat->tail+i) & (curr_size-1); | ||
554 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
555 | |||
556 | ring[new_idx] = ioat->ring[curr_idx]; | ||
557 | set_desc_id(ring[new_idx], new_idx); | ||
558 | } | ||
559 | |||
560 | /* add new descriptors to the ring */ | ||
561 | for (i = curr_size; i < new_size; i++) { | ||
562 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
563 | |||
564 | ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT); | ||
565 | if (!ring[new_idx]) { | ||
566 | while (i--) { | ||
567 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
568 | |||
569 | ioat2_free_ring_ent(ring[new_idx], c); | ||
570 | } | ||
571 | kfree(ring); | ||
572 | return false; | ||
573 | } | ||
574 | set_desc_id(ring[new_idx], new_idx); | ||
575 | } | ||
576 | |||
577 | /* hw link new descriptors */ | ||
578 | for (i = curr_size-1; i < new_size; i++) { | ||
579 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
580 | struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)]; | ||
581 | struct ioat_dma_descriptor *hw = ring[new_idx]->hw; | ||
582 | |||
583 | hw->next = next->txd.phys; | ||
584 | } | ||
585 | } else { | ||
586 | struct ioat_dma_descriptor *hw; | ||
587 | struct ioat_ring_ent *next; | ||
588 | |||
589 | /* copy current descriptors to the new ring, dropping the | ||
590 | * removed descriptors | ||
591 | */ | ||
592 | for (i = 0; i < new_size; i++) { | ||
593 | u16 curr_idx = (ioat->tail+i) & (curr_size-1); | ||
594 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
595 | |||
596 | ring[new_idx] = ioat->ring[curr_idx]; | ||
597 | set_desc_id(ring[new_idx], new_idx); | ||
598 | } | ||
599 | |||
600 | /* free deleted descriptors */ | ||
601 | for (i = new_size; i < curr_size; i++) { | ||
602 | struct ioat_ring_ent *ent; | ||
603 | |||
604 | ent = ioat2_get_ring_ent(ioat, ioat->tail+i); | ||
605 | ioat2_free_ring_ent(ent, c); | ||
606 | } | ||
607 | |||
608 | /* fix up hardware ring */ | ||
609 | hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw; | ||
610 | next = ring[(ioat->tail+new_size) & (new_size-1)]; | ||
611 | hw->next = next->txd.phys; | ||
612 | } | ||
613 | |||
614 | dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", | ||
615 | __func__, new_size); | ||
616 | |||
617 | kfree(ioat->ring); | ||
618 | ioat->ring = ring; | ||
619 | ioat->alloc_order = order; | ||
620 | |||
621 | return true; | ||
622 | } | ||
623 | |||
624 | /** | ||
625 | * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops | ||
626 | * @idx: gets starting descriptor index on successful allocation | ||
627 | * @ioat: ioat2,3 channel (ring) to operate on | ||
628 | * @num_descs: allocation length | ||
629 | */ | ||
630 | int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) | ||
631 | { | ||
632 | struct ioat_chan_common *chan = &ioat->base; | ||
633 | |||
634 | spin_lock_bh(&ioat->ring_lock); | ||
635 | /* never allow the last descriptor to be consumed; we need at | ||
636 | * least one free at all times to allow for on-the-fly ring | ||
637 | * resizing. | ||
638 | */ | ||
639 | while (unlikely(ioat2_ring_space(ioat) <= num_descs)) { | ||
640 | if (reshape_ring(ioat, ioat->alloc_order + 1) && | ||
641 | ioat2_ring_space(ioat) > num_descs) | ||
642 | break; | ||
643 | |||
644 | if (printk_ratelimit()) | ||
645 | dev_dbg(to_dev(chan), | ||
646 | "%s: ring full! num_descs: %d (%x:%x:%x)\n", | ||
647 | __func__, num_descs, ioat->head, ioat->tail, | ||
648 | ioat->issued); | ||
649 | spin_unlock_bh(&ioat->ring_lock); | ||
650 | |||
651 | /* make progress on reclaim; in the allocation-failure case we | ||
652 | * may be called with bottom halves disabled, so we need to | ||
653 | * trigger the timer event directly | ||
654 | */ | ||
655 | spin_lock_bh(&chan->cleanup_lock); | ||
656 | if (jiffies > chan->timer.expires && | ||
657 | timer_pending(&chan->timer)) { | ||
658 | struct ioatdma_device *device = chan->device; | ||
659 | |||
660 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
661 | spin_unlock_bh(&chan->cleanup_lock); | ||
662 | device->timer_fn((unsigned long) ioat); | ||
663 | } else | ||
664 | spin_unlock_bh(&chan->cleanup_lock); | ||
665 | return -ENOMEM; | ||
666 | } | ||
667 | |||
668 | dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", | ||
669 | __func__, num_descs, ioat->head, ioat->tail, ioat->issued); | ||
670 | |||
671 | *idx = ioat2_desc_alloc(ioat, num_descs); | ||
672 | return 0; /* with ioat->ring_lock held */ | ||
673 | } | ||
674 | |||
675 | struct dma_async_tx_descriptor * | ||
676 | ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | ||
677 | dma_addr_t dma_src, size_t len, unsigned long flags) | ||
678 | { | ||
679 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
680 | struct ioat_dma_descriptor *hw; | ||
681 | struct ioat_ring_ent *desc; | ||
682 | dma_addr_t dst = dma_dest; | ||
683 | dma_addr_t src = dma_src; | ||
684 | size_t total_len = len; | ||
685 | int num_descs; | ||
686 | u16 idx; | ||
687 | int i; | ||
688 | |||
689 | num_descs = ioat2_xferlen_to_descs(ioat, len); | ||
690 | if (likely(num_descs) && | ||
691 | ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) | ||
692 | /* pass */; | ||
693 | else | ||
694 | return NULL; | ||
695 | i = 0; | ||
696 | do { | ||
697 | size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log); | ||
698 | |||
699 | desc = ioat2_get_ring_ent(ioat, idx + i); | ||
700 | hw = desc->hw; | ||
701 | |||
702 | hw->size = copy; | ||
703 | hw->ctl = 0; | ||
704 | hw->src_addr = src; | ||
705 | hw->dst_addr = dst; | ||
706 | |||
707 | len -= copy; | ||
708 | dst += copy; | ||
709 | src += copy; | ||
710 | dump_desc_dbg(ioat, desc); | ||
711 | } while (++i < num_descs); | ||
712 | |||
713 | desc->txd.flags = flags; | ||
714 | desc->len = total_len; | ||
715 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
716 | hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
717 | hw->ctl_f.compl_write = 1; | ||
718 | dump_desc_dbg(ioat, desc); | ||
719 | /* we leave the channel locked to ensure in-order submission */ | ||
720 | |||
721 | return &desc->txd; | ||
722 | } | ||
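For context, these prep/submit/issue hooks are reached through the generic dmaengine API rather than called directly. A hedged kernel-side sketch of how a client would drive them is below; it assumes 'chan' is an ioat2 channel and that dma_dest/dma_src were DMA-mapped elsewhere, trims error handling, and is not a complete or tested client.

#include <linux/dmaengine.h>
#include <linux/errno.h>

static dma_cookie_t sketch_memcpy(struct dma_chan *chan,
				  dma_addr_t dma_dest, dma_addr_t dma_src,
				  size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
						   len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;              /* ring full; caller may retry */

	cookie = tx->tx_submit(tx);          /* ioat2_tx_submit_unlock() drops the ring lock */
	dma_async_issue_pending(chan);       /* ends up in ioat2_issue_pending() */

	return cookie;
}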
723 | |||
724 | /** | ||
725 | * ioat2_free_chan_resources - release all the descriptors | ||
726 | * @c: the channel to be cleaned | ||
727 | */ | ||
728 | void ioat2_free_chan_resources(struct dma_chan *c) | ||
729 | { | ||
730 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
731 | struct ioat_chan_common *chan = &ioat->base; | ||
732 | struct ioatdma_device *device = chan->device; | ||
733 | struct ioat_ring_ent *desc; | ||
734 | const u16 total_descs = 1 << ioat->alloc_order; | ||
735 | int descs; | ||
736 | int i; | ||
737 | |||
738 | /* Before freeing channel resources, first check | ||
739 | * whether they have been previously allocated for this channel. | ||
740 | */ | ||
741 | if (!ioat->ring) | ||
742 | return; | ||
743 | |||
744 | tasklet_disable(&chan->cleanup_task); | ||
745 | del_timer_sync(&chan->timer); | ||
746 | device->cleanup_tasklet((unsigned long) ioat); | ||
747 | |||
748 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | ||
749 | * before removing DMA descriptor resources. | ||
750 | */ | ||
751 | writeb(IOAT_CHANCMD_RESET, | ||
752 | chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
753 | mdelay(100); | ||
754 | |||
755 | spin_lock_bh(&ioat->ring_lock); | ||
756 | descs = ioat2_ring_space(ioat); | ||
757 | dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); | ||
758 | for (i = 0; i < descs; i++) { | ||
759 | desc = ioat2_get_ring_ent(ioat, ioat->head + i); | ||
760 | ioat2_free_ring_ent(desc, c); | ||
761 | } | ||
762 | |||
763 | if (descs < total_descs) | ||
764 | dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", | ||
765 | total_descs - descs); | ||
766 | |||
767 | for (i = 0; i < total_descs - descs; i++) { | ||
768 | desc = ioat2_get_ring_ent(ioat, ioat->tail + i); | ||
769 | dump_desc_dbg(ioat, desc); | ||
770 | ioat2_free_ring_ent(desc, c); | ||
771 | } | ||
772 | |||
773 | kfree(ioat->ring); | ||
774 | ioat->ring = NULL; | ||
775 | ioat->alloc_order = 0; | ||
776 | pci_pool_free(device->completion_pool, chan->completion, | ||
777 | chan->completion_dma); | ||
778 | spin_unlock_bh(&ioat->ring_lock); | ||
779 | |||
780 | chan->last_completion = 0; | ||
781 | chan->completion_dma = 0; | ||
782 | ioat->pending = 0; | ||
783 | ioat->dmacount = 0; | ||
784 | } | ||
785 | |||
786 | enum dma_status | ||
787 | ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
788 | dma_cookie_t *done, dma_cookie_t *used) | ||
789 | { | ||
790 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
791 | struct ioatdma_device *device = ioat->base.device; | ||
792 | |||
793 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | ||
794 | return DMA_SUCCESS; | ||
795 | |||
796 | device->cleanup_tasklet((unsigned long) ioat); | ||
797 | |||
798 | return ioat_is_complete(c, cookie, done, used); | ||
799 | } | ||
800 | |||
801 | static ssize_t ring_size_show(struct dma_chan *c, char *page) | ||
802 | { | ||
803 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
804 | |||
805 | return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1); | ||
806 | } | ||
807 | static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); | ||
808 | |||
809 | static ssize_t ring_active_show(struct dma_chan *c, char *page) | ||
810 | { | ||
811 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
812 | |||
813 | /* ...taken outside the lock, no need to be precise */ | ||
814 | return sprintf(page, "%d\n", ioat2_ring_active(ioat)); | ||
815 | } | ||
816 | static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); | ||
817 | |||
818 | static struct attribute *ioat2_attrs[] = { | ||
819 | &ring_size_attr.attr, | ||
820 | &ring_active_attr.attr, | ||
821 | &ioat_cap_attr.attr, | ||
822 | &ioat_version_attr.attr, | ||
823 | NULL, | ||
824 | }; | ||
825 | |||
826 | struct kobj_type ioat2_ktype = { | ||
827 | .sysfs_ops = &ioat_sysfs_ops, | ||
828 | .default_attrs = ioat2_attrs, | ||
829 | }; | ||
830 | |||
831 | int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) | ||
832 | { | ||
833 | struct pci_dev *pdev = device->pdev; | ||
834 | struct dma_device *dma; | ||
835 | struct dma_chan *c; | ||
836 | struct ioat_chan_common *chan; | ||
837 | int err; | ||
838 | |||
839 | device->enumerate_channels = ioat2_enumerate_channels; | ||
840 | device->cleanup_tasklet = ioat2_cleanup_tasklet; | ||
841 | device->timer_fn = ioat2_timer_event; | ||
842 | device->self_test = ioat_dma_self_test; | ||
843 | dma = &device->common; | ||
844 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | ||
845 | dma->device_issue_pending = ioat2_issue_pending; | ||
846 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | ||
847 | dma->device_free_chan_resources = ioat2_free_chan_resources; | ||
848 | dma->device_is_tx_complete = ioat2_is_complete; | ||
849 | |||
850 | err = ioat_probe(device); | ||
851 | if (err) | ||
852 | return err; | ||
853 | ioat_set_tcp_copy_break(2048); | ||
854 | |||
855 | list_for_each_entry(c, &dma->channels, device_node) { | ||
856 | chan = to_chan_common(c); | ||
857 | writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, | ||
858 | chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
859 | } | ||
860 | |||
861 | err = ioat_register(device); | ||
862 | if (err) | ||
863 | return err; | ||
864 | |||
865 | ioat_kobject_add(device, &ioat2_ktype); | ||
866 | |||
867 | if (dca) | ||
868 | device->dca = ioat2_dca_init(pdev, device->reg_base); | ||
869 | |||
870 | return err; | ||
871 | } | ||
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h new file mode 100644 index 000000000000..1d849ef74d5f --- /dev/null +++ b/drivers/dma/ioat/dma_v2.h | |||
@@ -0,0 +1,190 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the Free | ||
6 | * Software Foundation; either version 2 of the License, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called COPYING. | ||
20 | */ | ||
21 | #ifndef IOATDMA_V2_H | ||
22 | #define IOATDMA_V2_H | ||
23 | |||
24 | #include <linux/dmaengine.h> | ||
25 | #include "dma.h" | ||
26 | #include "hw.h" | ||
27 | |||
28 | |||
29 | extern int ioat_pending_level; | ||
30 | extern int ioat_ring_alloc_order; | ||
31 | |||
32 | /* | ||
33 | * workaround for IOAT ver.3.0 null descriptor issue | ||
34 | * (channel returns error when size is 0) | ||
35 | */ | ||
36 | #define NULL_DESC_BUFFER_SIZE 1 | ||
37 | |||
38 | #define IOAT_MAX_ORDER 16 | ||
39 | #define ioat_get_alloc_order() \ | ||
40 | (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) | ||
41 | #define ioat_get_max_alloc_order() \ | ||
42 | (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER)) | ||
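These macros clamp the module parameters against IOAT_MAX_ORDER, so the ring always holds a power-of-two number of descriptors between the default order 8 (256 entries) and order 16 (65536 entries). A trivial user-space illustration of the arithmetic; clamp_order() is a hypothetical stand-in for ioat_get_alloc_order():

#include <stdio.h>

#define IOAT_MAX_ORDER 16

static int clamp_order(int requested)
{
	return requested < IOAT_MAX_ORDER ? requested : IOAT_MAX_ORDER;
}

int main(void)
{
	int requests[] = { 8, 10, 16, 20 };   /* 20 would be clamped to 16 */

	for (unsigned i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		int order = clamp_order(requests[i]);

		printf("order %2d -> %6d descriptors\n", order, 1 << order);
	}
	return 0;
}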
43 | |||
44 | /* struct ioat2_dma_chan - ioat v2 / v3 channel attributes | ||
45 | * @base: common ioat channel parameters | ||
46 | * @xfercap_log: log2 of channel max transfer length (for fast division) | ||
47 | * @head: allocated index | ||
48 | * @issued: hardware notification point | ||
49 | * @tail: cleanup index | ||
50 | * @pending: lock free indicator for issued != head | ||
51 | * @dmacount: identical to 'head' except for occasionally resetting to zero | ||
52 | * @alloc_order: log2 of the number of allocated descriptors | ||
53 | * @ring: software ring buffer implementation of hardware ring | ||
54 | * @ring_lock: protects ring attributes | ||
55 | */ | ||
56 | struct ioat2_dma_chan { | ||
57 | struct ioat_chan_common base; | ||
58 | size_t xfercap_log; | ||
59 | u16 head; | ||
60 | u16 issued; | ||
61 | u16 tail; | ||
62 | u16 dmacount; | ||
63 | u16 alloc_order; | ||
64 | int pending; | ||
65 | struct ioat_ring_ent **ring; | ||
66 | spinlock_t ring_lock; | ||
67 | }; | ||
68 | |||
69 | static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) | ||
70 | { | ||
71 | struct ioat_chan_common *chan = to_chan_common(c); | ||
72 | |||
73 | return container_of(chan, struct ioat2_dma_chan, base); | ||
74 | } | ||
75 | |||
76 | static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat) | ||
77 | { | ||
78 | return (1 << ioat->alloc_order) - 1; | ||
79 | } | ||
80 | |||
81 | /* count of descriptors in flight with the engine */ | ||
82 | static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) | ||
83 | { | ||
84 | return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat); | ||
85 | } | ||
86 | |||
87 | /* count of descriptors pending submission to hardware */ | ||
88 | static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) | ||
89 | { | ||
90 | return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat); | ||
91 | } | ||
92 | |||
93 | static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) | ||
94 | { | ||
95 | u16 num_descs = ioat2_ring_mask(ioat) + 1; | ||
96 | u16 active = ioat2_ring_active(ioat); | ||
97 | |||
98 | BUG_ON(active > num_descs); | ||
99 | |||
100 | return num_descs - active; | ||
101 | } | ||
102 | |||
103 | /* assumes caller already checked space */ | ||
104 | static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len) | ||
105 | { | ||
106 | ioat->head += len; | ||
107 | return ioat->head - len; | ||
108 | } | ||
109 | |||
110 | static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) | ||
111 | { | ||
112 | u16 num_descs = len >> ioat->xfercap_log; | ||
113 | |||
114 | num_descs += !!(len & ((1 << ioat->xfercap_log) - 1)); | ||
115 | return num_descs; | ||
116 | } | ||
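The ring bookkeeping above leans on unsigned 16-bit indices that are allowed to wrap, with the power-of-two mask recovering the slot: active, pending and space fall out of simple subtraction, and a transfer longer than 1 << xfercap_log is split across ceil(len / 2^xfercap_log) descriptors. A small user-space check of both calculations, mirroring the helpers above with stand-alone variables:

#include <stdint.h>
#include <stdio.h>

static uint16_t head, tail, issued;       /* mirror the ioat2_dma_chan counters */
static const unsigned order = 8;          /* alloc_order: 256-entry ring */

static uint16_t ring_mask(void)    { return (1 << order) - 1; }
static uint16_t ring_active(void)  { return (head - tail)   & ring_mask(); }
static uint16_t ring_pending(void) { return (head - issued) & ring_mask(); }
static uint16_t ring_space(void)   { return (ring_mask() + 1) - ring_active(); }

/* mirrors ioat2_xferlen_to_descs(): ceil(len / 2^xfercap_log) */
static uint16_t xferlen_to_descs(size_t len, unsigned xfercap_log)
{
	uint16_t n = len >> xfercap_log;

	n += !!(len & ((1u << xfercap_log) - 1));
	return n;
}

int main(void)
{
	head = 65534; issued = 65533; tail = 65530;   /* indices about to wrap */
	printf("active %u pending %u space %u\n",
	       (unsigned)ring_active(), (unsigned)ring_pending(),
	       (unsigned)ring_space());

	/* e.g. a 5 MB copy with a 1 MB per-descriptor cap needs 5 descriptors */
	printf("descs for 5MB at xfercap_log=20: %u\n",
	       (unsigned)xferlen_to_descs((size_t)5 << 20, 20));
	return 0;
}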
117 | |||
118 | /** | ||
119 | * struct ioat_ring_ent - wrapper around hardware descriptor | ||
120 | * @hw: hardware DMA descriptor (for memcpy) | ||
121 | * @fill: hardware fill descriptor | ||
122 | * @xor: hardware xor descriptor | ||
123 | * @xor_ex: hardware xor extension descriptor | ||
124 | * @pq: hardware pq descriptor | ||
125 | * @pq_ex: hardware pq extension descriptor | ||
126 | * @pqu: hardware pq update descriptor | ||
127 | * @raw: hardware raw (un-typed) descriptor | ||
128 | * @txd: the generic software descriptor for all engines | ||
129 | * @len: total transaction length for unmap | ||
130 | * @result: asynchronous result of validate operations | ||
131 | * @id: identifier for debug | ||
132 | */ | ||
133 | |||
134 | struct ioat_ring_ent { | ||
135 | union { | ||
136 | struct ioat_dma_descriptor *hw; | ||
137 | struct ioat_fill_descriptor *fill; | ||
138 | struct ioat_xor_descriptor *xor; | ||
139 | struct ioat_xor_ext_descriptor *xor_ex; | ||
140 | struct ioat_pq_descriptor *pq; | ||
141 | struct ioat_pq_ext_descriptor *pq_ex; | ||
142 | struct ioat_pq_update_descriptor *pqu; | ||
143 | struct ioat_raw_descriptor *raw; | ||
144 | }; | ||
145 | size_t len; | ||
146 | struct dma_async_tx_descriptor txd; | ||
147 | enum sum_check_flags *result; | ||
148 | #ifdef DEBUG | ||
149 | int id; | ||
150 | #endif | ||
151 | }; | ||
152 | |||
153 | static inline struct ioat_ring_ent * | ||
154 | ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) | ||
155 | { | ||
156 | return ioat->ring[idx & ioat2_ring_mask(ioat)]; | ||
157 | } | ||
158 | |||
159 | static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) | ||
160 | { | ||
161 | struct ioat_chan_common *chan = &ioat->base; | ||
162 | |||
163 | writel(addr & 0x00000000FFFFFFFF, | ||
164 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
165 | writel(addr >> 32, | ||
166 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
167 | } | ||
168 | |||
169 | int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); | ||
170 | int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); | ||
171 | struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
172 | struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
173 | int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs); | ||
174 | int ioat2_enumerate_channels(struct ioatdma_device *device); | ||
175 | struct dma_async_tx_descriptor * | ||
176 | ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | ||
177 | dma_addr_t dma_src, size_t len, unsigned long flags); | ||
178 | void ioat2_issue_pending(struct dma_chan *chan); | ||
179 | int ioat2_alloc_chan_resources(struct dma_chan *c); | ||
180 | void ioat2_free_chan_resources(struct dma_chan *c); | ||
181 | enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
182 | dma_cookie_t *done, dma_cookie_t *used); | ||
183 | void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); | ||
184 | bool reshape_ring(struct ioat2_dma_chan *ioat, int order); | ||
185 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); | ||
186 | void ioat2_cleanup_tasklet(unsigned long data); | ||
187 | void ioat2_timer_event(unsigned long data); | ||
188 | extern struct kobj_type ioat2_ktype; | ||
189 | extern struct kmem_cache *ioat2_cache; | ||
190 | #endif /* IOATDMA_V2_H */ | ||
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c new file mode 100644 index 000000000000..35d1e33afd5b --- /dev/null +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -0,0 +1,1223 @@ | |||
1 | /* | ||
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | * redistributing this file, you may do so under either license. | ||
4 | * | ||
5 | * GPL LICENSE SUMMARY | ||
6 | * | ||
7 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License, | ||
11 | * version 2, as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
21 | * | ||
22 | * The full GNU General Public License is included in this distribution in | ||
23 | * the file called "COPYING". | ||
24 | * | ||
25 | * BSD LICENSE | ||
26 | * | ||
27 | * Copyright(c) 2004-2009 Intel Corporation. All rights reserved. | ||
28 | * | ||
29 | * Redistribution and use in source and binary forms, with or without | ||
30 | * modification, are permitted provided that the following conditions are met: | ||
31 | * | ||
32 | * * Redistributions of source code must retain the above copyright | ||
33 | * notice, this list of conditions and the following disclaimer. | ||
34 | * * Redistributions in binary form must reproduce the above copyright | ||
35 | * notice, this list of conditions and the following disclaimer in | ||
36 | * the documentation and/or other materials provided with the | ||
37 | * distribution. | ||
38 | * * Neither the name of Intel Corporation nor the names of its | ||
39 | * contributors may be used to endorse or promote products derived | ||
40 | * from this software without specific prior written permission. | ||
41 | * | ||
42 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
43 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
44 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
45 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
46 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
47 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
48 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
49 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
50 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
51 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
52 | * POSSIBILITY OF SUCH DAMAGE. | ||
53 | */ | ||
54 | |||
55 | /* | ||
56 | * Support routines for v3+ hardware | ||
57 | */ | ||
58 | |||
59 | #include <linux/pci.h> | ||
60 | #include <linux/dmaengine.h> | ||
61 | #include <linux/dma-mapping.h> | ||
62 | #include "registers.h" | ||
63 | #include "hw.h" | ||
64 | #include "dma.h" | ||
65 | #include "dma_v2.h" | ||
66 | |||
67 | /* ioat hardware assumes at least two sources for raid operations */ | ||
68 | #define src_cnt_to_sw(x) ((x) + 2) | ||
69 | #define src_cnt_to_hw(x) ((x) - 2) | ||
70 | |||
71 | /* provide a lookup table for setting the source address in the base or | ||
72 | * extended descriptor of an xor or pq descriptor | ||
73 | */ | ||
74 | static const u8 xor_idx_to_desc __read_mostly = 0xd0; | ||
75 | static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 }; | ||
76 | static const u8 pq_idx_to_desc __read_mostly = 0xf8; | ||
77 | static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 }; | ||
78 | |||
79 | static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) | ||
80 | { | ||
81 | struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; | ||
82 | |||
83 | return raw->field[xor_idx_to_field[idx]]; | ||
84 | } | ||
85 | |||
86 | static void xor_set_src(struct ioat_raw_descriptor *descs[2], | ||
87 | dma_addr_t addr, u32 offset, int idx) | ||
88 | { | ||
89 | struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; | ||
90 | |||
91 | raw->field[xor_idx_to_field[idx]] = addr + offset; | ||
92 | } | ||
93 | |||
94 | static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) | ||
95 | { | ||
96 | struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; | ||
97 | |||
98 | return raw->field[pq_idx_to_field[idx]]; | ||
99 | } | ||
100 | |||
101 | static void pq_set_src(struct ioat_raw_descriptor *descs[2], | ||
102 | dma_addr_t addr, u32 offset, u8 coef, int idx) | ||
103 | { | ||
104 | struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; | ||
105 | struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; | ||
106 | |||
107 | raw->field[pq_idx_to_field[idx]] = addr + offset; | ||
108 | pq->coef[idx] = coef; | ||
109 | } | ||
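The two bitmaps and field tables above encode, for each source index, whether the address lives in the base descriptor or the extended descriptor, and which raw field it occupies. The mapping is easiest to see by simply expanding the tables; the user-space sketch below prints it exactly as the helpers compute it, with the constants copied verbatim from the declarations above (it makes no claim about which mapping the silicon requires).

#include <stdio.h>

static const unsigned char xor_idx_to_desc = 0xd0;
static const unsigned char xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const unsigned char pq_idx_to_desc = 0xf8;
static const unsigned char pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };

int main(void)
{
	int idx;

	for (idx = 0; idx < 8; idx++)
		printf("xor src %d: %s descriptor, field %d\n", idx,
		       (xor_idx_to_desc >> idx) & 1 ? "extended" : "base",
		       xor_idx_to_field[idx]);
	for (idx = 0; idx < 8; idx++)
		printf("pq  src %d: %s descriptor, field %d\n", idx,
		       (pq_idx_to_desc >> idx) & 1 ? "extended" : "base",
		       pq_idx_to_field[idx]);
	return 0;
}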
110 | |||
111 | static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, | ||
112 | struct ioat_ring_ent *desc, int idx) | ||
113 | { | ||
114 | struct ioat_chan_common *chan = &ioat->base; | ||
115 | struct pci_dev *pdev = chan->device->pdev; | ||
116 | size_t len = desc->len; | ||
117 | size_t offset = len - desc->hw->size; | ||
118 | struct dma_async_tx_descriptor *tx = &desc->txd; | ||
119 | enum dma_ctrl_flags flags = tx->flags; | ||
120 | |||
121 | switch (desc->hw->ctl_f.op) { | ||
122 | case IOAT_OP_COPY: | ||
123 | if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ | ||
124 | ioat_dma_unmap(chan, flags, len, desc->hw); | ||
125 | break; | ||
126 | case IOAT_OP_FILL: { | ||
127 | struct ioat_fill_descriptor *hw = desc->fill; | ||
128 | |||
129 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
130 | ioat_unmap(pdev, hw->dst_addr - offset, len, | ||
131 | PCI_DMA_FROMDEVICE, flags, 1); | ||
132 | break; | ||
133 | } | ||
134 | case IOAT_OP_XOR_VAL: | ||
135 | case IOAT_OP_XOR: { | ||
136 | struct ioat_xor_descriptor *xor = desc->xor; | ||
137 | struct ioat_ring_ent *ext; | ||
138 | struct ioat_xor_ext_descriptor *xor_ex = NULL; | ||
139 | int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); | ||
140 | struct ioat_raw_descriptor *descs[2]; | ||
141 | int i; | ||
142 | |||
143 | if (src_cnt > 5) { | ||
144 | ext = ioat2_get_ring_ent(ioat, idx + 1); | ||
145 | xor_ex = ext->xor_ex; | ||
146 | } | ||
147 | |||
148 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
149 | descs[0] = (struct ioat_raw_descriptor *) xor; | ||
150 | descs[1] = (struct ioat_raw_descriptor *) xor_ex; | ||
151 | for (i = 0; i < src_cnt; i++) { | ||
152 | dma_addr_t src = xor_get_src(descs, i); | ||
153 | |||
154 | ioat_unmap(pdev, src - offset, len, | ||
155 | PCI_DMA_TODEVICE, flags, 0); | ||
156 | } | ||
157 | |||
158 | /* dest is a source in xor validate operations */ | ||
159 | if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
160 | ioat_unmap(pdev, xor->dst_addr - offset, len, | ||
161 | PCI_DMA_TODEVICE, flags, 1); | ||
162 | break; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
167 | ioat_unmap(pdev, xor->dst_addr - offset, len, | ||
168 | PCI_DMA_FROMDEVICE, flags, 1); | ||
169 | break; | ||
170 | } | ||
171 | case IOAT_OP_PQ_VAL: | ||
172 | case IOAT_OP_PQ: { | ||
173 | struct ioat_pq_descriptor *pq = desc->pq; | ||
174 | struct ioat_ring_ent *ext; | ||
175 | struct ioat_pq_ext_descriptor *pq_ex = NULL; | ||
176 | int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); | ||
177 | struct ioat_raw_descriptor *descs[2]; | ||
178 | int i; | ||
179 | |||
180 | if (src_cnt > 3) { | ||
181 | ext = ioat2_get_ring_ent(ioat, idx + 1); | ||
182 | pq_ex = ext->pq_ex; | ||
183 | } | ||
184 | |||
185 | /* in the 'continue' case don't unmap the dests as sources */ | ||
186 | if (dmaf_p_disabled_continue(flags)) | ||
187 | src_cnt--; | ||
188 | else if (dmaf_continue(flags)) | ||
189 | src_cnt -= 3; | ||
190 | |||
191 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
192 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
193 | descs[1] = (struct ioat_raw_descriptor *) pq_ex; | ||
194 | for (i = 0; i < src_cnt; i++) { | ||
195 | dma_addr_t src = pq_get_src(descs, i); | ||
196 | |||
197 | ioat_unmap(pdev, src - offset, len, | ||
198 | PCI_DMA_TODEVICE, flags, 0); | ||
199 | } | ||
200 | |||
201 | /* the dests are sources in pq validate operations */ | ||
202 | if (pq->ctl_f.op == IOAT_OP_PQ_VAL) { | ||
203 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
204 | ioat_unmap(pdev, pq->p_addr - offset, | ||
205 | len, PCI_DMA_TODEVICE, flags, 0); | ||
206 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
207 | ioat_unmap(pdev, pq->q_addr - offset, | ||
208 | len, PCI_DMA_TODEVICE, flags, 0); | ||
209 | break; | ||
210 | } | ||
211 | } | ||
212 | |||
213 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
214 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
215 | ioat_unmap(pdev, pq->p_addr - offset, len, | ||
216 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
217 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
218 | ioat_unmap(pdev, pq->q_addr - offset, len, | ||
219 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
220 | } | ||
221 | break; | ||
222 | } | ||
223 | default: | ||
224 | dev_err(&pdev->dev, "%s: unknown op type: %#x\n", | ||
225 | __func__, desc->hw->ctl_f.op); | ||
226 | } | ||
227 | } | ||
228 | |||
229 | static bool desc_has_ext(struct ioat_ring_ent *desc) | ||
230 | { | ||
231 | struct ioat_dma_descriptor *hw = desc->hw; | ||
232 | |||
233 | if (hw->ctl_f.op == IOAT_OP_XOR || | ||
234 | hw->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
235 | struct ioat_xor_descriptor *xor = desc->xor; | ||
236 | |||
237 | if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) | ||
238 | return true; | ||
239 | } else if (hw->ctl_f.op == IOAT_OP_PQ || | ||
240 | hw->ctl_f.op == IOAT_OP_PQ_VAL) { | ||
241 | struct ioat_pq_descriptor *pq = desc->pq; | ||
242 | |||
243 | if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) | ||
244 | return true; | ||
245 | } | ||
246 | |||
247 | return false; | ||
248 | } | ||
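Because the base descriptor only has room for a limited number of source addresses, larger xor operations (more than 5 sources) and pq operations (more than 3 sources) spill into a second, extended ring entry; desc_has_ext() is what lets the v3 __cleanup() below know to skip that extra slot. A hypothetical helper illustrating the resulting slot count per operation, using the same 5/3 source limits as the code above:

#include <stdio.h>

/* hypothetical helper: ring slots consumed by one xor/pq operation; the
 * base descriptor holds up to 5 xor sources or 3 pq sources, anything
 * beyond that needs the extended descriptor in the next ring slot */
static int slots_for_op(int is_pq, int src_cnt)
{
	int base_limit = is_pq ? 3 : 5;

	return src_cnt > base_limit ? 2 : 1;
}

int main(void)
{
	printf("xor with 5 sources: %d slot(s)\n", slots_for_op(0, 5));
	printf("xor with 8 sources: %d slot(s)\n", slots_for_op(0, 8));
	printf("pq  with 3 sources: %d slot(s)\n", slots_for_op(1, 3));
	printf("pq  with 6 sources: %d slot(s)\n", slots_for_op(1, 6));
	return 0;
}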
249 | |||
250 | /** | ||
251 | * __cleanup - reclaim used descriptors | ||
252 | * @ioat: channel (ring) to clean | ||
253 | * | ||
254 | * The difference from the dma_v2.c __cleanup() is that this routine | ||
255 | * handles extended descriptors and dma-unmapping raid operations. | ||
256 | */ | ||
257 | static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) | ||
258 | { | ||
259 | struct ioat_chan_common *chan = &ioat->base; | ||
260 | struct ioat_ring_ent *desc; | ||
261 | bool seen_current = false; | ||
262 | u16 active; | ||
263 | int i; | ||
264 | |||
265 | dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", | ||
266 | __func__, ioat->head, ioat->tail, ioat->issued); | ||
267 | |||
268 | active = ioat2_ring_active(ioat); | ||
269 | for (i = 0; i < active && !seen_current; i++) { | ||
270 | struct dma_async_tx_descriptor *tx; | ||
271 | |||
272 | prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); | ||
273 | desc = ioat2_get_ring_ent(ioat, ioat->tail + i); | ||
274 | dump_desc_dbg(ioat, desc); | ||
275 | tx = &desc->txd; | ||
276 | if (tx->cookie) { | ||
277 | chan->completed_cookie = tx->cookie; | ||
278 | ioat3_dma_unmap(ioat, desc, ioat->tail + i); | ||
279 | tx->cookie = 0; | ||
280 | if (tx->callback) { | ||
281 | tx->callback(tx->callback_param); | ||
282 | tx->callback = NULL; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | if (tx->phys == phys_complete) | ||
287 | seen_current = true; | ||
288 | |||
289 | /* skip extended descriptors */ | ||
290 | if (desc_has_ext(desc)) { | ||
291 | BUG_ON(i + 1 >= active); | ||
292 | i++; | ||
293 | } | ||
294 | } | ||
295 | ioat->tail += i; | ||
296 | BUG_ON(!seen_current); /* no active descs have written a completion? */ | ||
297 | chan->last_completion = phys_complete; | ||
298 | if (ioat->head == ioat->tail) { | ||
299 | dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", | ||
300 | __func__); | ||
301 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
302 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
303 | } | ||
304 | } | ||
305 | |||
306 | static void ioat3_cleanup(struct ioat2_dma_chan *ioat) | ||
307 | { | ||
308 | struct ioat_chan_common *chan = &ioat->base; | ||
309 | unsigned long phys_complete; | ||
310 | |||
311 | prefetch(chan->completion); | ||
312 | |||
313 | if (!spin_trylock_bh(&chan->cleanup_lock)) | ||
314 | return; | ||
315 | |||
316 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | ||
317 | spin_unlock_bh(&chan->cleanup_lock); | ||
318 | return; | ||
319 | } | ||
320 | |||
321 | if (!spin_trylock_bh(&ioat->ring_lock)) { | ||
322 | spin_unlock_bh(&chan->cleanup_lock); | ||
323 | return; | ||
324 | } | ||
325 | |||
326 | __cleanup(ioat, phys_complete); | ||
327 | |||
328 | spin_unlock_bh(&ioat->ring_lock); | ||
329 | spin_unlock_bh(&chan->cleanup_lock); | ||
330 | } | ||
331 | |||
332 | static void ioat3_cleanup_tasklet(unsigned long data) | ||
333 | { | ||
334 | struct ioat2_dma_chan *ioat = (void *) data; | ||
335 | |||
336 | ioat3_cleanup(ioat); | ||
337 | writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN, | ||
338 | ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); | ||
339 | } | ||
340 | |||
341 | static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) | ||
342 | { | ||
343 | struct ioat_chan_common *chan = &ioat->base; | ||
344 | unsigned long phys_complete; | ||
345 | u32 status; | ||
346 | |||
347 | status = ioat_chansts(chan); | ||
348 | if (is_ioat_active(status) || is_ioat_idle(status)) | ||
349 | ioat_suspend(chan); | ||
350 | while (is_ioat_active(status) || is_ioat_idle(status)) { | ||
351 | status = ioat_chansts(chan); | ||
352 | cpu_relax(); | ||
353 | } | ||
354 | |||
355 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
356 | __cleanup(ioat, phys_complete); | ||
357 | |||
358 | __ioat2_restart_chan(ioat); | ||
359 | } | ||
360 | |||
361 | static void ioat3_timer_event(unsigned long data) | ||
362 | { | ||
363 | struct ioat2_dma_chan *ioat = (void *) data; | ||
364 | struct ioat_chan_common *chan = &ioat->base; | ||
365 | |||
366 | spin_lock_bh(&chan->cleanup_lock); | ||
367 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | ||
368 | unsigned long phys_complete; | ||
369 | u64 status; | ||
370 | |||
371 | spin_lock_bh(&ioat->ring_lock); | ||
372 | status = ioat_chansts(chan); | ||
373 | |||
374 | /* when halted due to errors, check for channel | ||
375 | * programming errors before advancing the completion state | ||
376 | */ | ||
377 | if (is_ioat_halted(status)) { | ||
378 | u32 chanerr; | ||
379 | |||
380 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
381 | BUG_ON(is_ioat_bug(chanerr)); | ||
382 | } | ||
383 | |||
384 | /* if we haven't made progress and we have already | ||
385 | * acknowledged a pending completion once, then be more | ||
386 | * forceful with a restart | ||
387 | */ | ||
388 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
389 | __cleanup(ioat, phys_complete); | ||
390 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) | ||
391 | ioat3_restart_channel(ioat); | ||
392 | else { | ||
393 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
394 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
395 | } | ||
396 | spin_unlock_bh(&ioat->ring_lock); | ||
397 | } else { | ||
398 | u16 active; | ||
399 | |||
400 | /* if the ring is idle, empty, and oversized, try to step | ||
401 | * down the size | ||
402 | */ | ||
403 | spin_lock_bh(&ioat->ring_lock); | ||
404 | active = ioat2_ring_active(ioat); | ||
405 | if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) | ||
406 | reshape_ring(ioat, ioat->alloc_order-1); | ||
407 | spin_unlock_bh(&ioat->ring_lock); | ||
408 | |||
409 | /* keep shrinking until we get back to our minimum | ||
410 | * default size | ||
411 | */ | ||
412 | if (ioat->alloc_order > ioat_get_alloc_order()) | ||
413 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
414 | } | ||
415 | spin_unlock_bh(&chan->cleanup_lock); | ||
416 | } | ||
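The idle branch of the timer handler above shrinks an oversized ring one allocation-order step per idle timeout until it is back at the default order. A toy model of that step-down (the default order and starting order are made up for the example; reshape_ring() itself is not modelled):

```c
/* One order step per idle timer tick, as in ioat3_timer_event(). */
#include <stdio.h>

#define DEFAULT_ORDER 4          /* stand-in for ioat_get_alloc_order() */

static int shrink_once(int alloc_order, int active)
{
	if (active == 0 && alloc_order > DEFAULT_ORDER)
		alloc_order--;       /* reshape_ring(ioat, alloc_order - 1) */
	return alloc_order;
}

int main(void)
{
	int order = 7;               /* ring grew to 2^7 entries under load */

	while (order > DEFAULT_ORDER) {
		order = shrink_once(order, 0);
		printf("idle tick -> order %d (%d entries)\n", order, 1 << order);
	}
	return 0;
}
```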
417 | |||
418 | static enum dma_status | ||
419 | ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
420 | dma_cookie_t *done, dma_cookie_t *used) | ||
421 | { | ||
422 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
423 | |||
424 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | ||
425 | return DMA_SUCCESS; | ||
426 | |||
427 | ioat3_cleanup(ioat); | ||
428 | |||
429 | return ioat_is_complete(c, cookie, done, used); | ||
430 | } | ||
431 | |||
432 | static struct dma_async_tx_descriptor * | ||
433 | ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value, | ||
434 | size_t len, unsigned long flags) | ||
435 | { | ||
436 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
437 | struct ioat_ring_ent *desc; | ||
438 | size_t total_len = len; | ||
439 | struct ioat_fill_descriptor *fill; | ||
440 | int num_descs; | ||
441 | u64 src_data = (0x0101010101010101ULL) * (value & 0xff); | ||
442 | u16 idx; | ||
443 | int i; | ||
444 | |||
445 | num_descs = ioat2_xferlen_to_descs(ioat, len); | ||
446 | if (likely(num_descs) && | ||
447 | ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) | ||
448 | /* pass */; | ||
449 | else | ||
450 | return NULL; | ||
451 | i = 0; | ||
452 | do { | ||
453 | size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); | ||
454 | |||
455 | desc = ioat2_get_ring_ent(ioat, idx + i); | ||
456 | fill = desc->fill; | ||
457 | |||
458 | fill->size = xfer_size; | ||
459 | fill->src_data = src_data; | ||
460 | fill->dst_addr = dest; | ||
461 | fill->ctl = 0; | ||
462 | fill->ctl_f.op = IOAT_OP_FILL; | ||
463 | |||
464 | len -= xfer_size; | ||
465 | dest += xfer_size; | ||
466 | dump_desc_dbg(ioat, desc); | ||
467 | } while (++i < num_descs); | ||
468 | |||
469 | desc->txd.flags = flags; | ||
470 | desc->len = total_len; | ||
471 | fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
472 | fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
473 | fill->ctl_f.compl_write = 1; | ||
474 | dump_desc_dbg(ioat, desc); | ||
475 | |||
476 | /* we leave the channel locked to ensure in order submission */ | ||
477 | return &desc->txd; | ||
478 | } | ||
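Two small calculations carry ioat3_prep_memset_lock() above: the fill byte is replicated into a 64-bit pattern by multiplying with 0x0101010101010101, and the total length is split into chunks of at most 2^xfercap_log bytes, one fill descriptor each. A standalone sketch of both (the xfercap and length values are illustrative):

```c
/* Byte replication and chunking from ioat3_prep_memset_lock(). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int value = 0xab;
	uint64_t src_data = 0x0101010101010101ULL * (value & 0xff);
	unsigned int xfercap_log = 20;        /* say the channel caps at 1 MB */
	size_t len = (5u << 20) | 4096;       /* 5 MB + 4 KB                  */
	size_t chunk = (size_t)1 << xfercap_log;
	int num_descs = 0;

	printf("src_data = %#" PRIx64 "\n", src_data);   /* 0xabab...ab */

	while (len) {
		size_t xfer = len < chunk ? len : chunk;

		num_descs++;                  /* one fill descriptor per chunk */
		len -= xfer;
	}
	printf("fill descriptors needed: %d\n", num_descs);  /* 6 */
	return 0;
}
```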
479 | |||
480 | static struct dma_async_tx_descriptor * | ||
481 | __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
482 | dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, | ||
483 | size_t len, unsigned long flags) | ||
484 | { | ||
485 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
486 | struct ioat_ring_ent *compl_desc; | ||
487 | struct ioat_ring_ent *desc; | ||
488 | struct ioat_ring_ent *ext; | ||
489 | size_t total_len = len; | ||
490 | struct ioat_xor_descriptor *xor; | ||
491 | struct ioat_xor_ext_descriptor *xor_ex = NULL; | ||
492 | struct ioat_dma_descriptor *hw; | ||
493 | u32 offset = 0; | ||
494 | int num_descs; | ||
495 | int with_ext; | ||
496 | int i; | ||
497 | u16 idx; | ||
498 | u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; | ||
499 | |||
500 | BUG_ON(src_cnt < 2); | ||
501 | |||
502 | num_descs = ioat2_xferlen_to_descs(ioat, len); | ||
503 | /* we need 2x the number of descriptors to cover greater than 5 | ||
504 | * sources | ||
505 | */ | ||
506 | if (src_cnt > 5) { | ||
507 | with_ext = 1; | ||
508 | num_descs *= 2; | ||
509 | } else | ||
510 | with_ext = 0; | ||
511 | |||
512 | /* completion writes from the raid engine may pass completion | ||
513 | * writes from the legacy engine, so we need one extra null | ||
514 | * (legacy) descriptor to ensure all completion writes arrive in | ||
515 | * order. | ||
516 | */ | ||
517 | if (likely(num_descs) && | ||
518 | ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) | ||
519 | /* pass */; | ||
520 | else | ||
521 | return NULL; | ||
522 | i = 0; | ||
523 | do { | ||
524 | struct ioat_raw_descriptor *descs[2]; | ||
525 | size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); | ||
526 | int s; | ||
527 | |||
528 | desc = ioat2_get_ring_ent(ioat, idx + i); | ||
529 | xor = desc->xor; | ||
530 | |||
531 | /* save a branch by unconditionally retrieving the | ||
532 | * extended descriptor; xor_set_src() knows not to write | ||
533 | * to it in the single-descriptor case | ||
534 | */ | ||
535 | ext = ioat2_get_ring_ent(ioat, idx + i + 1); | ||
536 | xor_ex = ext->xor_ex; | ||
537 | |||
538 | descs[0] = (struct ioat_raw_descriptor *) xor; | ||
539 | descs[1] = (struct ioat_raw_descriptor *) xor_ex; | ||
540 | for (s = 0; s < src_cnt; s++) | ||
541 | xor_set_src(descs, src[s], offset, s); | ||
542 | xor->size = xfer_size; | ||
543 | xor->dst_addr = dest + offset; | ||
544 | xor->ctl = 0; | ||
545 | xor->ctl_f.op = op; | ||
546 | xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); | ||
547 | |||
548 | len -= xfer_size; | ||
549 | offset += xfer_size; | ||
550 | dump_desc_dbg(ioat, desc); | ||
551 | } while ((i += 1 + with_ext) < num_descs); | ||
552 | |||
553 | /* last xor descriptor carries the unmap parameters and fence bit */ | ||
554 | desc->txd.flags = flags; | ||
555 | desc->len = total_len; | ||
556 | if (result) | ||
557 | desc->result = result; | ||
558 | xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
559 | |||
560 | /* completion descriptor carries interrupt bit */ | ||
561 | compl_desc = ioat2_get_ring_ent(ioat, idx + i); | ||
562 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; | ||
563 | hw = compl_desc->hw; | ||
564 | hw->ctl = 0; | ||
565 | hw->ctl_f.null = 1; | ||
566 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
567 | hw->ctl_f.compl_write = 1; | ||
568 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
569 | dump_desc_dbg(ioat, compl_desc); | ||
570 | |||
571 | /* we leave the channel locked to ensure in order submission */ | ||
572 | return &desc->txd; | ||
573 | } | ||
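The descriptor budget in __ioat3_prep_xor_lock() above works out to: one base descriptor per 2^xfercap_log chunk of the length, doubled when more than five sources force an extended descriptor alongside each base descriptor, plus one trailing null (legacy) descriptor for the ordered completion write. A sketch of that arithmetic, assuming ioat2_xferlen_to_descs() is a ceiling division by the transfer cap (the xfercap value is illustrative):

```c
/* Ring-slot budget for one xor operation, per __ioat3_prep_xor_lock(). */
#include <stdio.h>

static int xor_ring_slots(size_t len, unsigned int xfercap_log, int src_cnt)
{
	size_t cap = (size_t)1 << xfercap_log;
	int num_descs = (int)((len + cap - 1) / cap);  /* ceil(len / cap)      */
	int with_ext = src_cnt > 5;    /* sources 6..8 need an ext descriptor  */

	if (with_ext)
		num_descs *= 2;
	return num_descs + 1;          /* + trailing null completion desc      */
}

int main(void)
{
	printf("64K xor, 3 srcs: %d slots\n", xor_ring_slots(64 << 10, 20, 3)); /* 2 */
	printf("64K xor, 8 srcs: %d slots\n", xor_ring_slots(64 << 10, 20, 8)); /* 3 */
	printf("3M  xor, 8 srcs: %d slots\n", xor_ring_slots(3 << 20, 20, 8));  /* 7 */
	return 0;
}
```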
574 | |||
575 | static struct dma_async_tx_descriptor * | ||
576 | ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | ||
577 | unsigned int src_cnt, size_t len, unsigned long flags) | ||
578 | { | ||
579 | return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); | ||
580 | } | ||
581 | |||
582 | struct dma_async_tx_descriptor * | ||
583 | ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, | ||
584 | unsigned int src_cnt, size_t len, | ||
585 | enum sum_check_flags *result, unsigned long flags) | ||
586 | { | ||
587 | /* the cleanup routine only sets bits on validate failure; it | ||
588 | * does not clear bits on validate success, so clear it here | ||
589 | */ | ||
590 | *result = 0; | ||
591 | |||
592 | return __ioat3_prep_xor_lock(chan, result, src[0], &src[1], | ||
593 | src_cnt - 1, len, flags); | ||
594 | } | ||
595 | |||
596 | static void | ||
597 | dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext) | ||
598 | { | ||
599 | struct device *dev = to_dev(&ioat->base); | ||
600 | struct ioat_pq_descriptor *pq = desc->pq; | ||
601 | struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; | ||
602 | struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; | ||
603 | int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); | ||
604 | int i; | ||
605 | |||
606 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" | ||
607 | " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n", | ||
608 | desc_id(desc), (unsigned long long) desc->txd.phys, | ||
609 | (unsigned long long) (pq_ex ? pq_ex->next : pq->next), | ||
610 | desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, | ||
611 | pq->ctl_f.compl_write, | ||
612 | pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", | ||
613 | pq->ctl_f.src_cnt); | ||
614 | for (i = 0; i < src_cnt; i++) | ||
615 | dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, | ||
616 | (unsigned long long) pq_get_src(descs, i), pq->coef[i]); | ||
617 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); | ||
618 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); | ||
619 | } | ||
620 | |||
621 | static struct dma_async_tx_descriptor * | ||
622 | __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
623 | const dma_addr_t *dst, const dma_addr_t *src, | ||
624 | unsigned int src_cnt, const unsigned char *scf, | ||
625 | size_t len, unsigned long flags) | ||
626 | { | ||
627 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
628 | struct ioat_chan_common *chan = &ioat->base; | ||
629 | struct ioat_ring_ent *compl_desc; | ||
630 | struct ioat_ring_ent *desc; | ||
631 | struct ioat_ring_ent *ext; | ||
632 | size_t total_len = len; | ||
633 | struct ioat_pq_descriptor *pq; | ||
634 | struct ioat_pq_ext_descriptor *pq_ex = NULL; | ||
635 | struct ioat_dma_descriptor *hw; | ||
636 | u32 offset = 0; | ||
637 | int num_descs; | ||
638 | int with_ext; | ||
639 | int i, s; | ||
640 | u16 idx; | ||
641 | u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; | ||
642 | |||
643 | dev_dbg(to_dev(chan), "%s\n", __func__); | ||
644 | /* the engine requires at least two sources (we provide | ||
645 | * at least 1 implied source in the DMA_PREP_CONTINUE case) | ||
646 | */ | ||
647 | BUG_ON(src_cnt + dmaf_continue(flags) < 2); | ||
648 | |||
649 | num_descs = ioat2_xferlen_to_descs(ioat, len); | ||
650 | /* we need 2x the number of descriptors to cover greater than 3 | ||
651 | * sources | ||
652 | */ | ||
653 | if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) { | ||
654 | with_ext = 1; | ||
655 | num_descs *= 2; | ||
656 | } else | ||
657 | with_ext = 0; | ||
658 | |||
659 | /* completion writes from the raid engine may pass completion | ||
660 | * writes from the legacy engine, so we need one extra null | ||
661 | * (legacy) descriptor to ensure all completion writes arrive in | ||
662 | * order. | ||
663 | */ | ||
664 | if (likely(num_descs) && | ||
665 | ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) | ||
666 | /* pass */; | ||
667 | else | ||
668 | return NULL; | ||
669 | i = 0; | ||
670 | do { | ||
671 | struct ioat_raw_descriptor *descs[2]; | ||
672 | size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); | ||
673 | |||
674 | desc = ioat2_get_ring_ent(ioat, idx + i); | ||
675 | pq = desc->pq; | ||
676 | |||
677 | /* save a branch by unconditionally retrieving the | ||
678 | * extended descriptor; pq_set_src() knows not to write | ||
679 | * to it in the single-descriptor case | ||
680 | */ | ||
681 | ext = ioat2_get_ring_ent(ioat, idx + i + with_ext); | ||
682 | pq_ex = ext->pq_ex; | ||
683 | |||
684 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
685 | descs[1] = (struct ioat_raw_descriptor *) pq_ex; | ||
686 | |||
687 | for (s = 0; s < src_cnt; s++) | ||
688 | pq_set_src(descs, src[s], offset, scf[s], s); | ||
689 | |||
690 | /* see the comment for dma_maxpq in include/linux/dmaengine.h */ | ||
691 | if (dmaf_p_disabled_continue(flags)) | ||
692 | pq_set_src(descs, dst[1], offset, 1, s++); | ||
693 | else if (dmaf_continue(flags)) { | ||
694 | pq_set_src(descs, dst[0], offset, 0, s++); | ||
695 | pq_set_src(descs, dst[1], offset, 1, s++); | ||
696 | pq_set_src(descs, dst[1], offset, 0, s++); | ||
697 | } | ||
698 | pq->size = xfer_size; | ||
699 | pq->p_addr = dst[0] + offset; | ||
700 | pq->q_addr = dst[1] + offset; | ||
701 | pq->ctl = 0; | ||
702 | pq->ctl_f.op = op; | ||
703 | pq->ctl_f.src_cnt = src_cnt_to_hw(s); | ||
704 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | ||
705 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | ||
706 | |||
707 | len -= xfer_size; | ||
708 | offset += xfer_size; | ||
709 | } while ((i += 1 + with_ext) < num_descs); | ||
710 | |||
711 | /* last pq descriptor carries the unmap parameters and fence bit */ | ||
712 | desc->txd.flags = flags; | ||
713 | desc->len = total_len; | ||
714 | if (result) | ||
715 | desc->result = result; | ||
716 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
717 | dump_pq_desc_dbg(ioat, desc, ext); | ||
718 | |||
719 | /* completion descriptor carries interrupt bit */ | ||
720 | compl_desc = ioat2_get_ring_ent(ioat, idx + i); | ||
721 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; | ||
722 | hw = compl_desc->hw; | ||
723 | hw->ctl = 0; | ||
724 | hw->ctl_f.null = 1; | ||
725 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
726 | hw->ctl_f.compl_write = 1; | ||
727 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
728 | dump_desc_dbg(ioat, compl_desc); | ||
729 | |||
730 | /* we leave the channel locked to ensure in order submission */ | ||
731 | return &desc->txd; | ||
732 | } | ||
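When the caller sets DMA_PREP_CONTINUE, the loop above feeds the previous pass's P and/or Q results (dst[0]/dst[1]) back in as extra sources with fixed coefficients, exactly as the pq_set_src() calls show; the hardware source count written to the descriptor is therefore larger than the caller's src_cnt. A small model of how that count grows (the boolean flags below are local stand-ins for the dmaf_continue()/dmaf_p_disabled_continue() tests, not the dmaengine constants):

```c
/* Effective hardware source count per __ioat3_prep_pq_lock(). */
#include <stdbool.h>
#include <stdio.h>

static int pq_hw_src_cnt(int src_cnt, bool cont, bool p_disabled)
{
	int s = src_cnt;                 /* the caller's real sources             */

	if (cont && p_disabled)
		s += 1;                  /* old Q fed back in (coef 1)            */
	else if (cont)
		s += 3;                  /* old P (coef 0), Q (coef 1), Q (coef 0) */
	return s;
}

int main(void)
{
	printf("plain 4-src pq:        %d\n", pq_hw_src_cnt(4, false, false)); /* 4 */
	printf("continued 4-src pq:    %d\n", pq_hw_src_cnt(4, true, false));  /* 7 */
	printf("continued, P disabled: %d\n", pq_hw_src_cnt(4, true, true));   /* 5 */
	return 0;
}
```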
733 | |||
734 | static struct dma_async_tx_descriptor * | ||
735 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | ||
736 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
737 | unsigned long flags) | ||
738 | { | ||
739 | /* handle the single source multiply case from the raid6 | ||
740 | * recovery path | ||
741 | */ | ||
742 | if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) { | ||
743 | dma_addr_t single_source[2]; | ||
744 | unsigned char single_source_coef[2]; | ||
745 | |||
746 | BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); | ||
747 | single_source[0] = src[0]; | ||
748 | single_source[1] = src[0]; | ||
749 | single_source_coef[0] = scf[0]; | ||
750 | single_source_coef[1] = 0; | ||
751 | |||
752 | return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, | ||
753 | single_source_coef, len, flags); | ||
754 | } else | ||
755 | return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf, | ||
756 | len, flags); | ||
757 | } | ||
758 | |||
759 | struct dma_async_tx_descriptor * | ||
760 | ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | ||
761 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
762 | enum sum_check_flags *pqres, unsigned long flags) | ||
763 | { | ||
764 | /* the cleanup routine only sets bits on validate failure; it | ||
765 | * does not clear bits on validate success, so clear it here | ||
766 | */ | ||
767 | *pqres = 0; | ||
768 | |||
769 | return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | ||
770 | flags); | ||
771 | } | ||
772 | |||
773 | static struct dma_async_tx_descriptor * | ||
774 | ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | ||
775 | unsigned int src_cnt, size_t len, unsigned long flags) | ||
776 | { | ||
777 | unsigned char scf[src_cnt]; | ||
778 | dma_addr_t pq[2]; | ||
779 | |||
780 | memset(scf, 0, src_cnt); | ||
781 | flags |= DMA_PREP_PQ_DISABLE_Q; | ||
782 | pq[0] = dst; | ||
783 | pq[1] = ~0; | ||
784 | |||
785 | return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, | ||
786 | flags); | ||
787 | } | ||
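ioat3_prep_pqxor() above provides xor on hardware that only advertises the pq engine: Q is disabled and the coefficient array zeroed, leaving only P, which in the usual raid6 formulation assumed by the dmaengine PQ interface is the plain XOR of the sources. The sketch below works one byte of P and Q by hand; it assumes the same GF(256) polynomial (0x11d) the Linux raid6 code uses, which is an assumption about the silicon, not something stated in this patch:

```c
/* One byte of P/Q: P ignores the coefficients, Q weights each source. */
#include <stdint.h>
#include <stdio.h>

static uint8_t gfmul(uint8_t a, uint8_t b)
{
	uint8_t r = 0;

	while (b) {
		if (b & 1)
			r ^= a;
		a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);  /* reduce mod 0x11d */
		b >>= 1;
	}
	return r;
}

int main(void)
{
	uint8_t src[3] = { 0x12, 0x34, 0x56 };
	uint8_t coef[3] = { 0x01, 0x02, 0x04 };   /* raid6-style g^i coefficients */
	uint8_t p = 0, q = 0;
	int i;

	for (i = 0; i < 3; i++) {
		p ^= src[i];                      /* coefficients never touch P  */
		q ^= gfmul(coef[i], src[i]);
	}
	printf("P = %#x (plain xor), Q = %#x\n", p, q);
	return 0;
}
```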
788 | |||
789 | struct dma_async_tx_descriptor * | ||
790 | ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | ||
791 | unsigned int src_cnt, size_t len, | ||
792 | enum sum_check_flags *result, unsigned long flags) | ||
793 | { | ||
794 | unsigned char scf[src_cnt]; | ||
795 | dma_addr_t pq[2]; | ||
796 | |||
797 | /* the cleanup routine only sets bits on validate failure; it | ||
798 | * does not clear bits on validate success, so clear it here | ||
799 | */ | ||
800 | *result = 0; | ||
801 | |||
802 | memset(scf, 0, src_cnt); | ||
803 | flags |= DMA_PREP_PQ_DISABLE_Q; | ||
804 | pq[0] = src[0]; | ||
805 | pq[1] = ~0; | ||
806 | |||
807 | return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf, | ||
808 | len, flags); | ||
809 | } | ||
810 | |||
811 | static struct dma_async_tx_descriptor * | ||
812 | ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) | ||
813 | { | ||
814 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
815 | struct ioat_ring_ent *desc; | ||
816 | struct ioat_dma_descriptor *hw; | ||
817 | u16 idx; | ||
818 | |||
819 | if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0) | ||
820 | desc = ioat2_get_ring_ent(ioat, idx); | ||
821 | else | ||
822 | return NULL; | ||
823 | |||
824 | hw = desc->hw; | ||
825 | hw->ctl = 0; | ||
826 | hw->ctl_f.null = 1; | ||
827 | hw->ctl_f.int_en = 1; | ||
828 | hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
829 | hw->ctl_f.compl_write = 1; | ||
830 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
831 | hw->src_addr = 0; | ||
832 | hw->dst_addr = 0; | ||
833 | |||
834 | desc->txd.flags = flags; | ||
835 | desc->len = 1; | ||
836 | |||
837 | dump_desc_dbg(ioat, desc); | ||
838 | |||
839 | /* we leave the channel locked to ensure in order submission */ | ||
840 | return &desc->txd; | ||
841 | } | ||
842 | |||
843 | static void __devinit ioat3_dma_test_callback(void *dma_async_param) | ||
844 | { | ||
845 | struct completion *cmp = dma_async_param; | ||
846 | |||
847 | complete(cmp); | ||
848 | } | ||
849 | |||
850 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ | ||
851 | static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) | ||
852 | { | ||
853 | int i, src_idx; | ||
854 | struct page *dest; | ||
855 | struct page *xor_srcs[IOAT_NUM_SRC_TEST]; | ||
856 | struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; | ||
857 | dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; | ||
858 | dma_addr_t dma_addr, dest_dma; | ||
859 | struct dma_async_tx_descriptor *tx; | ||
860 | struct dma_chan *dma_chan; | ||
861 | dma_cookie_t cookie; | ||
862 | u8 cmp_byte = 0; | ||
863 | u32 cmp_word; | ||
864 | u32 xor_val_result; | ||
865 | int err = 0; | ||
866 | struct completion cmp; | ||
867 | unsigned long tmo; | ||
868 | struct device *dev = &device->pdev->dev; | ||
869 | struct dma_device *dma = &device->common; | ||
870 | |||
871 | dev_dbg(dev, "%s\n", __func__); | ||
872 | |||
873 | if (!dma_has_cap(DMA_XOR, dma->cap_mask)) | ||
874 | return 0; | ||
875 | |||
876 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { | ||
877 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | ||
878 | if (!xor_srcs[src_idx]) { | ||
879 | while (src_idx--) | ||
880 | __free_page(xor_srcs[src_idx]); | ||
881 | return -ENOMEM; | ||
882 | } | ||
883 | } | ||
884 | |||
885 | dest = alloc_page(GFP_KERNEL); | ||
886 | if (!dest) { | ||
887 | while (src_idx--) | ||
888 | __free_page(xor_srcs[src_idx]); | ||
889 | return -ENOMEM; | ||
890 | } | ||
891 | |||
892 | /* Fill in src buffers */ | ||
893 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { | ||
894 | u8 *ptr = page_address(xor_srcs[src_idx]); | ||
895 | for (i = 0; i < PAGE_SIZE; i++) | ||
896 | ptr[i] = (1 << src_idx); | ||
897 | } | ||
898 | |||
899 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) | ||
900 | cmp_byte ^= (u8) (1 << src_idx); | ||
901 | |||
902 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | | ||
903 | (cmp_byte << 8) | cmp_byte; | ||
904 | |||
905 | memset(page_address(dest), 0, PAGE_SIZE); | ||
906 | |||
907 | dma_chan = container_of(dma->channels.next, struct dma_chan, | ||
908 | device_node); | ||
909 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | ||
910 | err = -ENODEV; | ||
911 | goto out; | ||
912 | } | ||
913 | |||
914 | /* test xor */ | ||
915 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
916 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
917 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, | ||
918 | DMA_TO_DEVICE); | ||
919 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | ||
920 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | ||
921 | DMA_PREP_INTERRUPT); | ||
922 | |||
923 | if (!tx) { | ||
924 | dev_err(dev, "Self-test xor prep failed\n"); | ||
925 | err = -ENODEV; | ||
926 | goto free_resources; | ||
927 | } | ||
928 | |||
929 | async_tx_ack(tx); | ||
930 | init_completion(&cmp); | ||
931 | tx->callback = ioat3_dma_test_callback; | ||
932 | tx->callback_param = &cmp; | ||
933 | cookie = tx->tx_submit(tx); | ||
934 | if (cookie < 0) { | ||
935 | dev_err(dev, "Self-test xor setup failed\n"); | ||
936 | err = -ENODEV; | ||
937 | goto free_resources; | ||
938 | } | ||
939 | dma->device_issue_pending(dma_chan); | ||
940 | |||
941 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
942 | |||
943 | if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
944 | dev_err(dev, "Self-test xor timed out\n"); | ||
945 | err = -ENODEV; | ||
946 | goto free_resources; | ||
947 | } | ||
948 | |||
949 | dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
950 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | ||
951 | u32 *ptr = page_address(dest); | ||
952 | if (ptr[i] != cmp_word) { | ||
953 | dev_err(dev, "Self-test xor failed compare\n"); | ||
954 | err = -ENODEV; | ||
955 | goto free_resources; | ||
956 | } | ||
957 | } | ||
958 | dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); | ||
959 | |||
960 | /* skip validate if the capability is not present */ | ||
961 | if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) | ||
962 | goto free_resources; | ||
963 | |||
964 | /* validate the sources with the destination page */ | ||
965 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
966 | xor_val_srcs[i] = xor_srcs[i]; | ||
967 | xor_val_srcs[i] = dest; | ||
968 | |||
969 | xor_val_result = 1; | ||
970 | |||
971 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
972 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | ||
973 | DMA_TO_DEVICE); | ||
974 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | ||
975 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | ||
976 | &xor_val_result, DMA_PREP_INTERRUPT); | ||
977 | if (!tx) { | ||
978 | dev_err(dev, "Self-test zero prep failed\n"); | ||
979 | err = -ENODEV; | ||
980 | goto free_resources; | ||
981 | } | ||
982 | |||
983 | async_tx_ack(tx); | ||
984 | init_completion(&cmp); | ||
985 | tx->callback = ioat3_dma_test_callback; | ||
986 | tx->callback_param = &cmp; | ||
987 | cookie = tx->tx_submit(tx); | ||
988 | if (cookie < 0) { | ||
989 | dev_err(dev, "Self-test zero setup failed\n"); | ||
990 | err = -ENODEV; | ||
991 | goto free_resources; | ||
992 | } | ||
993 | dma->device_issue_pending(dma_chan); | ||
994 | |||
995 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
996 | |||
997 | if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
998 | dev_err(dev, "Self-test validate timed out\n"); | ||
999 | err = -ENODEV; | ||
1000 | goto free_resources; | ||
1001 | } | ||
1002 | |||
1003 | if (xor_val_result != 0) { | ||
1004 | dev_err(dev, "Self-test validate failed compare\n"); | ||
1005 | err = -ENODEV; | ||
1006 | goto free_resources; | ||
1007 | } | ||
1008 | |||
1009 | /* skip memset if the capability is not present */ | ||
1010 | if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask)) | ||
1011 | goto free_resources; | ||
1012 | |||
1013 | /* test memset */ | ||
1014 | dma_addr = dma_map_page(dev, dest, 0, | ||
1015 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
1016 | tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, | ||
1017 | DMA_PREP_INTERRUPT); | ||
1018 | if (!tx) { | ||
1019 | dev_err(dev, "Self-test memset prep failed\n"); | ||
1020 | err = -ENODEV; | ||
1021 | goto free_resources; | ||
1022 | } | ||
1023 | |||
1024 | async_tx_ack(tx); | ||
1025 | init_completion(&cmp); | ||
1026 | tx->callback = ioat3_dma_test_callback; | ||
1027 | tx->callback_param = &cmp; | ||
1028 | cookie = tx->tx_submit(tx); | ||
1029 | if (cookie < 0) { | ||
1030 | dev_err(dev, "Self-test memset setup failed\n"); | ||
1031 | err = -ENODEV; | ||
1032 | goto free_resources; | ||
1033 | } | ||
1034 | dma->device_issue_pending(dma_chan); | ||
1035 | |||
1036 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
1037 | |||
1038 | if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
1039 | dev_err(dev, "Self-test memset timed out\n"); | ||
1040 | err = -ENODEV; | ||
1041 | goto free_resources; | ||
1042 | } | ||
1043 | |||
1044 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { | ||
1045 | u32 *ptr = page_address(dest); | ||
1046 | if (ptr[i]) { | ||
1047 | dev_err(dev, "Self-test memset failed compare\n"); | ||
1048 | err = -ENODEV; | ||
1049 | goto free_resources; | ||
1050 | } | ||
1051 | } | ||
1052 | |||
1053 | /* test for non-zero parity sum */ | ||
1054 | xor_val_result = 0; | ||
1055 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1056 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | ||
1057 | DMA_TO_DEVICE); | ||
1058 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | ||
1059 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | ||
1060 | &xor_val_result, DMA_PREP_INTERRUPT); | ||
1061 | if (!tx) { | ||
1062 | dev_err(dev, "Self-test 2nd zero prep failed\n"); | ||
1063 | err = -ENODEV; | ||
1064 | goto free_resources; | ||
1065 | } | ||
1066 | |||
1067 | async_tx_ack(tx); | ||
1068 | init_completion(&cmp); | ||
1069 | tx->callback = ioat3_dma_test_callback; | ||
1070 | tx->callback_param = &cmp; | ||
1071 | cookie = tx->tx_submit(tx); | ||
1072 | if (cookie < 0) { | ||
1073 | dev_err(dev, "Self-test 2nd zero setup failed\n"); | ||
1074 | err = -ENODEV; | ||
1075 | goto free_resources; | ||
1076 | } | ||
1077 | dma->device_issue_pending(dma_chan); | ||
1078 | |||
1079 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
1080 | |||
1081 | if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
1082 | dev_err(dev, "Self-test 2nd validate timed out\n"); | ||
1083 | err = -ENODEV; | ||
1084 | goto free_resources; | ||
1085 | } | ||
1086 | |||
1087 | if (xor_val_result != SUM_CHECK_P_RESULT) { | ||
1088 | dev_err(dev, "Self-test validate failed compare\n"); | ||
1089 | err = -ENODEV; | ||
1090 | goto free_resources; | ||
1091 | } | ||
1092 | |||
1093 | free_resources: | ||
1094 | dma->device_free_chan_resources(dma_chan); | ||
1095 | out: | ||
1096 | src_idx = IOAT_NUM_SRC_TEST; | ||
1097 | while (src_idx--) | ||
1098 | __free_page(xor_srcs[src_idx]); | ||
1099 | __free_page(dest); | ||
1100 | return err; | ||
1101 | } | ||
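The compare values near the top of ioat_xor_val_self_test() follow directly from the fill pattern: source page i holds the byte (1 << i) everywhere, so the xor of all IOAT_NUM_SRC_TEST pages is a byte with the low six bits set, replicated into a 32-bit compare word. A quick check of that arithmetic:

```c
/* Expected-result bytes for the xor self-test above. */
#include <stdint.h>
#include <stdio.h>

#define IOAT_NUM_SRC_TEST 6

int main(void)
{
	uint8_t cmp_byte = 0;
	uint32_t cmp_word;
	int i;

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		cmp_byte ^= (uint8_t)(1 << i);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		   (cmp_byte << 8) | cmp_byte;

	/* prints cmp_byte = 0x3f, cmp_word = 0x3f3f3f3f */
	printf("cmp_byte = %#x, cmp_word = %#x\n", cmp_byte, cmp_word);
	return 0;
}
```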
1102 | |||
1103 | static int __devinit ioat3_dma_self_test(struct ioatdma_device *device) | ||
1104 | { | ||
1105 | int rc = ioat_dma_self_test(device); | ||
1106 | |||
1107 | if (rc) | ||
1108 | return rc; | ||
1109 | |||
1110 | rc = ioat_xor_val_self_test(device); | ||
1111 | if (rc) | ||
1112 | return rc; | ||
1113 | |||
1114 | return 0; | ||
1115 | } | ||
1116 | |||
1117 | int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | ||
1118 | { | ||
1119 | struct pci_dev *pdev = device->pdev; | ||
1120 | struct dma_device *dma; | ||
1121 | struct dma_chan *c; | ||
1122 | struct ioat_chan_common *chan; | ||
1123 | bool is_raid_device = false; | ||
1124 | int err; | ||
1125 | u16 dev_id; | ||
1126 | u32 cap; | ||
1127 | |||
1128 | device->enumerate_channels = ioat2_enumerate_channels; | ||
1129 | device->self_test = ioat3_dma_self_test; | ||
1130 | dma = &device->common; | ||
1131 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | ||
1132 | dma->device_issue_pending = ioat2_issue_pending; | ||
1133 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | ||
1134 | dma->device_free_chan_resources = ioat2_free_chan_resources; | ||
1135 | |||
1136 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); | ||
1137 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; | ||
1138 | |||
1139 | cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); | ||
1140 | if (cap & IOAT_CAP_XOR) { | ||
1141 | is_raid_device = true; | ||
1142 | dma->max_xor = 8; | ||
1143 | dma->xor_align = 2; | ||
1144 | |||
1145 | dma_cap_set(DMA_XOR, dma->cap_mask); | ||
1146 | dma->device_prep_dma_xor = ioat3_prep_xor; | ||
1147 | |||
1148 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | ||
1149 | dma->device_prep_dma_xor_val = ioat3_prep_xor_val; | ||
1150 | } | ||
1151 | if (cap & IOAT_CAP_PQ) { | ||
1152 | is_raid_device = true; | ||
1153 | dma_set_maxpq(dma, 8, 0); | ||
1154 | dma->pq_align = 2; | ||
1155 | |||
1156 | dma_cap_set(DMA_PQ, dma->cap_mask); | ||
1157 | dma->device_prep_dma_pq = ioat3_prep_pq; | ||
1158 | |||
1159 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); | ||
1160 | dma->device_prep_dma_pq_val = ioat3_prep_pq_val; | ||
1161 | |||
1162 | if (!(cap & IOAT_CAP_XOR)) { | ||
1163 | dma->max_xor = 8; | ||
1164 | dma->xor_align = 2; | ||
1165 | |||
1166 | dma_cap_set(DMA_XOR, dma->cap_mask); | ||
1167 | dma->device_prep_dma_xor = ioat3_prep_pqxor; | ||
1168 | |||
1169 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | ||
1170 | dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; | ||
1171 | } | ||
1172 | } | ||
1173 | if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { | ||
1174 | dma_cap_set(DMA_MEMSET, dma->cap_mask); | ||
1175 | dma->device_prep_dma_memset = ioat3_prep_memset_lock; | ||
1176 | } | ||
1177 | |||
1178 | |||
1179 | if (is_raid_device) { | ||
1180 | dma->device_is_tx_complete = ioat3_is_complete; | ||
1181 | device->cleanup_tasklet = ioat3_cleanup_tasklet; | ||
1182 | device->timer_fn = ioat3_timer_event; | ||
1183 | } else { | ||
1184 | dma->device_is_tx_complete = ioat2_is_complete; | ||
1185 | device->cleanup_tasklet = ioat2_cleanup_tasklet; | ||
1186 | device->timer_fn = ioat2_timer_event; | ||
1187 | } | ||
1188 | |||
1189 | /* -= IOAT ver.3 workarounds =- */ | ||
1190 | /* Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
1191 | * that can cause stability issues for IOAT ver.3 | ||
1192 | */ | ||
1193 | pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); | ||
1194 | |||
1195 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
1196 | * (workaround for spurious config parity error after restart) | ||
1197 | */ | ||
1198 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
1199 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) | ||
1200 | pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); | ||
1201 | |||
1202 | err = ioat_probe(device); | ||
1203 | if (err) | ||
1204 | return err; | ||
1205 | ioat_set_tcp_copy_break(262144); | ||
1206 | |||
1207 | list_for_each_entry(c, &dma->channels, device_node) { | ||
1208 | chan = to_chan_common(c); | ||
1209 | writel(IOAT_DMA_DCA_ANY_CPU, | ||
1210 | chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
1211 | } | ||
1212 | |||
1213 | err = ioat_register(device); | ||
1214 | if (err) | ||
1215 | return err; | ||
1216 | |||
1217 | ioat_kobject_add(device, &ioat2_ktype); | ||
1218 | |||
1219 | if (dca) | ||
1220 | device->dca = ioat3_dca_init(pdev, device->reg_base); | ||
1221 | |||
1222 | return 0; | ||
1223 | } | ||
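ioat3_dma_probe() above reads the DMACAP register once and wires up whichever prep routines the silicon advertises, falling back to pq-based xor when only the pq engine is present. A compact model of that capability-driven setup; the cap bit values match the IOAT_CAP_* additions to registers.h later in this patch, while the ops structure and strings are purely illustrative:

```c
/* Capability-driven op registration, modelled on ioat3_dma_probe(). */
#include <stdint.h>
#include <stdio.h>

#define IOAT_CAP_FILL_BLOCK 0x00000040
#define IOAT_CAP_XOR        0x00000100
#define IOAT_CAP_PQ         0x00000200

struct ops {
	const char *xor_prep;
	const char *pq_prep;
	const char *memset_prep;
};

static void setup(struct ops *o, uint32_t cap)
{
	int is_raid_device = 0;

	if (cap & IOAT_CAP_XOR) {
		is_raid_device = 1;
		o->xor_prep = "ioat3_prep_xor";
	}
	if (cap & IOAT_CAP_PQ) {
		is_raid_device = 1;
		o->pq_prep = "ioat3_prep_pq";
		if (!(cap & IOAT_CAP_XOR))
			o->xor_prep = "ioat3_prep_pqxor";  /* xor via the pq engine */
	}
	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK))
		o->memset_prep = "ioat3_prep_memset_lock";
}

int main(void)
{
	struct ops o = { "none", "none", "none" };

	setup(&o, IOAT_CAP_PQ | IOAT_CAP_FILL_BLOCK);
	printf("xor: %s, pq: %s, memset: %s\n",
	       o.xor_prep, o.pq_prep, o.memset_prep);
	return 0;
}
```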
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h new file mode 100644 index 000000000000..99afb12bd409 --- /dev/null +++ b/drivers/dma/ioat/hw.h | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the Free | ||
6 | * Software Foundation; either version 2 of the License, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called COPYING. | ||
20 | */ | ||
21 | #ifndef _IOAT_HW_H_ | ||
22 | #define _IOAT_HW_H_ | ||
23 | |||
24 | /* PCI Configuration Space Values */ | ||
25 | #define IOAT_PCI_VID 0x8086 | ||
26 | #define IOAT_MMIO_BAR 0 | ||
27 | |||
28 | /* CB device ID's */ | ||
29 | #define IOAT_PCI_DID_5000 0x1A38 | ||
30 | #define IOAT_PCI_DID_CNB 0x360B | ||
31 | #define IOAT_PCI_DID_SCNB 0x65FF | ||
32 | #define IOAT_PCI_DID_SNB 0x402F | ||
33 | |||
34 | #define IOAT_PCI_RID 0x00 | ||
35 | #define IOAT_PCI_SVID 0x8086 | ||
36 | #define IOAT_PCI_SID 0x8086 | ||
37 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ | ||
38 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ | ||
39 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | ||
40 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ | ||
41 | |||
42 | struct ioat_dma_descriptor { | ||
43 | uint32_t size; | ||
44 | union { | ||
45 | uint32_t ctl; | ||
46 | struct { | ||
47 | unsigned int int_en:1; | ||
48 | unsigned int src_snoop_dis:1; | ||
49 | unsigned int dest_snoop_dis:1; | ||
50 | unsigned int compl_write:1; | ||
51 | unsigned int fence:1; | ||
52 | unsigned int null:1; | ||
53 | unsigned int src_brk:1; | ||
54 | unsigned int dest_brk:1; | ||
55 | unsigned int bundle:1; | ||
56 | unsigned int dest_dca:1; | ||
57 | unsigned int hint:1; | ||
58 | unsigned int rsvd2:13; | ||
59 | #define IOAT_OP_COPY 0x00 | ||
60 | unsigned int op:8; | ||
61 | } ctl_f; | ||
62 | }; | ||
63 | uint64_t src_addr; | ||
64 | uint64_t dst_addr; | ||
65 | uint64_t next; | ||
66 | uint64_t rsv1; | ||
67 | uint64_t rsv2; | ||
68 | /* store some driver data in an unused portion of the descriptor */ | ||
69 | union { | ||
70 | uint64_t user1; | ||
71 | uint64_t tx_cnt; | ||
72 | }; | ||
73 | uint64_t user2; | ||
74 | }; | ||
75 | |||
76 | struct ioat_fill_descriptor { | ||
77 | uint32_t size; | ||
78 | union { | ||
79 | uint32_t ctl; | ||
80 | struct { | ||
81 | unsigned int int_en:1; | ||
82 | unsigned int rsvd:1; | ||
83 | unsigned int dest_snoop_dis:1; | ||
84 | unsigned int compl_write:1; | ||
85 | unsigned int fence:1; | ||
86 | unsigned int rsvd2:2; | ||
87 | unsigned int dest_brk:1; | ||
88 | unsigned int bundle:1; | ||
89 | unsigned int rsvd4:15; | ||
90 | #define IOAT_OP_FILL 0x01 | ||
91 | unsigned int op:8; | ||
92 | } ctl_f; | ||
93 | }; | ||
94 | uint64_t src_data; | ||
95 | uint64_t dst_addr; | ||
96 | uint64_t next; | ||
97 | uint64_t rsv1; | ||
98 | uint64_t next_dst_addr; | ||
99 | uint64_t user1; | ||
100 | uint64_t user2; | ||
101 | }; | ||
102 | |||
103 | struct ioat_xor_descriptor { | ||
104 | uint32_t size; | ||
105 | union { | ||
106 | uint32_t ctl; | ||
107 | struct { | ||
108 | unsigned int int_en:1; | ||
109 | unsigned int src_snoop_dis:1; | ||
110 | unsigned int dest_snoop_dis:1; | ||
111 | unsigned int compl_write:1; | ||
112 | unsigned int fence:1; | ||
113 | unsigned int src_cnt:3; | ||
114 | unsigned int bundle:1; | ||
115 | unsigned int dest_dca:1; | ||
116 | unsigned int hint:1; | ||
117 | unsigned int rsvd:13; | ||
118 | #define IOAT_OP_XOR 0x87 | ||
119 | #define IOAT_OP_XOR_VAL 0x88 | ||
120 | unsigned int op:8; | ||
121 | } ctl_f; | ||
122 | }; | ||
123 | uint64_t src_addr; | ||
124 | uint64_t dst_addr; | ||
125 | uint64_t next; | ||
126 | uint64_t src_addr2; | ||
127 | uint64_t src_addr3; | ||
128 | uint64_t src_addr4; | ||
129 | uint64_t src_addr5; | ||
130 | }; | ||
131 | |||
132 | struct ioat_xor_ext_descriptor { | ||
133 | uint64_t src_addr6; | ||
134 | uint64_t src_addr7; | ||
135 | uint64_t src_addr8; | ||
136 | uint64_t next; | ||
137 | uint64_t rsvd[4]; | ||
138 | }; | ||
139 | |||
140 | struct ioat_pq_descriptor { | ||
141 | uint32_t size; | ||
142 | union { | ||
143 | uint32_t ctl; | ||
144 | struct { | ||
145 | unsigned int int_en:1; | ||
146 | unsigned int src_snoop_dis:1; | ||
147 | unsigned int dest_snoop_dis:1; | ||
148 | unsigned int compl_write:1; | ||
149 | unsigned int fence:1; | ||
150 | unsigned int src_cnt:3; | ||
151 | unsigned int bundle:1; | ||
152 | unsigned int dest_dca:1; | ||
153 | unsigned int hint:1; | ||
154 | unsigned int p_disable:1; | ||
155 | unsigned int q_disable:1; | ||
156 | unsigned int rsvd:11; | ||
157 | #define IOAT_OP_PQ 0x89 | ||
158 | #define IOAT_OP_PQ_VAL 0x8a | ||
159 | unsigned int op:8; | ||
160 | } ctl_f; | ||
161 | }; | ||
162 | uint64_t src_addr; | ||
163 | uint64_t p_addr; | ||
164 | uint64_t next; | ||
165 | uint64_t src_addr2; | ||
166 | uint64_t src_addr3; | ||
167 | uint8_t coef[8]; | ||
168 | uint64_t q_addr; | ||
169 | }; | ||
170 | |||
171 | struct ioat_pq_ext_descriptor { | ||
172 | uint64_t src_addr4; | ||
173 | uint64_t src_addr5; | ||
174 | uint64_t src_addr6; | ||
175 | uint64_t next; | ||
176 | uint64_t src_addr7; | ||
177 | uint64_t src_addr8; | ||
178 | uint64_t rsvd[2]; | ||
179 | }; | ||
180 | |||
181 | struct ioat_pq_update_descriptor { | ||
182 | uint32_t size; | ||
183 | union { | ||
184 | uint32_t ctl; | ||
185 | struct { | ||
186 | unsigned int int_en:1; | ||
187 | unsigned int src_snoop_dis:1; | ||
188 | unsigned int dest_snoop_dis:1; | ||
189 | unsigned int compl_write:1; | ||
190 | unsigned int fence:1; | ||
191 | unsigned int src_cnt:3; | ||
192 | unsigned int bundle:1; | ||
193 | unsigned int dest_dca:1; | ||
194 | unsigned int hint:1; | ||
195 | unsigned int p_disable:1; | ||
196 | unsigned int q_disable:1; | ||
197 | unsigned int rsvd:3; | ||
198 | unsigned int coef:8; | ||
199 | #define IOAT_OP_PQ_UP 0x8b | ||
200 | unsigned int op:8; | ||
201 | } ctl_f; | ||
202 | }; | ||
203 | uint64_t src_addr; | ||
204 | uint64_t p_addr; | ||
205 | uint64_t next; | ||
206 | uint64_t src_addr2; | ||
207 | uint64_t p_src; | ||
208 | uint64_t q_src; | ||
209 | uint64_t q_addr; | ||
210 | }; | ||
211 | |||
212 | struct ioat_raw_descriptor { | ||
213 | uint64_t field[8]; | ||
214 | }; | ||
215 | #endif | ||
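The xor descriptor defined above carries five source slots (src_addr, src_addr2..5) and the extended descriptor three more (src_addr6..8), which is why the prep code doubles the descriptor count past five sources. The sketch below shows where each of eight addresses would land; the real routing is done by xor_set_src() in dma_v3.c through an index table not shown in this hunk, so the straight-line assignment here is only an illustration, and the structs keep just the source fields:

```c
/* Eight xor sources spread over a base + extended descriptor pair. */
#include <stdint.h>
#include <stdio.h>

struct xor_desc {                /* source slots of ioat_xor_descriptor      */
	uint64_t src_addr;
	uint64_t src_addr2, src_addr3, src_addr4, src_addr5;
};

struct xor_ext_desc {            /* source slots of ioat_xor_ext_descriptor  */
	uint64_t src_addr6, src_addr7, src_addr8;
};

static void xor_place_src(struct xor_desc *d, struct xor_ext_desc *e,
			  int idx, uint64_t addr)
{
	uint64_t *base[] = { &d->src_addr, &d->src_addr2, &d->src_addr3,
			     &d->src_addr4, &d->src_addr5 };
	uint64_t *ext[] = { &e->src_addr6, &e->src_addr7, &e->src_addr8 };

	if (idx < 5)
		*base[idx] = addr;          /* first five fit in the base desc */
	else
		*ext[idx - 5] = addr;       /* sources 6..8 go to the ext desc */
}

int main(void)
{
	struct xor_desc d = { 0 };
	struct xor_ext_desc e = { 0 };
	int i;

	for (i = 0; i < 8; i++)
		xor_place_src(&d, &e, i, 0x100000ull + i * 0x1000);

	printf("base slot 0: %#llx, ext slot 8: %#llx\n",
	       (unsigned long long)d.src_addr,
	       (unsigned long long)e.src_addr8);
	return 0;
}
```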
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c new file mode 100644 index 000000000000..d545fae30f37 --- /dev/null +++ b/drivers/dma/ioat/pci.c | |||
@@ -0,0 +1,210 @@ | |||
1 | /* | ||
2 | * Intel I/OAT DMA Linux driver | ||
3 | * Copyright(c) 2007 - 2009 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in | ||
19 | * the file called "COPYING". | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This driver supports an Intel I/OAT DMA engine, which does asynchronous | ||
25 | * copy operations. | ||
26 | */ | ||
27 | |||
28 | #include <linux/init.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/pci.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/dca.h> | ||
33 | #include "dma.h" | ||
34 | #include "dma_v2.h" | ||
35 | #include "registers.h" | ||
36 | #include "hw.h" | ||
37 | |||
38 | MODULE_VERSION(IOAT_DMA_VERSION); | ||
39 | MODULE_LICENSE("Dual BSD/GPL"); | ||
40 | MODULE_AUTHOR("Intel Corporation"); | ||
41 | |||
42 | static struct pci_device_id ioat_pci_tbl[] = { | ||
43 | /* I/OAT v1 platforms */ | ||
44 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, | ||
45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) }, | ||
46 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) }, | ||
47 | { PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, | ||
48 | |||
49 | /* I/OAT v2 platforms */ | ||
50 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, | ||
51 | |||
52 | /* I/OAT v3 platforms */ | ||
53 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, | ||
54 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, | ||
55 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, | ||
56 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, | ||
57 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, | ||
58 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, | ||
59 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, | ||
60 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, | ||
61 | |||
62 | /* I/OAT v3.2 platforms */ | ||
63 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, | ||
64 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, | ||
65 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, | ||
66 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, | ||
67 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, | ||
68 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, | ||
69 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, | ||
70 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, | ||
71 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, | ||
72 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, | ||
73 | |||
74 | { 0, } | ||
75 | }; | ||
76 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | ||
77 | |||
78 | static int __devinit ioat_pci_probe(struct pci_dev *pdev, | ||
79 | const struct pci_device_id *id); | ||
80 | static void __devexit ioat_remove(struct pci_dev *pdev); | ||
81 | |||
82 | static int ioat_dca_enabled = 1; | ||
83 | module_param(ioat_dca_enabled, int, 0644); | ||
84 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | ||
85 | |||
86 | struct kmem_cache *ioat2_cache; | ||
87 | |||
88 | #define DRV_NAME "ioatdma" | ||
89 | |||
90 | static struct pci_driver ioat_pci_driver = { | ||
91 | .name = DRV_NAME, | ||
92 | .id_table = ioat_pci_tbl, | ||
93 | .probe = ioat_pci_probe, | ||
94 | .remove = __devexit_p(ioat_remove), | ||
95 | }; | ||
96 | |||
97 | static struct ioatdma_device * | ||
98 | alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) | ||
99 | { | ||
100 | struct device *dev = &pdev->dev; | ||
101 | struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); | ||
102 | |||
103 | if (!d) | ||
104 | return NULL; | ||
105 | d->pdev = pdev; | ||
106 | d->reg_base = iobase; | ||
107 | return d; | ||
108 | } | ||
109 | |||
110 | static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
111 | { | ||
112 | void __iomem * const *iomap; | ||
113 | struct device *dev = &pdev->dev; | ||
114 | struct ioatdma_device *device; | ||
115 | int err; | ||
116 | |||
117 | err = pcim_enable_device(pdev); | ||
118 | if (err) | ||
119 | return err; | ||
120 | |||
121 | err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); | ||
122 | if (err) | ||
123 | return err; | ||
124 | iomap = pcim_iomap_table(pdev); | ||
125 | if (!iomap) | ||
126 | return -ENOMEM; | ||
127 | |||
128 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
129 | if (err) | ||
130 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
131 | if (err) | ||
132 | return err; | ||
133 | |||
134 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
135 | if (err) | ||
136 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
137 | if (err) | ||
138 | return err; | ||
139 | |||
140 | device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); | ||
141 | if (!device) | ||
142 | return -ENOMEM; | ||
143 | |||
144 | pci_set_master(pdev); | ||
145 | |||
146 | device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); | ||
147 | if (!device) | ||
148 | return -ENOMEM; | ||
149 | pci_set_drvdata(pdev, device); | ||
150 | |||
151 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | ||
152 | if (device->version == IOAT_VER_1_2) | ||
153 | err = ioat1_dma_probe(device, ioat_dca_enabled); | ||
154 | else if (device->version == IOAT_VER_2_0) | ||
155 | err = ioat2_dma_probe(device, ioat_dca_enabled); | ||
156 | else if (device->version >= IOAT_VER_3_0) | ||
157 | err = ioat3_dma_probe(device, ioat_dca_enabled); | ||
158 | else | ||
159 | return -ENODEV; | ||
160 | |||
161 | if (err) { | ||
162 | dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); | ||
163 | return -ENODEV; | ||
164 | } | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static void __devexit ioat_remove(struct pci_dev *pdev) | ||
170 | { | ||
171 | struct ioatdma_device *device = pci_get_drvdata(pdev); | ||
172 | |||
173 | if (!device) | ||
174 | return; | ||
175 | |||
176 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | ||
177 | if (device->dca) { | ||
178 | unregister_dca_provider(device->dca, &pdev->dev); | ||
179 | free_dca_provider(device->dca); | ||
180 | device->dca = NULL; | ||
181 | } | ||
182 | ioat_dma_remove(device); | ||
183 | } | ||
184 | |||
185 | static int __init ioat_init_module(void) | ||
186 | { | ||
187 | int err; | ||
188 | |||
189 | pr_info("%s: Intel(R) QuickData Technology Driver %s\n", | ||
190 | DRV_NAME, IOAT_DMA_VERSION); | ||
191 | |||
192 | ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent), | ||
193 | 0, SLAB_HWCACHE_ALIGN, NULL); | ||
194 | if (!ioat2_cache) | ||
195 | return -ENOMEM; | ||
196 | |||
197 | err = pci_register_driver(&ioat_pci_driver); | ||
198 | if (err) | ||
199 | kmem_cache_destroy(ioat2_cache); | ||
200 | |||
201 | return err; | ||
202 | } | ||
203 | module_init(ioat_init_module); | ||
204 | |||
205 | static void __exit ioat_exit_module(void) | ||
206 | { | ||
207 | pci_unregister_driver(&ioat_pci_driver); | ||
208 | kmem_cache_destroy(ioat2_cache); | ||
209 | } | ||
210 | module_exit(ioat_exit_module); | ||
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioat/registers.h index 49bc277424f8..63038e18ab03 100644 --- a/drivers/dma/ioatdma_registers.h +++ b/drivers/dma/ioat/registers.h | |||
@@ -64,18 +64,37 @@ | |||
64 | 64 | ||
65 | #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ | 65 | #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ |
66 | #define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001 | 66 | #define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001 |
67 | #define IOAT_DEVICE_MMIO_RESTRICTED 0x0002 | ||
68 | #define IOAT_DEVICE_MEMORY_BYPASS 0x0004 | ||
69 | #define IOAT_DEVICE_ADDRESS_REMAPPING 0x0008 | ||
70 | |||
71 | #define IOAT_DMA_CAP_OFFSET 0x10 /* 32-bit */ | ||
72 | #define IOAT_CAP_PAGE_BREAK 0x00000001 | ||
73 | #define IOAT_CAP_CRC 0x00000002 | ||
74 | #define IOAT_CAP_SKIP_MARKER 0x00000004 | ||
75 | #define IOAT_CAP_DCA 0x00000010 | ||
76 | #define IOAT_CAP_CRC_MOVE 0x00000020 | ||
77 | #define IOAT_CAP_FILL_BLOCK 0x00000040 | ||
78 | #define IOAT_CAP_APIC 0x00000080 | ||
79 | #define IOAT_CAP_XOR 0x00000100 | ||
80 | #define IOAT_CAP_PQ 0x00000200 | ||
67 | 81 | ||
68 | #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ | 82 | #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ |
69 | 83 | ||
70 | /* DMA Channel Registers */ | 84 | /* DMA Channel Registers */ |
71 | #define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */ | 85 | #define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */ |
72 | #define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000 | 86 | #define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000 |
87 | #define IOAT3_CHANCTRL_COMPL_DCA_EN 0x0200 | ||
73 | #define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100 | 88 | #define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100 |
74 | #define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020 | 89 | #define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020 |
75 | #define IOAT_CHANCTRL_ERR_INT_EN 0x0010 | 90 | #define IOAT_CHANCTRL_ERR_INT_EN 0x0010 |
76 | #define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 | 91 | #define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 |
77 | #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 | 92 | #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 |
78 | #define IOAT_CHANCTRL_INT_DISABLE 0x0001 | 93 | #define IOAT_CHANCTRL_INT_REARM 0x0001 |
94 | #define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ | ||
95 | IOAT_CHANCTRL_ERR_COMPLETION_EN |\ | ||
96 | IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\ | ||
97 | IOAT_CHANCTRL_ERR_INT_EN) | ||
79 | 98 | ||
80 | #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ | 99 | #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ |
81 | #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ | 100 | #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ |
@@ -94,14 +113,14 @@ | |||
94 | #define IOAT2_CHANSTS_OFFSET_HIGH 0x0C | 113 | #define IOAT2_CHANSTS_OFFSET_HIGH 0x0C |
95 | #define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ | 114 | #define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ |
96 | ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) | 115 | ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) |
97 | #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F | 116 | #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) |
98 | #define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 | 117 | #define IOAT_CHANSTS_SOFT_ERR 0x10ULL |
99 | #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008 | 118 | #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL |
100 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 | 119 | #define IOAT_CHANSTS_STATUS 0x7ULL |
101 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 | 120 | #define IOAT_CHANSTS_ACTIVE 0x0 |
102 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 | 121 | #define IOAT_CHANSTS_DONE 0x1 |
103 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 | 122 | #define IOAT_CHANSTS_SUSPENDED 0x2 |
104 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 | 123 | #define IOAT_CHANSTS_HALTED 0x3 |
105 | 124 | ||
106 | 125 | ||
107 | 126 | ||
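The renamed CHANSTS masks in the hunk above split the 64-bit channel status register into a 3-bit transfer state and the completed descriptor address (the low six bits masked off), which is exactly what the cleanup preamble needs. A small decode sketch using those masks (the sample register value is made up):

```c
/* Decoding a channel status value with the IOAT_CHANSTS_* masks above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
#define IOAT_CHANSTS_STATUS     0x7ULL
#define IOAT_CHANSTS_ACTIVE     0x0
#define IOAT_CHANSTS_DONE       0x1
#define IOAT_CHANSTS_SUSPENDED  0x2
#define IOAT_CHANSTS_HALTED     0x3

int main(void)
{
	uint64_t chansts = 0x12340c1ULL;      /* example register contents */
	uint64_t phys_complete = chansts & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
	unsigned int state = chansts & IOAT_CHANSTS_STATUS;

	printf("completed descriptor at %#" PRIx64 ", state %u (%s)\n",
	       phys_complete, state,
	       state == IOAT_CHANSTS_DONE ? "done" :
	       state == IOAT_CHANSTS_HALTED ? "halted" : "other");
	return 0;
}
```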
@@ -204,22 +223,27 @@ | |||
204 | #define IOAT_CDAR_OFFSET_HIGH 0x24 | 223 | #define IOAT_CDAR_OFFSET_HIGH 0x24 |
205 | 224 | ||
206 | #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ | 225 | #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ |
207 | #define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001 | 226 | #define IOAT_CHANERR_SRC_ADDR_ERR 0x0001 |
208 | #define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002 | 227 | #define IOAT_CHANERR_DEST_ADDR_ERR 0x0002 |
209 | #define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004 | 228 | #define IOAT_CHANERR_NEXT_ADDR_ERR 0x0004 |
210 | #define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008 | 229 | #define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR 0x0008 |
211 | #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 | 230 | #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 |
212 | #define IOAT_CHANERR_CHANCMD_ERR 0x0020 | 231 | #define IOAT_CHANERR_CHANCMD_ERR 0x0020 |
213 | #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 | 232 | #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 |
214 | #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 | 233 | #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 |
215 | #define IOAT_CHANERR_READ_DATA_ERR 0x0100 | 234 | #define IOAT_CHANERR_READ_DATA_ERR 0x0100 |
216 | #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 | 235 | #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 |
217 | #define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400 | 236 | #define IOAT_CHANERR_CONTROL_ERR 0x0400 |
218 | #define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800 | 237 | #define IOAT_CHANERR_LENGTH_ERR 0x0800 |
219 | #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 | 238 | #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 |
220 | #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 | 239 | #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 |
221 | #define IOAT_CHANERR_SOFT_ERR 0x4000 | 240 | #define IOAT_CHANERR_SOFT_ERR 0x4000 |
222 | #define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000 | 241 | #define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000 |
242 | #define IOAT_CHANERR_XOR_P_OR_CRC_ERR 0x10000 | ||
243 | #define IOAT_CHANERR_XOR_Q_ERR 0x20000 | ||
244 | #define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000 | ||
245 | |||
246 | #define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR) | ||
223 | 247 | ||
224 | #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ | 248 | #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ |
225 | 249 | ||
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c deleted file mode 100644 index a600fc0f7962..000000000000 --- a/drivers/dma/ioat_dma.c +++ /dev/null | |||
@@ -1,1741 +0,0 @@ | |||
1 | /* | ||
2 | * Intel I/OAT DMA Linux driver | ||
3 | * Copyright(c) 2004 - 2009 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in | ||
19 | * the file called "COPYING". | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This driver supports an Intel I/OAT DMA engine, which does asynchronous | ||
25 | * copy operations. | ||
26 | */ | ||
27 | |||
28 | #include <linux/init.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/pci.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/dmaengine.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/workqueue.h> | ||
36 | #include <linux/i7300_idle.h> | ||
37 | #include "ioatdma.h" | ||
38 | #include "ioatdma_registers.h" | ||
39 | #include "ioatdma_hw.h" | ||
40 | |||
41 | #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) | ||
42 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) | ||
43 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) | ||
44 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) | ||
45 | |||
46 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) | ||
47 | static int ioat_pending_level = 4; | ||
48 | module_param(ioat_pending_level, int, 0644); | ||
49 | MODULE_PARM_DESC(ioat_pending_level, | ||
50 | "high-water mark for pushing ioat descriptors (default: 4)"); | ||
51 | |||
52 | #define RESET_DELAY msecs_to_jiffies(100) | ||
53 | #define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) | ||
54 | static void ioat_dma_chan_reset_part2(struct work_struct *work); | ||
55 | static void ioat_dma_chan_watchdog(struct work_struct *work); | ||
56 | |||
57 | /* | ||
58 | * workaround for IOAT ver.3.0 null descriptor issue | ||
59 | * (channel returns error when size is 0) | ||
60 | */ | ||
61 | #define NULL_DESC_BUFFER_SIZE 1 | ||
62 | |||
63 | /* internal functions */ | ||
64 | static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); | ||
65 | static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); | ||
66 | |||
67 | static struct ioat_desc_sw * | ||
68 | ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); | ||
69 | static struct ioat_desc_sw * | ||
70 | ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); | ||
71 | |||
72 | static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( | ||
73 | struct ioatdma_device *device, | ||
74 | int index) | ||
75 | { | ||
76 | return device->idx[index]; | ||
77 | } | ||
78 | |||
79 | /** | ||
80 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode | ||
81 | * @irq: interrupt id | ||
82 | * @data: interrupt data | ||
83 | */ | ||
84 | static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) | ||
85 | { | ||
86 | struct ioatdma_device *instance = data; | ||
87 | struct ioat_dma_chan *ioat_chan; | ||
88 | unsigned long attnstatus; | ||
89 | int bit; | ||
90 | u8 intrctrl; | ||
91 | |||
92 | intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
93 | |||
94 | if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) | ||
95 | return IRQ_NONE; | ||
96 | |||
97 | if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { | ||
98 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
99 | return IRQ_NONE; | ||
100 | } | ||
101 | |||
102 | attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); | ||
103 | for_each_bit(bit, &attnstatus, BITS_PER_LONG) { | ||
104 | ioat_chan = ioat_lookup_chan_by_index(instance, bit); | ||
105 | tasklet_schedule(&ioat_chan->cleanup_task); | ||
106 | } | ||
107 | |||
108 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
109 | return IRQ_HANDLED; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode | ||
114 | * @irq: interrupt id | ||
115 | * @data: interrupt data | ||
116 | */ | ||
117 | static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | ||
118 | { | ||
119 | struct ioat_dma_chan *ioat_chan = data; | ||
120 | |||
121 | tasklet_schedule(&ioat_chan->cleanup_task); | ||
122 | |||
123 | return IRQ_HANDLED; | ||
124 | } | ||
125 | |||
126 | static void ioat_dma_cleanup_tasklet(unsigned long data); | ||
127 | |||
128 | /** | ||
129 | * ioat_dma_enumerate_channels - find and initialize the device's channels | ||
130 | * @device: the device to be enumerated | ||
131 | */ | ||
132 | static int ioat_dma_enumerate_channels(struct ioatdma_device *device) | ||
133 | { | ||
134 | u8 xfercap_scale; | ||
135 | u32 xfercap; | ||
136 | int i; | ||
137 | struct ioat_dma_chan *ioat_chan; | ||
138 | |||
139 | /* | ||
140 | * IOAT ver.3 workarounds | ||
141 | */ | ||
142 | if (device->version == IOAT_VER_3_0) { | ||
143 | u32 chan_err_mask; | ||
144 | u16 dev_id; | ||
145 | u32 dmauncerrsts; | ||
146 | |||
147 | /* | ||
148 | * Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
149 | * that can cause stability issues for IOAT ver.3 | ||
150 | */ | ||
151 | chan_err_mask = 0x3E07; | ||
152 | pci_write_config_dword(device->pdev, | ||
153 | IOAT_PCI_CHANERRMASK_INT_OFFSET, | ||
154 | chan_err_mask); | ||
155 | |||
156 | /* | ||
157 | * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
158 | * (workaround for spurious config parity error after restart) | ||
159 | */ | ||
160 | pci_read_config_word(device->pdev, | ||
161 | IOAT_PCI_DEVICE_ID_OFFSET, | ||
162 | &dev_id); | ||
163 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { | ||
164 | dmauncerrsts = 0x10; | ||
165 | pci_write_config_dword(device->pdev, | ||
166 | IOAT_PCI_DMAUNCERRSTS_OFFSET, | ||
167 | dmauncerrsts); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); | ||
172 | xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | ||
173 | xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); | ||
174 | |||
175 | #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL | ||
176 | if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) { | ||
177 | device->common.chancnt--; | ||
178 | } | ||
179 | #endif | ||
180 | for (i = 0; i < device->common.chancnt; i++) { | ||
181 | ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); | ||
182 | if (!ioat_chan) { | ||
183 | device->common.chancnt = i; | ||
184 | break; | ||
185 | } | ||
186 | |||
187 | ioat_chan->device = device; | ||
188 | ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); | ||
189 | ioat_chan->xfercap = xfercap; | ||
190 | ioat_chan->desccount = 0; | ||
191 | INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); | ||
192 | if (ioat_chan->device->version == IOAT_VER_2_0) | ||
193 | writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | | ||
194 | IOAT_DMA_DCA_ANY_CPU, | ||
195 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
196 | else if (ioat_chan->device->version == IOAT_VER_3_0) | ||
197 | writel(IOAT_DMA_DCA_ANY_CPU, | ||
198 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
199 | spin_lock_init(&ioat_chan->cleanup_lock); | ||
200 | spin_lock_init(&ioat_chan->desc_lock); | ||
201 | INIT_LIST_HEAD(&ioat_chan->free_desc); | ||
202 | INIT_LIST_HEAD(&ioat_chan->used_desc); | ||
203 | /* This should be made common somewhere in dmaengine.c */ | ||
204 | ioat_chan->common.device = &device->common; | ||
205 | list_add_tail(&ioat_chan->common.device_node, | ||
206 | &device->common.channels); | ||
207 | device->idx[i] = ioat_chan; | ||
208 | tasklet_init(&ioat_chan->cleanup_task, | ||
209 | ioat_dma_cleanup_tasklet, | ||
210 | (unsigned long) ioat_chan); | ||
211 | tasklet_disable(&ioat_chan->cleanup_task); | ||
212 | } | ||
213 | return device->common.chancnt; | ||
214 | } | ||
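The xfercap decoding above treats a scale of zero as "no limit" (all ones) and otherwise yields a power-of-two transfer cap in bytes. A self-contained sketch of the same decoding, outside the driver (the function name is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* XFERCAP register: 0 means unlimited, otherwise the cap is 2^scale bytes */
static uint32_t decode_xfercap(uint8_t xfercap_scale)
{
        return xfercap_scale == 0 ? UINT32_MAX : (uint32_t)1 << xfercap_scale;
}

int main(void)
{
        printf("%u %u\n", decode_xfercap(12), decode_xfercap(0)); /* 4096 4294967295 */
        return 0;
}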
215 | |||
216 | /** | ||
217 | * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended | ||
218 | * descriptors to hw | ||
219 | * @chan: DMA channel handle | ||
220 | */ | ||
221 | static inline void __ioat1_dma_memcpy_issue_pending( | ||
222 | struct ioat_dma_chan *ioat_chan) | ||
223 | { | ||
224 | ioat_chan->pending = 0; | ||
225 | writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); | ||
226 | } | ||
227 | |||
228 | static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
229 | { | ||
230 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
231 | |||
232 | if (ioat_chan->pending > 0) { | ||
233 | spin_lock_bh(&ioat_chan->desc_lock); | ||
234 | __ioat1_dma_memcpy_issue_pending(ioat_chan); | ||
235 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
236 | } | ||
237 | } | ||
238 | |||
239 | static inline void __ioat2_dma_memcpy_issue_pending( | ||
240 | struct ioat_dma_chan *ioat_chan) | ||
241 | { | ||
242 | ioat_chan->pending = 0; | ||
243 | writew(ioat_chan->dmacount, | ||
244 | ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | ||
245 | } | ||
246 | |||
247 | static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
248 | { | ||
249 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
250 | |||
251 | if (ioat_chan->pending > 0) { | ||
252 | spin_lock_bh(&ioat_chan->desc_lock); | ||
253 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | ||
254 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
255 | } | ||
256 | } | ||
257 | |||
258 | |||
259 | /** | ||
260 | * ioat_dma_chan_reset_part2 - reinit the channel after a reset | ||
261 | */ | ||
262 | static void ioat_dma_chan_reset_part2(struct work_struct *work) | ||
263 | { | ||
264 | struct ioat_dma_chan *ioat_chan = | ||
265 | container_of(work, struct ioat_dma_chan, work.work); | ||
266 | struct ioat_desc_sw *desc; | ||
267 | |||
268 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
269 | spin_lock_bh(&ioat_chan->desc_lock); | ||
270 | |||
271 | ioat_chan->completion_virt->low = 0; | ||
272 | ioat_chan->completion_virt->high = 0; | ||
273 | ioat_chan->pending = 0; | ||
274 | |||
275 | /* | ||
276 | * count the descriptors waiting, and be sure to do it | ||
277 | * right for both the CB1 line and the CB2 ring | ||
278 | */ | ||
279 | ioat_chan->dmacount = 0; | ||
280 | if (ioat_chan->used_desc.prev) { | ||
281 | desc = to_ioat_desc(ioat_chan->used_desc.prev); | ||
282 | do { | ||
283 | ioat_chan->dmacount++; | ||
284 | desc = to_ioat_desc(desc->node.next); | ||
285 | } while (&desc->node != ioat_chan->used_desc.next); | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * write the new starting descriptor address; | ||
290 | * this puts the channel engine into the ARMED state | ||
291 | */ | ||
292 | desc = to_ioat_desc(ioat_chan->used_desc.prev); | ||
293 | switch (ioat_chan->device->version) { | ||
294 | case IOAT_VER_1_2: | ||
295 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
296 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
297 | writel(((u64) desc->async_tx.phys) >> 32, | ||
298 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
299 | |||
300 | writeb(IOAT_CHANCMD_START, ioat_chan->reg_base | ||
301 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
302 | break; | ||
303 | case IOAT_VER_2_0: | ||
304 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
305 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
306 | writel(((u64) desc->async_tx.phys) >> 32, | ||
307 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
308 | |||
309 | /* tell the engine to go with what's left to be done */ | ||
310 | writew(ioat_chan->dmacount, | ||
311 | ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | ||
312 | |||
313 | break; | ||
314 | } | ||
315 | dev_err(&ioat_chan->device->pdev->dev, | ||
316 | "chan%d reset - %d descs waiting, %d total desc\n", | ||
317 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
318 | |||
319 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
320 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
321 | } | ||
322 | |||
323 | /** | ||
324 | * ioat_dma_reset_channel - restart a channel | ||
325 | * @ioat_chan: IOAT DMA channel handle | ||
326 | */ | ||
327 | static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) | ||
328 | { | ||
329 | u32 chansts, chanerr; | ||
330 | |||
331 | if (!ioat_chan->used_desc.prev) | ||
332 | return; | ||
333 | |||
334 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
335 | chansts = (ioat_chan->completion_virt->low | ||
336 | & IOAT_CHANSTS_DMA_TRANSFER_STATUS); | ||
337 | if (chanerr) { | ||
338 | dev_err(&ioat_chan->device->pdev->dev, | ||
339 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", | ||
340 | chan_num(ioat_chan), chansts, chanerr); | ||
341 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * whack it upside the head with a reset | ||
346 | * and wait for things to settle out. | ||
347 | * force the pending count to a really big negative | ||
348 | * to make sure no one forces an issue_pending | ||
349 | * while we're waiting. | ||
350 | */ | ||
351 | |||
352 | spin_lock_bh(&ioat_chan->desc_lock); | ||
353 | ioat_chan->pending = INT_MIN; | ||
354 | writeb(IOAT_CHANCMD_RESET, | ||
355 | ioat_chan->reg_base | ||
356 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
357 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
358 | |||
359 | /* schedule the 2nd half instead of sleeping a long time */ | ||
360 | schedule_delayed_work(&ioat_chan->work, RESET_DELAY); | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * ioat_dma_chan_watchdog - watch for stuck channels | ||
365 | */ | ||
366 | static void ioat_dma_chan_watchdog(struct work_struct *work) | ||
367 | { | ||
368 | struct ioatdma_device *device = | ||
369 | container_of(work, struct ioatdma_device, work.work); | ||
370 | struct ioat_dma_chan *ioat_chan; | ||
371 | int i; | ||
372 | |||
373 | union { | ||
374 | u64 full; | ||
375 | struct { | ||
376 | u32 low; | ||
377 | u32 high; | ||
378 | }; | ||
379 | } completion_hw; | ||
380 | unsigned long compl_desc_addr_hw; | ||
381 | |||
382 | for (i = 0; i < device->common.chancnt; i++) { | ||
383 | ioat_chan = ioat_lookup_chan_by_index(device, i); | ||
384 | |||
385 | if (ioat_chan->device->version == IOAT_VER_1_2 | ||
386 | /* have we started processing anything yet */ | ||
387 | && ioat_chan->last_completion | ||
388 | /* have we completed any since last watchdog cycle? */ | ||
389 | && (ioat_chan->last_completion == | ||
390 | ioat_chan->watchdog_completion) | ||
391 | /* has TCP stuck on one cookie since last watchdog? */ | ||
392 | && (ioat_chan->watchdog_tcp_cookie == | ||
393 | ioat_chan->watchdog_last_tcp_cookie) | ||
394 | && (ioat_chan->watchdog_tcp_cookie != | ||
395 | ioat_chan->completed_cookie) | ||
396 | /* is there something in the chain to be processed? */ | ||
397 | /* CB1 chain always has at least the last one processed */ | ||
398 | && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) | ||
399 | && ioat_chan->pending == 0) { | ||
400 | |||
401 | /* | ||
402 | * Check the CHANSTS register for the completed | ||
403 | * descriptor address. | ||
404 | * If it is different from the completion writeback, | ||
405 | * is non-zero, | ||
406 | * and has changed since the last watchdog, | ||
407 | * we can assume the channel | ||
408 | * is still working correctly | ||
409 | * and the problem is in the completion writeback: | ||
410 | * update the completion writeback | ||
411 | * with the actual CHANSTS value. | ||
412 | * Otherwise, | ||
413 | * try resetting the channel. | ||
414 | */ | ||
415 | |||
416 | completion_hw.low = readl(ioat_chan->reg_base + | ||
417 | IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); | ||
418 | completion_hw.high = readl(ioat_chan->reg_base + | ||
419 | IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); | ||
420 | #if (BITS_PER_LONG == 64) | ||
421 | compl_desc_addr_hw = | ||
422 | completion_hw.full | ||
423 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
424 | #else | ||
425 | compl_desc_addr_hw = | ||
426 | completion_hw.low & IOAT_LOW_COMPLETION_MASK; | ||
427 | #endif | ||
428 | |||
429 | if ((compl_desc_addr_hw != 0) | ||
430 | && (compl_desc_addr_hw != ioat_chan->watchdog_completion) | ||
431 | && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { | ||
432 | ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; | ||
433 | ioat_chan->completion_virt->low = completion_hw.low; | ||
434 | ioat_chan->completion_virt->high = completion_hw.high; | ||
435 | } else { | ||
436 | ioat_dma_reset_channel(ioat_chan); | ||
437 | ioat_chan->watchdog_completion = 0; | ||
438 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * For version 2.0: if there are descriptors yet to be processed | ||
443 | * and the last completed descriptor hasn't changed since the last watchdog, | ||
444 | * then, if they haven't hit the pending level, | ||
445 | * issue the pending to push them through; | ||
446 | * otherwise, | ||
447 | * try resetting the channel. | ||
448 | */ | ||
449 | } else if (ioat_chan->device->version == IOAT_VER_2_0 | ||
450 | && ioat_chan->used_desc.prev | ||
451 | && ioat_chan->last_completion | ||
452 | && ioat_chan->last_completion == ioat_chan->watchdog_completion) { | ||
453 | |||
454 | if (ioat_chan->pending < ioat_pending_level) | ||
455 | ioat2_dma_memcpy_issue_pending(&ioat_chan->common); | ||
456 | else { | ||
457 | ioat_dma_reset_channel(ioat_chan); | ||
458 | ioat_chan->watchdog_completion = 0; | ||
459 | } | ||
460 | } else { | ||
461 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
462 | ioat_chan->watchdog_completion | ||
463 | = ioat_chan->last_completion; | ||
464 | } | ||
465 | |||
466 | ioat_chan->watchdog_last_tcp_cookie = | ||
467 | ioat_chan->watchdog_tcp_cookie; | ||
468 | } | ||
469 | |||
470 | schedule_delayed_work(&device->work, WATCHDOG_DELAY); | ||
471 | } | ||
472 | |||
473 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | ||
474 | { | ||
475 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); | ||
476 | struct ioat_desc_sw *first = tx_to_ioat_desc(tx); | ||
477 | struct ioat_desc_sw *prev, *new; | ||
478 | struct ioat_dma_descriptor *hw; | ||
479 | dma_cookie_t cookie; | ||
480 | LIST_HEAD(new_chain); | ||
481 | u32 copy; | ||
482 | size_t len; | ||
483 | dma_addr_t src, dst; | ||
484 | unsigned long orig_flags; | ||
485 | unsigned int desc_count = 0; | ||
486 | |||
487 | /* src and dest and len are stored in the initial descriptor */ | ||
488 | len = first->len; | ||
489 | src = first->src; | ||
490 | dst = first->dst; | ||
491 | orig_flags = first->async_tx.flags; | ||
492 | new = first; | ||
493 | |||
494 | spin_lock_bh(&ioat_chan->desc_lock); | ||
495 | prev = to_ioat_desc(ioat_chan->used_desc.prev); | ||
496 | prefetch(prev->hw); | ||
497 | do { | ||
498 | copy = min_t(size_t, len, ioat_chan->xfercap); | ||
499 | |||
500 | async_tx_ack(&new->async_tx); | ||
501 | |||
502 | hw = new->hw; | ||
503 | hw->size = copy; | ||
504 | hw->ctl = 0; | ||
505 | hw->src_addr = src; | ||
506 | hw->dst_addr = dst; | ||
507 | hw->next = 0; | ||
508 | |||
509 | /* chain together the physical address list for the HW */ | ||
510 | wmb(); | ||
511 | prev->hw->next = (u64) new->async_tx.phys; | ||
512 | |||
513 | len -= copy; | ||
514 | dst += copy; | ||
515 | src += copy; | ||
516 | |||
517 | list_add_tail(&new->node, &new_chain); | ||
518 | desc_count++; | ||
519 | prev = new; | ||
520 | } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); | ||
521 | |||
522 | if (!new) { | ||
523 | dev_err(&ioat_chan->device->pdev->dev, | ||
524 | "tx submit failed\n"); | ||
525 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
526 | return -ENOMEM; | ||
527 | } | ||
528 | |||
529 | hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | ||
530 | if (first->async_tx.callback) { | ||
531 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; | ||
532 | if (first != new) { | ||
533 | /* move callback into the last desc */ | ||
534 | new->async_tx.callback = first->async_tx.callback; | ||
535 | new->async_tx.callback_param | ||
536 | = first->async_tx.callback_param; | ||
537 | first->async_tx.callback = NULL; | ||
538 | first->async_tx.callback_param = NULL; | ||
539 | } | ||
540 | } | ||
541 | |||
542 | new->tx_cnt = desc_count; | ||
543 | new->async_tx.flags = orig_flags; /* client is in control of this ack */ | ||
544 | |||
545 | /* store the original values for use in later cleanup */ | ||
546 | if (new != first) { | ||
547 | new->src = first->src; | ||
548 | new->dst = first->dst; | ||
549 | new->len = first->len; | ||
550 | } | ||
551 | |||
552 | /* cookie incr and addition to used_list must be atomic */ | ||
553 | cookie = ioat_chan->common.cookie; | ||
554 | cookie++; | ||
555 | if (cookie < 0) | ||
556 | cookie = 1; | ||
557 | ioat_chan->common.cookie = new->async_tx.cookie = cookie; | ||
558 | |||
559 | /* write address into NextDescriptor field of last desc in chain */ | ||
560 | to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = | ||
561 | first->async_tx.phys; | ||
562 | list_splice_tail(&new_chain, &ioat_chan->used_desc); | ||
563 | |||
564 | ioat_chan->dmacount += desc_count; | ||
565 | ioat_chan->pending += desc_count; | ||
566 | if (ioat_chan->pending >= ioat_pending_level) | ||
567 | __ioat1_dma_memcpy_issue_pending(ioat_chan); | ||
568 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
569 | |||
570 | return cookie; | ||
571 | } | ||
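As the loop above shows, one memcpy request is broken into descriptors of at most xfercap bytes each, and only the last descriptor in the chain gets the cookie, the completion-status bit, and the callback. A standalone sketch of how many descriptors such a split produces (the helper name is hypothetical):

#include <stddef.h>
#include <stdio.h>

/* number of hardware descriptors needed to copy 'len' bytes with a given cap;
 * mirrors the do/while above, so even a zero-length request uses one descriptor */
static unsigned int desc_count_for(size_t len, size_t xfercap)
{
        unsigned int count = 0;

        do {
                size_t copy = len < xfercap ? len : xfercap;

                len -= copy;
                count++;
        } while (len);

        return count;
}

int main(void)
{
        printf("%u %u\n", desc_count_for(5000, 2048), desc_count_for(2048, 2048)); /* 3 1 */
        return 0;
}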
572 | |||
573 | static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) | ||
574 | { | ||
575 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); | ||
576 | struct ioat_desc_sw *first = tx_to_ioat_desc(tx); | ||
577 | struct ioat_desc_sw *new; | ||
578 | struct ioat_dma_descriptor *hw; | ||
579 | dma_cookie_t cookie; | ||
580 | u32 copy; | ||
581 | size_t len; | ||
582 | dma_addr_t src, dst; | ||
583 | unsigned long orig_flags; | ||
584 | unsigned int desc_count = 0; | ||
585 | |||
586 | /* src and dest and len are stored in the initial descriptor */ | ||
587 | len = first->len; | ||
588 | src = first->src; | ||
589 | dst = first->dst; | ||
590 | orig_flags = first->async_tx.flags; | ||
591 | new = first; | ||
592 | |||
593 | /* | ||
594 | * ioat_chan->desc_lock is still held in the version 2 path; | ||
595 | * it gets unlocked at the end of this function | ||
596 | */ | ||
597 | do { | ||
598 | copy = min_t(size_t, len, ioat_chan->xfercap); | ||
599 | |||
600 | async_tx_ack(&new->async_tx); | ||
601 | |||
602 | hw = new->hw; | ||
603 | hw->size = copy; | ||
604 | hw->ctl = 0; | ||
605 | hw->src_addr = src; | ||
606 | hw->dst_addr = dst; | ||
607 | |||
608 | len -= copy; | ||
609 | dst += copy; | ||
610 | src += copy; | ||
611 | desc_count++; | ||
612 | } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); | ||
613 | |||
614 | if (!new) { | ||
615 | dev_err(&ioat_chan->device->pdev->dev, | ||
616 | "tx submit failed\n"); | ||
617 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
618 | return -ENOMEM; | ||
619 | } | ||
620 | |||
621 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | ||
622 | if (first->async_tx.callback) { | ||
623 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; | ||
624 | if (first != new) { | ||
625 | /* move callback into the last desc */ | ||
626 | new->async_tx.callback = first->async_tx.callback; | ||
627 | new->async_tx.callback_param | ||
628 | = first->async_tx.callback_param; | ||
629 | first->async_tx.callback = NULL; | ||
630 | first->async_tx.callback_param = NULL; | ||
631 | } | ||
632 | } | ||
633 | |||
634 | new->tx_cnt = desc_count; | ||
635 | new->async_tx.flags = orig_flags; /* client is in control of this ack */ | ||
636 | |||
637 | /* store the original values for use in later cleanup */ | ||
638 | if (new != first) { | ||
639 | new->src = first->src; | ||
640 | new->dst = first->dst; | ||
641 | new->len = first->len; | ||
642 | } | ||
643 | |||
644 | /* cookie incr and addition to used_list must be atomic */ | ||
645 | cookie = ioat_chan->common.cookie; | ||
646 | cookie++; | ||
647 | if (cookie < 0) | ||
648 | cookie = 1; | ||
649 | ioat_chan->common.cookie = new->async_tx.cookie = cookie; | ||
650 | |||
651 | ioat_chan->dmacount += desc_count; | ||
652 | ioat_chan->pending += desc_count; | ||
653 | if (ioat_chan->pending >= ioat_pending_level) | ||
654 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | ||
655 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
656 | |||
657 | return cookie; | ||
658 | } | ||
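Both submit paths assign cookies the same way: bump the channel cookie and wrap back to 1 rather than letting it go non-positive, since zero and negative values are reserved for "no cookie" and error returns. A standalone sketch of that assignment; it sidesteps the signed overflow explicitly instead of relying on wraparound, but keeps the same observable behaviour:

#include <limits.h>
#include <stdio.h>

typedef int dma_cookie_t;

/* next cookie after 'last'; never returns zero or a negative value */
static dma_cookie_t next_cookie(dma_cookie_t last)
{
        if (last < 0 || last == INT_MAX)
                return 1;
        return last + 1;
}

int main(void)
{
        printf("%d %d\n", next_cookie(41), next_cookie(INT_MAX)); /* 42 1 */
        return 0;
}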
659 | |||
660 | /** | ||
661 | * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair | ||
662 | * @ioat_chan: the channel supplying the memory pool for the descriptors | ||
663 | * @flags: allocation flags | ||
664 | */ | ||
665 | static struct ioat_desc_sw *ioat_dma_alloc_descriptor( | ||
666 | struct ioat_dma_chan *ioat_chan, | ||
667 | gfp_t flags) | ||
668 | { | ||
669 | struct ioat_dma_descriptor *desc; | ||
670 | struct ioat_desc_sw *desc_sw; | ||
671 | struct ioatdma_device *ioatdma_device; | ||
672 | dma_addr_t phys; | ||
673 | |||
674 | ioatdma_device = to_ioatdma_device(ioat_chan->common.device); | ||
675 | desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); | ||
676 | if (unlikely(!desc)) | ||
677 | return NULL; | ||
678 | |||
679 | desc_sw = kzalloc(sizeof(*desc_sw), flags); | ||
680 | if (unlikely(!desc_sw)) { | ||
681 | pci_pool_free(ioatdma_device->dma_pool, desc, phys); | ||
682 | return NULL; | ||
683 | } | ||
684 | |||
685 | memset(desc, 0, sizeof(*desc)); | ||
686 | dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); | ||
687 | switch (ioat_chan->device->version) { | ||
688 | case IOAT_VER_1_2: | ||
689 | desc_sw->async_tx.tx_submit = ioat1_tx_submit; | ||
690 | break; | ||
691 | case IOAT_VER_2_0: | ||
692 | case IOAT_VER_3_0: | ||
693 | desc_sw->async_tx.tx_submit = ioat2_tx_submit; | ||
694 | break; | ||
695 | } | ||
696 | |||
697 | desc_sw->hw = desc; | ||
698 | desc_sw->async_tx.phys = phys; | ||
699 | |||
700 | return desc_sw; | ||
701 | } | ||
702 | |||
703 | static int ioat_initial_desc_count = 256; | ||
704 | module_param(ioat_initial_desc_count, int, 0644); | ||
705 | MODULE_PARM_DESC(ioat_initial_desc_count, | ||
706 | "initial descriptors per channel (default: 256)"); | ||
707 | |||
708 | /** | ||
709 | * ioat2_dma_massage_chan_desc - link the descriptors into a circle | ||
710 | * @ioat_chan: the channel to be massaged | ||
711 | */ | ||
712 | static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) | ||
713 | { | ||
714 | struct ioat_desc_sw *desc, *_desc; | ||
715 | |||
716 | /* setup used_desc */ | ||
717 | ioat_chan->used_desc.next = ioat_chan->free_desc.next; | ||
718 | ioat_chan->used_desc.prev = NULL; | ||
719 | |||
720 | /* pull free_desc out of the circle so that every node is a hw | ||
721 | * descriptor, but leave it pointing to the list | ||
722 | */ | ||
723 | ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; | ||
724 | ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; | ||
725 | |||
726 | /* circle link the hw descriptors */ | ||
727 | desc = to_ioat_desc(ioat_chan->free_desc.next); | ||
728 | desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; | ||
729 | list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { | ||
730 | desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; | ||
731 | } | ||
732 | } | ||
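ioat2_dma_massage_chan_desc turns the flat allocation into a hardware ring: each descriptor's hw->next is set to the bus address of the descriptor that follows it in the list, and the last one wraps back to the first. A simplified standalone sketch of that circular linking over an array (the struct and addresses are stand-ins, not the driver's types):

#include <stdio.h>

#define NDESC 4

struct demo_hw_desc {
        unsigned long next;     /* "bus address" of the next descriptor in the ring */
};

int main(void)
{
        struct demo_hw_desc ring[NDESC];
        unsigned long bus_addr[NDESC] = { 0x1000, 0x1040, 0x1080, 0x10c0 };
        int i;

        /* circle-link: every descriptor points at its successor, the last wraps to the first */
        for (i = 0; i < NDESC; i++)
                ring[i].next = bus_addr[(i + 1) % NDESC];

        for (i = 0; i < NDESC; i++)
                printf("desc %d -> 0x%lx\n", i, ring[i].next);

        return 0;
}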
733 | |||
734 | /** | ||
735 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors | ||
736 | * @chan: the channel to be filled out | ||
737 | */ | ||
738 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) | ||
739 | { | ||
740 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
741 | struct ioat_desc_sw *desc; | ||
742 | u16 chanctrl; | ||
743 | u32 chanerr; | ||
744 | int i; | ||
745 | LIST_HEAD(tmp_list); | ||
746 | |||
747 | /* have we already been set up? */ | ||
748 | if (!list_empty(&ioat_chan->free_desc)) | ||
749 | return ioat_chan->desccount; | ||
750 | |||
751 | /* Set up the register to interrupt and write completion status on error */ | ||
752 | chanctrl = IOAT_CHANCTRL_ERR_INT_EN | | ||
753 | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | | ||
754 | IOAT_CHANCTRL_ERR_COMPLETION_EN; | ||
755 | writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
756 | |||
757 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
758 | if (chanerr) { | ||
759 | dev_err(&ioat_chan->device->pdev->dev, | ||
760 | "CHANERR = %x, clearing\n", chanerr); | ||
761 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
762 | } | ||
763 | |||
764 | /* Allocate descriptors */ | ||
765 | for (i = 0; i < ioat_initial_desc_count; i++) { | ||
766 | desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); | ||
767 | if (!desc) { | ||
768 | dev_err(&ioat_chan->device->pdev->dev, | ||
769 | "Only %d initial descriptors\n", i); | ||
770 | break; | ||
771 | } | ||
772 | list_add_tail(&desc->node, &tmp_list); | ||
773 | } | ||
774 | spin_lock_bh(&ioat_chan->desc_lock); | ||
775 | ioat_chan->desccount = i; | ||
776 | list_splice(&tmp_list, &ioat_chan->free_desc); | ||
777 | if (ioat_chan->device->version != IOAT_VER_1_2) | ||
778 | ioat2_dma_massage_chan_desc(ioat_chan); | ||
779 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
780 | |||
781 | /* allocate a completion writeback area */ | ||
782 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | ||
783 | ioat_chan->completion_virt = | ||
784 | pci_pool_alloc(ioat_chan->device->completion_pool, | ||
785 | GFP_KERNEL, | ||
786 | &ioat_chan->completion_addr); | ||
787 | memset(ioat_chan->completion_virt, 0, | ||
788 | sizeof(*ioat_chan->completion_virt)); | ||
789 | writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, | ||
790 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | ||
791 | writel(((u64) ioat_chan->completion_addr) >> 32, | ||
792 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | ||
793 | |||
794 | tasklet_enable(&ioat_chan->cleanup_task); | ||
795 | ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ | ||
796 | return ioat_chan->desccount; | ||
797 | } | ||
798 | |||
799 | /** | ||
800 | * ioat_dma_free_chan_resources - release all the descriptors | ||
801 | * @chan: the channel to be cleaned | ||
802 | */ | ||
803 | static void ioat_dma_free_chan_resources(struct dma_chan *chan) | ||
804 | { | ||
805 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
806 | struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); | ||
807 | struct ioat_desc_sw *desc, *_desc; | ||
808 | int in_use_descs = 0; | ||
809 | |||
810 | /* Before freeing channel resources, first check | ||
811 | * whether they have been previously allocated for this channel. | ||
812 | */ | ||
813 | if (ioat_chan->desccount == 0) | ||
814 | return; | ||
815 | |||
816 | tasklet_disable(&ioat_chan->cleanup_task); | ||
817 | ioat_dma_memcpy_cleanup(ioat_chan); | ||
818 | |||
819 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | ||
820 | * before removing DMA descriptor resources. | ||
821 | */ | ||
822 | writeb(IOAT_CHANCMD_RESET, | ||
823 | ioat_chan->reg_base | ||
824 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
825 | mdelay(100); | ||
826 | |||
827 | spin_lock_bh(&ioat_chan->desc_lock); | ||
828 | switch (ioat_chan->device->version) { | ||
829 | case IOAT_VER_1_2: | ||
830 | list_for_each_entry_safe(desc, _desc, | ||
831 | &ioat_chan->used_desc, node) { | ||
832 | in_use_descs++; | ||
833 | list_del(&desc->node); | ||
834 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
835 | desc->async_tx.phys); | ||
836 | kfree(desc); | ||
837 | } | ||
838 | list_for_each_entry_safe(desc, _desc, | ||
839 | &ioat_chan->free_desc, node) { | ||
840 | list_del(&desc->node); | ||
841 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
842 | desc->async_tx.phys); | ||
843 | kfree(desc); | ||
844 | } | ||
845 | break; | ||
846 | case IOAT_VER_2_0: | ||
847 | case IOAT_VER_3_0: | ||
848 | list_for_each_entry_safe(desc, _desc, | ||
849 | ioat_chan->free_desc.next, node) { | ||
850 | list_del(&desc->node); | ||
851 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
852 | desc->async_tx.phys); | ||
853 | kfree(desc); | ||
854 | } | ||
855 | desc = to_ioat_desc(ioat_chan->free_desc.next); | ||
856 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
857 | desc->async_tx.phys); | ||
858 | kfree(desc); | ||
859 | INIT_LIST_HEAD(&ioat_chan->free_desc); | ||
860 | INIT_LIST_HEAD(&ioat_chan->used_desc); | ||
861 | break; | ||
862 | } | ||
863 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
864 | |||
865 | pci_pool_free(ioatdma_device->completion_pool, | ||
866 | ioat_chan->completion_virt, | ||
867 | ioat_chan->completion_addr); | ||
868 | |||
869 | /* one is ok since we left it on there on purpose */ | ||
870 | if (in_use_descs > 1) | ||
871 | dev_err(&ioat_chan->device->pdev->dev, | ||
872 | "Freeing %d in use descriptors!\n", | ||
873 | in_use_descs - 1); | ||
874 | |||
875 | ioat_chan->last_completion = ioat_chan->completion_addr = 0; | ||
876 | ioat_chan->pending = 0; | ||
877 | ioat_chan->dmacount = 0; | ||
878 | ioat_chan->desccount = 0; | ||
879 | ioat_chan->watchdog_completion = 0; | ||
880 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
881 | ioat_chan->watchdog_tcp_cookie = | ||
882 | ioat_chan->watchdog_last_tcp_cookie = 0; | ||
883 | } | ||
884 | |||
885 | /** | ||
886 | * ioat_dma_get_next_descriptor - return the next available descriptor | ||
887 | * @ioat_chan: IOAT DMA channel handle | ||
888 | * | ||
889 | * Gets the next descriptor from the chain, and must be called with the | ||
890 | * channel's desc_lock held. Allocates more descriptors if the channel | ||
891 | * has run out. | ||
892 | */ | ||
893 | static struct ioat_desc_sw * | ||
894 | ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) | ||
895 | { | ||
896 | struct ioat_desc_sw *new; | ||
897 | |||
898 | if (!list_empty(&ioat_chan->free_desc)) { | ||
899 | new = to_ioat_desc(ioat_chan->free_desc.next); | ||
900 | list_del(&new->node); | ||
901 | } else { | ||
902 | /* try to get another desc */ | ||
903 | new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); | ||
904 | if (!new) { | ||
905 | dev_err(&ioat_chan->device->pdev->dev, | ||
906 | "alloc failed\n"); | ||
907 | return NULL; | ||
908 | } | ||
909 | } | ||
910 | |||
911 | prefetch(new->hw); | ||
912 | return new; | ||
913 | } | ||
914 | |||
915 | static struct ioat_desc_sw * | ||
916 | ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) | ||
917 | { | ||
918 | struct ioat_desc_sw *new; | ||
919 | |||
920 | /* | ||
921 | * used.prev points to where to start processing | ||
922 | * used.next points to next free descriptor | ||
923 | * if used.prev == NULL, there are none waiting to be processed | ||
924 | * if used.next == used.prev.prev, there is only one free descriptor, | ||
925 | * and we need to use it as a noop descriptor before | ||
926 | * linking in a new set of descriptors, since the device | ||
927 | * has probably already read the pointer to it | ||
928 | */ | ||
929 | if (ioat_chan->used_desc.prev && | ||
930 | ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { | ||
931 | |||
932 | struct ioat_desc_sw *desc; | ||
933 | struct ioat_desc_sw *noop_desc; | ||
934 | int i; | ||
935 | |||
936 | /* set up the noop descriptor */ | ||
937 | noop_desc = to_ioat_desc(ioat_chan->used_desc.next); | ||
938 | /* set size to non-zero value (channel returns error when size is 0) */ | ||
939 | noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; | ||
940 | noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; | ||
941 | noop_desc->hw->src_addr = 0; | ||
942 | noop_desc->hw->dst_addr = 0; | ||
943 | |||
944 | ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; | ||
945 | ioat_chan->pending++; | ||
946 | ioat_chan->dmacount++; | ||
947 | |||
948 | /* try to get a few more descriptors */ | ||
949 | for (i = 16; i; i--) { | ||
950 | desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); | ||
951 | if (!desc) { | ||
952 | dev_err(&ioat_chan->device->pdev->dev, | ||
953 | "alloc failed\n"); | ||
954 | break; | ||
955 | } | ||
956 | list_add_tail(&desc->node, ioat_chan->used_desc.next); | ||
957 | |||
958 | desc->hw->next | ||
959 | = to_ioat_desc(desc->node.next)->async_tx.phys; | ||
960 | to_ioat_desc(desc->node.prev)->hw->next | ||
961 | = desc->async_tx.phys; | ||
962 | ioat_chan->desccount++; | ||
963 | } | ||
964 | |||
965 | ioat_chan->used_desc.next = noop_desc->node.next; | ||
966 | } | ||
967 | new = to_ioat_desc(ioat_chan->used_desc.next); | ||
968 | prefetch(new); | ||
969 | ioat_chan->used_desc.next = new->node.next; | ||
970 | |||
971 | if (ioat_chan->used_desc.prev == NULL) | ||
972 | ioat_chan->used_desc.prev = &new->node; | ||
973 | |||
974 | prefetch(new->hw); | ||
975 | return new; | ||
976 | } | ||
977 | |||
978 | static struct ioat_desc_sw *ioat_dma_get_next_descriptor( | ||
979 | struct ioat_dma_chan *ioat_chan) | ||
980 | { | ||
981 | if (!ioat_chan) | ||
982 | return NULL; | ||
983 | |||
984 | switch (ioat_chan->device->version) { | ||
985 | case IOAT_VER_1_2: | ||
986 | return ioat1_dma_get_next_descriptor(ioat_chan); | ||
987 | case IOAT_VER_2_0: | ||
988 | case IOAT_VER_3_0: | ||
989 | return ioat2_dma_get_next_descriptor(ioat_chan); | ||
990 | } | ||
991 | return NULL; | ||
992 | } | ||
993 | |||
994 | static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | ||
995 | struct dma_chan *chan, | ||
996 | dma_addr_t dma_dest, | ||
997 | dma_addr_t dma_src, | ||
998 | size_t len, | ||
999 | unsigned long flags) | ||
1000 | { | ||
1001 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
1002 | struct ioat_desc_sw *new; | ||
1003 | |||
1004 | spin_lock_bh(&ioat_chan->desc_lock); | ||
1005 | new = ioat_dma_get_next_descriptor(ioat_chan); | ||
1006 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
1007 | |||
1008 | if (new) { | ||
1009 | new->len = len; | ||
1010 | new->dst = dma_dest; | ||
1011 | new->src = dma_src; | ||
1012 | new->async_tx.flags = flags; | ||
1013 | return &new->async_tx; | ||
1014 | } else { | ||
1015 | dev_err(&ioat_chan->device->pdev->dev, | ||
1016 | "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", | ||
1017 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
1018 | return NULL; | ||
1019 | } | ||
1020 | } | ||
1021 | |||
1022 | static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | ||
1023 | struct dma_chan *chan, | ||
1024 | dma_addr_t dma_dest, | ||
1025 | dma_addr_t dma_src, | ||
1026 | size_t len, | ||
1027 | unsigned long flags) | ||
1028 | { | ||
1029 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
1030 | struct ioat_desc_sw *new; | ||
1031 | |||
1032 | spin_lock_bh(&ioat_chan->desc_lock); | ||
1033 | new = ioat2_dma_get_next_descriptor(ioat_chan); | ||
1034 | |||
1035 | /* | ||
1036 | * leave ioat_chan->desc_lock held in the ioat 2 path; | ||
1037 | * it will get unlocked at the end of tx_submit | ||
1038 | */ | ||
1039 | |||
1040 | if (new) { | ||
1041 | new->len = len; | ||
1042 | new->dst = dma_dest; | ||
1043 | new->src = dma_src; | ||
1044 | new->async_tx.flags = flags; | ||
1045 | return &new->async_tx; | ||
1046 | } else { | ||
1047 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
1048 | dev_err(&ioat_chan->device->pdev->dev, | ||
1049 | "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", | ||
1050 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
1051 | return NULL; | ||
1052 | } | ||
1053 | } | ||
1054 | |||
1055 | static void ioat_dma_cleanup_tasklet(unsigned long data) | ||
1056 | { | ||
1057 | struct ioat_dma_chan *chan = (void *)data; | ||
1058 | ioat_dma_memcpy_cleanup(chan); | ||
1059 | writew(IOAT_CHANCTRL_INT_DISABLE, | ||
1060 | chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
1061 | } | ||
1062 | |||
1063 | static void | ||
1064 | ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) | ||
1065 | { | ||
1066 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1067 | if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
1068 | pci_unmap_single(ioat_chan->device->pdev, | ||
1069 | pci_unmap_addr(desc, dst), | ||
1070 | pci_unmap_len(desc, len), | ||
1071 | PCI_DMA_FROMDEVICE); | ||
1072 | else | ||
1073 | pci_unmap_page(ioat_chan->device->pdev, | ||
1074 | pci_unmap_addr(desc, dst), | ||
1075 | pci_unmap_len(desc, len), | ||
1076 | PCI_DMA_FROMDEVICE); | ||
1077 | } | ||
1078 | |||
1079 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
1080 | if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
1081 | pci_unmap_single(ioat_chan->device->pdev, | ||
1082 | pci_unmap_addr(desc, src), | ||
1083 | pci_unmap_len(desc, len), | ||
1084 | PCI_DMA_TODEVICE); | ||
1085 | else | ||
1086 | pci_unmap_page(ioat_chan->device->pdev, | ||
1087 | pci_unmap_addr(desc, src), | ||
1088 | pci_unmap_len(desc, len), | ||
1089 | PCI_DMA_TODEVICE); | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | /** | ||
1094 | * ioat_dma_memcpy_cleanup - clean up finished descriptors | ||
1095 | * @chan: ioat channel to be cleaned up | ||
1096 | */ | ||
1097 | static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) | ||
1098 | { | ||
1099 | unsigned long phys_complete; | ||
1100 | struct ioat_desc_sw *desc, *_desc; | ||
1101 | dma_cookie_t cookie = 0; | ||
1102 | unsigned long desc_phys; | ||
1103 | struct ioat_desc_sw *latest_desc; | ||
1104 | |||
1105 | prefetch(ioat_chan->completion_virt); | ||
1106 | |||
1107 | if (!spin_trylock_bh(&ioat_chan->cleanup_lock)) | ||
1108 | return; | ||
1109 | |||
1110 | /* The completion writeback can happen at any time, | ||
1111 | so reads by the driver need to be atomic operations. | ||
1112 | The descriptor physical addresses are limited to 32 bits | ||
1113 | when the CPU can only do a 32-bit mov. */ | ||
1114 | |||
1115 | #if (BITS_PER_LONG == 64) | ||
1116 | phys_complete = | ||
1117 | ioat_chan->completion_virt->full | ||
1118 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
1119 | #else | ||
1120 | phys_complete = | ||
1121 | ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; | ||
1122 | #endif | ||
1123 | |||
1124 | if ((ioat_chan->completion_virt->full | ||
1125 | & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == | ||
1126 | IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { | ||
1127 | dev_err(&ioat_chan->device->pdev->dev, | ||
1128 | "Channel halted, chanerr = %x\n", | ||
1129 | readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); | ||
1130 | |||
1131 | /* TODO do something to salvage the situation */ | ||
1132 | } | ||
1133 | |||
1134 | if (phys_complete == ioat_chan->last_completion) { | ||
1135 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
1136 | /* | ||
1137 | * perhaps we're stuck so hard that the watchdog can't go off? | ||
1138 | * try to catch it after 2 seconds | ||
1139 | */ | ||
1140 | if (ioat_chan->device->version != IOAT_VER_3_0) { | ||
1141 | if (time_after(jiffies, | ||
1142 | ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { | ||
1143 | ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); | ||
1144 | ioat_chan->last_completion_time = jiffies; | ||
1145 | } | ||
1146 | } | ||
1147 | return; | ||
1148 | } | ||
1149 | ioat_chan->last_completion_time = jiffies; | ||
1150 | |||
1151 | cookie = 0; | ||
1152 | if (!spin_trylock_bh(&ioat_chan->desc_lock)) { | ||
1153 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
1154 | return; | ||
1155 | } | ||
1156 | |||
1157 | switch (ioat_chan->device->version) { | ||
1158 | case IOAT_VER_1_2: | ||
1159 | list_for_each_entry_safe(desc, _desc, | ||
1160 | &ioat_chan->used_desc, node) { | ||
1161 | |||
1162 | /* | ||
1163 | * Incoming DMA requests may use multiple descriptors, | ||
1164 | * due to exceeding xfercap, perhaps. If so, only the | ||
1165 | * last one will have a cookie, and require unmapping. | ||
1166 | */ | ||
1167 | if (desc->async_tx.cookie) { | ||
1168 | cookie = desc->async_tx.cookie; | ||
1169 | ioat_dma_unmap(ioat_chan, desc); | ||
1170 | if (desc->async_tx.callback) { | ||
1171 | desc->async_tx.callback(desc->async_tx.callback_param); | ||
1172 | desc->async_tx.callback = NULL; | ||
1173 | } | ||
1174 | } | ||
1175 | |||
1176 | if (desc->async_tx.phys != phys_complete) { | ||
1177 | /* | ||
1178 | * a completed entry, but not the last, so clean | ||
1179 | * up if the client is done with the descriptor | ||
1180 | */ | ||
1181 | if (async_tx_test_ack(&desc->async_tx)) { | ||
1182 | list_move_tail(&desc->node, | ||
1183 | &ioat_chan->free_desc); | ||
1184 | } else | ||
1185 | desc->async_tx.cookie = 0; | ||
1186 | } else { | ||
1187 | /* | ||
1188 | * last used desc. Do not remove, so we can | ||
1189 | * append from it, but don't look at it next | ||
1190 | * time, either | ||
1191 | */ | ||
1192 | desc->async_tx.cookie = 0; | ||
1193 | |||
1194 | /* TODO check status bits? */ | ||
1195 | break; | ||
1196 | } | ||
1197 | } | ||
1198 | break; | ||
1199 | case IOAT_VER_2_0: | ||
1200 | case IOAT_VER_3_0: | ||
1201 | /* has some other thread already cleaned up? */ | ||
1202 | if (ioat_chan->used_desc.prev == NULL) | ||
1203 | break; | ||
1204 | |||
1205 | /* work backwards to find latest finished desc */ | ||
1206 | desc = to_ioat_desc(ioat_chan->used_desc.next); | ||
1207 | latest_desc = NULL; | ||
1208 | do { | ||
1209 | desc = to_ioat_desc(desc->node.prev); | ||
1210 | desc_phys = (unsigned long)desc->async_tx.phys | ||
1211 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
1212 | if (desc_phys == phys_complete) { | ||
1213 | latest_desc = desc; | ||
1214 | break; | ||
1215 | } | ||
1216 | } while (&desc->node != ioat_chan->used_desc.prev); | ||
1217 | |||
1218 | if (latest_desc != NULL) { | ||
1219 | |||
1220 | /* work forwards to clear finished descriptors */ | ||
1221 | for (desc = to_ioat_desc(ioat_chan->used_desc.prev); | ||
1222 | &desc->node != latest_desc->node.next && | ||
1223 | &desc->node != ioat_chan->used_desc.next; | ||
1224 | desc = to_ioat_desc(desc->node.next)) { | ||
1225 | if (desc->async_tx.cookie) { | ||
1226 | cookie = desc->async_tx.cookie; | ||
1227 | desc->async_tx.cookie = 0; | ||
1228 | ioat_dma_unmap(ioat_chan, desc); | ||
1229 | if (desc->async_tx.callback) { | ||
1230 | desc->async_tx.callback(desc->async_tx.callback_param); | ||
1231 | desc->async_tx.callback = NULL; | ||
1232 | } | ||
1233 | } | ||
1234 | } | ||
1235 | |||
1236 | /* move used.prev up beyond those that are finished */ | ||
1237 | if (&desc->node == ioat_chan->used_desc.next) | ||
1238 | ioat_chan->used_desc.prev = NULL; | ||
1239 | else | ||
1240 | ioat_chan->used_desc.prev = &desc->node; | ||
1241 | } | ||
1242 | break; | ||
1243 | } | ||
1244 | |||
1245 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
1246 | |||
1247 | ioat_chan->last_completion = phys_complete; | ||
1248 | if (cookie != 0) | ||
1249 | ioat_chan->completed_cookie = cookie; | ||
1250 | |||
1251 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
1252 | } | ||
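The cleanup path above reads the 64-bit completion writeback (as two 32-bit halves on 32-bit builds), masks off the status bits, and compares the remaining completed-descriptor address against last_completion. A minimal sketch of that extraction; the mask below assumes, purely for illustration, that the low six bits carry status and that descriptors are 64-byte aligned; the real mask is IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR from ioatdma_registers.h:

#include <stdint.h>
#include <stdio.h>

/* illustration only: low 6 bits = status, upper bits = completed descriptor address */
#define DEMO_COMPLETED_ADDR_MASK (~0x3FULL)

static uint64_t completed_desc_addr(uint32_t low, uint32_t high)
{
        uint64_t writeback = ((uint64_t)high << 32) | low;

        return writeback & DEMO_COMPLETED_ADDR_MASK;
}

int main(void)
{
        /* status bits in the low byte are stripped, leaving the aligned address */
        printf("0x%llx\n",
               (unsigned long long)completed_desc_addr(0x345678C1u, 0x12u)); /* 0x12345678c0 */
        return 0;
}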
1253 | |||
1254 | /** | ||
1255 | * ioat_dma_is_complete - poll the status of an IOAT DMA transaction | ||
1256 | * @chan: IOAT DMA channel handle | ||
1257 | * @cookie: DMA transaction identifier | ||
1258 | * @done: if not %NULL, updated with last completed transaction | ||
1259 | * @used: if not %NULL, updated with last used transaction | ||
1260 | */ | ||
1261 | static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, | ||
1262 | dma_cookie_t cookie, | ||
1263 | dma_cookie_t *done, | ||
1264 | dma_cookie_t *used) | ||
1265 | { | ||
1266 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
1267 | dma_cookie_t last_used; | ||
1268 | dma_cookie_t last_complete; | ||
1269 | enum dma_status ret; | ||
1270 | |||
1271 | last_used = chan->cookie; | ||
1272 | last_complete = ioat_chan->completed_cookie; | ||
1273 | ioat_chan->watchdog_tcp_cookie = cookie; | ||
1274 | |||
1275 | if (done) | ||
1276 | *done = last_complete; | ||
1277 | if (used) | ||
1278 | *used = last_used; | ||
1279 | |||
1280 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
1281 | if (ret == DMA_SUCCESS) | ||
1282 | return ret; | ||
1283 | |||
1284 | ioat_dma_memcpy_cleanup(ioat_chan); | ||
1285 | |||
1286 | last_used = chan->cookie; | ||
1287 | last_complete = ioat_chan->completed_cookie; | ||
1288 | |||
1289 | if (done) | ||
1290 | *done = last_complete; | ||
1291 | if (used) | ||
1292 | *used = last_used; | ||
1293 | |||
1294 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
1295 | } | ||
1296 | |||
1297 | static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) | ||
1298 | { | ||
1299 | struct ioat_desc_sw *desc; | ||
1300 | |||
1301 | spin_lock_bh(&ioat_chan->desc_lock); | ||
1302 | |||
1303 | desc = ioat_dma_get_next_descriptor(ioat_chan); | ||
1304 | |||
1305 | if (!desc) { | ||
1306 | dev_err(&ioat_chan->device->pdev->dev, | ||
1307 | "Unable to start null desc - get next desc failed\n"); | ||
1308 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
1309 | return; | ||
1310 | } | ||
1311 | |||
1312 | desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL | ||
1313 | | IOAT_DMA_DESCRIPTOR_CTL_INT_GN | ||
1314 | | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | ||
1315 | /* set size to non-zero value (channel returns error when size is 0) */ | ||
1316 | desc->hw->size = NULL_DESC_BUFFER_SIZE; | ||
1317 | desc->hw->src_addr = 0; | ||
1318 | desc->hw->dst_addr = 0; | ||
1319 | async_tx_ack(&desc->async_tx); | ||
1320 | switch (ioat_chan->device->version) { | ||
1321 | case IOAT_VER_1_2: | ||
1322 | desc->hw->next = 0; | ||
1323 | list_add_tail(&desc->node, &ioat_chan->used_desc); | ||
1324 | |||
1325 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
1326 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
1327 | writel(((u64) desc->async_tx.phys) >> 32, | ||
1328 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
1329 | |||
1330 | writeb(IOAT_CHANCMD_START, ioat_chan->reg_base | ||
1331 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
1332 | break; | ||
1333 | case IOAT_VER_2_0: | ||
1334 | case IOAT_VER_3_0: | ||
1335 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
1336 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
1337 | writel(((u64) desc->async_tx.phys) >> 32, | ||
1338 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
1339 | |||
1340 | ioat_chan->dmacount++; | ||
1341 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | ||
1342 | break; | ||
1343 | } | ||
1344 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
1345 | } | ||
1346 | |||
1347 | /* | ||
1348 | * Perform an IOAT transaction to verify the HW works. | ||
1349 | */ | ||
1350 | #define IOAT_TEST_SIZE 2000 | ||
1351 | |||
1352 | static void ioat_dma_test_callback(void *dma_async_param) | ||
1353 | { | ||
1354 | struct completion *cmp = dma_async_param; | ||
1355 | |||
1356 | complete(cmp); | ||
1357 | } | ||
1358 | |||
1359 | /** | ||
1360 | * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works. | ||
1361 | * @device: device to be tested | ||
1362 | */ | ||
1363 | static int ioat_dma_self_test(struct ioatdma_device *device) | ||
1364 | { | ||
1365 | int i; | ||
1366 | u8 *src; | ||
1367 | u8 *dest; | ||
1368 | struct dma_chan *dma_chan; | ||
1369 | struct dma_async_tx_descriptor *tx; | ||
1370 | dma_addr_t dma_dest, dma_src; | ||
1371 | dma_cookie_t cookie; | ||
1372 | int err = 0; | ||
1373 | struct completion cmp; | ||
1374 | unsigned long tmo; | ||
1375 | unsigned long flags; | ||
1376 | |||
1377 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
1378 | if (!src) | ||
1379 | return -ENOMEM; | ||
1380 | dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
1381 | if (!dest) { | ||
1382 | kfree(src); | ||
1383 | return -ENOMEM; | ||
1384 | } | ||
1385 | |||
1386 | /* Fill in src buffer */ | ||
1387 | for (i = 0; i < IOAT_TEST_SIZE; i++) | ||
1388 | src[i] = (u8)i; | ||
1389 | |||
1390 | /* Start copy, using first DMA channel */ | ||
1391 | dma_chan = container_of(device->common.channels.next, | ||
1392 | struct dma_chan, | ||
1393 | device_node); | ||
1394 | if (device->common.device_alloc_chan_resources(dma_chan) < 1) { | ||
1395 | dev_err(&device->pdev->dev, | ||
1396 | "selftest cannot allocate chan resource\n"); | ||
1397 | err = -ENODEV; | ||
1398 | goto out; | ||
1399 | } | ||
1400 | |||
1401 | dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, | ||
1402 | DMA_TO_DEVICE); | ||
1403 | dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, | ||
1404 | DMA_FROM_DEVICE); | ||
1405 | flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; | ||
1406 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | ||
1407 | IOAT_TEST_SIZE, flags); | ||
1408 | if (!tx) { | ||
1409 | dev_err(&device->pdev->dev, | ||
1410 | "Self-test prep failed, disabling\n"); | ||
1411 | err = -ENODEV; | ||
1412 | goto free_resources; | ||
1413 | } | ||
1414 | |||
1415 | async_tx_ack(tx); | ||
1416 | init_completion(&cmp); | ||
1417 | tx->callback = ioat_dma_test_callback; | ||
1418 | tx->callback_param = &cmp; | ||
1419 | cookie = tx->tx_submit(tx); | ||
1420 | if (cookie < 0) { | ||
1421 | dev_err(&device->pdev->dev, | ||
1422 | "Self-test setup failed, disabling\n"); | ||
1423 | err = -ENODEV; | ||
1424 | goto free_resources; | ||
1425 | } | ||
1426 | device->common.device_issue_pending(dma_chan); | ||
1427 | |||
1428 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
1429 | |||
1430 | if (tmo == 0 || | ||
1431 | device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) | ||
1432 | != DMA_SUCCESS) { | ||
1433 | dev_err(&device->pdev->dev, | ||
1434 | "Self-test copy timed out, disabling\n"); | ||
1435 | err = -ENODEV; | ||
1436 | goto free_resources; | ||
1437 | } | ||
1438 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | ||
1439 | dev_err(&device->pdev->dev, | ||
1440 | "Self-test copy failed compare, disabling\n"); | ||
1441 | err = -ENODEV; | ||
1442 | goto free_resources; | ||
1443 | } | ||
1444 | |||
1445 | free_resources: | ||
1446 | device->common.device_free_chan_resources(dma_chan); | ||
1447 | out: | ||
1448 | kfree(src); | ||
1449 | kfree(dest); | ||
1450 | return err; | ||
1451 | } | ||
1452 | |||
1453 | static char ioat_interrupt_style[32] = "msix"; | ||
1454 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | ||
1455 | sizeof(ioat_interrupt_style), 0644); | ||
1456 | MODULE_PARM_DESC(ioat_interrupt_style, | ||
1457 | "set ioat interrupt style: msix (default), " | ||
1458 | "msix-single-vector, msi, intx)"); | ||
1459 | |||
1460 | /** | ||
1461 | * ioat_dma_setup_interrupts - setup interrupt handler | ||
1462 | * @device: ioat device | ||
1463 | */ | ||
1464 | static int ioat_dma_setup_interrupts(struct ioatdma_device *device) | ||
1465 | { | ||
1466 | struct ioat_dma_chan *ioat_chan; | ||
1467 | int err, i, j, msixcnt; | ||
1468 | u8 intrctrl = 0; | ||
1469 | |||
1470 | if (!strcmp(ioat_interrupt_style, "msix")) | ||
1471 | goto msix; | ||
1472 | if (!strcmp(ioat_interrupt_style, "msix-single-vector")) | ||
1473 | goto msix_single_vector; | ||
1474 | if (!strcmp(ioat_interrupt_style, "msi")) | ||
1475 | goto msi; | ||
1476 | if (!strcmp(ioat_interrupt_style, "intx")) | ||
1477 | goto intx; | ||
1478 | dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n", | ||
1479 | ioat_interrupt_style); | ||
1480 | goto err_no_irq; | ||
1481 | |||
1482 | msix: | ||
1483 | /* The number of MSI-X vectors should equal the number of channels */ | ||
1484 | msixcnt = device->common.chancnt; | ||
1485 | for (i = 0; i < msixcnt; i++) | ||
1486 | device->msix_entries[i].entry = i; | ||
1487 | |||
1488 | err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt); | ||
1489 | if (err < 0) | ||
1490 | goto msi; | ||
1491 | if (err > 0) | ||
1492 | goto msix_single_vector; | ||
1493 | |||
1494 | for (i = 0; i < msixcnt; i++) { | ||
1495 | ioat_chan = ioat_lookup_chan_by_index(device, i); | ||
1496 | err = request_irq(device->msix_entries[i].vector, | ||
1497 | ioat_dma_do_interrupt_msix, | ||
1498 | 0, "ioat-msix", ioat_chan); | ||
1499 | if (err) { | ||
1500 | for (j = 0; j < i; j++) { | ||
1501 | ioat_chan = | ||
1502 | ioat_lookup_chan_by_index(device, j); | ||
1503 | free_irq(device->msix_entries[j].vector, | ||
1504 | ioat_chan); | ||
1505 | } | ||
1506 | goto msix_single_vector; | ||
1507 | } | ||
1508 | } | ||
1509 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | ||
1510 | device->irq_mode = msix_multi_vector; | ||
1511 | goto done; | ||
1512 | |||
1513 | msix_single_vector: | ||
1514 | device->msix_entries[0].entry = 0; | ||
1515 | err = pci_enable_msix(device->pdev, device->msix_entries, 1); | ||
1516 | if (err) | ||
1517 | goto msi; | ||
1518 | |||
1519 | err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt, | ||
1520 | 0, "ioat-msix", device); | ||
1521 | if (err) { | ||
1522 | pci_disable_msix(device->pdev); | ||
1523 | goto msi; | ||
1524 | } | ||
1525 | device->irq_mode = msix_single_vector; | ||
1526 | goto done; | ||
1527 | |||
1528 | msi: | ||
1529 | err = pci_enable_msi(device->pdev); | ||
1530 | if (err) | ||
1531 | goto intx; | ||
1532 | |||
1533 | err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, | ||
1534 | 0, "ioat-msi", device); | ||
1535 | if (err) { | ||
1536 | pci_disable_msi(device->pdev); | ||
1537 | goto intx; | ||
1538 | } | ||
1539 | /* | ||
1540 | * CB 1.2 devices need a bit set in configuration space to enable MSI | ||
1541 | */ | ||
1542 | if (device->version == IOAT_VER_1_2) { | ||
1543 | u32 dmactrl; | ||
1544 | pci_read_config_dword(device->pdev, | ||
1545 | IOAT_PCI_DMACTRL_OFFSET, &dmactrl); | ||
1546 | dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; | ||
1547 | pci_write_config_dword(device->pdev, | ||
1548 | IOAT_PCI_DMACTRL_OFFSET, dmactrl); | ||
1549 | } | ||
1550 | device->irq_mode = msi; | ||
1551 | goto done; | ||
1552 | |||
1553 | intx: | ||
1554 | err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, | ||
1555 | IRQF_SHARED, "ioat-intx", device); | ||
1556 | if (err) | ||
1557 | goto err_no_irq; | ||
1558 | device->irq_mode = intx; | ||
1559 | |||
1560 | done: | ||
1561 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; | ||
1562 | writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
1563 | return 0; | ||
1564 | |||
1565 | err_no_irq: | ||
1566 | /* Disable all interrupt generation */ | ||
1567 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
1568 | dev_err(&device->pdev->dev, "no usable interrupts\n"); | ||
1569 | device->irq_mode = none; | ||
1570 | return -1; | ||
1571 | } | ||
1572 | |||
1573 | /** | ||
1574 | * ioat_dma_remove_interrupts - remove whatever interrupts were set | ||
1575 | * @device: ioat device | ||
1576 | */ | ||
1577 | static void ioat_dma_remove_interrupts(struct ioatdma_device *device) | ||
1578 | { | ||
1579 | struct ioat_dma_chan *ioat_chan; | ||
1580 | int i; | ||
1581 | |||
1582 | /* Disable all interrupt generation */ | ||
1583 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
1584 | |||
1585 | switch (device->irq_mode) { | ||
1586 | case msix_multi_vector: | ||
1587 | for (i = 0; i < device->common.chancnt; i++) { | ||
1588 | ioat_chan = ioat_lookup_chan_by_index(device, i); | ||
1589 | free_irq(device->msix_entries[i].vector, ioat_chan); | ||
1590 | } | ||
1591 | pci_disable_msix(device->pdev); | ||
1592 | break; | ||
1593 | case msix_single_vector: | ||
1594 | free_irq(device->msix_entries[0].vector, device); | ||
1595 | pci_disable_msix(device->pdev); | ||
1596 | break; | ||
1597 | case msi: | ||
1598 | free_irq(device->pdev->irq, device); | ||
1599 | pci_disable_msi(device->pdev); | ||
1600 | break; | ||
1601 | case intx: | ||
1602 | free_irq(device->pdev->irq, device); | ||
1603 | break; | ||
1604 | case none: | ||
1605 | dev_warn(&device->pdev->dev, | ||
1606 | "call to %s without interrupts setup\n", __func__); | ||
1607 | } | ||
1608 | device->irq_mode = none; | ||
1609 | } | ||
1610 | |||
1611 | struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, | ||
1612 | void __iomem *iobase) | ||
1613 | { | ||
1614 | int err; | ||
1615 | struct ioatdma_device *device; | ||
1616 | |||
1617 | device = kzalloc(sizeof(*device), GFP_KERNEL); | ||
1618 | if (!device) { | ||
1619 | err = -ENOMEM; | ||
1620 | goto err_kzalloc; | ||
1621 | } | ||
1622 | device->pdev = pdev; | ||
1623 | device->reg_base = iobase; | ||
1624 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | ||
1625 | |||
1626 | /* DMA coherent memory pool for DMA descriptor allocations */ | ||
1627 | device->dma_pool = pci_pool_create("dma_desc_pool", pdev, | ||
1628 | sizeof(struct ioat_dma_descriptor), | ||
1629 | 64, 0); | ||
1630 | if (!device->dma_pool) { | ||
1631 | err = -ENOMEM; | ||
1632 | goto err_dma_pool; | ||
1633 | } | ||
1634 | |||
1635 | device->completion_pool = pci_pool_create("completion_pool", pdev, | ||
1636 | sizeof(u64), SMP_CACHE_BYTES, | ||
1637 | SMP_CACHE_BYTES); | ||
1638 | if (!device->completion_pool) { | ||
1639 | err = -ENOMEM; | ||
1640 | goto err_completion_pool; | ||
1641 | } | ||
1642 | |||
1643 | INIT_LIST_HEAD(&device->common.channels); | ||
1644 | ioat_dma_enumerate_channels(device); | ||
1645 | |||
1646 | device->common.device_alloc_chan_resources = | ||
1647 | ioat_dma_alloc_chan_resources; | ||
1648 | device->common.device_free_chan_resources = | ||
1649 | ioat_dma_free_chan_resources; | ||
1650 | device->common.dev = &pdev->dev; | ||
1651 | |||
1652 | dma_cap_set(DMA_MEMCPY, device->common.cap_mask); | ||
1653 | device->common.device_is_tx_complete = ioat_dma_is_complete; | ||
1654 | switch (device->version) { | ||
1655 | case IOAT_VER_1_2: | ||
1656 | device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; | ||
1657 | device->common.device_issue_pending = | ||
1658 | ioat1_dma_memcpy_issue_pending; | ||
1659 | break; | ||
1660 | case IOAT_VER_2_0: | ||
1661 | case IOAT_VER_3_0: | ||
1662 | device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; | ||
1663 | device->common.device_issue_pending = | ||
1664 | ioat2_dma_memcpy_issue_pending; | ||
1665 | break; | ||
1666 | } | ||
1667 | |||
1668 | dev_err(&device->pdev->dev, | ||
1669 | "Intel(R) I/OAT DMA Engine found," | ||
1670 | " %d channels, device version 0x%02x, driver version %s\n", | ||
1671 | device->common.chancnt, device->version, IOAT_DMA_VERSION); | ||
1672 | |||
1673 | if (!device->common.chancnt) { | ||
1674 | dev_err(&device->pdev->dev, | ||
1675 | "Intel(R) I/OAT DMA Engine problem found: " | ||
1676 | "zero channels detected\n"); | ||
1677 | goto err_setup_interrupts; | ||
1678 | } | ||
1679 | |||
1680 | err = ioat_dma_setup_interrupts(device); | ||
1681 | if (err) | ||
1682 | goto err_setup_interrupts; | ||
1683 | |||
1684 | err = ioat_dma_self_test(device); | ||
1685 | if (err) | ||
1686 | goto err_self_test; | ||
1687 | |||
1688 | ioat_set_tcp_copy_break(device); | ||
1689 | |||
1690 | dma_async_device_register(&device->common); | ||
1691 | |||
1692 | if (device->version != IOAT_VER_3_0) { | ||
1693 | INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); | ||
1694 | schedule_delayed_work(&device->work, | ||
1695 | WATCHDOG_DELAY); | ||
1696 | } | ||
1697 | |||
1698 | return device; | ||
1699 | |||
1700 | err_self_test: | ||
1701 | ioat_dma_remove_interrupts(device); | ||
1702 | err_setup_interrupts: | ||
1703 | pci_pool_destroy(device->completion_pool); | ||
1704 | err_completion_pool: | ||
1705 | pci_pool_destroy(device->dma_pool); | ||
1706 | err_dma_pool: | ||
1707 | kfree(device); | ||
1708 | err_kzalloc: | ||
1709 | dev_err(&pdev->dev, | ||
1710 | "Intel(R) I/OAT DMA Engine initialization failed\n"); | ||
1711 | return NULL; | ||
1712 | } | ||
1713 | |||
1714 | void ioat_dma_remove(struct ioatdma_device *device) | ||
1715 | { | ||
1716 | struct dma_chan *chan, *_chan; | ||
1717 | struct ioat_dma_chan *ioat_chan; | ||
1718 | |||
1719 | if (device->version != IOAT_VER_3_0) | ||
1720 | cancel_delayed_work(&device->work); | ||
1721 | |||
1722 | ioat_dma_remove_interrupts(device); | ||
1723 | |||
1724 | dma_async_device_unregister(&device->common); | ||
1725 | |||
1726 | pci_pool_destroy(device->dma_pool); | ||
1727 | pci_pool_destroy(device->completion_pool); | ||
1728 | |||
1729 | iounmap(device->reg_base); | ||
1730 | pci_release_regions(device->pdev); | ||
1731 | pci_disable_device(device->pdev); | ||
1732 | |||
1733 | list_for_each_entry_safe(chan, _chan, | ||
1734 | &device->common.channels, device_node) { | ||
1735 | ioat_chan = to_ioat_chan(chan); | ||
1736 | list_del(&chan->device_node); | ||
1737 | kfree(ioat_chan); | ||
1738 | } | ||
1739 | kfree(device); | ||
1740 | } | ||
1741 | |||
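The ioat_dma_setup_interrupts() hunk above walks a fallback ladder — MSI-X with one vector per channel, then single-vector MSI-X, then MSI, then legacy INTx — and records whichever mode finally succeeded in device->irq_mode so that ioat_dma_remove_interrupts() can tear down exactly what was set up. A minimal, self-contained sketch of the same pattern follows; the try_* helpers and the standalone main() are hypothetical stand-ins for illustration, not part of the driver.

#include <stdio.h>

enum irq_mode { IRQ_NONE, IRQ_MSIX_MULTI, IRQ_MSIX_SINGLE, IRQ_MSI, IRQ_INTX };

/* Hypothetical stand-ins for the pci_enable_msix()/pci_enable_msi()/request_irq() calls. */
static int try_msix_multi(void)  { return -1; }  /* pretend multi-vector MSI-X fails */
static int try_msix_single(void) { return -1; }  /* pretend single-vector MSI-X fails */
static int try_msi(void)         { return 0;  }  /* pretend MSI succeeds */
static int try_intx(void)        { return 0;  }

/* Walk the ladder from most to least capable and remember what stuck. */
static enum irq_mode setup_interrupts(void)
{
	if (try_msix_multi() == 0)
		return IRQ_MSIX_MULTI;
	if (try_msix_single() == 0)
		return IRQ_MSIX_SINGLE;
	if (try_msi() == 0)
		return IRQ_MSI;
	if (try_intx() == 0)
		return IRQ_INTX;
	return IRQ_NONE;
}

int main(void)
{
	enum irq_mode mode = setup_interrupts();

	printf("selected irq mode: %d\n", mode);	/* prints 3 (IRQ_MSI) here */
	return 0;
}

Teardown then simply switches on the recorded mode, which is what ioat_dma_remove_interrupts() does with its enum ioat_interrupt cases.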
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h deleted file mode 100644 index a52ff4bd4601..000000000000 --- a/drivers/dma/ioatdma.h +++ /dev/null | |||
@@ -1,165 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the Free | ||
6 | * Software Foundation; either version 2 of the License, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called COPYING. | ||
20 | */ | ||
21 | #ifndef IOATDMA_H | ||
22 | #define IOATDMA_H | ||
23 | |||
24 | #include <linux/dmaengine.h> | ||
25 | #include "ioatdma_hw.h" | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/dmapool.h> | ||
28 | #include <linux/cache.h> | ||
29 | #include <linux/pci_ids.h> | ||
30 | #include <net/tcp.h> | ||
31 | |||
32 | #define IOAT_DMA_VERSION "3.64" | ||
33 | |||
34 | enum ioat_interrupt { | ||
35 | none = 0, | ||
36 | msix_multi_vector = 1, | ||
37 | msix_single_vector = 2, | ||
38 | msi = 3, | ||
39 | intx = 4, | ||
40 | }; | ||
41 | |||
42 | #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 | ||
43 | #define IOAT_DMA_DCA_ANY_CPU ~0 | ||
44 | #define IOAT_WATCHDOG_PERIOD (2 * HZ) | ||
45 | |||
46 | |||
47 | /** | ||
48 | * struct ioatdma_device - internal representation of a IOAT device | ||
49 | * @pdev: PCI-Express device | ||
50 | * @reg_base: MMIO register space base address | ||
51 | * @dma_pool: for allocating DMA descriptors | ||
52 | * @common: embedded struct dma_device | ||
53 | * @version: version of ioatdma device | ||
54 | * @irq_mode: which style irq to use | ||
55 | * @msix_entries: irq handlers | ||
56 | * @idx: per channel data | ||
57 | */ | ||
58 | |||
59 | struct ioatdma_device { | ||
60 | struct pci_dev *pdev; | ||
61 | void __iomem *reg_base; | ||
62 | struct pci_pool *dma_pool; | ||
63 | struct pci_pool *completion_pool; | ||
64 | struct dma_device common; | ||
65 | u8 version; | ||
66 | enum ioat_interrupt irq_mode; | ||
67 | struct delayed_work work; | ||
68 | struct msix_entry msix_entries[4]; | ||
69 | struct ioat_dma_chan *idx[4]; | ||
70 | }; | ||
71 | |||
72 | /** | ||
73 | * struct ioat_dma_chan - internal representation of a DMA channel | ||
74 | */ | ||
75 | struct ioat_dma_chan { | ||
76 | |||
77 | void __iomem *reg_base; | ||
78 | |||
79 | dma_cookie_t completed_cookie; | ||
80 | unsigned long last_completion; | ||
81 | unsigned long last_completion_time; | ||
82 | |||
83 | size_t xfercap; /* XFERCAP register value expanded out */ | ||
84 | |||
85 | spinlock_t cleanup_lock; | ||
86 | spinlock_t desc_lock; | ||
87 | struct list_head free_desc; | ||
88 | struct list_head used_desc; | ||
89 | unsigned long watchdog_completion; | ||
90 | int watchdog_tcp_cookie; | ||
91 | u32 watchdog_last_tcp_cookie; | ||
92 | struct delayed_work work; | ||
93 | |||
94 | int pending; | ||
95 | int dmacount; | ||
96 | int desccount; | ||
97 | |||
98 | struct ioatdma_device *device; | ||
99 | struct dma_chan common; | ||
100 | |||
101 | dma_addr_t completion_addr; | ||
102 | union { | ||
103 | u64 full; /* HW completion writeback */ | ||
104 | struct { | ||
105 | u32 low; | ||
106 | u32 high; | ||
107 | }; | ||
108 | } *completion_virt; | ||
109 | unsigned long last_compl_desc_addr_hw; | ||
110 | struct tasklet_struct cleanup_task; | ||
111 | }; | ||
112 | |||
113 | /* wrapper around hardware descriptor format + additional software fields */ | ||
114 | |||
115 | /** | ||
116 | * struct ioat_desc_sw - wrapper around hardware descriptor | ||
117 | * @hw: hardware DMA descriptor | ||
118 | * @node: this descriptor will either be on the free list, | ||
119 | * or attached to a transaction list (async_tx.tx_list) | ||
120 | * @tx_cnt: number of descriptors required to complete the transaction | ||
121 | * @async_tx: the generic software descriptor for all engines | ||
122 | */ | ||
123 | struct ioat_desc_sw { | ||
124 | struct ioat_dma_descriptor *hw; | ||
125 | struct list_head node; | ||
126 | int tx_cnt; | ||
127 | size_t len; | ||
128 | dma_addr_t src; | ||
129 | dma_addr_t dst; | ||
130 | struct dma_async_tx_descriptor async_tx; | ||
131 | }; | ||
132 | |||
133 | static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) | ||
134 | { | ||
135 | #ifdef CONFIG_NET_DMA | ||
136 | switch (dev->version) { | ||
137 | case IOAT_VER_1_2: | ||
138 | sysctl_tcp_dma_copybreak = 4096; | ||
139 | break; | ||
140 | case IOAT_VER_2_0: | ||
141 | sysctl_tcp_dma_copybreak = 2048; | ||
142 | break; | ||
143 | case IOAT_VER_3_0: | ||
144 | sysctl_tcp_dma_copybreak = 262144; | ||
145 | break; | ||
146 | } | ||
147 | #endif | ||
148 | } | ||
149 | |||
150 | #if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) | ||
151 | struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, | ||
152 | void __iomem *iobase); | ||
153 | void ioat_dma_remove(struct ioatdma_device *device); | ||
154 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
155 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
156 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
157 | #else | ||
158 | #define ioat_dma_probe(pdev, iobase) NULL | ||
159 | #define ioat_dma_remove(device) do { } while (0) | ||
160 | #define ioat_dca_init(pdev, iobase) NULL | ||
161 | #define ioat2_dca_init(pdev, iobase) NULL | ||
162 | #define ioat3_dca_init(pdev, iobase) NULL | ||
163 | #endif | ||
164 | |||
165 | #endif /* IOATDMA_H */ | ||
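The ioat_set_tcp_copy_break() helper in the deleted header selects the NET_DMA copybreak threshold from the hardware revision. Below is a small illustrative sketch of that version-to-threshold mapping; it is a userspace approximation for clarity, not the kernel helper itself (which writes sysctl_tcp_dma_copybreak directly under CONFIG_NET_DMA).

#include <stdio.h>

/* Version codes, as defined in ioatdma_hw.h */
#define IOAT_VER_1_2 0x12
#define IOAT_VER_2_0 0x20
#define IOAT_VER_3_0 0x30

/* Return the copybreak threshold the helper would install, or 0 for an
 * unknown version (the real helper simply leaves the sysctl untouched). */
static unsigned int copybreak_for_version(unsigned char version)
{
	switch (version) {
	case IOAT_VER_1_2:
		return 4096;
	case IOAT_VER_2_0:
		return 2048;
	case IOAT_VER_3_0:
		return 262144;
	default:
		return 0;
	}
}

int main(void)
{
	printf("IOAT v3.0 copybreak: %u bytes\n", copybreak_for_version(IOAT_VER_3_0));
	return 0;
}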
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h deleted file mode 100644 index afa57eef86c9..000000000000 --- a/drivers/dma/ioatdma_hw.h +++ /dev/null | |||
@@ -1,70 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the Free | ||
6 | * Software Foundation; either version 2 of the License, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called COPYING. | ||
20 | */ | ||
21 | #ifndef _IOAT_HW_H_ | ||
22 | #define _IOAT_HW_H_ | ||
23 | |||
24 | /* PCI Configuration Space Values */ | ||
25 | #define IOAT_PCI_VID 0x8086 | ||
26 | |||
27 | /* CB device ID's */ | ||
28 | #define IOAT_PCI_DID_5000 0x1A38 | ||
29 | #define IOAT_PCI_DID_CNB 0x360B | ||
30 | #define IOAT_PCI_DID_SCNB 0x65FF | ||
31 | #define IOAT_PCI_DID_SNB 0x402F | ||
32 | |||
33 | #define IOAT_PCI_RID 0x00 | ||
34 | #define IOAT_PCI_SVID 0x8086 | ||
35 | #define IOAT_PCI_SID 0x8086 | ||
36 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ | ||
37 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ | ||
38 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | ||
39 | |||
40 | struct ioat_dma_descriptor { | ||
41 | uint32_t size; | ||
42 | uint32_t ctl; | ||
43 | uint64_t src_addr; | ||
44 | uint64_t dst_addr; | ||
45 | uint64_t next; | ||
46 | uint64_t rsv1; | ||
47 | uint64_t rsv2; | ||
48 | uint64_t user1; | ||
49 | uint64_t user2; | ||
50 | }; | ||
51 | |||
52 | #define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001 | ||
53 | #define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002 | ||
54 | #define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004 | ||
55 | #define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008 | ||
56 | #define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010 | ||
57 | #define IOAT_DMA_DESCRIPTOR_NUL 0x00000020 | ||
58 | #define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040 | ||
59 | #define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080 | ||
60 | #define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100 | ||
61 | #define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200 | ||
62 | #define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400 | ||
63 | |||
64 | #define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000 | ||
65 | #define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000 | ||
66 | |||
67 | #define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001 | ||
68 | #define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000 | ||
69 | |||
70 | #endif | ||
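The control-word bit definitions above are OR-ed together when a descriptor is filled in. The following is a purely illustrative sketch of composing one descriptor for a plain copy; the field values (addresses, size) are placeholders, and real descriptors must of course live in DMA-coherent memory and carry bus addresses rather than the constants used here.

#include <stdint.h>
#include <stdio.h>

/* Definitions copied from the header above. */
#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN		0x00000001
#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS		0x00000008
#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA	0x00000000

struct ioat_dma_descriptor {
	uint32_t size;
	uint32_t ctl;
	uint64_t src_addr;
	uint64_t dst_addr;
	uint64_t next;
	uint64_t rsv1;
	uint64_t rsv2;
	uint64_t user1;
	uint64_t user2;
};

int main(void)
{
	/* Hypothetical 4 KiB copy that raises an interrupt and reports
	 * completion status when done; addresses are placeholders. */
	struct ioat_dma_descriptor d = {
		.size     = 4096,
		.ctl      = IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA |
			    IOAT_DMA_DESCRIPTOR_CTL_INT_GN |
			    IOAT_DMA_DESCRIPTOR_CTL_CP_STS,
		.src_addr = 0x1000,
		.dst_addr = 0x2000,
		.next     = 0,	/* end of the hardware chain */
	};

	printf("ctl word: 0x%08x\n", d.ctl);
	return 0;
}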
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 2f052265122f..645ca8d54ec4 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
32 | #include <linux/memory.h> | 32 | #include <linux/memory.h> |
33 | #include <linux/ioport.h> | 33 | #include <linux/ioport.h> |
34 | #include <linux/raid/pq.h> | ||
34 | 35 | ||
35 | #include <mach/adma.h> | 36 | #include <mach/adma.h> |
36 | 37 | ||
@@ -57,65 +58,110 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) | |||
57 | } | 58 | } |
58 | } | 59 | } |
59 | 60 | ||
61 | static void | ||
62 | iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) | ||
63 | { | ||
64 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
65 | struct iop_adma_desc_slot *unmap = desc->group_head; | ||
66 | struct device *dev = &iop_chan->device->pdev->dev; | ||
67 | u32 len = unmap->unmap_len; | ||
68 | enum dma_ctrl_flags flags = tx->flags; | ||
69 | u32 src_cnt; | ||
70 | dma_addr_t addr; | ||
71 | dma_addr_t dest; | ||
72 | |||
73 | src_cnt = unmap->unmap_src_cnt; | ||
74 | dest = iop_desc_get_dest_addr(unmap, iop_chan); | ||
75 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
76 | enum dma_data_direction dir; | ||
77 | |||
78 | if (src_cnt > 1) /* is xor? */ | ||
79 | dir = DMA_BIDIRECTIONAL; | ||
80 | else | ||
81 | dir = DMA_FROM_DEVICE; | ||
82 | |||
83 | dma_unmap_page(dev, dest, len, dir); | ||
84 | } | ||
85 | |||
86 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
87 | while (src_cnt--) { | ||
88 | addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); | ||
89 | if (addr == dest) | ||
90 | continue; | ||
91 | dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); | ||
92 | } | ||
93 | } | ||
94 | desc->group_head = NULL; | ||
95 | } | ||
96 | |||
97 | static void | ||
98 | iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) | ||
99 | { | ||
100 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
101 | struct iop_adma_desc_slot *unmap = desc->group_head; | ||
102 | struct device *dev = &iop_chan->device->pdev->dev; | ||
103 | u32 len = unmap->unmap_len; | ||
104 | enum dma_ctrl_flags flags = tx->flags; | ||
105 | u32 src_cnt = unmap->unmap_src_cnt; | ||
106 | dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); | ||
107 | dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); | ||
108 | int i; | ||
109 | |||
110 | if (tx->flags & DMA_PREP_CONTINUE) | ||
111 | src_cnt -= 3; | ||
112 | |||
113 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { | ||
114 | dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); | ||
115 | dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); | ||
116 | } | ||
117 | |||
118 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
119 | dma_addr_t addr; | ||
120 | |||
121 | for (i = 0; i < src_cnt; i++) { | ||
122 | addr = iop_desc_get_src_addr(unmap, iop_chan, i); | ||
123 | dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); | ||
124 | } | ||
125 | if (desc->pq_check_result) { | ||
126 | dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); | ||
127 | dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | desc->group_head = NULL; | ||
132 | } | ||
133 | |||
134 | |||
60 | static dma_cookie_t | 135 | static dma_cookie_t |
61 | iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | 136 | iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, |
62 | struct iop_adma_chan *iop_chan, dma_cookie_t cookie) | 137 | struct iop_adma_chan *iop_chan, dma_cookie_t cookie) |
63 | { | 138 | { |
64 | BUG_ON(desc->async_tx.cookie < 0); | 139 | struct dma_async_tx_descriptor *tx = &desc->async_tx; |
65 | if (desc->async_tx.cookie > 0) { | 140 | |
66 | cookie = desc->async_tx.cookie; | 141 | BUG_ON(tx->cookie < 0); |
67 | desc->async_tx.cookie = 0; | 142 | if (tx->cookie > 0) { |
143 | cookie = tx->cookie; | ||
144 | tx->cookie = 0; | ||
68 | 145 | ||
69 | /* call the callback (must not sleep or submit new | 146 | /* call the callback (must not sleep or submit new |
70 | * operations to this channel) | 147 | * operations to this channel) |
71 | */ | 148 | */ |
72 | if (desc->async_tx.callback) | 149 | if (tx->callback) |
73 | desc->async_tx.callback( | 150 | tx->callback(tx->callback_param); |
74 | desc->async_tx.callback_param); | ||
75 | 151 | ||
76 | /* unmap dma addresses | 152 | /* unmap dma addresses |
77 | * (unmap_single vs unmap_page?) | 153 | * (unmap_single vs unmap_page?) |
78 | */ | 154 | */ |
79 | if (desc->group_head && desc->unmap_len) { | 155 | if (desc->group_head && desc->unmap_len) { |
80 | struct iop_adma_desc_slot *unmap = desc->group_head; | 156 | if (iop_desc_is_pq(desc)) |
81 | struct device *dev = | 157 | iop_desc_unmap_pq(iop_chan, desc); |
82 | &iop_chan->device->pdev->dev; | 158 | else |
83 | u32 len = unmap->unmap_len; | 159 | iop_desc_unmap(iop_chan, desc); |
84 | enum dma_ctrl_flags flags = desc->async_tx.flags; | ||
85 | u32 src_cnt; | ||
86 | dma_addr_t addr; | ||
87 | dma_addr_t dest; | ||
88 | |||
89 | src_cnt = unmap->unmap_src_cnt; | ||
90 | dest = iop_desc_get_dest_addr(unmap, iop_chan); | ||
91 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
92 | enum dma_data_direction dir; | ||
93 | |||
94 | if (src_cnt > 1) /* is xor? */ | ||
95 | dir = DMA_BIDIRECTIONAL; | ||
96 | else | ||
97 | dir = DMA_FROM_DEVICE; | ||
98 | |||
99 | dma_unmap_page(dev, dest, len, dir); | ||
100 | } | ||
101 | |||
102 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
103 | while (src_cnt--) { | ||
104 | addr = iop_desc_get_src_addr(unmap, | ||
105 | iop_chan, | ||
106 | src_cnt); | ||
107 | if (addr == dest) | ||
108 | continue; | ||
109 | dma_unmap_page(dev, addr, len, | ||
110 | DMA_TO_DEVICE); | ||
111 | } | ||
112 | } | ||
113 | desc->group_head = NULL; | ||
114 | } | 160 | } |
115 | } | 161 | } |
116 | 162 | ||
117 | /* run dependent operations */ | 163 | /* run dependent operations */ |
118 | dma_run_dependencies(&desc->async_tx); | 164 | dma_run_dependencies(tx); |
119 | 165 | ||
120 | return cookie; | 166 | return cookie; |
121 | } | 167 | } |
@@ -287,7 +333,12 @@ static void iop_adma_tasklet(unsigned long data) | |||
287 | { | 333 | { |
288 | struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data; | 334 | struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data; |
289 | 335 | ||
290 | spin_lock(&iop_chan->lock); | 336 | /* lockdep will flag dependency submissions as potentially
337 | * recursive locking; this is not the case, as a dependency | ||
338 | * submission will never recurse into a channel's submit routine. | ||
339 | * There are checks in async_tx.c to prevent this. | ||
340 | */ | ||
341 | spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING); | ||
291 | __iop_adma_slot_cleanup(iop_chan); | 342 | __iop_adma_slot_cleanup(iop_chan); |
292 | spin_unlock(&iop_chan->lock); | 343 | spin_unlock(&iop_chan->lock); |
293 | } | 344 | } |
@@ -370,7 +421,7 @@ retry: | |||
370 | } | 421 | } |
371 | alloc_tail->group_head = alloc_start; | 422 | alloc_tail->group_head = alloc_start; |
372 | alloc_tail->async_tx.cookie = -EBUSY; | 423 | alloc_tail->async_tx.cookie = -EBUSY; |
373 | list_splice(&chain, &alloc_tail->async_tx.tx_list); | 424 | list_splice(&chain, &alloc_tail->tx_list); |
374 | iop_chan->last_used = last_used; | 425 | iop_chan->last_used = last_used; |
375 | iop_desc_clear_next_desc(alloc_start); | 426 | iop_desc_clear_next_desc(alloc_start); |
376 | iop_desc_clear_next_desc(alloc_tail); | 427 | iop_desc_clear_next_desc(alloc_tail); |
@@ -429,7 +480,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
429 | 480 | ||
430 | old_chain_tail = list_entry(iop_chan->chain.prev, | 481 | old_chain_tail = list_entry(iop_chan->chain.prev, |
431 | struct iop_adma_desc_slot, chain_node); | 482 | struct iop_adma_desc_slot, chain_node); |
432 | list_splice_init(&sw_desc->async_tx.tx_list, | 483 | list_splice_init(&sw_desc->tx_list, |
433 | &old_chain_tail->chain_node); | 484 | &old_chain_tail->chain_node); |
434 | 485 | ||
435 | /* fix up the hardware chain */ | 486 | /* fix up the hardware chain */ |
@@ -496,6 +547,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan) | |||
496 | 547 | ||
497 | dma_async_tx_descriptor_init(&slot->async_tx, chan); | 548 | dma_async_tx_descriptor_init(&slot->async_tx, chan); |
498 | slot->async_tx.tx_submit = iop_adma_tx_submit; | 549 | slot->async_tx.tx_submit = iop_adma_tx_submit; |
550 | INIT_LIST_HEAD(&slot->tx_list); | ||
499 | INIT_LIST_HEAD(&slot->chain_node); | 551 | INIT_LIST_HEAD(&slot->chain_node); |
500 | INIT_LIST_HEAD(&slot->slot_node); | 552 | INIT_LIST_HEAD(&slot->slot_node); |
501 | hw_desc = (char *) iop_chan->device->dma_desc_pool; | 553 | hw_desc = (char *) iop_chan->device->dma_desc_pool; |
@@ -660,9 +712,9 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | |||
660 | } | 712 | } |
661 | 713 | ||
662 | static struct dma_async_tx_descriptor * | 714 | static struct dma_async_tx_descriptor * |
663 | iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src, | 715 | iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, |
664 | unsigned int src_cnt, size_t len, u32 *result, | 716 | unsigned int src_cnt, size_t len, u32 *result, |
665 | unsigned long flags) | 717 | unsigned long flags) |
666 | { | 718 | { |
667 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | 719 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); |
668 | struct iop_adma_desc_slot *sw_desc, *grp_start; | 720 | struct iop_adma_desc_slot *sw_desc, *grp_start; |
@@ -696,6 +748,118 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src, | |||
696 | return sw_desc ? &sw_desc->async_tx : NULL; | 748 | return sw_desc ? &sw_desc->async_tx : NULL; |
697 | } | 749 | } |
698 | 750 | ||
751 | static struct dma_async_tx_descriptor * | ||
752 | iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | ||
753 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
754 | unsigned long flags) | ||
755 | { | ||
756 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | ||
757 | struct iop_adma_desc_slot *sw_desc, *g; | ||
758 | int slot_cnt, slots_per_op; | ||
759 | int continue_srcs; | ||
760 | |||
761 | if (unlikely(!len)) | ||
762 | return NULL; | ||
763 | BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); | ||
764 | |||
765 | dev_dbg(iop_chan->device->common.dev, | ||
766 | "%s src_cnt: %d len: %u flags: %lx\n", | ||
767 | __func__, src_cnt, len, flags); | ||
768 | |||
769 | if (dmaf_p_disabled_continue(flags)) | ||
770 | continue_srcs = 1+src_cnt; | ||
771 | else if (dmaf_continue(flags)) | ||
772 | continue_srcs = 3+src_cnt; | ||
773 | else | ||
774 | continue_srcs = 0+src_cnt; | ||
775 | |||
776 | spin_lock_bh(&iop_chan->lock); | ||
777 | slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op); | ||
778 | sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); | ||
779 | if (sw_desc) { | ||
780 | int i; | ||
781 | |||
782 | g = sw_desc->group_head; | ||
783 | iop_desc_set_byte_count(g, iop_chan, len); | ||
784 | |||
785 | /* even if P is disabled, its destination address (bits | ||
786 | * [3:0]) must match Q. It is OK if P points to an | ||
787 | * invalid address; it won't be written. | ||
788 | */ | ||
789 | if (flags & DMA_PREP_PQ_DISABLE_P) | ||
790 | dst[0] = dst[1] & 0x7; | ||
791 | |||
792 | iop_desc_set_pq_addr(g, dst); | ||
793 | sw_desc->unmap_src_cnt = src_cnt; | ||
794 | sw_desc->unmap_len = len; | ||
795 | sw_desc->async_tx.flags = flags; | ||
796 | for (i = 0; i < src_cnt; i++) | ||
797 | iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); | ||
798 | |||
799 | /* if we are continuing a previous operation, factor in | ||
800 | * the old p and q values; see the comment for dma_maxpq | ||
801 | * in include/linux/dmaengine.h | ||
802 | */ | ||
803 | if (dmaf_p_disabled_continue(flags)) | ||
804 | iop_desc_set_pq_src_addr(g, i++, dst[1], 1); | ||
805 | else if (dmaf_continue(flags)) { | ||
806 | iop_desc_set_pq_src_addr(g, i++, dst[0], 0); | ||
807 | iop_desc_set_pq_src_addr(g, i++, dst[1], 1); | ||
808 | iop_desc_set_pq_src_addr(g, i++, dst[1], 0); | ||
809 | } | ||
810 | iop_desc_init_pq(g, i, flags); | ||
811 | } | ||
812 | spin_unlock_bh(&iop_chan->lock); | ||
813 | |||
814 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
815 | } | ||
816 | |||
817 | static struct dma_async_tx_descriptor * | ||
818 | iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | ||
819 | unsigned int src_cnt, const unsigned char *scf, | ||
820 | size_t len, enum sum_check_flags *pqres, | ||
821 | unsigned long flags) | ||
822 | { | ||
823 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | ||
824 | struct iop_adma_desc_slot *sw_desc, *g; | ||
825 | int slot_cnt, slots_per_op; | ||
826 | |||
827 | if (unlikely(!len)) | ||
828 | return NULL; | ||
829 | BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); | ||
830 | |||
831 | dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", | ||
832 | __func__, src_cnt, len); | ||
833 | |||
834 | spin_lock_bh(&iop_chan->lock); | ||
835 | slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op); | ||
836 | sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); | ||
837 | if (sw_desc) { | ||
838 | /* for validate operations p and q are tagged onto the | ||
839 | * end of the source list | ||
840 | */ | ||
841 | int pq_idx = src_cnt; | ||
842 | |||
843 | g = sw_desc->group_head; | ||
844 | iop_desc_init_pq_zero_sum(g, src_cnt+2, flags); | ||
845 | iop_desc_set_pq_zero_sum_byte_count(g, len); | ||
846 | g->pq_check_result = pqres; | ||
847 | pr_debug("\t%s: g->pq_check_result: %p\n", | ||
848 | __func__, g->pq_check_result); | ||
849 | sw_desc->unmap_src_cnt = src_cnt+2; | ||
850 | sw_desc->unmap_len = len; | ||
851 | sw_desc->async_tx.flags = flags; | ||
852 | while (src_cnt--) | ||
853 | iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, | ||
854 | src[src_cnt], | ||
855 | scf[src_cnt]); | ||
856 | iop_desc_set_pq_zero_sum_addr(g, pq_idx, src); | ||
857 | } | ||
858 | spin_unlock_bh(&iop_chan->lock); | ||
859 | |||
860 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
861 | } | ||
862 | |||
699 | static void iop_adma_free_chan_resources(struct dma_chan *chan) | 863 | static void iop_adma_free_chan_resources(struct dma_chan *chan) |
700 | { | 864 | { |
701 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | 865 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); |
@@ -906,7 +1070,7 @@ out: | |||
906 | 1070 | ||
907 | #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ | 1071 | #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ |
908 | static int __devinit | 1072 | static int __devinit |
909 | iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | 1073 | iop_adma_xor_val_self_test(struct iop_adma_device *device) |
910 | { | 1074 | { |
911 | int i, src_idx; | 1075 | int i, src_idx; |
912 | struct page *dest; | 1076 | struct page *dest; |
@@ -1002,7 +1166,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
1002 | PAGE_SIZE, DMA_TO_DEVICE); | 1166 | PAGE_SIZE, DMA_TO_DEVICE); |
1003 | 1167 | ||
1004 | /* skip zero sum if the capability is not present */ | 1168 | /* skip zero sum if the capability is not present */ |
1005 | if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask)) | 1169 | if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) |
1006 | goto free_resources; | 1170 | goto free_resources; |
1007 | 1171 | ||
1008 | /* zero sum the sources with the destination page */ | 1172 |
@@ -1016,10 +1180,10 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
1016 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, | 1180 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, |
1017 | zero_sum_srcs[i], 0, PAGE_SIZE, | 1181 | zero_sum_srcs[i], 0, PAGE_SIZE, |
1018 | DMA_TO_DEVICE); | 1182 | DMA_TO_DEVICE); |
1019 | tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs, | 1183 | tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs, |
1020 | IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, | 1184 | IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, |
1021 | &zero_sum_result, | 1185 | &zero_sum_result, |
1022 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1186 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1023 | 1187 | ||
1024 | cookie = iop_adma_tx_submit(tx); | 1188 | cookie = iop_adma_tx_submit(tx); |
1025 | iop_adma_issue_pending(dma_chan); | 1189 | iop_adma_issue_pending(dma_chan); |
@@ -1072,10 +1236,10 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
1072 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, | 1236 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, |
1073 | zero_sum_srcs[i], 0, PAGE_SIZE, | 1237 | zero_sum_srcs[i], 0, PAGE_SIZE, |
1074 | DMA_TO_DEVICE); | 1238 | DMA_TO_DEVICE); |
1075 | tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs, | 1239 | tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs, |
1076 | IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, | 1240 | IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, |
1077 | &zero_sum_result, | 1241 | &zero_sum_result, |
1078 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1242 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1079 | 1243 | ||
1080 | cookie = iop_adma_tx_submit(tx); | 1244 | cookie = iop_adma_tx_submit(tx); |
1081 | iop_adma_issue_pending(dma_chan); | 1245 | iop_adma_issue_pending(dma_chan); |
@@ -1105,6 +1269,170 @@ out: | |||
1105 | return err; | 1269 | return err; |
1106 | } | 1270 | } |
1107 | 1271 | ||
1272 | #ifdef CONFIG_MD_RAID6_PQ | ||
1273 | static int __devinit | ||
1274 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | ||
1275 | { | ||
1276 | /* combined sources, software pq results, and extra hw pq results */ | ||
1277 | struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2]; | ||
1278 | /* ptr to the extra hw pq buffers defined above */ | ||
1279 | struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2]; | ||
1280 | /* address conversion buffers (dma_map / page_address) */ | ||
1281 | void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2]; | ||
1282 | dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST]; | ||
1283 | dma_addr_t pq_dest[2]; | ||
1284 | |||
1285 | int i; | ||
1286 | struct dma_async_tx_descriptor *tx; | ||
1287 | struct dma_chan *dma_chan; | ||
1288 | dma_cookie_t cookie; | ||
1289 | u32 zero_sum_result; | ||
1290 | int err = 0; | ||
1291 | struct device *dev; | ||
1292 | |||
1293 | dev_dbg(device->common.dev, "%s\n", __func__); | ||
1294 | |||
1295 | for (i = 0; i < ARRAY_SIZE(pq); i++) { | ||
1296 | pq[i] = alloc_page(GFP_KERNEL); | ||
1297 | if (!pq[i]) { | ||
1298 | while (i--) | ||
1299 | __free_page(pq[i]); | ||
1300 | return -ENOMEM; | ||
1301 | } | ||
1302 | } | ||
1303 | |||
1304 | /* Fill in src buffers */ | ||
1305 | for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) { | ||
1306 | pq_sw[i] = page_address(pq[i]); | ||
1307 | memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE); | ||
1308 | } | ||
1309 | pq_sw[i] = page_address(pq[i]); | ||
1310 | pq_sw[i+1] = page_address(pq[i+1]); | ||
1311 | |||
1312 | dma_chan = container_of(device->common.channels.next, | ||
1313 | struct dma_chan, | ||
1314 | device_node); | ||
1315 | if (iop_adma_alloc_chan_resources(dma_chan) < 1) { | ||
1316 | err = -ENODEV; | ||
1317 | goto out; | ||
1318 | } | ||
1319 | |||
1320 | dev = dma_chan->device->dev; | ||
1321 | |||
1322 | /* initialize the dests */ | ||
1323 | memset(page_address(pq_hw[0]), 0 , PAGE_SIZE); | ||
1324 | memset(page_address(pq_hw[1]), 0 , PAGE_SIZE); | ||
1325 | |||
1326 | /* test pq */ | ||
1327 | pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1328 | pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1329 | for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) | ||
1330 | pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, | ||
1331 | DMA_TO_DEVICE); | ||
1332 | |||
1333 | tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src, | ||
1334 | IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp, | ||
1335 | PAGE_SIZE, | ||
1336 | DMA_PREP_INTERRUPT | | ||
1337 | DMA_CTRL_ACK); | ||
1338 | |||
1339 | cookie = iop_adma_tx_submit(tx); | ||
1340 | iop_adma_issue_pending(dma_chan); | ||
1341 | msleep(8); | ||
1342 | |||
1343 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != | ||
1344 | DMA_SUCCESS) { | ||
1345 | dev_err(dev, "Self-test pq timed out, disabling\n"); | ||
1346 | err = -ENODEV; | ||
1347 | goto free_resources; | ||
1348 | } | ||
1349 | |||
1350 | raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw); | ||
1351 | |||
1352 | if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST], | ||
1353 | page_address(pq_hw[0]), PAGE_SIZE) != 0) { | ||
1354 | dev_err(dev, "Self-test p failed compare, disabling\n"); | ||
1355 | err = -ENODEV; | ||
1356 | goto free_resources; | ||
1357 | } | ||
1358 | if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1], | ||
1359 | page_address(pq_hw[1]), PAGE_SIZE) != 0) { | ||
1360 | dev_err(dev, "Self-test q failed compare, disabling\n"); | ||
1361 | err = -ENODEV; | ||
1362 | goto free_resources; | ||
1363 | } | ||
1364 | |||
1365 | /* test correct zero sum using the software generated pq values */ | ||
1366 | for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++) | ||
1367 | pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, | ||
1368 | DMA_TO_DEVICE); | ||
1369 | |||
1370 | zero_sum_result = ~0; | ||
1371 | tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], | ||
1372 | pq_src, IOP_ADMA_NUM_SRC_TEST, | ||
1373 | raid6_gfexp, PAGE_SIZE, &zero_sum_result, | ||
1374 | DMA_PREP_INTERRUPT|DMA_CTRL_ACK); | ||
1375 | |||
1376 | cookie = iop_adma_tx_submit(tx); | ||
1377 | iop_adma_issue_pending(dma_chan); | ||
1378 | msleep(8); | ||
1379 | |||
1380 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != | ||
1381 | DMA_SUCCESS) { | ||
1382 | dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); | ||
1383 | err = -ENODEV; | ||
1384 | goto free_resources; | ||
1385 | } | ||
1386 | |||
1387 | if (zero_sum_result != 0) { | ||
1388 | dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n", | ||
1389 | zero_sum_result); | ||
1390 | err = -ENODEV; | ||
1391 | goto free_resources; | ||
1392 | } | ||
1393 | |||
1394 | /* test incorrect zero sum */ | ||
1395 | i = IOP_ADMA_NUM_SRC_TEST; | ||
1396 | memset(pq_sw[i] + 100, 0, 100); | ||
1397 | memset(pq_sw[i+1] + 200, 0, 200); | ||
1398 | for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++) | ||
1399 | pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, | ||
1400 | DMA_TO_DEVICE); | ||
1401 | |||
1402 | zero_sum_result = 0; | ||
1403 | tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], | ||
1404 | pq_src, IOP_ADMA_NUM_SRC_TEST, | ||
1405 | raid6_gfexp, PAGE_SIZE, &zero_sum_result, | ||
1406 | DMA_PREP_INTERRUPT|DMA_CTRL_ACK); | ||
1407 | |||
1408 | cookie = iop_adma_tx_submit(tx); | ||
1409 | iop_adma_issue_pending(dma_chan); | ||
1410 | msleep(8); | ||
1411 | |||
1412 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != | ||
1413 | DMA_SUCCESS) { | ||
1414 | dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); | ||
1415 | err = -ENODEV; | ||
1416 | goto free_resources; | ||
1417 | } | ||
1418 | |||
1419 | if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) { | ||
1420 | dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n", | ||
1421 | zero_sum_result); | ||
1422 | err = -ENODEV; | ||
1423 | goto free_resources; | ||
1424 | } | ||
1425 | |||
1426 | free_resources: | ||
1427 | iop_adma_free_chan_resources(dma_chan); | ||
1428 | out: | ||
1429 | i = ARRAY_SIZE(pq); | ||
1430 | while (i--) | ||
1431 | __free_page(pq[i]); | ||
1432 | return err; | ||
1433 | } | ||
1434 | #endif | ||
1435 | |||
1108 | static int __devexit iop_adma_remove(struct platform_device *dev) | 1436 | static int __devexit iop_adma_remove(struct platform_device *dev) |
1109 | { | 1437 | { |
1110 | struct iop_adma_device *device = platform_get_drvdata(dev); | 1438 | struct iop_adma_device *device = platform_get_drvdata(dev); |
@@ -1192,9 +1520,16 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1192 | dma_dev->max_xor = iop_adma_get_max_xor(); | 1520 | dma_dev->max_xor = iop_adma_get_max_xor(); |
1193 | dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; | 1521 | dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; |
1194 | } | 1522 | } |
1195 | if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask)) | 1523 | if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask)) |
1196 | dma_dev->device_prep_dma_zero_sum = | 1524 | dma_dev->device_prep_dma_xor_val = |
1197 | iop_adma_prep_dma_zero_sum; | 1525 | iop_adma_prep_dma_xor_val; |
1526 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { | ||
1527 | dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0); | ||
1528 | dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq; | ||
1529 | } | ||
1530 | if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) | ||
1531 | dma_dev->device_prep_dma_pq_val = | ||
1532 | iop_adma_prep_dma_pq_val; | ||
1198 | if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) | 1533 | if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) |
1199 | dma_dev->device_prep_dma_interrupt = | 1534 | dma_dev->device_prep_dma_interrupt = |
1200 | iop_adma_prep_dma_interrupt; | 1535 | iop_adma_prep_dma_interrupt; |
@@ -1248,23 +1583,35 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1248 | } | 1583 | } |
1249 | 1584 | ||
1250 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || | 1585 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || |
1251 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { | 1586 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { |
1252 | ret = iop_adma_xor_zero_sum_self_test(adev); | 1587 | ret = iop_adma_xor_val_self_test(adev); |
1253 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); | 1588 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); |
1254 | if (ret) | 1589 | if (ret) |
1255 | goto err_free_iop_chan; | 1590 | goto err_free_iop_chan; |
1256 | } | 1591 | } |
1257 | 1592 | ||
1593 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && | ||
1594 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { | ||
1595 | #ifdef CONFIG_MD_RAID6_PQ | ||
1596 | ret = iop_adma_pq_zero_sum_self_test(adev); | ||
1597 | dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); | ||
1598 | #else | ||
1599 | /* can not test raid6, so do not publish capability */ | ||
1600 | dma_cap_clear(DMA_PQ, dma_dev->cap_mask); | ||
1601 | dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask); | ||
1602 | ret = 0; | ||
1603 | #endif | ||
1604 | if (ret) | ||
1605 | goto err_free_iop_chan; | ||
1606 | } | ||
1607 | |||
1258 | dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " | 1608 | dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " |
1259 | "( %s%s%s%s%s%s%s%s%s%s)\n", | 1609 | "( %s%s%s%s%s%s%s)\n", |
1260 | dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "", | 1610 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", |
1261 | dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "", | 1611 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", |
1262 | dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "", | ||
1263 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1612 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
1264 | dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "", | 1613 | dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", |
1265 | dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "", | ||
1266 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", | 1614 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", |
1267 | dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "", | ||
1268 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1615 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
1269 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | 1616 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1270 | 1617 | ||
@@ -1296,7 +1643,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) | |||
1296 | if (sw_desc) { | 1643 | if (sw_desc) { |
1297 | grp_start = sw_desc->group_head; | 1644 | grp_start = sw_desc->group_head; |
1298 | 1645 | ||
1299 | list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); | 1646 | list_splice_init(&sw_desc->tx_list, &iop_chan->chain); |
1300 | async_tx_ack(&sw_desc->async_tx); | 1647 | async_tx_ack(&sw_desc->async_tx); |
1301 | iop_desc_init_memcpy(grp_start, 0); | 1648 | iop_desc_init_memcpy(grp_start, 0); |
1302 | iop_desc_set_byte_count(grp_start, iop_chan, 0); | 1649 | iop_desc_set_byte_count(grp_start, iop_chan, 0); |
@@ -1352,7 +1699,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1352 | sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); | 1699 | sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); |
1353 | if (sw_desc) { | 1700 | if (sw_desc) { |
1354 | grp_start = sw_desc->group_head; | 1701 | grp_start = sw_desc->group_head; |
1355 | list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); | 1702 | list_splice_init(&sw_desc->tx_list, &iop_chan->chain); |
1356 | async_tx_ack(&sw_desc->async_tx); | 1703 | async_tx_ack(&sw_desc->async_tx); |
1357 | iop_desc_init_null_xor(grp_start, 2, 0); | 1704 | iop_desc_init_null_xor(grp_start, 2, 0); |
1358 | iop_desc_set_byte_count(grp_start, iop_chan, 0); | 1705 | iop_desc_set_byte_count(grp_start, iop_chan, 0); |
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c index 9f6fe46a9b87..c0a272c73682 100644 --- a/drivers/dma/iovlock.c +++ b/drivers/dma/iovlock.c | |||
@@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, | |||
183 | iov_byte_offset, | 183 | iov_byte_offset, |
184 | kdata, | 184 | kdata, |
185 | copy); | 185 | copy); |
186 | /* poll for a descriptor slot */ | ||
187 | if (unlikely(dma_cookie < 0)) { | ||
188 | dma_async_issue_pending(chan); | ||
189 | continue; | ||
190 | } | ||
186 | 191 | ||
187 | len -= copy; | 192 | len -= copy; |
188 | iov[iovec_idx].iov_len -= copy; | 193 | iov[iovec_idx].iov_len -= copy; |
@@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, | |||
248 | page, | 253 | page, |
249 | offset, | 254 | offset, |
250 | copy); | 255 | copy); |
256 | /* poll for a descriptor slot */ | ||
257 | if (unlikely(dma_cookie < 0)) { | ||
258 | dma_async_issue_pending(chan); | ||
259 | continue; | ||
260 | } | ||
251 | 261 | ||
252 | len -= copy; | 262 | len -= copy; |
253 | iov[iovec_idx].iov_len -= copy; | 263 | iov[iovec_idx].iov_len -= copy; |
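Both hunks above add the same recovery path: when the copy submission returns a negative cookie (no free descriptor), the code kicks the channel with dma_async_issue_pending() and retries that chunk instead of failing. A self-contained sketch of the poll-and-retry loop follows; the fake free_slots "engine" and the helper names are invented for illustration only.

#include <stdio.h>

/* Hypothetical engine that only has room for a few outstanding copies. */
static int free_slots;

static int submit_copy(void)
{
	if (free_slots == 0)
		return -1;	/* analogous to a negative dma_cookie_t */
	free_slots--;
	return 0;
}

static void issue_pending(void)
{
	/* Pretend flushing the queue lets completions free up descriptors. */
	free_slots = 4;
}

int main(void)
{
	int chunks = 10, submitted = 0;

	while (submitted < chunks) {
		if (submit_copy() < 0) {
			/* No descriptor available: flush and retry this chunk. */
			issue_pending();
			continue;
		}
		submitted++;
	}
	printf("submitted %d chunks\n", submitted);
	return 0;
}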
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 3f23eabe09f2..466ab10c1ff1 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -517,7 +517,7 @@ retry: | |||
517 | } | 517 | } |
518 | alloc_tail->group_head = alloc_start; | 518 | alloc_tail->group_head = alloc_start; |
519 | alloc_tail->async_tx.cookie = -EBUSY; | 519 | alloc_tail->async_tx.cookie = -EBUSY; |
520 | list_splice(&chain, &alloc_tail->async_tx.tx_list); | 520 | list_splice(&chain, &alloc_tail->tx_list); |
521 | mv_chan->last_used = last_used; | 521 | mv_chan->last_used = last_used; |
522 | mv_desc_clear_next_desc(alloc_start); | 522 | mv_desc_clear_next_desc(alloc_start); |
523 | mv_desc_clear_next_desc(alloc_tail); | 523 | mv_desc_clear_next_desc(alloc_tail); |
@@ -565,14 +565,14 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
565 | cookie = mv_desc_assign_cookie(mv_chan, sw_desc); | 565 | cookie = mv_desc_assign_cookie(mv_chan, sw_desc); |
566 | 566 | ||
567 | if (list_empty(&mv_chan->chain)) | 567 | if (list_empty(&mv_chan->chain)) |
568 | list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain); | 568 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); |
569 | else { | 569 | else { |
570 | new_hw_chain = 0; | 570 | new_hw_chain = 0; |
571 | 571 | ||
572 | old_chain_tail = list_entry(mv_chan->chain.prev, | 572 | old_chain_tail = list_entry(mv_chan->chain.prev, |
573 | struct mv_xor_desc_slot, | 573 | struct mv_xor_desc_slot, |
574 | chain_node); | 574 | chain_node); |
575 | list_splice_init(&grp_start->async_tx.tx_list, | 575 | list_splice_init(&grp_start->tx_list, |
576 | &old_chain_tail->chain_node); | 576 | &old_chain_tail->chain_node); |
577 | 577 | ||
578 | if (!mv_can_chain(grp_start)) | 578 | if (!mv_can_chain(grp_start)) |
@@ -632,6 +632,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
632 | slot->async_tx.tx_submit = mv_xor_tx_submit; | 632 | slot->async_tx.tx_submit = mv_xor_tx_submit; |
633 | INIT_LIST_HEAD(&slot->chain_node); | 633 | INIT_LIST_HEAD(&slot->chain_node); |
634 | INIT_LIST_HEAD(&slot->slot_node); | 634 | INIT_LIST_HEAD(&slot->slot_node); |
635 | INIT_LIST_HEAD(&slot->tx_list); | ||
635 | hw_desc = (char *) mv_chan->device->dma_desc_pool; | 636 | hw_desc = (char *) mv_chan->device->dma_desc_pool; |
636 | slot->async_tx.phys = | 637 | slot->async_tx.phys = |
637 | (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | 638 | (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; |
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 06cafe1ef521..977b592e976b 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -126,9 +126,8 @@ struct mv_xor_chan { | |||
126 | * @idx: pool index | 126 | * @idx: pool index |
127 | * @unmap_src_cnt: number of xor sources | 127 | * @unmap_src_cnt: number of xor sources |
128 | * @unmap_len: transaction bytecount | 128 | * @unmap_len: transaction bytecount |
129 | * @tx_list: list of slots that make up a multi-descriptor transaction | ||
129 | * @async_tx: support for the async_tx api | 130 | * @async_tx: support for the async_tx api |
130 | * @group_list: list of slots that make up a multi-descriptor transaction | ||
131 | * for example transfer lengths larger than the supported hw max | ||
132 | * @xor_check_result: result of zero sum | 131 | * @xor_check_result: result of zero sum |
133 | * @crc32_result: result crc calculation | 132 | * @crc32_result: result crc calculation |
134 | */ | 133 | */ |
@@ -145,6 +144,7 @@ struct mv_xor_desc_slot { | |||
145 | u16 unmap_src_cnt; | 144 | u16 unmap_src_cnt; |
146 | u32 value; | 145 | u32 value; |
147 | size_t unmap_len; | 146 | size_t unmap_len; |
147 | struct list_head tx_list; | ||
148 | struct dma_async_tx_descriptor async_tx; | 148 | struct dma_async_tx_descriptor async_tx; |
149 | union { | 149 | union { |
150 | u32 *xor_check_result; | 150 | u32 *xor_check_result; |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c new file mode 100644 index 000000000000..b3b065c4e5c1 --- /dev/null +++ b/drivers/dma/shdma.c | |||
@@ -0,0 +1,786 @@ | |||
1 | /* | ||
2 | * Renesas SuperH DMA Engine support | ||
3 | * | ||
4 | * based on drivers/dma/fsldma.c | ||
5 | * | ||
6 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
7 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
8 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
9 | * | ||
10 | * This is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * - The SuperH DMAC has no hardware descriptor chaining mode. | ||
16 | * - The maximum DMA transfer size is 16MB. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/init.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/dmaengine.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/dma-mapping.h> | ||
26 | #include <linux/dmapool.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <cpu/dma.h> | ||
29 | #include <asm/dma-sh.h> | ||
30 | #include "shdma.h" | ||
31 | |||
32 | /* DMA descriptor control */ | ||
33 | #define DESC_LAST (-1) | ||
34 | #define DESC_COMP (1) | ||
35 | #define DESC_NCOMP (0) | ||
36 | |||
37 | #define NR_DESCS_PER_CHANNEL 32 | ||
38 | /* | ||
39 | * Define the default configuration for dual address memory-memory transfer. | ||
40 | * The 0x400 value represents auto-request, external->external. | ||
41 | * | ||
42 | * This driver defaults to 4-byte burst mode. | ||
43 | * To change the mode, change the value of RS_DEFAULT | ||
44 | * (e.g. 1-byte burst mode -> (RS_DUAL & ~TS_32)). | ||
45 | */ | ||
46 | #define RS_DEFAULT (RS_DUAL) | ||
47 | |||
48 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) | ||
49 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | ||
50 | { | ||
51 | ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); | ||
52 | } | ||
53 | |||
54 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | ||
55 | { | ||
56 | return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); | ||
57 | } | ||
58 | |||
59 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
60 | { | ||
61 | u32 chcr = RS_DEFAULT; /* default is DUAL mode */ | ||
62 | sh_dmae_writel(sh_chan, chcr, CHCR); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * Reset DMA controller | ||
67 | * | ||
68 | * SH7780 has two DMAOR registers | ||
69 | */ | ||
70 | static void sh_dmae_ctl_stop(int id) | ||
71 | { | ||
72 | unsigned short dmaor = dmaor_read_reg(id); | ||
73 | |||
74 | dmaor &= ~(DMAOR_NMIF | DMAOR_AE); | ||
75 | dmaor_write_reg(id, dmaor); | ||
76 | } | ||
77 | |||
78 | static int sh_dmae_rst(int id) | ||
79 | { | ||
80 | unsigned short dmaor; | ||
81 | |||
82 | sh_dmae_ctl_stop(id); | ||
83 | dmaor = (dmaor_read_reg(id)|DMAOR_INIT); | ||
84 | |||
85 | dmaor_write_reg(id, dmaor); | ||
86 | if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) { | ||
87 | pr_warning("dma-sh: Can't initialize DMAOR.\n"); | ||
88 | return -EINVAL; | ||
89 | } | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static int dmae_is_idle(struct sh_dmae_chan *sh_chan) | ||
94 | { | ||
95 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | ||
96 | if (chcr & CHCR_DE) { | ||
97 | if (!(chcr & CHCR_TE)) | ||
98 | return -EBUSY; /* working */ | ||
99 | } | ||
100 | return 0; /* waiting */ | ||
101 | } | ||
102 | |||
103 | static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) | ||
104 | { | ||
105 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | ||
106 | return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; | ||
107 | } | ||
108 | |||
109 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw) | ||
110 | { | ||
111 | sh_dmae_writel(sh_chan, hw.sar, SAR); | ||
112 | sh_dmae_writel(sh_chan, hw.dar, DAR); | ||
113 | sh_dmae_writel(sh_chan, | ||
114 | (hw.tcr >> calc_xmit_shift(sh_chan)), TCR); | ||
115 | } | ||
116 | |||
117 | static void dmae_start(struct sh_dmae_chan *sh_chan) | ||
118 | { | ||
119 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | ||
120 | |||
121 | chcr |= (CHCR_DE|CHCR_IE); | ||
122 | sh_dmae_writel(sh_chan, chcr, CHCR); | ||
123 | } | ||
124 | |||
125 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | ||
126 | { | ||
127 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | ||
128 | |||
129 | chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); | ||
130 | sh_dmae_writel(sh_chan, chcr, CHCR); | ||
131 | } | ||
132 | |||
133 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | ||
134 | { | ||
135 | int ret = dmae_is_idle(sh_chan); | ||
136 | /* CHCR cannot be written while the DMA is active */ | ||
137 | if (ret) | ||
138 | return ret; | ||
139 | |||
140 | sh_dmae_writel(sh_chan, val, CHCR); | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | #define DMARS1_ADDR 0x04 | ||
145 | #define DMARS2_ADDR 0x08 | ||
146 | #define DMARS_SHIFT 8 | ||
147 | #define DMARS_CHAN_MSK 0x01 | ||
148 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | ||
149 | { | ||
150 | u32 addr; | ||
151 | int shift = 0; | ||
152 | int ret = dmae_is_idle(sh_chan); | ||
153 | if (ret) | ||
154 | return ret; | ||
155 | |||
156 | if (sh_chan->id & DMARS_CHAN_MSK) | ||
157 | shift = DMARS_SHIFT; | ||
158 | |||
159 | switch (sh_chan->id) { | ||
160 | /* DMARS0 */ | ||
161 | case 0: | ||
162 | case 1: | ||
163 | addr = SH_DMARS_BASE; | ||
164 | break; | ||
165 | /* DMARS1 */ | ||
166 | case 2: | ||
167 | case 3: | ||
168 | addr = (SH_DMARS_BASE + DMARS1_ADDR); | ||
169 | break; | ||
170 | /* DMARS2 */ | ||
171 | case 4: | ||
172 | case 5: | ||
173 | addr = (SH_DMARS_BASE + DMARS2_ADDR); | ||
174 | break; | ||
175 | default: | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | |||
179 | ctrl_outw((val << shift) | | ||
180 | (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)), | ||
181 | addr); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | ||
187 | { | ||
188 | struct sh_desc *desc = tx_to_sh_desc(tx); | ||
189 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); | ||
190 | dma_cookie_t cookie; | ||
191 | |||
192 | spin_lock_bh(&sh_chan->desc_lock); | ||
193 | |||
194 | cookie = sh_chan->common.cookie; | ||
195 | cookie++; | ||
196 | if (cookie < 0) | ||
197 | cookie = 1; | ||
198 | |||
199 | /* A lone descriptor already carries the -EBUSY end marker; keep it */ | ||
200 | if (desc->async_tx.cookie != -EBUSY) | ||
201 | desc->async_tx.cookie = cookie; | ||
202 | sh_chan->common.cookie = desc->async_tx.cookie; | ||
203 | |||
204 | list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev); | ||
205 | |||
206 | spin_unlock_bh(&sh_chan->desc_lock); | ||
207 | |||
208 | return cookie; | ||
209 | } | ||
210 | |||
211 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | ||
212 | { | ||
213 | struct sh_desc *desc, *_desc, *ret = NULL; | ||
214 | |||
215 | spin_lock_bh(&sh_chan->desc_lock); | ||
216 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) { | ||
217 | if (async_tx_test_ack(&desc->async_tx)) { | ||
218 | list_del(&desc->node); | ||
219 | ret = desc; | ||
220 | break; | ||
221 | } | ||
222 | } | ||
223 | spin_unlock_bh(&sh_chan->desc_lock); | ||
224 | |||
225 | return ret; | ||
226 | } | ||
227 | |||
228 | static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc) | ||
229 | { | ||
230 | if (desc) { | ||
231 | spin_lock_bh(&sh_chan->desc_lock); | ||
232 | |||
233 | list_splice_init(&desc->tx_list, &sh_chan->ld_free); | ||
234 | list_add(&desc->node, &sh_chan->ld_free); | ||
235 | |||
236 | spin_unlock_bh(&sh_chan->desc_lock); | ||
237 | } | ||
238 | } | ||
239 | |||
240 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | ||
241 | { | ||
242 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
243 | struct sh_desc *desc; | ||
244 | |||
245 | spin_lock_bh(&sh_chan->desc_lock); | ||
246 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { | ||
247 | spin_unlock_bh(&sh_chan->desc_lock); | ||
248 | desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); | ||
249 | if (!desc) { | ||
250 | spin_lock_bh(&sh_chan->desc_lock); | ||
251 | break; | ||
252 | } | ||
253 | dma_async_tx_descriptor_init(&desc->async_tx, | ||
254 | &sh_chan->common); | ||
255 | desc->async_tx.tx_submit = sh_dmae_tx_submit; | ||
256 | desc->async_tx.flags = DMA_CTRL_ACK; | ||
257 | INIT_LIST_HEAD(&desc->tx_list); | ||
258 | sh_dmae_put_desc(sh_chan, desc); | ||
259 | |||
260 | spin_lock_bh(&sh_chan->desc_lock); | ||
261 | sh_chan->descs_allocated++; | ||
262 | } | ||
263 | spin_unlock_bh(&sh_chan->desc_lock); | ||
264 | |||
265 | return sh_chan->descs_allocated; | ||
266 | } | ||
267 | |||
268 | /* | ||
269 | * sh_dmae_free_chan_resources - Free all resources of the channel. | ||
270 | */ | ||
271 | static void sh_dmae_free_chan_resources(struct dma_chan *chan) | ||
272 | { | ||
273 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
274 | struct sh_desc *desc, *_desc; | ||
275 | LIST_HEAD(list); | ||
276 | |||
277 | BUG_ON(!list_empty(&sh_chan->ld_queue)); | ||
278 | spin_lock_bh(&sh_chan->desc_lock); | ||
279 | |||
280 | list_splice_init(&sh_chan->ld_free, &list); | ||
281 | sh_chan->descs_allocated = 0; | ||
282 | |||
283 | spin_unlock_bh(&sh_chan->desc_lock); | ||
284 | |||
285 | list_for_each_entry_safe(desc, _desc, &list, node) | ||
286 | kfree(desc); | ||
287 | } | ||
288 | |||
289 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | ||
290 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
291 | size_t len, unsigned long flags) | ||
292 | { | ||
293 | struct sh_dmae_chan *sh_chan; | ||
294 | struct sh_desc *first = NULL, *prev = NULL, *new; | ||
295 | size_t copy_size; | ||
296 | |||
297 | if (!chan) | ||
298 | return NULL; | ||
299 | |||
300 | if (!len) | ||
301 | return NULL; | ||
302 | |||
303 | sh_chan = to_sh_chan(chan); | ||
304 | |||
305 | do { | ||
306 | /* Allocate the link descriptor from DMA pool */ | ||
307 | new = sh_dmae_get_desc(sh_chan); | ||
308 | if (!new) { | ||
309 | dev_err(sh_chan->dev, | ||
310 | "No free memory for link descriptor\n"); | ||
311 | goto err_get_desc; | ||
312 | } | ||
313 | |||
314 | copy_size = min(len, (size_t)SH_DMA_TCR_MAX); | ||
315 | |||
316 | new->hw.sar = dma_src; | ||
317 | new->hw.dar = dma_dest; | ||
318 | new->hw.tcr = copy_size; | ||
319 | if (!first) | ||
320 | first = new; | ||
321 | |||
322 | new->mark = DESC_NCOMP; | ||
323 | async_tx_ack(&new->async_tx); | ||
324 | |||
325 | prev = new; | ||
326 | len -= copy_size; | ||
327 | dma_src += copy_size; | ||
328 | dma_dest += copy_size; | ||
329 | /* Insert the link descriptor to the LD ring */ | ||
330 | list_add_tail(&new->node, &first->tx_list); | ||
331 | } while (len); | ||
332 | |||
333 | new->async_tx.flags = flags; /* client is in control of this ack */ | ||
334 | new->async_tx.cookie = -EBUSY; /* Last desc */ | ||
335 | |||
336 | return &first->async_tx; | ||
337 | |||
338 | err_get_desc: | ||
339 | sh_dmae_put_desc(sh_chan, first); | ||
340 | return NULL; | ||
341 | |||
342 | } | ||
343 | |||
344 | /* | ||
345 | * sh_dmae_chan_ld_cleanup - Clean up link descriptors | ||
346 | * | ||
347 | * This function cleans up the ld_queue of the DMA channel. | ||
348 | */ | ||
349 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan) | ||
350 | { | ||
351 | struct sh_desc *desc, *_desc; | ||
352 | |||
353 | spin_lock_bh(&sh_chan->desc_lock); | ||
354 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { | ||
355 | dma_async_tx_callback callback; | ||
356 | void *callback_param; | ||
357 | |||
358 | /* data not yet transferred */ | ||
359 | if (desc->mark == DESC_NCOMP) | ||
360 | break; | ||
361 | |||
362 | /* descriptor whose data has been transferred */ | ||
363 | callback = desc->async_tx.callback; | ||
364 | callback_param = desc->async_tx.callback_param; | ||
365 | |||
366 | /* Remove from ld_queue list */ | ||
367 | list_splice_init(&desc->tx_list, &sh_chan->ld_free); | ||
368 | |||
369 | dev_dbg(sh_chan->dev, "link descriptor %p will be recycled.\n", | ||
370 | desc); | ||
371 | |||
372 | list_move(&desc->node, &sh_chan->ld_free); | ||
373 | /* Run the link descriptor callback function */ | ||
374 | if (callback) { | ||
375 | spin_unlock_bh(&sh_chan->desc_lock); | ||
376 | dev_dbg(sh_chan->dev, "link descriptor %p callback\n", | ||
377 | desc); | ||
378 | callback(callback_param); | ||
379 | spin_lock_bh(&sh_chan->desc_lock); | ||
380 | } | ||
381 | } | ||
382 | spin_unlock_bh(&sh_chan->desc_lock); | ||
383 | } | ||
384 | |||
385 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | ||
386 | { | ||
387 | struct list_head *ld_node; | ||
388 | struct sh_dmae_regs hw; | ||
389 | |||
390 | /* DMA work check */ | ||
391 | if (dmae_is_idle(sh_chan)) | ||
392 | return; | ||
393 | |||
394 | /* Find the first un-transferred descriptor */ | ||
395 | for (ld_node = sh_chan->ld_queue.next; | ||
396 | (ld_node != &sh_chan->ld_queue) | ||
397 | && (to_sh_desc(ld_node)->mark == DESC_COMP); | ||
398 | ld_node = ld_node->next) | ||
399 | cpu_relax(); | ||
400 | |||
401 | if (ld_node != &sh_chan->ld_queue) { | ||
402 | /* Get the ld start address from ld_queue */ | ||
403 | hw = to_sh_desc(ld_node)->hw; | ||
404 | dmae_set_reg(sh_chan, hw); | ||
405 | dmae_start(sh_chan); | ||
406 | } | ||
407 | } | ||
408 | |||
409 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) | ||
410 | { | ||
411 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
412 | sh_chan_xfer_ld_queue(sh_chan); | ||
413 | } | ||
414 | |||
415 | static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, | ||
416 | dma_cookie_t cookie, | ||
417 | dma_cookie_t *done, | ||
418 | dma_cookie_t *used) | ||
419 | { | ||
420 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
421 | dma_cookie_t last_used; | ||
422 | dma_cookie_t last_complete; | ||
423 | |||
424 | sh_dmae_chan_ld_cleanup(sh_chan); | ||
425 | |||
426 | last_used = chan->cookie; | ||
427 | last_complete = sh_chan->completed_cookie; | ||
428 | if (last_complete == -EBUSY) | ||
429 | last_complete = last_used; | ||
430 | |||
431 | if (done) | ||
432 | *done = last_complete; | ||
433 | |||
434 | if (used) | ||
435 | *used = last_used; | ||
436 | |||
437 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
438 | } | ||
439 | |||
440 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) | ||
441 | { | ||
442 | irqreturn_t ret = IRQ_NONE; | ||
443 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | ||
444 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | ||
445 | |||
446 | if (chcr & CHCR_TE) { | ||
447 | /* DMA stop */ | ||
448 | dmae_halt(sh_chan); | ||
449 | |||
450 | ret = IRQ_HANDLED; | ||
451 | tasklet_schedule(&sh_chan->tasklet); | ||
452 | } | ||
453 | |||
454 | return ret; | ||
455 | } | ||
456 | |||
457 | #if defined(CONFIG_CPU_SH4) | ||
458 | static irqreturn_t sh_dmae_err(int irq, void *data) | ||
459 | { | ||
460 | int err = 0; | ||
461 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; | ||
462 | |||
463 | /* IRQ Multi */ | ||
464 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | ||
465 | int cnt = 0; | ||
466 | switch (irq) { | ||
467 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | ||
468 | case DMTE6_IRQ: | ||
469 | cnt++; | ||
470 | #endif | ||
471 | case DMTE0_IRQ: | ||
472 | if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { | ||
473 | disable_irq(irq); | ||
474 | return IRQ_HANDLED; | ||
475 | } | ||
476 | default: | ||
477 | return IRQ_NONE; | ||
478 | } | ||
479 | } else { | ||
480 | /* reset dma controller */ | ||
481 | err = sh_dmae_rst(0); | ||
482 | if (err) | ||
483 | return IRQ_NONE; | ||
484 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | ||
485 | err = sh_dmae_rst(1); | ||
486 | if (err) | ||
487 | return IRQ_NONE; | ||
488 | } | ||
489 | disable_irq(irq); | ||
490 | return IRQ_HANDLED; | ||
491 | } | ||
492 | } | ||
493 | #endif | ||
494 | |||
495 | static void dmae_do_tasklet(unsigned long data) | ||
496 | { | ||
497 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | ||
498 | struct sh_desc *desc, *_desc, *cur_desc = NULL; | ||
499 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | ||
500 | list_for_each_entry_safe(desc, _desc, | ||
501 | &sh_chan->ld_queue, node) { | ||
502 | if ((desc->hw.sar + desc->hw.tcr) == sar_buf) { | ||
503 | cur_desc = desc; | ||
504 | break; | ||
505 | } | ||
506 | } | ||
507 | |||
508 | if (cur_desc) { | ||
509 | switch (cur_desc->async_tx.cookie) { | ||
510 | case 0: /* intermediate descriptor */ | ||
511 | break; | ||
512 | case -EBUSY: /* last desc */ | ||
513 | sh_chan->completed_cookie = | ||
514 | cur_desc->async_tx.cookie; | ||
515 | break; | ||
516 | default: /* first descriptor (cookie > 0) */ | ||
517 | sh_chan->completed_cookie = | ||
518 | cur_desc->async_tx.cookie - 1; | ||
519 | break; | ||
520 | } | ||
521 | cur_desc->mark = DESC_COMP; | ||
522 | } | ||
523 | /* Next desc */ | ||
524 | sh_chan_xfer_ld_queue(sh_chan); | ||
525 | sh_dmae_chan_ld_cleanup(sh_chan); | ||
526 | } | ||
527 | |||
528 | static unsigned int get_dmae_irq(unsigned int id) | ||
529 | { | ||
530 | unsigned int irq = 0; | ||
531 | if (id < ARRAY_SIZE(dmte_irq_map)) | ||
532 | irq = dmte_irq_map[id]; | ||
533 | return irq; | ||
534 | } | ||
535 | |||
536 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | ||
537 | { | ||
538 | int err; | ||
539 | unsigned int irq = get_dmae_irq(id); | ||
540 | unsigned long irqflags = IRQF_DISABLED; | ||
541 | struct sh_dmae_chan *new_sh_chan; | ||
542 | |||
543 | /* alloc channel */ | ||
544 | new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | ||
545 | if (!new_sh_chan) { | ||
546 | dev_err(shdev->common.dev, "No free memory for allocating " | ||
547 | "dma channels!\n"); | ||
548 | return -ENOMEM; | ||
549 | } | ||
550 | |||
551 | new_sh_chan->dev = shdev->common.dev; | ||
552 | new_sh_chan->id = id; | ||
553 | |||
554 | /* Init DMA tasklet */ | ||
555 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | ||
556 | (unsigned long)new_sh_chan); | ||
557 | |||
558 | /* Init the channel */ | ||
559 | dmae_init(new_sh_chan); | ||
560 | |||
561 | spin_lock_init(&new_sh_chan->desc_lock); | ||
562 | |||
563 | /* Init descriptor management lists */ | ||
564 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); | ||
565 | INIT_LIST_HEAD(&new_sh_chan->ld_free); | ||
566 | |||
567 | /* copy struct dma_device */ | ||
568 | new_sh_chan->common.device = &shdev->common; | ||
569 | |||
570 | /* Add the channel to DMA device channel list */ | ||
571 | list_add_tail(&new_sh_chan->common.device_node, | ||
572 | &shdev->common.channels); | ||
573 | shdev->common.chancnt++; | ||
574 | |||
575 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | ||
576 | irqflags = IRQF_SHARED; | ||
577 | #if defined(DMTE6_IRQ) | ||
578 | if (irq >= DMTE6_IRQ) | ||
579 | irq = DMTE6_IRQ; | ||
580 | else | ||
581 | #endif | ||
582 | irq = DMTE0_IRQ; | ||
583 | } | ||
584 | |||
585 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | ||
586 | "sh-dmae%d", new_sh_chan->id); | ||
587 | |||
588 | /* set up channel irq */ | ||
589 | err = request_irq(irq, &sh_dmae_interrupt, | ||
590 | irqflags, new_sh_chan->dev_id, new_sh_chan); | ||
591 | if (err) { | ||
592 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " | ||
593 | "with return %d\n", id, err); | ||
594 | goto err_no_irq; | ||
595 | } | ||
596 | |||
597 | /* CHCR register control function */ | ||
598 | new_sh_chan->set_chcr = dmae_set_chcr; | ||
599 | /* DMARS register control function */ | ||
600 | new_sh_chan->set_dmars = dmae_set_dmars; | ||
601 | |||
602 | shdev->chan[id] = new_sh_chan; | ||
603 | return 0; | ||
604 | |||
605 | err_no_irq: | ||
606 | /* remove from dmaengine device node */ | ||
607 | list_del(&new_sh_chan->common.device_node); | ||
608 | kfree(new_sh_chan); | ||
609 | return err; | ||
610 | } | ||
611 | |||
612 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | ||
613 | { | ||
614 | int i; | ||
615 | |||
616 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { | ||
617 | if (shdev->chan[i]) { | ||
618 | struct sh_dmae_chan *shchan = shdev->chan[i]; | ||
619 | if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) | ||
620 | free_irq(dmte_irq_map[i], shchan); | ||
621 | |||
622 | list_del(&shchan->common.device_node); | ||
623 | kfree(shchan); | ||
624 | shdev->chan[i] = NULL; | ||
625 | } | ||
626 | } | ||
627 | shdev->common.chancnt = 0; | ||
628 | } | ||
629 | |||
630 | static int __init sh_dmae_probe(struct platform_device *pdev) | ||
631 | { | ||
632 | int err = 0, cnt, ecnt; | ||
633 | unsigned long irqflags = IRQF_DISABLED; | ||
634 | #if defined(CONFIG_CPU_SH4) | ||
635 | int eirq[] = { DMAE0_IRQ, | ||
636 | #if defined(DMAE1_IRQ) | ||
637 | DMAE1_IRQ | ||
638 | #endif | ||
639 | }; | ||
640 | #endif | ||
641 | struct sh_dmae_device *shdev; | ||
642 | |||
643 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | ||
644 | if (!shdev) { | ||
645 | dev_err(&pdev->dev, "Not enough memory\n"); | ||
646 | err = -ENOMEM; | ||
647 | goto shdev_err; | ||
648 | } | ||
649 | |||
650 | /* get platform data */ | ||
651 | if (!pdev->dev.platform_data) { | ||
652 | err = -ENODEV; goto rst_err; } | ||
653 | |||
654 | /* platform data */ | ||
655 | memcpy(&shdev->pdata, pdev->dev.platform_data, | ||
656 | sizeof(struct sh_dmae_pdata)); | ||
657 | |||
658 | /* reset dma controller */ | ||
659 | err = sh_dmae_rst(0); | ||
660 | if (err) | ||
661 | goto rst_err; | ||
662 | |||
663 | /* SH7780/SH7785/SH7723 have a second DMAOR (DMAOR1) */ | ||
664 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | ||
665 | err = sh_dmae_rst(1); | ||
666 | if (err) | ||
667 | goto rst_err; | ||
668 | } | ||
669 | |||
670 | INIT_LIST_HEAD(&shdev->common.channels); | ||
671 | |||
672 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | ||
673 | shdev->common.device_alloc_chan_resources | ||
674 | = sh_dmae_alloc_chan_resources; | ||
675 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; | ||
676 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; | ||
677 | shdev->common.device_is_tx_complete = sh_dmae_is_complete; | ||
678 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; | ||
679 | shdev->common.dev = &pdev->dev; | ||
680 | |||
681 | #if defined(CONFIG_CPU_SH4) | ||
682 | /* SHDMA_MIX_IRQ: the address-error IRQ shares the DMTE lines */ | ||
683 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | ||
684 | irqflags = IRQF_SHARED; | ||
685 | eirq[0] = DMTE0_IRQ; | ||
686 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | ||
687 | eirq[1] = DMTE6_IRQ; | ||
688 | #endif | ||
689 | } | ||
690 | |||
691 | for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { | ||
692 | err = request_irq(eirq[ecnt], sh_dmae_err, | ||
693 | irqflags, "DMAC Address Error", shdev); | ||
694 | if (err) { | ||
695 | dev_err(&pdev->dev, "DMA device request_irq " | ||
696 | "error (irq %d) with return %d\n", | ||
697 | eirq[ecnt], err); | ||
698 | goto eirq_err; | ||
699 | } | ||
700 | } | ||
701 | #endif /* CONFIG_CPU_SH4 */ | ||
702 | |||
703 | /* Create DMA Channel */ | ||
704 | for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) { | ||
705 | err = sh_dmae_chan_probe(shdev, cnt); | ||
706 | if (err) | ||
707 | goto chan_probe_err; | ||
708 | } | ||
709 | |||
710 | platform_set_drvdata(pdev, shdev); | ||
711 | dma_async_device_register(&shdev->common); | ||
712 | |||
713 | return err; | ||
714 | |||
715 | chan_probe_err: | ||
716 | sh_dmae_chan_remove(shdev); | ||
717 | |||
718 | eirq_err: | ||
719 | for (ecnt-- ; ecnt >= 0; ecnt--) | ||
720 | free_irq(eirq[ecnt], shdev); | ||
721 | |||
722 | rst_err: | ||
723 | kfree(shdev); | ||
724 | |||
725 | shdev_err: | ||
726 | return err; | ||
727 | } | ||
728 | |||
729 | static int __exit sh_dmae_remove(struct platform_device *pdev) | ||
730 | { | ||
731 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
732 | |||
733 | dma_async_device_unregister(&shdev->common); | ||
734 | |||
735 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | ||
736 | free_irq(DMTE0_IRQ, shdev); | ||
737 | #if defined(DMTE6_IRQ) | ||
738 | free_irq(DMTE6_IRQ, shdev); | ||
739 | #endif | ||
740 | } | ||
741 | |||
742 | /* channel data remove */ | ||
743 | sh_dmae_chan_remove(shdev); | ||
744 | |||
745 | if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) { | ||
746 | free_irq(DMAE0_IRQ, shdev); | ||
747 | #if defined(DMAE1_IRQ) | ||
748 | free_irq(DMAE1_IRQ, shdev); | ||
749 | #endif | ||
750 | } | ||
751 | kfree(shdev); | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static void sh_dmae_shutdown(struct platform_device *pdev) | ||
757 | { | ||
758 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
759 | sh_dmae_ctl_stop(0); | ||
760 | if (shdev->pdata.mode & SHDMA_DMAOR1) | ||
761 | sh_dmae_ctl_stop(1); | ||
762 | } | ||
763 | |||
764 | static struct platform_driver sh_dmae_driver = { | ||
765 | .remove = __exit_p(sh_dmae_remove), | ||
766 | .shutdown = sh_dmae_shutdown, | ||
767 | .driver = { | ||
768 | .name = "sh-dma-engine", | ||
769 | }, | ||
770 | }; | ||
771 | |||
772 | static int __init sh_dmae_init(void) | ||
773 | { | ||
774 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); | ||
775 | } | ||
776 | module_init(sh_dmae_init); | ||
777 | |||
778 | static void __exit sh_dmae_exit(void) | ||
779 | { | ||
780 | platform_driver_unregister(&sh_dmae_driver); | ||
781 | } | ||
782 | module_exit(sh_dmae_exit); | ||
783 | |||
784 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | ||
785 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | ||
786 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h new file mode 100644 index 000000000000..2b4bc15a2c0a --- /dev/null +++ b/drivers/dma/shdma.h | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Renesas SuperH DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
5 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
6 | * | ||
7 | * This is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef __DMA_SHDMA_H | ||
14 | #define __DMA_SHDMA_H | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/dmapool.h> | ||
18 | #include <linux/dmaengine.h> | ||
19 | |||
20 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ | ||
21 | |||
22 | struct sh_dmae_regs { | ||
23 | u32 sar; /* SAR / source address */ | ||
24 | u32 dar; /* DAR / destination address */ | ||
25 | u32 tcr; /* TCR / transfer count */ | ||
26 | }; | ||
27 | |||
28 | struct sh_desc { | ||
29 | struct list_head tx_list; | ||
30 | struct sh_dmae_regs hw; | ||
31 | struct list_head node; | ||
32 | struct dma_async_tx_descriptor async_tx; | ||
33 | int mark; | ||
34 | }; | ||
35 | |||
36 | struct sh_dmae_chan { | ||
37 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
38 | spinlock_t desc_lock; /* Descriptor operation lock */ | ||
39 | struct list_head ld_queue; /* Link descriptors queue */ | ||
40 | struct list_head ld_free; /* Link descriptors free */ | ||
41 | struct dma_chan common; /* DMA common channel */ | ||
42 | struct device *dev; /* Channel device */ | ||
43 | struct tasklet_struct tasklet; /* Tasklet */ | ||
44 | int descs_allocated; /* desc count */ | ||
45 | int id; /* Raw id of this channel */ | ||
46 | char dev_id[16]; /* unique channel name within this DMAC */ | ||
47 | |||
48 | /* Set chcr */ | ||
49 | int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs); | ||
50 | /* Set DMA resource */ | ||
51 | int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res); | ||
52 | }; | ||
53 | |||
54 | struct sh_dmae_device { | ||
55 | struct dma_device common; | ||
56 | struct sh_dmae_chan *chan[MAX_DMA_CHANNELS]; | ||
57 | struct sh_dmae_pdata pdata; | ||
58 | }; | ||
59 | |||
60 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) | ||
61 | #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) | ||
62 | #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) | ||
63 | |||
64 | #endif /* __DMA_SHDMA_H */ | ||
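Editor's note: because the driver registers a DMA_MEMCPY capability with the dmaengine core, any generic client of this kernel generation can use it. A minimal sketch, assuming dst, src and len are DMA addresses and a length already prepared by the caller; a real client would use a completion callback instead of polling:

#include <linux/dmaengine.h>

static int memcpy_via_dmaengine(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Any memcpy-capable DMAC may be handed out, not necessarily this one. */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = tx->tx_submit(tx);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; illustrative only. */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_IN_PROGRESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}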
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 7837930146a4..fb6bb64e8861 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -180,9 +180,8 @@ static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc) | |||
180 | 180 | ||
181 | static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) | 181 | static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) |
182 | { | 182 | { |
183 | if (!list_empty(&desc->txd.tx_list)) | 183 | if (!list_empty(&desc->tx_list)) |
184 | desc = list_entry(desc->txd.tx_list.prev, | 184 | desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node); |
185 | struct txx9dmac_desc, desc_node); | ||
186 | return desc; | 185 | return desc; |
187 | } | 186 | } |
188 | 187 | ||
@@ -197,6 +196,7 @@ static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc, | |||
197 | desc = kzalloc(sizeof(*desc), flags); | 196 | desc = kzalloc(sizeof(*desc), flags); |
198 | if (!desc) | 197 | if (!desc) |
199 | return NULL; | 198 | return NULL; |
199 | INIT_LIST_HEAD(&desc->tx_list); | ||
200 | dma_async_tx_descriptor_init(&desc->txd, &dc->chan); | 200 | dma_async_tx_descriptor_init(&desc->txd, &dc->chan); |
201 | desc->txd.tx_submit = txx9dmac_tx_submit; | 201 | desc->txd.tx_submit = txx9dmac_tx_submit; |
202 | /* txd.flags will be overwritten in prep funcs */ | 202 | /* txd.flags will be overwritten in prep funcs */ |
@@ -245,7 +245,7 @@ static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, | |||
245 | struct txx9dmac_dev *ddev = dc->ddev; | 245 | struct txx9dmac_dev *ddev = dc->ddev; |
246 | struct txx9dmac_desc *child; | 246 | struct txx9dmac_desc *child; |
247 | 247 | ||
248 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 248 | list_for_each_entry(child, &desc->tx_list, desc_node) |
249 | dma_sync_single_for_cpu(chan2parent(&dc->chan), | 249 | dma_sync_single_for_cpu(chan2parent(&dc->chan), |
250 | child->txd.phys, ddev->descsize, | 250 | child->txd.phys, ddev->descsize, |
251 | DMA_TO_DEVICE); | 251 | DMA_TO_DEVICE); |
@@ -267,11 +267,11 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc, | |||
267 | txx9dmac_sync_desc_for_cpu(dc, desc); | 267 | txx9dmac_sync_desc_for_cpu(dc, desc); |
268 | 268 | ||
269 | spin_lock_bh(&dc->lock); | 269 | spin_lock_bh(&dc->lock); |
270 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 270 | list_for_each_entry(child, &desc->tx_list, desc_node) |
271 | dev_vdbg(chan2dev(&dc->chan), | 271 | dev_vdbg(chan2dev(&dc->chan), |
272 | "moving child desc %p to freelist\n", | 272 | "moving child desc %p to freelist\n", |
273 | child); | 273 | child); |
274 | list_splice_init(&desc->txd.tx_list, &dc->free_list); | 274 | list_splice_init(&desc->tx_list, &dc->free_list); |
275 | dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", | 275 | dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", |
276 | desc); | 276 | desc); |
277 | list_add(&desc->desc_node, &dc->free_list); | 277 | list_add(&desc->desc_node, &dc->free_list); |
@@ -429,7 +429,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | |||
429 | param = txd->callback_param; | 429 | param = txd->callback_param; |
430 | 430 | ||
431 | txx9dmac_sync_desc_for_cpu(dc, desc); | 431 | txx9dmac_sync_desc_for_cpu(dc, desc); |
432 | list_splice_init(&txd->tx_list, &dc->free_list); | 432 | list_splice_init(&desc->tx_list, &dc->free_list); |
433 | list_move(&desc->desc_node, &dc->free_list); | 433 | list_move(&desc->desc_node, &dc->free_list); |
434 | 434 | ||
435 | if (!ds) { | 435 | if (!ds) { |
@@ -571,7 +571,7 @@ static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr) | |||
571 | "Bad descriptor submitted for DMA! (cookie: %d)\n", | 571 | "Bad descriptor submitted for DMA! (cookie: %d)\n", |
572 | bad_desc->txd.cookie); | 572 | bad_desc->txd.cookie); |
573 | txx9dmac_dump_desc(dc, &bad_desc->hwdesc); | 573 | txx9dmac_dump_desc(dc, &bad_desc->hwdesc); |
574 | list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) | 574 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
575 | txx9dmac_dump_desc(dc, &child->hwdesc); | 575 | txx9dmac_dump_desc(dc, &child->hwdesc); |
576 | /* Pretend the descriptor completed successfully */ | 576 | /* Pretend the descriptor completed successfully */ |
577 | txx9dmac_descriptor_complete(dc, bad_desc); | 577 | txx9dmac_descriptor_complete(dc, bad_desc); |
@@ -613,7 +613,7 @@ static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc) | |||
613 | return; | 613 | return; |
614 | } | 614 | } |
615 | 615 | ||
616 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 616 | list_for_each_entry(child, &desc->tx_list, desc_node) |
617 | if (desc_read_CHAR(dc, child) == chain) { | 617 | if (desc_read_CHAR(dc, child) == chain) { |
618 | /* Currently in progress */ | 618 | /* Currently in progress */ |
619 | if (csr & TXX9_DMA_CSR_ABCHC) | 619 | if (csr & TXX9_DMA_CSR_ABCHC) |
@@ -823,8 +823,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
823 | dma_sync_single_for_device(chan2parent(&dc->chan), | 823 | dma_sync_single_for_device(chan2parent(&dc->chan), |
824 | prev->txd.phys, ddev->descsize, | 824 | prev->txd.phys, ddev->descsize, |
825 | DMA_TO_DEVICE); | 825 | DMA_TO_DEVICE); |
826 | list_add_tail(&desc->desc_node, | 826 | list_add_tail(&desc->desc_node, &first->tx_list); |
827 | &first->txd.tx_list); | ||
828 | } | 827 | } |
829 | prev = desc; | 828 | prev = desc; |
830 | } | 829 | } |
@@ -919,8 +918,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
919 | prev->txd.phys, | 918 | prev->txd.phys, |
920 | ddev->descsize, | 919 | ddev->descsize, |
921 | DMA_TO_DEVICE); | 920 | DMA_TO_DEVICE); |
922 | list_add_tail(&desc->desc_node, | 921 | list_add_tail(&desc->desc_node, &first->tx_list); |
923 | &first->txd.tx_list); | ||
924 | } | 922 | } |
925 | prev = desc; | 923 | prev = desc; |
926 | } | 924 | } |
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h index c907ff01d276..365d42366b9f 100644 --- a/drivers/dma/txx9dmac.h +++ b/drivers/dma/txx9dmac.h | |||
@@ -231,6 +231,7 @@ struct txx9dmac_desc { | |||
231 | 231 | ||
232 | /* THEN values for driver housekeeping */ | 232 | /* THEN values for driver housekeeping */ |
233 | struct list_head desc_node ____cacheline_aligned; | 233 | struct list_head desc_node ____cacheline_aligned; |
234 | struct list_head tx_list; | ||
234 | struct dma_async_tx_descriptor txd; | 235 | struct dma_async_tx_descriptor txd; |
235 | size_t len; | 236 | size_t len; |
236 | }; | 237 | }; |
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index a3ca18e2d7cf..02127e59fe8e 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -133,6 +133,13 @@ config EDAC_I3000 | |||
133 | Support for error detection and correction on the Intel | 133 | Support for error detection and correction on the Intel |
134 | 3000 and 3010 server chipsets. | 134 | 3000 and 3010 server chipsets. |
135 | 135 | ||
136 | config EDAC_I3200 | ||
137 | tristate "Intel 3200" | ||
138 | depends on EDAC_MM_EDAC && PCI && X86 && EXPERIMENTAL | ||
139 | help | ||
140 | Support for error detection and correction on the Intel | ||
141 | 3200 and 3210 server chipsets. | ||
142 | |||
136 | config EDAC_X38 | 143 | config EDAC_X38 |
137 | tristate "Intel X38" | 144 | tristate "Intel X38" |
138 | depends on EDAC_MM_EDAC && PCI && X86 | 145 | depends on EDAC_MM_EDAC && PCI && X86 |
@@ -176,11 +183,11 @@ config EDAC_I5100 | |||
176 | San Clemente MCH. | 183 | San Clemente MCH. |
177 | 184 | ||
178 | config EDAC_MPC85XX | 185 | config EDAC_MPC85XX |
179 | tristate "Freescale MPC85xx" | 186 | tristate "Freescale MPC83xx / MPC85xx" |
180 | depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx | 187 | depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || MPC85xx) |
181 | help | 188 | help |
182 | Support for error detection and correction on the Freescale | 189 | Support for error detection and correction on the Freescale |
183 | MPC8560, MPC8540, MPC8548 | 190 | MPC8349, MPC8560, MPC8540, MPC8548 |
184 | 191 | ||
185 | config EDAC_MV64X60 | 192 | config EDAC_MV64X60 |
186 | tristate "Marvell MV64x60" | 193 | tristate "Marvell MV64x60" |
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index cfa033ce53a7..7a473bbe8abd 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile | |||
@@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o | |||
32 | obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o | 32 | obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o |
33 | obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o | 33 | obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o |
34 | obj-$(CONFIG_EDAC_I3000) += i3000_edac.o | 34 | obj-$(CONFIG_EDAC_I3000) += i3000_edac.o |
35 | obj-$(CONFIG_EDAC_I3200) += i3200_edac.o | ||
35 | obj-$(CONFIG_EDAC_X38) += x38_edac.o | 36 | obj-$(CONFIG_EDAC_X38) += x38_edac.o |
36 | obj-$(CONFIG_EDAC_I82860) += i82860_edac.o | 37 | obj-$(CONFIG_EDAC_I82860) += i82860_edac.o |
37 | obj-$(CONFIG_EDAC_R82600) += r82600_edac.o | 38 | obj-$(CONFIG_EDAC_R82600) += r82600_edac.o |
@@ -49,3 +50,4 @@ obj-$(CONFIG_EDAC_CELL) += cell_edac.o | |||
49 | obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o | 50 | obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o |
50 | obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o | 51 | obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o |
51 | obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o | 52 | obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o |
53 | |||
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index 8c54196b5aba..3d50274f1348 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c | |||
@@ -885,14 +885,14 @@ static int __devinit cpc925_probe(struct platform_device *pdev) | |||
885 | 885 | ||
886 | if (!devm_request_mem_region(&pdev->dev, | 886 | if (!devm_request_mem_region(&pdev->dev, |
887 | r->start, | 887 | r->start, |
888 | r->end - r->start + 1, | 888 | resource_size(r), |
889 | pdev->name)) { | 889 | pdev->name)) { |
890 | cpc925_printk(KERN_ERR, "Unable to request mem region\n"); | 890 | cpc925_printk(KERN_ERR, "Unable to request mem region\n"); |
891 | res = -EBUSY; | 891 | res = -EBUSY; |
892 | goto err1; | 892 | goto err1; |
893 | } | 893 | } |
894 | 894 | ||
895 | vbase = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1); | 895 | vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); |
896 | if (!vbase) { | 896 | if (!vbase) { |
897 | cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); | 897 | cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); |
898 | res = -ENOMEM; | 898 | res = -ENOMEM; |
@@ -953,7 +953,7 @@ err3: | |||
953 | cpc925_mc_exit(mci); | 953 | cpc925_mc_exit(mci); |
954 | edac_mc_free(mci); | 954 | edac_mc_free(mci); |
955 | err2: | 955 | err2: |
956 | devm_release_mem_region(&pdev->dev, r->start, r->end-r->start+1); | 956 | devm_release_mem_region(&pdev->dev, r->start, resource_size(r)); |
957 | err1: | 957 | err1: |
958 | devres_release_group(&pdev->dev, cpc925_probe); | 958 | devres_release_group(&pdev->dev, cpc925_probe); |
959 | out: | 959 | out: |
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index b02a6a69a8f0..d5e13c94714f 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c | |||
@@ -356,7 +356,6 @@ static void complete_edac_device_list_del(struct rcu_head *head) | |||
356 | 356 | ||
357 | edac_dev = container_of(head, struct edac_device_ctl_info, rcu); | 357 | edac_dev = container_of(head, struct edac_device_ctl_info, rcu); |
358 | INIT_LIST_HEAD(&edac_dev->link); | 358 | INIT_LIST_HEAD(&edac_dev->link); |
359 | complete(&edac_dev->removal_complete); | ||
360 | } | 359 | } |
361 | 360 | ||
362 | /* | 361 | /* |
@@ -369,10 +368,8 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info | |||
369 | *edac_device) | 368 | *edac_device) |
370 | { | 369 | { |
371 | list_del_rcu(&edac_device->link); | 370 | list_del_rcu(&edac_device->link); |
372 | |||
373 | init_completion(&edac_device->removal_complete); | ||
374 | call_rcu(&edac_device->rcu, complete_edac_device_list_del); | 371 | call_rcu(&edac_device->rcu, complete_edac_device_list_del); |
375 | wait_for_completion(&edac_device->removal_complete); | 372 | rcu_barrier(); |
376 | } | 373 | } |
377 | 374 | ||
378 | /* | 375 | /* |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 335b7ebdb11c..b629c41756f0 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -418,16 +418,14 @@ static void complete_mc_list_del(struct rcu_head *head) | |||
418 | 418 | ||
419 | mci = container_of(head, struct mem_ctl_info, rcu); | 419 | mci = container_of(head, struct mem_ctl_info, rcu); |
420 | INIT_LIST_HEAD(&mci->link); | 420 | INIT_LIST_HEAD(&mci->link); |
421 | complete(&mci->complete); | ||
422 | } | 421 | } |
423 | 422 | ||
424 | static void del_mc_from_global_list(struct mem_ctl_info *mci) | 423 | static void del_mc_from_global_list(struct mem_ctl_info *mci) |
425 | { | 424 | { |
426 | atomic_dec(&edac_handlers); | 425 | atomic_dec(&edac_handlers); |
427 | list_del_rcu(&mci->link); | 426 | list_del_rcu(&mci->link); |
428 | init_completion(&mci->complete); | ||
429 | call_rcu(&mci->rcu, complete_mc_list_del); | 427 | call_rcu(&mci->rcu, complete_mc_list_del); |
430 | wait_for_completion(&mci->complete); | 428 | rcu_barrier(); |
431 | } | 429 | } |
432 | 430 | ||
433 | /** | 431 | /** |
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index 30b585b1d60b..efb5d5650783 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c | |||
@@ -174,7 +174,6 @@ static void complete_edac_pci_list_del(struct rcu_head *head) | |||
174 | 174 | ||
175 | pci = container_of(head, struct edac_pci_ctl_info, rcu); | 175 | pci = container_of(head, struct edac_pci_ctl_info, rcu); |
176 | INIT_LIST_HEAD(&pci->link); | 176 | INIT_LIST_HEAD(&pci->link); |
177 | complete(&pci->complete); | ||
178 | } | 177 | } |
179 | 178 | ||
180 | /* | 179 | /* |
@@ -185,9 +184,8 @@ static void complete_edac_pci_list_del(struct rcu_head *head) | |||
185 | static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) | 184 | static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) |
186 | { | 185 | { |
187 | list_del_rcu(&pci->link); | 186 | list_del_rcu(&pci->link); |
188 | init_completion(&pci->complete); | ||
189 | call_rcu(&pci->rcu, complete_edac_pci_list_del); | 187 | call_rcu(&pci->rcu, complete_edac_pci_list_del); |
190 | wait_for_completion(&pci->complete); | 188 | rcu_barrier(); |
191 | } | 189 | } |
192 | 190 | ||
193 | #if 0 | 191 | #if 0 |
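Editor's note: the three edac core hunks above (edac_device.c, edac_mc.c, edac_pci.c) drop the per-object completion handshake and rely on rcu_barrier(), which blocks until every callback queued with call_rcu() has run. A sketch of the resulting pattern under a hypothetical my_obj type; the edac callbacks only re-init the list head, but this sketch folds the free into the callback to stay self-contained:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct list_head link;
	struct rcu_head rcu;
};

static void my_obj_reclaim(struct rcu_head *head)
{
	struct my_obj *obj = container_of(head, struct my_obj, rcu);

	/* No RCU reader can still see obj here. */
	kfree(obj);
}

static void my_obj_remove(struct my_obj *obj)
{
	list_del_rcu(&obj->link);
	call_rcu(&obj->rcu, my_obj_reclaim);
	rcu_barrier();	/* wait for all outstanding call_rcu() callbacks */
}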
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c new file mode 100644 index 000000000000..fde4db91c4d2 --- /dev/null +++ b/drivers/edac/i3200_edac.c | |||
@@ -0,0 +1,527 @@ | |||
1 | /* | ||
2 | * Intel 3200/3210 Memory Controller kernel module | ||
3 | * Copyright (C) 2008-2009 Akamai Technologies, Inc. | ||
4 | * Portions by Hitoshi Mitake <h.mitake@gmail.com>. | ||
5 | * | ||
6 | * This file may be distributed under the terms of the | ||
7 | * GNU General Public License. | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/pci.h> | ||
13 | #include <linux/pci_ids.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/edac.h> | ||
16 | #include <linux/io.h> | ||
17 | #include "edac_core.h" | ||
18 | |||
19 | #define I3200_REVISION "1.1" | ||
20 | |||
21 | #define EDAC_MOD_STR "i3200_edac" | ||
22 | |||
23 | #define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 | ||
24 | |||
25 | #define I3200_RANKS 8 | ||
26 | #define I3200_RANKS_PER_CHANNEL 4 | ||
27 | #define I3200_CHANNELS 2 | ||
28 | |||
29 | /* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */ | ||
30 | |||
31 | #define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ | ||
32 | #define I3200_MCHBAR_HIGH 0x4c | ||
33 | #define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ | ||
34 | #define I3200_MMR_WINDOW_SIZE 16384 | ||
35 | |||
36 | #define I3200_TOM 0xa0 /* Top of Memory (16b) | ||
37 | * | ||
38 | * 15:10 reserved | ||
39 | * 9:0 total populated physical memory | ||
40 | */ | ||
41 | #define I3200_TOM_MASK 0x3ff /* bits 9:0 */ | ||
42 | #define I3200_TOM_SHIFT 26 /* 64MiB grain */ | ||
43 | |||
44 | #define I3200_ERRSTS 0xc8 /* Error Status Register (16b) | ||
45 | * | ||
46 | * 15 reserved | ||
47 | * 14 Isochronous TBWRR Run Behind FIFO Full | ||
48 | * (ITCV) | ||
49 | * 13 Isochronous TBWRR Run Behind FIFO Put | ||
50 | * (ITSTV) | ||
51 | * 12 reserved | ||
52 | * 11 MCH Thermal Sensor Event | ||
53 | * for SMI/SCI/SERR (GTSE) | ||
54 | * 10 reserved | ||
55 | * 9 LOCK to non-DRAM Memory Flag (LCKF) | ||
56 | * 8 reserved | ||
57 | * 7 DRAM Throttle Flag (DTF) | ||
58 | * 6:2 reserved | ||
59 | * 1 Multi-bit DRAM ECC Error Flag (DMERR) | ||
60 | * 0 Single-bit DRAM ECC Error Flag (DSERR) | ||
61 | */ | ||
62 | #define I3200_ERRSTS_UE 0x0002 | ||
63 | #define I3200_ERRSTS_CE 0x0001 | ||
64 | #define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE) | ||
65 | |||
66 | |||
67 | /* Intel MMIO register space - device 0 function 0 - MMR space */ | ||
68 | |||
69 | #define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4) | ||
70 | * | ||
71 | * 15:10 reserved | ||
72 | * 9:0 Channel 0 DRAM Rank Boundary Address | ||
73 | */ | ||
74 | #define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */ | ||
75 | #define I3200_DRB_MASK 0x3ff /* bits 9:0 */ | ||
76 | #define I3200_DRB_SHIFT 26 /* 64MiB grain */ | ||
77 | |||
78 | #define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b) | ||
79 | * | ||
80 | * 63:48 Error Column Address (ERRCOL) | ||
81 | * 47:32 Error Row Address (ERRROW) | ||
82 | * 31:29 Error Bank Address (ERRBANK) | ||
83 | * 28:27 Error Rank Address (ERRRANK) | ||
84 | * 26:24 reserved | ||
85 | * 23:16 Error Syndrome (ERRSYND) | ||
86 | * 15: 2 reserved | ||
87 | * 1 Multiple Bit Error Status (MERRSTS) | ||
88 | * 0 Correctable Error Status (CERRSTS) | ||
89 | */ | ||
90 | #define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */ | ||
91 | #define I3200_ECCERRLOG_CE 0x1 | ||
92 | #define I3200_ECCERRLOG_UE 0x2 | ||
93 | #define I3200_ECCERRLOG_RANK_BITS 0x18000000 | ||
94 | #define I3200_ECCERRLOG_RANK_SHIFT 27 | ||
95 | #define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000 | ||
96 | #define I3200_ECCERRLOG_SYNDROME_SHIFT 16 | ||
97 | #define I3200_CAPID0 0xe0 /* P.95 of spec for details */ | ||
98 | |||
99 | struct i3200_priv { | ||
100 | void __iomem *window; | ||
101 | }; | ||
102 | |||
103 | static int nr_channels; | ||
104 | |||
105 | static int how_many_channels(struct pci_dev *pdev) | ||
106 | { | ||
107 | unsigned char capid0_8b; /* 8th byte of CAPID0 */ | ||
108 | |||
109 | pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); | ||
110 | if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ | ||
111 | debugf0("In single channel mode.\n"); | ||
112 | return 1; | ||
113 | } else { | ||
114 | debugf0("In dual channel mode.\n"); | ||
115 | return 2; | ||
116 | } | ||
117 | } | ||
118 | |||
119 | static unsigned long eccerrlog_syndrome(u64 log) | ||
120 | { | ||
121 | return (log & I3200_ECCERRLOG_SYNDROME_BITS) >> | ||
122 | I3200_ECCERRLOG_SYNDROME_SHIFT; | ||
123 | } | ||
124 | |||
125 | static int eccerrlog_row(int channel, u64 log) | ||
126 | { | ||
127 | u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >> | ||
128 | I3200_ECCERRLOG_RANK_SHIFT); | ||
129 | return rank | (channel * I3200_RANKS_PER_CHANNEL); | ||
130 | } | ||
131 | |||
132 | enum i3200_chips { | ||
133 | I3200 = 0, | ||
134 | }; | ||
135 | |||
136 | struct i3200_dev_info { | ||
137 | const char *ctl_name; | ||
138 | }; | ||
139 | |||
140 | struct i3200_error_info { | ||
141 | u16 errsts; | ||
142 | u16 errsts2; | ||
143 | u64 eccerrlog[I3200_CHANNELS]; | ||
144 | }; | ||
145 | |||
146 | static const struct i3200_dev_info i3200_devs[] = { | ||
147 | [I3200] = { | ||
148 | .ctl_name = "i3200" | ||
149 | }, | ||
150 | }; | ||
151 | |||
152 | static struct pci_dev *mci_pdev; | ||
153 | static int i3200_registered = 1; | ||
154 | |||
155 | |||
156 | static void i3200_clear_error_info(struct mem_ctl_info *mci) | ||
157 | { | ||
158 | struct pci_dev *pdev; | ||
159 | |||
160 | pdev = to_pci_dev(mci->dev); | ||
161 | |||
162 | /* | ||
163 | * Clear any error bits. | ||
164 | * (Yes, we really clear bits by writing 1 to them.) | ||
165 | */ | ||
166 | pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS, | ||
167 | I3200_ERRSTS_BITS); | ||
168 | } | ||
169 | |||
170 | static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci, | ||
171 | struct i3200_error_info *info) | ||
172 | { | ||
173 | struct pci_dev *pdev; | ||
174 | struct i3200_priv *priv = mci->pvt_info; | ||
175 | void __iomem *window = priv->window; | ||
176 | |||
177 | pdev = to_pci_dev(mci->dev); | ||
178 | |||
179 | /* | ||
180 | * This is a mess because there is no atomic way to read all the | ||
181 | * registers at once, and between the two reads a CE can be | ||
182 | * overwritten by a UE. | ||
183 | */ | ||
184 | pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts); | ||
185 | if (!(info->errsts & I3200_ERRSTS_BITS)) | ||
186 | return; | ||
187 | |||
188 | info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); | ||
189 | if (nr_channels == 2) | ||
190 | info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); | ||
191 | |||
192 | pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2); | ||
193 | |||
194 | /* | ||
195 | * If the error is the same for both reads then the first set | ||
196 | * of reads is valid. If there is a change then there is a CE | ||
197 | * with no info and the second set of reads is valid and | ||
198 | * should be UE info. | ||
199 | */ | ||
200 | if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { | ||
201 | info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); | ||
202 | if (nr_channels == 2) | ||
203 | info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); | ||
204 | } | ||
205 | |||
206 | i3200_clear_error_info(mci); | ||
207 | } | ||
208 | |||
209 | static void i3200_process_error_info(struct mem_ctl_info *mci, | ||
210 | struct i3200_error_info *info) | ||
211 | { | ||
212 | int channel; | ||
213 | u64 log; | ||
214 | |||
215 | if (!(info->errsts & I3200_ERRSTS_BITS)) | ||
216 | return; | ||
217 | |||
218 | if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { | ||
219 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); | ||
220 | info->errsts = info->errsts2; | ||
221 | } | ||
222 | |||
223 | for (channel = 0; channel < nr_channels; channel++) { | ||
224 | log = info->eccerrlog[channel]; | ||
225 | if (log & I3200_ECCERRLOG_UE) { | ||
226 | edac_mc_handle_ue(mci, 0, 0, | ||
227 | eccerrlog_row(channel, log), | ||
228 | "i3200 UE"); | ||
229 | } else if (log & I3200_ECCERRLOG_CE) { | ||
230 | edac_mc_handle_ce(mci, 0, 0, | ||
231 | eccerrlog_syndrome(log), | ||
232 | eccerrlog_row(channel, log), 0, | ||
233 | "i3200 CE"); | ||
234 | } | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static void i3200_check(struct mem_ctl_info *mci) | ||
239 | { | ||
240 | struct i3200_error_info info; | ||
241 | |||
242 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); | ||
243 | i3200_get_and_clear_error_info(mci, &info); | ||
244 | i3200_process_error_info(mci, &info); | ||
245 | } | ||
246 | |||
247 | |||
248 | static void __iomem *i3200_map_mchbar(struct pci_dev *pdev) | ||
249 | { | ||
250 | union { | ||
251 | u64 mchbar; | ||
252 | struct { | ||
253 | u32 mchbar_low; | ||
254 | u32 mchbar_high; | ||
255 | }; | ||
256 | } u; | ||
257 | void __iomem *window; | ||
258 | |||
259 | pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low); | ||
260 | pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high); | ||
261 | u.mchbar &= I3200_MCHBAR_MASK; | ||
262 | |||
263 | if (u.mchbar != (resource_size_t)u.mchbar) { | ||
264 | printk(KERN_ERR | ||
265 | "i3200: mmio space beyond accessible range (0x%llx)\n", | ||
266 | (unsigned long long)u.mchbar); | ||
267 | return NULL; | ||
268 | } | ||
269 | |||
270 | window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE); | ||
271 | if (!window) | ||
272 | printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n", | ||
273 | (unsigned long long)u.mchbar); | ||
274 | |||
275 | return window; | ||
276 | } | ||
277 | |||
278 | |||
279 | static void i3200_get_drbs(void __iomem *window, | ||
280 | u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) | ||
281 | { | ||
282 | int i; | ||
283 | |||
284 | for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) { | ||
285 | drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK; | ||
286 | drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static bool i3200_is_stacked(struct pci_dev *pdev, | ||
291 | u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) | ||
292 | { | ||
293 | u16 tom; | ||
294 | |||
295 | pci_read_config_word(pdev, I3200_TOM, &tom); | ||
296 | tom &= I3200_TOM_MASK; | ||
297 | |||
298 | return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom; | ||
299 | } | ||
300 | |||
301 | static unsigned long drb_to_nr_pages( | ||
302 | u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked, | ||
303 | int channel, int rank) | ||
304 | { | ||
305 | int n; | ||
306 | |||
307 | n = drbs[channel][rank]; | ||
308 | if (rank > 0) | ||
309 | n -= drbs[channel][rank - 1]; | ||
310 | if (stacked && (channel == 1) && | ||
311 | drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1]) | ||
312 | n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1]; | ||
313 | |||
314 | n <<= (I3200_DRB_SHIFT - PAGE_SHIFT); | ||
315 | return n; | ||
316 | } | ||
317 | |||
318 | static int i3200_probe1(struct pci_dev *pdev, int dev_idx) | ||
319 | { | ||
320 | int rc; | ||
321 | int i; | ||
322 | struct mem_ctl_info *mci = NULL; | ||
323 | unsigned long last_page; | ||
324 | u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; | ||
325 | bool stacked; | ||
326 | void __iomem *window; | ||
327 | struct i3200_priv *priv; | ||
328 | |||
329 | debugf0("MC: %s()\n", __func__); | ||
330 | |||
331 | window = i3200_map_mchbar(pdev); | ||
332 | if (!window) | ||
333 | return -ENODEV; | ||
334 | |||
335 | i3200_get_drbs(window, drbs); | ||
336 | nr_channels = how_many_channels(pdev); | ||
337 | |||
338 | mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS, | ||
339 | nr_channels, 0); | ||
340 | if (!mci) | ||
341 | return -ENOMEM; | ||
342 | |||
343 | debugf3("MC: %s(): init mci\n", __func__); | ||
344 | |||
345 | mci->dev = &pdev->dev; | ||
346 | mci->mtype_cap = MEM_FLAG_DDR2; | ||
347 | |||
348 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; | ||
349 | mci->edac_cap = EDAC_FLAG_SECDED; | ||
350 | |||
351 | mci->mod_name = EDAC_MOD_STR; | ||
352 | mci->mod_ver = I3200_REVISION; | ||
353 | mci->ctl_name = i3200_devs[dev_idx].ctl_name; | ||
354 | mci->dev_name = pci_name(pdev); | ||
355 | mci->edac_check = i3200_check; | ||
356 | mci->ctl_page_to_phys = NULL; | ||
357 | priv = mci->pvt_info; | ||
358 | priv->window = window; | ||
359 | |||
360 | stacked = i3200_is_stacked(pdev, drbs); | ||
361 | |||
362 | /* | ||
363 | * The dram rank boundary (DRB) reg values are boundary addresses | ||
364 | * for each DRAM rank with a granularity of 64MB. DRB regs are | ||
365 | * cumulative; the last one will contain the total memory | ||
366 | * contained in all ranks. | ||
367 | */ | ||
368 | last_page = -1UL; | ||
369 | for (i = 0; i < mci->nr_csrows; i++) { | ||
370 | unsigned long nr_pages; | ||
371 | struct csrow_info *csrow = &mci->csrows[i]; | ||
372 | |||
373 | nr_pages = drb_to_nr_pages(drbs, stacked, | ||
374 | i / I3200_RANKS_PER_CHANNEL, | ||
375 | i % I3200_RANKS_PER_CHANNEL); | ||
376 | |||
377 | if (nr_pages == 0) { | ||
378 | csrow->mtype = MEM_EMPTY; | ||
379 | continue; | ||
380 | } | ||
381 | |||
382 | csrow->first_page = last_page + 1; | ||
383 | last_page += nr_pages; | ||
384 | csrow->last_page = last_page; | ||
385 | csrow->nr_pages = nr_pages; | ||
386 | |||
387 | csrow->grain = nr_pages << PAGE_SHIFT; | ||
388 | csrow->mtype = MEM_DDR2; | ||
389 | csrow->dtype = DEV_UNKNOWN; | ||
390 | csrow->edac_mode = EDAC_UNKNOWN; | ||
391 | } | ||
392 | |||
393 | i3200_clear_error_info(mci); | ||
394 | |||
395 | rc = -ENODEV; | ||
396 | if (edac_mc_add_mc(mci)) { | ||
397 | debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); | ||
398 | goto fail; | ||
399 | } | ||
400 | |||
401 | /* get this far and it's successful */ | ||
402 | debugf3("MC: %s(): success\n", __func__); | ||
403 | return 0; | ||
404 | |||
405 | fail: | ||
406 | iounmap(window); | ||
407 | if (mci) | ||
408 | edac_mc_free(mci); | ||
409 | |||
410 | return rc; | ||
411 | } | ||
412 | |||
413 | static int __devinit i3200_init_one(struct pci_dev *pdev, | ||
414 | const struct pci_device_id *ent) | ||
415 | { | ||
416 | int rc; | ||
417 | |||
418 | debugf0("MC: %s()\n", __func__); | ||
419 | |||
420 | if (pci_enable_device(pdev) < 0) | ||
421 | return -EIO; | ||
422 | |||
423 | rc = i3200_probe1(pdev, ent->driver_data); | ||
424 | if (!mci_pdev) | ||
425 | mci_pdev = pci_dev_get(pdev); | ||
426 | |||
427 | return rc; | ||
428 | } | ||
429 | |||
430 | static void __devexit i3200_remove_one(struct pci_dev *pdev) | ||
431 | { | ||
432 | struct mem_ctl_info *mci; | ||
433 | struct i3200_priv *priv; | ||
434 | |||
435 | debugf0("%s()\n", __func__); | ||
436 | |||
437 | mci = edac_mc_del_mc(&pdev->dev); | ||
438 | if (!mci) | ||
439 | return; | ||
440 | |||
441 | priv = mci->pvt_info; | ||
442 | iounmap(priv->window); | ||
443 | |||
444 | edac_mc_free(mci); | ||
445 | } | ||
446 | |||
447 | static const struct pci_device_id i3200_pci_tbl[] __devinitdata = { | ||
448 | { | ||
449 | PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
450 | I3200}, | ||
451 | { | ||
452 | 0, | ||
453 | } /* 0 terminated list. */ | ||
454 | }; | ||
455 | |||
456 | MODULE_DEVICE_TABLE(pci, i3200_pci_tbl); | ||
457 | |||
458 | static struct pci_driver i3200_driver = { | ||
459 | .name = EDAC_MOD_STR, | ||
460 | .probe = i3200_init_one, | ||
461 | .remove = __devexit_p(i3200_remove_one), | ||
462 | .id_table = i3200_pci_tbl, | ||
463 | }; | ||
464 | |||
465 | static int __init i3200_init(void) | ||
466 | { | ||
467 | int pci_rc; | ||
468 | |||
469 | debugf3("MC: %s()\n", __func__); | ||
470 | |||
471 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
472 | opstate_init(); | ||
473 | |||
474 | pci_rc = pci_register_driver(&i3200_driver); | ||
475 | if (pci_rc < 0) | ||
476 | goto fail0; | ||
477 | |||
478 | if (!mci_pdev) { | ||
479 | i3200_registered = 0; | ||
480 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
481 | PCI_DEVICE_ID_INTEL_3200_HB, NULL); | ||
482 | if (!mci_pdev) { | ||
483 | debugf0("i3200 pci_get_device fail\n"); | ||
484 | pci_rc = -ENODEV; | ||
485 | goto fail1; | ||
486 | } | ||
487 | |||
488 | pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); | ||
489 | if (pci_rc < 0) { | ||
490 | debugf0("i3200 init fail\n"); | ||
491 | pci_rc = -ENODEV; | ||
492 | goto fail1; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | return 0; | ||
497 | |||
498 | fail1: | ||
499 | pci_unregister_driver(&i3200_driver); | ||
500 | |||
501 | fail0: | ||
502 | if (mci_pdev) | ||
503 | pci_dev_put(mci_pdev); | ||
504 | |||
505 | return pci_rc; | ||
506 | } | ||
507 | |||
508 | static void __exit i3200_exit(void) | ||
509 | { | ||
510 | debugf3("MC: %s()\n", __func__); | ||
511 | |||
512 | pci_unregister_driver(&i3200_driver); | ||
513 | if (!i3200_registered) { | ||
514 | i3200_remove_one(mci_pdev); | ||
515 | pci_dev_put(mci_pdev); | ||
516 | } | ||
517 | } | ||
518 | |||
519 | module_init(i3200_init); | ||
520 | module_exit(i3200_exit); | ||
521 | |||
522 | MODULE_LICENSE("GPL"); | ||
523 | MODULE_AUTHOR("Akamai Technologies, Inc."); | ||
524 | MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers"); | ||
525 | |||
526 | module_param(edac_op_state, int, 0444); | ||
527 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
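Editor's note: the driver above consumes only the rank and syndrome fields of C0ECCERRLOG/C1ECCERRLOG. For reference, the other fields documented in the register comment could be extracted as below; these helpers are illustrative only and are not part of the driver:

/* Bit positions follow the I3200_C0ECCERRLOG comment in the file above. */
static inline u16 i3200_eccerrlog_col(u64 log)
{
	return log >> 48;		/* 63:48 ERRCOL */
}

static inline u16 i3200_eccerrlog_row_addr(u64 log)
{
	return (log >> 32) & 0xffff;	/* 47:32 ERRROW */
}

static inline u8 i3200_eccerrlog_bank(u64 log)
{
	return (log >> 29) & 0x7;	/* 31:29 ERRBANK */
}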
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 3f2ccfc6407c..157f6504f25e 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -41,7 +41,9 @@ static u32 orig_pci_err_en; | |||
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | static u32 orig_l2_err_disable; | 43 | static u32 orig_l2_err_disable; |
44 | #ifdef CONFIG_MPC85xx | ||
44 | static u32 orig_hid1[2]; | 45 | static u32 orig_hid1[2]; |
46 | #endif | ||
45 | 47 | ||
46 | /************************ MC SYSFS parts ***********************************/ | 48 | /************************ MC SYSFS parts ***********************************/ |
47 | 49 | ||
@@ -646,6 +648,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = { | |||
646 | { .compatible = "fsl,mpc8560-l2-cache-controller", }, | 648 | { .compatible = "fsl,mpc8560-l2-cache-controller", }, |
647 | { .compatible = "fsl,mpc8568-l2-cache-controller", }, | 649 | { .compatible = "fsl,mpc8568-l2-cache-controller", }, |
648 | { .compatible = "fsl,mpc8572-l2-cache-controller", }, | 650 | { .compatible = "fsl,mpc8572-l2-cache-controller", }, |
651 | { .compatible = "fsl,p2020-l2-cache-controller", }, | ||
649 | {}, | 652 | {}, |
650 | }; | 653 | }; |
651 | 654 | ||
@@ -788,19 +791,20 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) | |||
788 | csrow = &mci->csrows[index]; | 791 | csrow = &mci->csrows[index]; |
789 | cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + | 792 | cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + |
790 | (index * MPC85XX_MC_CS_BNDS_OFS)); | 793 | (index * MPC85XX_MC_CS_BNDS_OFS)); |
791 | start = (cs_bnds & 0xfff0000) << 4; | 794 | |
792 | end = ((cs_bnds & 0xfff) << 20); | 795 | start = (cs_bnds & 0xffff0000) >> 16; |
793 | if (start) | 796 | end = (cs_bnds & 0x0000ffff); |
794 | start |= 0xfffff; | ||
795 | if (end) | ||
796 | end |= 0xfffff; | ||
797 | 797 | ||
798 | if (start == end) | 798 | if (start == end) |
799 | continue; /* not populated */ | 799 | continue; /* not populated */ |
800 | 800 | ||
801 | start <<= (24 - PAGE_SHIFT); | ||
802 | end <<= (24 - PAGE_SHIFT); | ||
803 | end |= (1 << (24 - PAGE_SHIFT)) - 1; | ||
804 | |||
801 | csrow->first_page = start >> PAGE_SHIFT; | 805 | csrow->first_page = start >> PAGE_SHIFT; |
802 | csrow->last_page = end >> PAGE_SHIFT; | 806 | csrow->last_page = end >> PAGE_SHIFT; |
803 | csrow->nr_pages = csrow->last_page + 1 - csrow->first_page; | 807 | csrow->nr_pages = end + 1 - start; |
804 | csrow->grain = 8; | 808 | csrow->grain = 8; |
805 | csrow->mtype = mtype; | 809 | csrow->mtype = mtype; |
806 | csrow->dtype = DEV_UNKNOWN; | 810 | csrow->dtype = DEV_UNKNOWN; |
@@ -984,6 +988,8 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = { | |||
984 | { .compatible = "fsl,mpc8560-memory-controller", }, | 988 | { .compatible = "fsl,mpc8560-memory-controller", }, |
985 | { .compatible = "fsl,mpc8568-memory-controller", }, | 989 | { .compatible = "fsl,mpc8568-memory-controller", }, |
986 | { .compatible = "fsl,mpc8572-memory-controller", }, | 990 | { .compatible = "fsl,mpc8572-memory-controller", }, |
991 | { .compatible = "fsl,mpc8349-memory-controller", }, | ||
992 | { .compatible = "fsl,p2020-memory-controller", }, | ||
987 | {}, | 993 | {}, |
988 | }; | 994 | }; |
989 | 995 | ||
@@ -999,13 +1005,13 @@ static struct of_platform_driver mpc85xx_mc_err_driver = { | |||
999 | }, | 1005 | }, |
1000 | }; | 1006 | }; |
1001 | 1007 | ||
1002 | 1008 | #ifdef CONFIG_MPC85xx | |
1003 | static void __init mpc85xx_mc_clear_rfxe(void *data) | 1009 | static void __init mpc85xx_mc_clear_rfxe(void *data) |
1004 | { | 1010 | { |
1005 | orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1); | 1011 | orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1); |
1006 | mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000)); | 1012 | mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000)); |
1007 | } | 1013 | } |
1008 | 1014 | #endif | |
1009 | 1015 | ||
1010 | static int __init mpc85xx_mc_init(void) | 1016 | static int __init mpc85xx_mc_init(void) |
1011 | { | 1017 | { |
@@ -1038,26 +1044,32 @@ static int __init mpc85xx_mc_init(void) | |||
1038 | printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n"); | 1044 | printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n"); |
1039 | #endif | 1045 | #endif |
1040 | 1046 | ||
1047 | #ifdef CONFIG_MPC85xx | ||
1041 | /* | 1048 | /* |
1042 | * need to clear HID1[RFXE] to disable machine check int | 1049 | * need to clear HID1[RFXE] to disable machine check int |
1043 | * so we can catch it | 1050 | * so we can catch it |
1044 | */ | 1051 | */ |
1045 | if (edac_op_state == EDAC_OPSTATE_INT) | 1052 | if (edac_op_state == EDAC_OPSTATE_INT) |
1046 | on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0); | 1053 | on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0); |
1054 | #endif | ||
1047 | 1055 | ||
1048 | return 0; | 1056 | return 0; |
1049 | } | 1057 | } |
1050 | 1058 | ||
1051 | module_init(mpc85xx_mc_init); | 1059 | module_init(mpc85xx_mc_init); |
1052 | 1060 | ||
1061 | #ifdef CONFIG_MPC85xx | ||
1053 | static void __exit mpc85xx_mc_restore_hid1(void *data) | 1062 | static void __exit mpc85xx_mc_restore_hid1(void *data) |
1054 | { | 1063 | { |
1055 | mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]); | 1064 | mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]); |
1056 | } | 1065 | } |
1066 | #endif | ||
1057 | 1067 | ||
1058 | static void __exit mpc85xx_mc_exit(void) | 1068 | static void __exit mpc85xx_mc_exit(void) |
1059 | { | 1069 | { |
1070 | #ifdef CONFIG_MPC85xx | ||
1060 | on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); | 1071 | on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); |
1072 | #endif | ||
1061 | #ifdef CONFIG_PCI | 1073 | #ifdef CONFIG_PCI |
1062 | of_unregister_platform_driver(&mpc85xx_pci_err_driver); | 1074 | of_unregister_platform_driver(&mpc85xx_pci_err_driver); |
1063 | #endif | 1075 | #endif |
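
The reworked csrow setup above decodes the start and end of each chip select from the CS_BNDS register and scales them into pages, with the page count taken as end + 1 - start. A minimal standalone C sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) and a made-up register value; the real code reads the register with in_be32() and stores the results in struct csrow_info.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages for this sketch */

int main(void)
{
	uint32_t cs_bnds = 0x0000003f;	/* example: start field 0, end field 0x3f */
	uint32_t start, end;

	/* start address in bits 31:16, end address in bits 15:0 */
	start = (cs_bnds & 0xffff0000) >> 16;
	end = (cs_bnds & 0x0000ffff);

	if (start == end) {
		printf("chip select not populated\n");
		return 0;
	}

	/* the hunk treats both fields as multiples of 16 MiB (hence the
	 * 24-bit scaling); convert to pages and make 'end' inclusive */
	start <<= (24 - PAGE_SHIFT);
	end <<= (24 - PAGE_SHIFT);
	end |= (1 << (24 - PAGE_SHIFT)) - 1;

	printf("start page %u, end page %u, nr_pages %u\n",
	       start, end, end + 1 - start);
	return 0;
}
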
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index 5131aaae8e03..a6b9fec13a74 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c | |||
@@ -90,7 +90,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev) | |||
90 | return -ENOENT; | 90 | return -ENOENT; |
91 | } | 91 | } |
92 | 92 | ||
93 | pci_serr = ioremap(r->start, r->end - r->start + 1); | 93 | pci_serr = ioremap(r->start, resource_size(r)); |
94 | if (!pci_serr) | 94 | if (!pci_serr) |
95 | return -ENOMEM; | 95 | return -ENOMEM; |
96 | 96 | ||
@@ -140,7 +140,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev) | |||
140 | 140 | ||
141 | if (!devm_request_mem_region(&pdev->dev, | 141 | if (!devm_request_mem_region(&pdev->dev, |
142 | r->start, | 142 | r->start, |
143 | r->end - r->start + 1, | 143 | resource_size(r), |
144 | pdata->name)) { | 144 | pdata->name)) { |
145 | printk(KERN_ERR "%s: Error while requesting mem region\n", | 145 | printk(KERN_ERR "%s: Error while requesting mem region\n", |
146 | __func__); | 146 | __func__); |
@@ -150,7 +150,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev) | |||
150 | 150 | ||
151 | pdata->pci_vbase = devm_ioremap(&pdev->dev, | 151 | pdata->pci_vbase = devm_ioremap(&pdev->dev, |
152 | r->start, | 152 | r->start, |
153 | r->end - r->start + 1); | 153 | resource_size(r)); |
154 | if (!pdata->pci_vbase) { | 154 | if (!pdata->pci_vbase) { |
155 | printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__); | 155 | printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__); |
156 | res = -ENOMEM; | 156 | res = -ENOMEM; |
@@ -306,7 +306,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev) | |||
306 | 306 | ||
307 | if (!devm_request_mem_region(&pdev->dev, | 307 | if (!devm_request_mem_region(&pdev->dev, |
308 | r->start, | 308 | r->start, |
309 | r->end - r->start + 1, | 309 | resource_size(r), |
310 | pdata->name)) { | 310 | pdata->name)) { |
311 | printk(KERN_ERR "%s: Error while request mem region\n", | 311 | printk(KERN_ERR "%s: Error while request mem region\n", |
312 | __func__); | 312 | __func__); |
@@ -316,7 +316,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev) | |||
316 | 316 | ||
317 | pdata->sram_vbase = devm_ioremap(&pdev->dev, | 317 | pdata->sram_vbase = devm_ioremap(&pdev->dev, |
318 | r->start, | 318 | r->start, |
319 | r->end - r->start + 1); | 319 | resource_size(r)); |
320 | if (!pdata->sram_vbase) { | 320 | if (!pdata->sram_vbase) { |
321 | printk(KERN_ERR "%s: Unable to setup SRAM err regs\n", | 321 | printk(KERN_ERR "%s: Unable to setup SRAM err regs\n", |
322 | __func__); | 322 | __func__); |
@@ -474,7 +474,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev) | |||
474 | 474 | ||
475 | if (!devm_request_mem_region(&pdev->dev, | 475 | if (!devm_request_mem_region(&pdev->dev, |
476 | r->start, | 476 | r->start, |
477 | r->end - r->start + 1, | 477 | resource_size(r), |
478 | pdata->name)) { | 478 | pdata->name)) { |
479 | printk(KERN_ERR "%s: Error while requesting mem region\n", | 479 | printk(KERN_ERR "%s: Error while requesting mem region\n", |
480 | __func__); | 480 | __func__); |
@@ -484,7 +484,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev) | |||
484 | 484 | ||
485 | pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev, | 485 | pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev, |
486 | r->start, | 486 | r->start, |
487 | r->end - r->start + 1); | 487 | resource_size(r)); |
488 | if (!pdata->cpu_vbase[0]) { | 488 | if (!pdata->cpu_vbase[0]) { |
489 | printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__); | 489 | printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__); |
490 | res = -ENOMEM; | 490 | res = -ENOMEM; |
@@ -501,7 +501,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev) | |||
501 | 501 | ||
502 | if (!devm_request_mem_region(&pdev->dev, | 502 | if (!devm_request_mem_region(&pdev->dev, |
503 | r->start, | 503 | r->start, |
504 | r->end - r->start + 1, | 504 | resource_size(r), |
505 | pdata->name)) { | 505 | pdata->name)) { |
506 | printk(KERN_ERR "%s: Error while requesting mem region\n", | 506 | printk(KERN_ERR "%s: Error while requesting mem region\n", |
507 | __func__); | 507 | __func__); |
@@ -511,7 +511,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev) | |||
511 | 511 | ||
512 | pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev, | 512 | pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev, |
513 | r->start, | 513 | r->start, |
514 | r->end - r->start + 1); | 514 | resource_size(r)); |
515 | if (!pdata->cpu_vbase[1]) { | 515 | if (!pdata->cpu_vbase[1]) { |
516 | printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__); | 516 | printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__); |
517 | res = -ENOMEM; | 517 | res = -ENOMEM; |
@@ -726,7 +726,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) | |||
726 | 726 | ||
727 | if (!devm_request_mem_region(&pdev->dev, | 727 | if (!devm_request_mem_region(&pdev->dev, |
728 | r->start, | 728 | r->start, |
729 | r->end - r->start + 1, | 729 | resource_size(r), |
730 | pdata->name)) { | 730 | pdata->name)) { |
731 | printk(KERN_ERR "%s: Error while requesting mem region\n", | 731 | printk(KERN_ERR "%s: Error while requesting mem region\n", |
732 | __func__); | 732 | __func__); |
@@ -736,7 +736,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) | |||
736 | 736 | ||
737 | pdata->mc_vbase = devm_ioremap(&pdev->dev, | 737 | pdata->mc_vbase = devm_ioremap(&pdev->dev, |
738 | r->start, | 738 | r->start, |
739 | r->end - r->start + 1); | 739 | resource_size(r)); |
740 | if (!pdata->mc_vbase) { | 740 | if (!pdata->mc_vbase) { |
741 | printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__); | 741 | printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__); |
742 | res = -ENOMEM; | 742 | res = -ENOMEM; |
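
The mv64x60 hunks above are a mechanical conversion from the open-coded r->end - r->start + 1 to the resource_size() helper. A small userspace sketch of why the two are equivalent, assuming the kernel's inclusive-end convention for struct resource; the real definitions live in <linux/ioport.h>.

#include <stdio.h>
#include <stdint.h>

/* userspace stand-ins; the kernel versions are in <linux/ioport.h> */
struct resource {
	uint64_t start;
	uint64_t end;			/* inclusive, as in the kernel */
};

static inline uint64_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource r = { .start = 0xf1000000, .end = 0xf1000fff };

	/* the open-coded form replaced above vs. the helper */
	printf("%llu == %llu\n",
	       (unsigned long long)(r.end - r.start + 1),
	       (unsigned long long)resource_size(&r));
	return 0;
}
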
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e4d971c8b9d0..f831ea159291 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -102,6 +102,7 @@ config DRM_I915 | |||
102 | select BACKLIGHT_CLASS_DEVICE if ACPI | 102 | select BACKLIGHT_CLASS_DEVICE if ACPI |
103 | select INPUT if ACPI | 103 | select INPUT if ACPI |
104 | select ACPI_VIDEO if ACPI | 104 | select ACPI_VIDEO if ACPI |
105 | select ACPI_BUTTON if ACPI | ||
105 | help | 106 | help |
106 | Choose this option if you have a system that has Intel 830M, 845G, | 107 | Choose this option if you have a system that has Intel 830M, 845G, |
107 | 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the | 108 | 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 230c9ffdd5e9..80391995bdec 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) | |||
142 | if (IS_ERR(obj->filp)) | 142 | if (IS_ERR(obj->filp)) |
143 | goto free; | 143 | goto free; |
144 | 144 | ||
145 | /* Basically we want to disable the OOM killer and handle ENOMEM | ||
146 | * ourselves by sacrificing pages from cached buffers. | ||
147 | * XXX shmem_file_[gs]et_gfp_mask() | ||
148 | */ | ||
149 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, | ||
150 | GFP_HIGHUSER | | ||
151 | __GFP_COLD | | ||
152 | __GFP_FS | | ||
153 | __GFP_RECLAIMABLE | | ||
154 | __GFP_NORETRY | | ||
155 | __GFP_NOWARN | | ||
156 | __GFP_NOMEMALLOC); | ||
157 | |||
145 | kref_init(&obj->refcount); | 158 | kref_init(&obj->refcount); |
146 | kref_init(&obj->handlecount); | 159 | kref_init(&obj->handlecount); |
147 | obj->size = size; | 160 | obj->size = size; |
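
The comment added above states the intent: drop the heavyweight allocation behaviour (no OOM killer, no endless retry) for the object's shmem mapping and let the driver recover from ENOMEM by evicting its own cached buffers, which the i915_gem.c changes later in this diff implement as i915_gem_object_get_pages_or_evict(). A rough userspace sketch of that allocate / evict / retry shape; every name below is invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define NCACHED 4

static void *cache[NCACHED];		/* pretend pool of evictable buffers */

static int evict_one(void)
{
	for (int i = 0; i < NCACHED; i++) {
		if (cache[i]) {
			free(cache[i]);	/* sacrifice a cached buffer */
			cache[i] = NULL;
			return 0;
		}
	}
	return -1;			/* nothing left to give back */
}

static void *alloc_or_evict(size_t size)
{
	void *p = malloc(size);		/* first attempt: allowed to fail */

	while (!p && evict_one() == 0)	/* free something, then retry */
		p = malloc(size);
	return p;
}

int main(void)
{
	for (int i = 0; i < NCACHED; i++)
		cache[i] = malloc(1024);

	void *p = alloc_or_evict(1 << 20);
	printf("allocation %s\n", p ? "succeeded" : "failed");

	free(p);
	for (int i = 0; i < NCACHED; i++)
		free(cache[i]);
	return 0;
}
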
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 5269dfa5f620..fa7b9be096bc 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
9 | i915_gem.o \ | 9 | i915_gem.o \ |
10 | i915_gem_debug.o \ | 10 | i915_gem_debug.o \ |
11 | i915_gem_tiling.o \ | 11 | i915_gem_tiling.o \ |
12 | i915_trace_points.o \ | ||
12 | intel_display.o \ | 13 | intel_display.o \ |
13 | intel_crt.o \ | 14 | intel_crt.o \ |
14 | intel_lvds.o \ | 15 | intel_lvds.o \ |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1e3bdcee863c..f8ce9a3a420d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -96,11 +96,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
96 | { | 96 | { |
97 | struct drm_gem_object *obj = obj_priv->obj; | 97 | struct drm_gem_object *obj = obj_priv->obj; |
98 | 98 | ||
99 | seq_printf(m, " %p: %s %08x %08x %d", | 99 | seq_printf(m, " %p: %s %8zd %08x %08x %d %s", |
100 | obj, | 100 | obj, |
101 | get_pin_flag(obj_priv), | 101 | get_pin_flag(obj_priv), |
102 | obj->size, | ||
102 | obj->read_domains, obj->write_domain, | 103 | obj->read_domains, obj->write_domain, |
103 | obj_priv->last_rendering_seqno); | 104 | obj_priv->last_rendering_seqno, |
105 | obj_priv->dirty ? "dirty" : ""); | ||
104 | 106 | ||
105 | if (obj->name) | 107 | if (obj->name) |
106 | seq_printf(m, " (name: %d)", obj->name); | 108 | seq_printf(m, " (name: %d)", obj->name); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 5a49a1867b35..45d507ebd3ff 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include "i915_drm.h" | 34 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
36 | #include "i915_trace.h" | ||
36 | #include <linux/vgaarb.h> | 37 | #include <linux/vgaarb.h> |
37 | 38 | ||
38 | /* Really want an OS-independent resettable timer. Would like to have | 39 | /* Really want an OS-independent resettable timer. Would like to have |
@@ -50,14 +51,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | |||
50 | u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 51 | u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; |
51 | int i; | 52 | int i; |
52 | 53 | ||
54 | trace_i915_ring_wait_begin (dev); | ||
55 | |||
53 | for (i = 0; i < 100000; i++) { | 56 | for (i = 0; i < 100000; i++) { |
54 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 57 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; |
55 | acthd = I915_READ(acthd_reg); | 58 | acthd = I915_READ(acthd_reg); |
56 | ring->space = ring->head - (ring->tail + 8); | 59 | ring->space = ring->head - (ring->tail + 8); |
57 | if (ring->space < 0) | 60 | if (ring->space < 0) |
58 | ring->space += ring->Size; | 61 | ring->space += ring->Size; |
59 | if (ring->space >= n) | 62 | if (ring->space >= n) { |
63 | trace_i915_ring_wait_end (dev); | ||
60 | return 0; | 64 | return 0; |
65 | } | ||
61 | 66 | ||
62 | if (dev->primary->master) { | 67 | if (dev->primary->master) { |
63 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 68 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
@@ -77,6 +82,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | |||
77 | 82 | ||
78 | } | 83 | } |
79 | 84 | ||
85 | trace_i915_ring_wait_end (dev); | ||
80 | return -EBUSY; | 86 | return -EBUSY; |
81 | } | 87 | } |
82 | 88 | ||
@@ -922,7 +928,8 @@ static int i915_get_bridge_dev(struct drm_device *dev) | |||
922 | * how much was set aside so we can use it for our own purposes. | 928 | * how much was set aside so we can use it for our own purposes. |
923 | */ | 929 | */ |
924 | static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, | 930 | static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, |
925 | uint32_t *preallocated_size) | 931 | uint32_t *preallocated_size, |
932 | uint32_t *start) | ||
926 | { | 933 | { |
927 | struct drm_i915_private *dev_priv = dev->dev_private; | 934 | struct drm_i915_private *dev_priv = dev->dev_private; |
928 | u16 tmp = 0; | 935 | u16 tmp = 0; |
@@ -1009,10 +1016,159 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, | |||
1009 | return -1; | 1016 | return -1; |
1010 | } | 1017 | } |
1011 | *preallocated_size = stolen - overhead; | 1018 | *preallocated_size = stolen - overhead; |
1019 | *start = overhead; | ||
1012 | 1020 | ||
1013 | return 0; | 1021 | return 0; |
1014 | } | 1022 | } |
1015 | 1023 | ||
1024 | #define PTE_ADDRESS_MASK 0xfffff000 | ||
1025 | #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ | ||
1026 | #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) | ||
1027 | #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ | ||
1028 | #define PTE_MAPPING_TYPE_CACHED (3 << 1) | ||
1029 | #define PTE_MAPPING_TYPE_MASK (3 << 1) | ||
1030 | #define PTE_VALID (1 << 0) | ||
1031 | |||
1032 | /** | ||
1033 | * i915_gtt_to_phys - take a GTT address and turn it into a physical one | ||
1034 | * @dev: drm device | ||
1035 | * @gtt_addr: address to translate | ||
1036 | * | ||
1037 | * Some chip functions require allocations from stolen space but need the | ||
1038 | * physical address of the memory in question. We use this routine | ||
1039 | * to get a physical address suitable for register programming from a given | ||
1040 | * GTT address. | ||
1041 | */ | ||
1042 | static unsigned long i915_gtt_to_phys(struct drm_device *dev, | ||
1043 | unsigned long gtt_addr) | ||
1044 | { | ||
1045 | unsigned long *gtt; | ||
1046 | unsigned long entry, phys; | ||
1047 | int gtt_bar = IS_I9XX(dev) ? 0 : 1; | ||
1048 | int gtt_offset, gtt_size; | ||
1049 | |||
1050 | if (IS_I965G(dev)) { | ||
1051 | if (IS_G4X(dev) || IS_IGDNG(dev)) { | ||
1052 | gtt_offset = 2*1024*1024; | ||
1053 | gtt_size = 2*1024*1024; | ||
1054 | } else { | ||
1055 | gtt_offset = 512*1024; | ||
1056 | gtt_size = 512*1024; | ||
1057 | } | ||
1058 | } else { | ||
1059 | gtt_bar = 3; | ||
1060 | gtt_offset = 0; | ||
1061 | gtt_size = pci_resource_len(dev->pdev, gtt_bar); | ||
1062 | } | ||
1063 | |||
1064 | gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, | ||
1065 | gtt_size); | ||
1066 | if (!gtt) { | ||
1067 | DRM_ERROR("ioremap of GTT failed\n"); | ||
1068 | return 0; | ||
1069 | } | ||
1070 | |||
1071 | entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); | ||
1072 | |||
1073 | DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); | ||
1074 | |||
1075 | /* Mask out these reserved bits on this hardware. */ | ||
1076 | if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || | ||
1077 | IS_I945G(dev) || IS_I945GM(dev)) { | ||
1078 | entry &= ~PTE_ADDRESS_MASK_HIGH; | ||
1079 | } | ||
1080 | |||
1081 | /* If it's not a mapping type we know, then bail. */ | ||
1082 | if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && | ||
1083 | (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { | ||
1084 | iounmap(gtt); | ||
1085 | return 0; | ||
1086 | } | ||
1087 | |||
1088 | if (!(entry & PTE_VALID)) { | ||
1089 | DRM_ERROR("bad GTT entry in stolen space\n"); | ||
1090 | iounmap(gtt); | ||
1091 | return 0; | ||
1092 | } | ||
1093 | |||
1094 | iounmap(gtt); | ||
1095 | |||
1096 | phys =(entry & PTE_ADDRESS_MASK) | | ||
1097 | ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); | ||
1098 | |||
1099 | DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); | ||
1100 | |||
1101 | return phys; | ||
1102 | } | ||
1103 | |||
1104 | static void i915_warn_stolen(struct drm_device *dev) | ||
1105 | { | ||
1106 | DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); | ||
1107 | DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); | ||
1108 | } | ||
1109 | |||
1110 | static void i915_setup_compression(struct drm_device *dev, int size) | ||
1111 | { | ||
1112 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1113 | struct drm_mm_node *compressed_fb, *compressed_llb; | ||
1114 | unsigned long cfb_base, ll_base; | ||
1115 | |||
1116 | /* Leave 1M for line length buffer & misc. */ | ||
1117 | compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); | ||
1118 | if (!compressed_fb) { | ||
1119 | i915_warn_stolen(dev); | ||
1120 | return; | ||
1121 | } | ||
1122 | |||
1123 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); | ||
1124 | if (!compressed_fb) { | ||
1125 | i915_warn_stolen(dev); | ||
1126 | return; | ||
1127 | } | ||
1128 | |||
1129 | cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); | ||
1130 | if (!cfb_base) { | ||
1131 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | ||
1132 | drm_mm_put_block(compressed_fb); | ||
1133 | } | ||
1134 | |||
1135 | if (!IS_GM45(dev)) { | ||
1136 | compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, | ||
1137 | 4096, 0); | ||
1138 | if (!compressed_llb) { | ||
1139 | i915_warn_stolen(dev); | ||
1140 | return; | ||
1141 | } | ||
1142 | |||
1143 | compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); | ||
1144 | if (!compressed_llb) { | ||
1145 | i915_warn_stolen(dev); | ||
1146 | return; | ||
1147 | } | ||
1148 | |||
1149 | ll_base = i915_gtt_to_phys(dev, compressed_llb->start); | ||
1150 | if (!ll_base) { | ||
1151 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | ||
1152 | drm_mm_put_block(compressed_fb); | ||
1153 | drm_mm_put_block(compressed_llb); | ||
1154 | } | ||
1155 | } | ||
1156 | |||
1157 | dev_priv->cfb_size = size; | ||
1158 | |||
1159 | if (IS_GM45(dev)) { | ||
1160 | g4x_disable_fbc(dev); | ||
1161 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); | ||
1162 | } else { | ||
1163 | i8xx_disable_fbc(dev); | ||
1164 | I915_WRITE(FBC_CFB_BASE, cfb_base); | ||
1165 | I915_WRITE(FBC_LL_BASE, ll_base); | ||
1166 | } | ||
1167 | |||
1168 | DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, | ||
1169 | ll_base, size >> 20); | ||
1170 | } | ||
1171 | |||
1016 | /* true = enable decode, false = disable decoder */ | 1172 | /* true = enable decode, false = disable decoder */ |
1017 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | 1173 | static unsigned int i915_vga_set_decode(void *cookie, bool state) |
1018 | { | 1174 | { |
@@ -1027,6 +1183,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state) | |||
1027 | } | 1183 | } |
1028 | 1184 | ||
1029 | static int i915_load_modeset_init(struct drm_device *dev, | 1185 | static int i915_load_modeset_init(struct drm_device *dev, |
1186 | unsigned long prealloc_start, | ||
1030 | unsigned long prealloc_size, | 1187 | unsigned long prealloc_size, |
1031 | unsigned long agp_size) | 1188 | unsigned long agp_size) |
1032 | { | 1189 | { |
@@ -1047,6 +1204,10 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1047 | 1204 | ||
1048 | /* Basic memrange allocator for stolen space (aka vram) */ | 1205 | /* Basic memrange allocator for stolen space (aka vram) */ |
1049 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); | 1206 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); |
1207 | DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); | ||
1208 | |||
1209 | /* We're off and running w/KMS */ | ||
1210 | dev_priv->mm.suspended = 0; | ||
1050 | 1211 | ||
1051 | /* Let GEM Manage from end of prealloc space to end of aperture. | 1212 | /* Let GEM Manage from end of prealloc space to end of aperture. |
1052 | * | 1213 | * |
@@ -1059,10 +1220,25 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1059 | */ | 1220 | */ |
1060 | i915_gem_do_init(dev, prealloc_size, agp_size - 4096); | 1221 | i915_gem_do_init(dev, prealloc_size, agp_size - 4096); |
1061 | 1222 | ||
1223 | mutex_lock(&dev->struct_mutex); | ||
1062 | ret = i915_gem_init_ringbuffer(dev); | 1224 | ret = i915_gem_init_ringbuffer(dev); |
1225 | mutex_unlock(&dev->struct_mutex); | ||
1063 | if (ret) | 1226 | if (ret) |
1064 | goto out; | 1227 | goto out; |
1065 | 1228 | ||
1229 | /* Try to set up FBC with a reasonable compressed buffer size */ | ||
1230 | if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) && | ||
1231 | i915_powersave) { | ||
1232 | int cfb_size; | ||
1233 | |||
1234 | /* Try to get an 8M buffer... */ | ||
1235 | if (prealloc_size > (9*1024*1024)) | ||
1236 | cfb_size = 8*1024*1024; | ||
1237 | else /* fall back to 7/8 of the stolen space */ | ||
1238 | cfb_size = prealloc_size * 7 / 8; | ||
1239 | i915_setup_compression(dev, cfb_size); | ||
1240 | } | ||
1241 | |||
1066 | /* Allow hardware batchbuffers unless told otherwise. | 1242 | /* Allow hardware batchbuffers unless told otherwise. |
1067 | */ | 1243 | */ |
1068 | dev_priv->allow_batchbuffer = 1; | 1244 | dev_priv->allow_batchbuffer = 1; |
@@ -1180,7 +1356,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1180 | struct drm_i915_private *dev_priv = dev->dev_private; | 1356 | struct drm_i915_private *dev_priv = dev->dev_private; |
1181 | resource_size_t base, size; | 1357 | resource_size_t base, size; |
1182 | int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; | 1358 | int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; |
1183 | uint32_t agp_size, prealloc_size; | 1359 | uint32_t agp_size, prealloc_size, prealloc_start; |
1184 | 1360 | ||
1185 | /* i915 has 4 more counters */ | 1361 | /* i915 has 4 more counters */ |
1186 | dev->counters += 4; | 1362 | dev->counters += 4; |
@@ -1234,7 +1410,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1234 | "performance may suffer.\n"); | 1410 | "performance may suffer.\n"); |
1235 | } | 1411 | } |
1236 | 1412 | ||
1237 | ret = i915_probe_agp(dev, &agp_size, &prealloc_size); | 1413 | ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); |
1238 | if (ret) | 1414 | if (ret) |
1239 | goto out_iomapfree; | 1415 | goto out_iomapfree; |
1240 | 1416 | ||
@@ -1300,8 +1476,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1300 | return ret; | 1476 | return ret; |
1301 | } | 1477 | } |
1302 | 1478 | ||
1479 | /* Start out suspended */ | ||
1480 | dev_priv->mm.suspended = 1; | ||
1481 | |||
1303 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1482 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1304 | ret = i915_load_modeset_init(dev, prealloc_size, agp_size); | 1483 | ret = i915_load_modeset_init(dev, prealloc_start, |
1484 | prealloc_size, agp_size); | ||
1305 | if (ret < 0) { | 1485 | if (ret < 0) { |
1306 | DRM_ERROR("failed to init modeset\n"); | 1486 | DRM_ERROR("failed to init modeset\n"); |
1307 | goto out_workqueue_free; | 1487 | goto out_workqueue_free; |
@@ -1313,6 +1493,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1313 | if (!IS_IGDNG(dev)) | 1493 | if (!IS_IGDNG(dev)) |
1314 | intel_opregion_init(dev, 0); | 1494 | intel_opregion_init(dev, 0); |
1315 | 1495 | ||
1496 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | ||
1497 | (unsigned long) dev); | ||
1316 | return 0; | 1498 | return 0; |
1317 | 1499 | ||
1318 | out_workqueue_free: | 1500 | out_workqueue_free: |
@@ -1333,6 +1515,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
1333 | struct drm_i915_private *dev_priv = dev->dev_private; | 1515 | struct drm_i915_private *dev_priv = dev->dev_private; |
1334 | 1516 | ||
1335 | destroy_workqueue(dev_priv->wq); | 1517 | destroy_workqueue(dev_priv->wq); |
1518 | del_timer_sync(&dev_priv->hangcheck_timer); | ||
1336 | 1519 | ||
1337 | io_mapping_free(dev_priv->mm.gtt_mapping); | 1520 | io_mapping_free(dev_priv->mm.gtt_mapping); |
1338 | if (dev_priv->mm.gtt_mtrr >= 0) { | 1521 | if (dev_priv->mm.gtt_mtrr >= 0) { |
@@ -1472,6 +1655,7 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
1472 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), | 1655 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), |
1473 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), | 1656 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), |
1474 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), | 1657 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), |
1658 | DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), | ||
1475 | }; | 1659 | }; |
1476 | 1660 | ||
1477 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | 1661 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
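
i915_gtt_to_phys() above reconstructs a physical address from a GTT PTE: the low address bits come straight through PTE_ADDRESS_MASK and the four high bits (entry bits 7:4) are shifted up to positions 35:32. A standalone sketch of just that arithmetic, using the mask values defined in the hunk and a made-up entry value.

#include <stdio.h>
#include <stdint.h>

#define PTE_ADDRESS_MASK	0xfffff000u
#define PTE_ADDRESS_MASK_HIGH	0x000000f0u	/* i915+ */
#define PTE_VALID		(1u << 0)

static uint64_t pte_to_phys(uint32_t entry)
{
	if (!(entry & PTE_VALID))
		return 0;		/* as in the hunk: bail on invalid PTEs */

	/* bits 31:12 carry the low part of the address, bits 7:4 the part
	 * above 4 GiB, so shift the latter up to positions 35:32 */
	return (uint64_t)(entry & PTE_ADDRESS_MASK) |
	       ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
}

int main(void)
{
	uint32_t entry = 0x3f8d2037;	/* made-up valid PTE with high bits set */

	printf("PTE 0x%08x -> phys 0x%llx\n", entry,
	       (unsigned long long)pte_to_phys(entry));
	return 0;
}
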
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index dbe568c9327b..b93814c0d3e2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -89,6 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
89 | pci_set_power_state(dev->pdev, PCI_D3hot); | 89 | pci_set_power_state(dev->pdev, PCI_D3hot); |
90 | } | 90 | } |
91 | 91 | ||
92 | dev_priv->suspended = 1; | ||
93 | |||
92 | return 0; | 94 | return 0; |
93 | } | 95 | } |
94 | 96 | ||
@@ -97,8 +99,6 @@ static int i915_resume(struct drm_device *dev) | |||
97 | struct drm_i915_private *dev_priv = dev->dev_private; | 99 | struct drm_i915_private *dev_priv = dev->dev_private; |
98 | int ret = 0; | 100 | int ret = 0; |
99 | 101 | ||
100 | pci_set_power_state(dev->pdev, PCI_D0); | ||
101 | pci_restore_state(dev->pdev); | ||
102 | if (pci_enable_device(dev->pdev)) | 102 | if (pci_enable_device(dev->pdev)) |
103 | return -1; | 103 | return -1; |
104 | pci_set_master(dev->pdev); | 104 | pci_set_master(dev->pdev); |
@@ -124,9 +124,135 @@ static int i915_resume(struct drm_device *dev) | |||
124 | drm_helper_resume_force_mode(dev); | 124 | drm_helper_resume_force_mode(dev); |
125 | } | 125 | } |
126 | 126 | ||
127 | dev_priv->suspended = 0; | ||
128 | |||
127 | return ret; | 129 | return ret; |
128 | } | 130 | } |
129 | 131 | ||
132 | /** | ||
133 | * i965_reset - reset chip after a hang | ||
134 | * @dev: drm device to reset | ||
135 | * @flags: reset domains | ||
136 | * | ||
137 | * Reset the chip. Useful if a hang is detected. Returns zero on successful | ||
138 | * reset or otherwise an error code. | ||
139 | * | ||
140 | * Procedure is fairly simple: | ||
141 | * - reset the chip using the reset reg | ||
142 | * - re-init context state | ||
143 | * - re-init hardware status page | ||
144 | * - re-init ring buffer | ||
145 | * - re-init interrupt state | ||
146 | * - re-init display | ||
147 | */ | ||
148 | int i965_reset(struct drm_device *dev, u8 flags) | ||
149 | { | ||
150 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
151 | unsigned long timeout; | ||
152 | u8 gdrst; | ||
153 | /* | ||
154 | * We really should only reset the display subsystem if we actually | ||
155 | * need to | ||
156 | */ | ||
157 | bool need_display = true; | ||
158 | |||
159 | mutex_lock(&dev->struct_mutex); | ||
160 | |||
161 | /* | ||
162 | * Clear request list | ||
163 | */ | ||
164 | i915_gem_retire_requests(dev); | ||
165 | |||
166 | if (need_display) | ||
167 | i915_save_display(dev); | ||
168 | |||
169 | if (IS_I965G(dev) || IS_G4X(dev)) { | ||
170 | /* | ||
171 | * Set the domains we want to reset, then the reset bit (bit 0). | ||
172 | * Clear the reset bit after a while and wait for hardware status | ||
173 | * bit (bit 1) to be set | ||
174 | */ | ||
175 | pci_read_config_byte(dev->pdev, GDRST, &gdrst); | ||
176 | pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0)); | ||
177 | udelay(50); | ||
178 | pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); | ||
179 | |||
180 | /* ...we don't want to loop forever though, 500ms should be plenty */ | ||
181 | timeout = jiffies + msecs_to_jiffies(500); | ||
182 | do { | ||
183 | udelay(100); | ||
184 | pci_read_config_byte(dev->pdev, GDRST, &gdrst); | ||
185 | } while ((gdrst & 0x1) && time_after(timeout, jiffies)); | ||
186 | |||
187 | if (gdrst & 0x1) { | ||
188 | WARN(true, "i915: Failed to reset chip\n"); | ||
189 | mutex_unlock(&dev->struct_mutex); | ||
190 | return -EIO; | ||
191 | } | ||
192 | } else { | ||
193 | DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); | ||
194 | return -ENODEV; | ||
195 | } | ||
196 | |||
197 | /* Ok, now get things going again... */ | ||
198 | |||
199 | /* | ||
200 | * Everything depends on having the GTT running, so we need to start | ||
201 | * there. Fortunately we don't need to do this unless we reset the | ||
202 | * chip at a PCI level. | ||
203 | * | ||
204 | * Next we need to restore the context, but we don't use those | ||
205 | * yet either... | ||
206 | * | ||
207 | * Ring buffer needs to be re-initialized in the KMS case, or if X | ||
208 | * was running at the time of the reset (i.e. we weren't VT | ||
209 | * switched away). | ||
210 | */ | ||
211 | if (drm_core_check_feature(dev, DRIVER_MODESET) || | ||
212 | !dev_priv->mm.suspended) { | ||
213 | drm_i915_ring_buffer_t *ring = &dev_priv->ring; | ||
214 | struct drm_gem_object *obj = ring->ring_obj; | ||
215 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
216 | dev_priv->mm.suspended = 0; | ||
217 | |||
218 | /* Stop the ring if it's running. */ | ||
219 | I915_WRITE(PRB0_CTL, 0); | ||
220 | I915_WRITE(PRB0_TAIL, 0); | ||
221 | I915_WRITE(PRB0_HEAD, 0); | ||
222 | |||
223 | /* Initialize the ring. */ | ||
224 | I915_WRITE(PRB0_START, obj_priv->gtt_offset); | ||
225 | I915_WRITE(PRB0_CTL, | ||
226 | ((obj->size - 4096) & RING_NR_PAGES) | | ||
227 | RING_NO_REPORT | | ||
228 | RING_VALID); | ||
229 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
230 | i915_kernel_lost_context(dev); | ||
231 | else { | ||
232 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
233 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | ||
234 | ring->space = ring->head - (ring->tail + 8); | ||
235 | if (ring->space < 0) | ||
236 | ring->space += ring->Size; | ||
237 | } | ||
238 | |||
239 | mutex_unlock(&dev->struct_mutex); | ||
240 | drm_irq_uninstall(dev); | ||
241 | drm_irq_install(dev); | ||
242 | mutex_lock(&dev->struct_mutex); | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Display needs restore too... | ||
247 | */ | ||
248 | if (need_display) | ||
249 | i915_restore_display(dev); | ||
250 | |||
251 | mutex_unlock(&dev->struct_mutex); | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | |||
130 | static int __devinit | 256 | static int __devinit |
131 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 257 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
132 | { | 258 | { |
@@ -234,6 +360,8 @@ static int __init i915_init(void) | |||
234 | { | 360 | { |
235 | driver.num_ioctls = i915_max_ioctl; | 361 | driver.num_ioctls = i915_max_ioctl; |
236 | 362 | ||
363 | i915_gem_shrinker_init(); | ||
364 | |||
237 | /* | 365 | /* |
238 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless | 366 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless |
239 | * explicitly disabled with the module parameter. | 367 | * explicitly disabled with the module parameter. |
@@ -260,6 +388,7 @@ static int __init i915_init(void) | |||
260 | 388 | ||
261 | static void __exit i915_exit(void) | 389 | static void __exit i915_exit(void) |
262 | { | 390 | { |
391 | i915_gem_shrinker_exit(); | ||
263 | drm_exit(&driver); | 392 | drm_exit(&driver); |
264 | } | 393 | } |
265 | 394 | ||
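
Both the ring re-initialization in i965_reset() above and i915_wait_ring() in i915_dma.c compute free ring space the same way: head minus (tail + 8), wrapped by the ring size when the result goes negative. A standalone sketch of that calculation with illustrative head/tail values and a 128 KiB ring, matching the driver's PRIMARY_RINGBUFFER_SIZE.

#include <stdio.h>

#define RING_SIZE (128 * 1024)		/* PRIMARY_RINGBUFFER_SIZE */

static int ring_space(int head, int tail)
{
	int space = head - (tail + 8);

	if (space < 0)			/* free region wraps past the end */
		space += RING_SIZE;
	return space;
}

int main(void)
{
	printf("%d bytes free\n", ring_space(4096, 1024));	/* head ahead  */
	printf("%d bytes free\n", ring_space(1024, 4096));	/* wrapped case */
	return 0;
}
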
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a0632f8e76ac..b24b2d145b75 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -48,6 +48,11 @@ enum pipe { | |||
48 | PIPE_B, | 48 | PIPE_B, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | enum plane { | ||
52 | PLANE_A = 0, | ||
53 | PLANE_B, | ||
54 | }; | ||
55 | |||
51 | #define I915_NUM_PIPE 2 | 56 | #define I915_NUM_PIPE 2 |
52 | 57 | ||
53 | /* Interface history: | 58 | /* Interface history: |
@@ -148,6 +153,23 @@ struct drm_i915_error_state { | |||
148 | struct timeval time; | 153 | struct timeval time; |
149 | }; | 154 | }; |
150 | 155 | ||
156 | struct drm_i915_display_funcs { | ||
157 | void (*dpms)(struct drm_crtc *crtc, int mode); | ||
158 | bool (*fbc_enabled)(struct drm_crtc *crtc); | ||
159 | void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); | ||
160 | void (*disable_fbc)(struct drm_device *dev); | ||
161 | int (*get_display_clock_speed)(struct drm_device *dev); | ||
162 | int (*get_fifo_size)(struct drm_device *dev, int plane); | ||
163 | void (*update_wm)(struct drm_device *dev, int planea_clock, | ||
164 | int planeb_clock, int sr_hdisplay, int pixel_size); | ||
165 | /* clock updates for mode set */ | ||
166 | /* cursor updates */ | ||
167 | /* render clock increase/decrease */ | ||
168 | /* display clock increase/decrease */ | ||
169 | /* pll clock increase/decrease */ | ||
170 | /* clock gating init */ | ||
171 | }; | ||
172 | |||
151 | typedef struct drm_i915_private { | 173 | typedef struct drm_i915_private { |
152 | struct drm_device *dev; | 174 | struct drm_device *dev; |
153 | 175 | ||
@@ -198,10 +220,21 @@ typedef struct drm_i915_private { | |||
198 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | 220 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
199 | int vblank_pipe; | 221 | int vblank_pipe; |
200 | 222 | ||
223 | /* For hangcheck timer */ | ||
224 | #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ | ||
225 | struct timer_list hangcheck_timer; | ||
226 | int hangcheck_count; | ||
227 | uint32_t last_acthd; | ||
228 | |||
201 | bool cursor_needs_physical; | 229 | bool cursor_needs_physical; |
202 | 230 | ||
203 | struct drm_mm vram; | 231 | struct drm_mm vram; |
204 | 232 | ||
233 | unsigned long cfb_size; | ||
234 | unsigned long cfb_pitch; | ||
235 | int cfb_fence; | ||
236 | int cfb_plane; | ||
237 | |||
205 | int irq_enabled; | 238 | int irq_enabled; |
206 | 239 | ||
207 | struct intel_opregion opregion; | 240 | struct intel_opregion opregion; |
@@ -222,6 +255,8 @@ typedef struct drm_i915_private { | |||
222 | unsigned int edp_support:1; | 255 | unsigned int edp_support:1; |
223 | int lvds_ssc_freq; | 256 | int lvds_ssc_freq; |
224 | 257 | ||
258 | struct notifier_block lid_notifier; | ||
259 | |||
225 | int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ | 260 | int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ |
226 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | 261 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
227 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | 262 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
@@ -234,7 +269,11 @@ typedef struct drm_i915_private { | |||
234 | struct work_struct error_work; | 269 | struct work_struct error_work; |
235 | struct workqueue_struct *wq; | 270 | struct workqueue_struct *wq; |
236 | 271 | ||
272 | /* Display functions */ | ||
273 | struct drm_i915_display_funcs display; | ||
274 | |||
237 | /* Register state */ | 275 | /* Register state */ |
276 | bool suspended; | ||
238 | u8 saveLBB; | 277 | u8 saveLBB; |
239 | u32 saveDSPACNTR; | 278 | u32 saveDSPACNTR; |
240 | u32 saveDSPBCNTR; | 279 | u32 saveDSPBCNTR; |
@@ -350,6 +389,15 @@ typedef struct drm_i915_private { | |||
350 | int gtt_mtrr; | 389 | int gtt_mtrr; |
351 | 390 | ||
352 | /** | 391 | /** |
392 | * Membership on list of all loaded devices, used to evict | ||
393 | * inactive buffers under memory pressure. | ||
394 | * | ||
395 | * Modifications should only be done whilst holding the | ||
396 | * shrink_list_lock spinlock. | ||
397 | */ | ||
398 | struct list_head shrink_list; | ||
399 | |||
400 | /** | ||
353 | * List of objects currently involved in rendering from the | 401 | * List of objects currently involved in rendering from the |
354 | * ringbuffer. | 402 | * ringbuffer. |
355 | * | 403 | * |
@@ -432,7 +480,7 @@ typedef struct drm_i915_private { | |||
432 | * It prevents command submission from occurring and makes | 480 | * It prevents command submission from occurring and makes |
433 | * every pending request fail | 481 | * every pending request fail |
434 | */ | 482 | */ |
435 | int wedged; | 483 | atomic_t wedged; |
436 | 484 | ||
437 | /** Bit 6 swizzling required for X tiling */ | 485 | /** Bit 6 swizzling required for X tiling */ |
438 | uint32_t bit_6_swizzle_x; | 486 | uint32_t bit_6_swizzle_x; |
@@ -491,10 +539,7 @@ struct drm_i915_gem_object { | |||
491 | * This is the same as gtt_space->start | 539 | * This is the same as gtt_space->start |
492 | */ | 540 | */ |
493 | uint32_t gtt_offset; | 541 | uint32_t gtt_offset; |
494 | /** | 542 | |
495 | * Required alignment for the object | ||
496 | */ | ||
497 | uint32_t gtt_alignment; | ||
498 | /** | 543 | /** |
499 | * Fake offset for use by mmap(2) | 544 | * Fake offset for use by mmap(2) |
500 | */ | 545 | */ |
@@ -541,6 +586,11 @@ struct drm_i915_gem_object { | |||
541 | * in an execbuffer object list. | 586 | * in an execbuffer object list. |
542 | */ | 587 | */ |
543 | int in_execbuffer; | 588 | int in_execbuffer; |
589 | |||
590 | /** | ||
591 | * Advice: are the backing pages purgeable? | ||
592 | */ | ||
593 | int madv; | ||
544 | }; | 594 | }; |
545 | 595 | ||
546 | /** | 596 | /** |
@@ -585,6 +635,8 @@ extern int i915_max_ioctl; | |||
585 | extern unsigned int i915_fbpercrtc; | 635 | extern unsigned int i915_fbpercrtc; |
586 | extern unsigned int i915_powersave; | 636 | extern unsigned int i915_powersave; |
587 | 637 | ||
638 | extern void i915_save_display(struct drm_device *dev); | ||
639 | extern void i915_restore_display(struct drm_device *dev); | ||
588 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); | 640 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
589 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); | 641 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
590 | 642 | ||
@@ -604,8 +656,10 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | |||
604 | extern int i915_emit_box(struct drm_device *dev, | 656 | extern int i915_emit_box(struct drm_device *dev, |
605 | struct drm_clip_rect *boxes, | 657 | struct drm_clip_rect *boxes, |
606 | int i, int DR1, int DR4); | 658 | int i, int DR1, int DR4); |
659 | extern int i965_reset(struct drm_device *dev, u8 flags); | ||
607 | 660 | ||
608 | /* i915_irq.c */ | 661 | /* i915_irq.c */ |
662 | void i915_hangcheck_elapsed(unsigned long data); | ||
609 | extern int i915_irq_emit(struct drm_device *dev, void *data, | 663 | extern int i915_irq_emit(struct drm_device *dev, void *data, |
610 | struct drm_file *file_priv); | 664 | struct drm_file *file_priv); |
611 | extern int i915_irq_wait(struct drm_device *dev, void *data, | 665 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
@@ -676,6 +730,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
676 | struct drm_file *file_priv); | 730 | struct drm_file *file_priv); |
677 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | 731 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
678 | struct drm_file *file_priv); | 732 | struct drm_file *file_priv); |
733 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | ||
734 | struct drm_file *file_priv); | ||
679 | int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | 735 | int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
680 | struct drm_file *file_priv); | 736 | struct drm_file *file_priv); |
681 | int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | 737 | int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
@@ -695,6 +751,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj); | |||
695 | void i915_gem_release_mmap(struct drm_gem_object *obj); | 751 | void i915_gem_release_mmap(struct drm_gem_object *obj); |
696 | void i915_gem_lastclose(struct drm_device *dev); | 752 | void i915_gem_lastclose(struct drm_device *dev); |
697 | uint32_t i915_get_gem_seqno(struct drm_device *dev); | 753 | uint32_t i915_get_gem_seqno(struct drm_device *dev); |
754 | bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); | ||
698 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); | 755 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); |
699 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); | 756 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); |
700 | void i915_gem_retire_requests(struct drm_device *dev); | 757 | void i915_gem_retire_requests(struct drm_device *dev); |
@@ -720,6 +777,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj); | |||
720 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | 777 | void i915_gem_object_put_pages(struct drm_gem_object *obj); |
721 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | 778 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); |
722 | 779 | ||
780 | void i915_gem_shrinker_init(void); | ||
781 | void i915_gem_shrinker_exit(void); | ||
782 | |||
723 | /* i915_gem_tiling.c */ | 783 | /* i915_gem_tiling.c */ |
724 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 784 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
725 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | 785 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); |
@@ -767,6 +827,8 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; } | |||
767 | extern void intel_modeset_init(struct drm_device *dev); | 827 | extern void intel_modeset_init(struct drm_device *dev); |
768 | extern void intel_modeset_cleanup(struct drm_device *dev); | 828 | extern void intel_modeset_cleanup(struct drm_device *dev); |
769 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | 829 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
830 | extern void i8xx_disable_fbc(struct drm_device *dev); | ||
831 | extern void g4x_disable_fbc(struct drm_device *dev); | ||
770 | 832 | ||
771 | /** | 833 | /** |
772 | * Lock test for when it's just for synchronization of ring access. | 834 | * Lock test for when it's just for synchronization of ring access. |
@@ -864,6 +926,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
864 | (dev)->pci_device == 0x2E12 || \ | 926 | (dev)->pci_device == 0x2E12 || \ |
865 | (dev)->pci_device == 0x2E22 || \ | 927 | (dev)->pci_device == 0x2E22 || \ |
866 | (dev)->pci_device == 0x2E32 || \ | 928 | (dev)->pci_device == 0x2E32 || \ |
929 | (dev)->pci_device == 0x2E42 || \ | ||
867 | (dev)->pci_device == 0x0042 || \ | 930 | (dev)->pci_device == 0x0042 || \ |
868 | (dev)->pci_device == 0x0046) | 931 | (dev)->pci_device == 0x0046) |
869 | 932 | ||
@@ -876,6 +939,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
876 | (dev)->pci_device == 0x2E12 || \ | 939 | (dev)->pci_device == 0x2E12 || \ |
877 | (dev)->pci_device == 0x2E22 || \ | 940 | (dev)->pci_device == 0x2E22 || \ |
878 | (dev)->pci_device == 0x2E32 || \ | 941 | (dev)->pci_device == 0x2E32 || \ |
942 | (dev)->pci_device == 0x2E42 || \ | ||
879 | IS_GM45(dev)) | 943 | IS_GM45(dev)) |
880 | 944 | ||
881 | #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) | 945 | #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) |
@@ -909,12 +973,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
909 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 973 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
910 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 974 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
911 | #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) | 975 | #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) |
912 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) | 976 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) |
913 | /* dsparb controlled by hw only */ | 977 | /* dsparb controlled by hw only */ |
914 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 978 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
915 | 979 | ||
916 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) | 980 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) |
917 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 981 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
982 | #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev))) | ||
918 | 983 | ||
919 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 984 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
920 | 985 | ||
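
The drm_i915_display_funcs structure added above is a per-chipset hook table: the driver fills in the function pointers once and the rest of the code calls through them instead of branching on the chip generation everywhere. A toy sketch of that pattern; the hook signature, the gen2/gen4 names and the FIFO numbers are simplified stand-ins, not the real i915 implementations.

#include <stdio.h>

struct display_funcs {
	int (*get_fifo_size)(int plane);	/* simplified hook signature */
};

static int gen2_get_fifo_size(int plane)
{
	return plane ? 255 : 127;		/* made-up numbers */
}

static int gen4_get_fifo_size(int plane)
{
	return 95;				/* made-up number */
}

int main(void)
{
	int is_gen4 = 1;			/* pretend chipset detection */
	struct display_funcs funcs = {
		.get_fifo_size = is_gen4 ? gen4_get_fifo_size
					 : gen2_get_fifo_size,
	};

	printf("plane A FIFO: %d entries\n", funcs.get_fifo_size(0));
	return 0;
}
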
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c67317112f4a..40727d4c2919 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "drm.h" | 29 | #include "drm.h" |
30 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "i915_trace.h" | ||
32 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
33 | #include <linux/swap.h> | 34 | #include <linux/swap.h> |
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
@@ -48,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | |||
48 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 49 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, |
49 | unsigned alignment); | 50 | unsigned alignment); |
50 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | 51 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); |
51 | static int i915_gem_evict_something(struct drm_device *dev); | 52 | static int i915_gem_evict_something(struct drm_device *dev, int min_size); |
53 | static int i915_gem_evict_from_inactive_list(struct drm_device *dev); | ||
52 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 54 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, |
53 | struct drm_i915_gem_pwrite *args, | 55 | struct drm_i915_gem_pwrite *args, |
54 | struct drm_file *file_priv); | 56 | struct drm_file *file_priv); |
55 | 57 | ||
58 | static LIST_HEAD(shrink_list); | ||
59 | static DEFINE_SPINLOCK(shrink_list_lock); | ||
60 | |||
56 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 61 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
57 | unsigned long end) | 62 | unsigned long end) |
58 | { | 63 | { |
@@ -316,6 +321,45 @@ fail_unlock: | |||
316 | return ret; | 321 | return ret; |
317 | } | 322 | } |
318 | 323 | ||
324 | static inline gfp_t | ||
325 | i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) | ||
326 | { | ||
327 | return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); | ||
328 | } | ||
329 | |||
330 | static inline void | ||
331 | i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) | ||
332 | { | ||
333 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); | ||
334 | } | ||
335 | |||
336 | static int | ||
337 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | ||
338 | { | ||
339 | int ret; | ||
340 | |||
341 | ret = i915_gem_object_get_pages(obj); | ||
342 | |||
343 | /* If we've insufficient memory to map in the pages, attempt | ||
344 | * to make some space by throwing out some old buffers. | ||
345 | */ | ||
346 | if (ret == -ENOMEM) { | ||
347 | struct drm_device *dev = obj->dev; | ||
348 | gfp_t gfp; | ||
349 | |||
350 | ret = i915_gem_evict_something(dev, obj->size); | ||
351 | if (ret) | ||
352 | return ret; | ||
353 | |||
354 | gfp = i915_gem_object_get_page_gfp_mask(obj); | ||
355 | i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); | ||
356 | ret = i915_gem_object_get_pages(obj); | ||
357 | i915_gem_object_set_page_gfp_mask (obj, gfp); | ||
358 | } | ||
359 | |||
360 | return ret; | ||
361 | } | ||
362 | |||
319 | /** | 363 | /** |
320 | * This is the fallback shmem pread path, which allocates temporary storage | 364 | * This is the fallback shmem pread path, which allocates temporary storage |
321 | * in kernel space to copy_to_user into outside of the struct_mutex, so we | 365 | * in kernel space to copy_to_user into outside of the struct_mutex, so we |
@@ -367,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
367 | 411 | ||
368 | mutex_lock(&dev->struct_mutex); | 412 | mutex_lock(&dev->struct_mutex); |
369 | 413 | ||
370 | ret = i915_gem_object_get_pages(obj); | 414 | ret = i915_gem_object_get_pages_or_evict(obj); |
371 | if (ret != 0) | 415 | if (ret) |
372 | goto fail_unlock; | 416 | goto fail_unlock; |
373 | 417 | ||
374 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, | 418 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, |
@@ -842,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
842 | 886 | ||
843 | mutex_lock(&dev->struct_mutex); | 887 | mutex_lock(&dev->struct_mutex); |
844 | 888 | ||
845 | ret = i915_gem_object_get_pages(obj); | 889 | ret = i915_gem_object_get_pages_or_evict(obj); |
846 | if (ret != 0) | 890 | if (ret) |
847 | goto fail_unlock; | 891 | goto fail_unlock; |
848 | 892 | ||
849 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 893 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
@@ -1155,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1155 | /* Now bind it into the GTT if needed */ | 1199 | /* Now bind it into the GTT if needed */ |
1156 | mutex_lock(&dev->struct_mutex); | 1200 | mutex_lock(&dev->struct_mutex); |
1157 | if (!obj_priv->gtt_space) { | 1201 | if (!obj_priv->gtt_space) { |
1158 | ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); | 1202 | ret = i915_gem_object_bind_to_gtt(obj, 0); |
1159 | if (ret) { | 1203 | if (ret) |
1160 | mutex_unlock(&dev->struct_mutex); | 1204 | goto unlock; |
1161 | return VM_FAULT_SIGBUS; | ||
1162 | } | ||
1163 | |||
1164 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | ||
1165 | if (ret) { | ||
1166 | mutex_unlock(&dev->struct_mutex); | ||
1167 | return VM_FAULT_SIGBUS; | ||
1168 | } | ||
1169 | 1205 | ||
1170 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1206 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); |
1207 | |||
1208 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | ||
1209 | if (ret) | ||
1210 | goto unlock; | ||
1171 | } | 1211 | } |
1172 | 1212 | ||
1173 | /* Need a new fence register? */ | 1213 | /* Need a new fence register? */ |
1174 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1214 | if (obj_priv->tiling_mode != I915_TILING_NONE) { |
1175 | ret = i915_gem_object_get_fence_reg(obj); | 1215 | ret = i915_gem_object_get_fence_reg(obj); |
1176 | if (ret) { | 1216 | if (ret) |
1177 | mutex_unlock(&dev->struct_mutex); | 1217 | goto unlock; |
1178 | return VM_FAULT_SIGBUS; | ||
1179 | } | ||
1180 | } | 1218 | } |
1181 | 1219 | ||
1182 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 1220 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + |
@@ -1184,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1184 | 1222 | ||
1185 | /* Finally, remap it using the new GTT offset */ | 1223 | /* Finally, remap it using the new GTT offset */ |
1186 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 1224 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); |
1187 | 1225 | unlock: | |
1188 | mutex_unlock(&dev->struct_mutex); | 1226 | mutex_unlock(&dev->struct_mutex); |
1189 | 1227 | ||
1190 | switch (ret) { | 1228 | switch (ret) { |
1229 | case 0: | ||
1230 | case -ERESTARTSYS: | ||
1231 | return VM_FAULT_NOPAGE; | ||
1191 | case -ENOMEM: | 1232 | case -ENOMEM: |
1192 | case -EAGAIN: | 1233 | case -EAGAIN: |
1193 | return VM_FAULT_OOM; | 1234 | return VM_FAULT_OOM; |
1194 | case -EFAULT: | ||
1195 | case -EINVAL: | ||
1196 | return VM_FAULT_SIGBUS; | ||
1197 | default: | 1235 | default: |
1198 | return VM_FAULT_NOPAGE; | 1236 | return VM_FAULT_SIGBUS; |
1199 | } | 1237 | } |
1200 | } | 1238 | } |
1201 | 1239 | ||
@@ -1388,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1388 | 1426 | ||
1389 | obj_priv = obj->driver_private; | 1427 | obj_priv = obj->driver_private; |
1390 | 1428 | ||
1429 | if (obj_priv->madv != I915_MADV_WILLNEED) { | ||
1430 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | ||
1431 | drm_gem_object_unreference(obj); | ||
1432 | mutex_unlock(&dev->struct_mutex); | ||
1433 | return -EINVAL; | ||
1434 | } | ||
1435 | |||
1436 | |||
1391 | if (!obj_priv->mmap_offset) { | 1437 | if (!obj_priv->mmap_offset) { |
1392 | ret = i915_gem_create_mmap_offset(obj); | 1438 | ret = i915_gem_create_mmap_offset(obj); |
1393 | if (ret) { | 1439 | if (ret) { |
@@ -1399,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1399 | 1445 | ||
1400 | args->offset = obj_priv->mmap_offset; | 1446 | args->offset = obj_priv->mmap_offset; |
1401 | 1447 | ||
1402 | obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj); | ||
1403 | |||
1404 | /* Make sure the alignment is correct for fence regs etc */ | ||
1405 | if (obj_priv->agp_mem && | ||
1406 | (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) { | ||
1407 | drm_gem_object_unreference(obj); | ||
1408 | mutex_unlock(&dev->struct_mutex); | ||
1409 | return -EINVAL; | ||
1410 | } | ||
1411 | |||
1412 | /* | 1448 | /* |
1413 | * Pull it into the GTT so that we have a page list (makes the | 1449 | * Pull it into the GTT so that we have a page list (makes the |
1414 | * initial fault faster and any subsequent flushing possible). | 1450 | * initial fault faster and any subsequent flushing possible). |
1415 | */ | 1451 | */ |
1416 | if (!obj_priv->agp_mem) { | 1452 | if (!obj_priv->agp_mem) { |
1417 | ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); | 1453 | ret = i915_gem_object_bind_to_gtt(obj, 0); |
1418 | if (ret) { | 1454 | if (ret) { |
1419 | drm_gem_object_unreference(obj); | 1455 | drm_gem_object_unreference(obj); |
1420 | mutex_unlock(&dev->struct_mutex); | 1456 | mutex_unlock(&dev->struct_mutex); |
@@ -1437,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) | |||
1437 | int i; | 1473 | int i; |
1438 | 1474 | ||
1439 | BUG_ON(obj_priv->pages_refcount == 0); | 1475 | BUG_ON(obj_priv->pages_refcount == 0); |
1476 | BUG_ON(obj_priv->madv == __I915_MADV_PURGED); | ||
1440 | 1477 | ||
1441 | if (--obj_priv->pages_refcount != 0) | 1478 | if (--obj_priv->pages_refcount != 0) |
1442 | return; | 1479 | return; |
@@ -1444,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) | |||
1444 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1481 | if (obj_priv->tiling_mode != I915_TILING_NONE) |
1445 | i915_gem_object_save_bit_17_swizzle(obj); | 1482 | i915_gem_object_save_bit_17_swizzle(obj); |
1446 | 1483 | ||
1447 | for (i = 0; i < page_count; i++) | 1484 | if (obj_priv->madv == I915_MADV_DONTNEED) |
1448 | if (obj_priv->pages[i] != NULL) { | 1485 | obj_priv->dirty = 0; |
1449 | if (obj_priv->dirty) | 1486 | |
1450 | set_page_dirty(obj_priv->pages[i]); | 1487 | for (i = 0; i < page_count; i++) { |
1488 | if (obj_priv->pages[i] == NULL) | ||
1489 | break; | ||
1490 | |||
1491 | if (obj_priv->dirty) | ||
1492 | set_page_dirty(obj_priv->pages[i]); | ||
1493 | |||
1494 | if (obj_priv->madv == I915_MADV_WILLNEED) | ||
1451 | mark_page_accessed(obj_priv->pages[i]); | 1495 | mark_page_accessed(obj_priv->pages[i]); |
1452 | page_cache_release(obj_priv->pages[i]); | 1496 | |
1453 | } | 1497 | page_cache_release(obj_priv->pages[i]); |
1498 | } | ||
1454 | obj_priv->dirty = 0; | 1499 | obj_priv->dirty = 0; |
1455 | 1500 | ||
1456 | drm_free_large(obj_priv->pages); | 1501 | drm_free_large(obj_priv->pages); |
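The rewritten put_pages loop above drops the dirty flag for I915_MADV_DONTNEED objects so their disposable contents are never written back, and only gives the "recently used" page-cache hint to I915_MADV_WILLNEED pages, so purgeable pages are reclaimed first. A simplified standalone sketch of that per-page policy, using invented stand-in types for the page flags:

#include <stdbool.h>
#include <stdio.h>

enum madv { MADV_WILLNEED, MADV_DONTNEED };

struct fake_page { bool dirty; bool accessed; };

/* Release policy mirroring the hunk above: purgeable (DONTNEED) objects are
 * never written back, and only WILLNEED pages keep their reuse hint. */
static void release_pages(struct fake_page *pages, int n,
			  enum madv madv, bool obj_dirty)
{
	if (madv == MADV_DONTNEED)
		obj_dirty = false;		/* contents are disposable, skip writeback */

	for (int i = 0; i < n; i++) {
		if (obj_dirty)
			pages[i].dirty = true;		/* stand-in for set_page_dirty() */
		if (madv == MADV_WILLNEED)
			pages[i].accessed = true;	/* stand-in for mark_page_accessed() */
	}
}

int main(void)
{
	struct fake_page p[2] = { 0 };
	release_pages(p, 2, MADV_DONTNEED, true);
	printf("dirty=%d accessed=%d\n", p[0].dirty, p[0].accessed);	/* 0 0 */
	return 0;
}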
@@ -1489,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1489 | obj_priv->last_rendering_seqno = 0; | 1534 | obj_priv->last_rendering_seqno = 0; |
1490 | } | 1535 | } |
1491 | 1536 | ||
1537 | /* Immediately discard the backing storage */ | ||
1538 | static void | ||
1539 | i915_gem_object_truncate(struct drm_gem_object *obj) | ||
1540 | { | ||
1541 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1542 | struct inode *inode; | ||
1543 | |||
1544 | inode = obj->filp->f_path.dentry->d_inode; | ||
1545 | if (inode->i_op->truncate) | ||
1546 | inode->i_op->truncate (inode); | ||
1547 | |||
1548 | obj_priv->madv = __I915_MADV_PURGED; | ||
1549 | } | ||
1550 | |||
1551 | static inline int | ||
1552 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) | ||
1553 | { | ||
1554 | return obj_priv->madv == I915_MADV_DONTNEED; | ||
1555 | } | ||
1556 | |||
1492 | static void | 1557 | static void |
1493 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | 1558 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) |
1494 | { | 1559 | { |
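The two helpers added above give an object's backing store a three-way lifecycle: WILLNEED (keep), DONTNEED (purgeable once unbound), and the internal __I915_MADV_PURGED state recorded after the shmem inode has been truncated. A small standalone sketch of that state machine with invented names; the real purge truncates the inode rather than just flipping a flag:

#include <assert.h>

enum backing_state { BACKING_KEEP, BACKING_PURGEABLE, BACKING_PURGED };

struct obj { enum backing_state state; int bound; };

/* Drop the backing store exactly once, and only when the object is both
 * purgeable and no longer bound into the aperture. */
static void maybe_purge(struct obj *o)
{
	if (o->state == BACKING_PURGEABLE && !o->bound)
		o->state = BACKING_PURGED;	/* stand-in for truncating the shmem inode */
}

int main(void)
{
	struct obj o = { BACKING_PURGEABLE, 1 };
	maybe_purge(&o);		/* still bound: nothing happens */
	assert(o.state == BACKING_PURGEABLE);
	o.bound = 0;
	maybe_purge(&o);
	assert(o.state == BACKING_PURGED);
	return 0;
}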
@@ -1577,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1577 | 1642 | ||
1578 | if ((obj->write_domain & flush_domains) == | 1643 | if ((obj->write_domain & flush_domains) == |
1579 | obj->write_domain) { | 1644 | obj->write_domain) { |
1645 | uint32_t old_write_domain = obj->write_domain; | ||
1646 | |||
1580 | obj->write_domain = 0; | 1647 | obj->write_domain = 0; |
1581 | i915_gem_object_move_to_active(obj, seqno); | 1648 | i915_gem_object_move_to_active(obj, seqno); |
1649 | |||
1650 | trace_i915_gem_object_change_domain(obj, | ||
1651 | obj->read_domains, | ||
1652 | old_write_domain); | ||
1582 | } | 1653 | } |
1583 | } | 1654 | } |
1584 | 1655 | ||
1585 | } | 1656 | } |
1586 | 1657 | ||
1587 | if (was_empty && !dev_priv->mm.suspended) | 1658 | if (!dev_priv->mm.suspended) { |
1588 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 1659 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); |
1660 | if (was_empty) | ||
1661 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | ||
1662 | } | ||
1589 | return seqno; | 1663 | return seqno; |
1590 | } | 1664 | } |
1591 | 1665 | ||
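Beyond the tracepoint, the add-request hunk above now re-arms the GPU hangcheck timer whenever new work is queued, and only schedules the retire worker when the request list had been empty. A tiny userspace sketch of the deadline re-arm idea (invented names, not the kernel timer API):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static time_t hang_deadline;	/* pushed forward whenever new GPU work is queued */

static void on_add_request(void)
{
	hang_deadline = time(NULL) + 2;	/* roughly mod_timer(hangcheck, jiffies + period) */
}

/* A periodic checker declares a hang only if no new request moved the deadline. */
static bool gpu_seems_hung(void)
{
	return time(NULL) > hang_deadline;
}

int main(void)
{
	on_add_request();
	printf("hung=%d\n", gpu_seems_hung());	/* 0 right after queuing work */
	return 0;
}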
@@ -1623,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev, | |||
1623 | { | 1697 | { |
1624 | drm_i915_private_t *dev_priv = dev->dev_private; | 1698 | drm_i915_private_t *dev_priv = dev->dev_private; |
1625 | 1699 | ||
1700 | trace_i915_gem_request_retire(dev, request->seqno); | ||
1701 | |||
1626 | /* Move any buffers on the active list that are no longer referenced | 1702 | /* Move any buffers on the active list that are no longer referenced |
1627 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 1703 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
1628 | */ | 1704 | */ |
@@ -1671,7 +1747,7 @@ out: | |||
1671 | /** | 1747 | /** |
1672 | * Returns true if seq1 is later than seq2. | 1748 | * Returns true if seq1 is later than seq2. |
1673 | */ | 1749 | */ |
1674 | static int | 1750 | bool |
1675 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) | 1751 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) |
1676 | { | 1752 | { |
1677 | return (int32_t)(seq1 - seq2) >= 0; | 1753 | return (int32_t)(seq1 - seq2) >= 0; |
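i915_seqno_passed() is promoted from a static int helper to a shared bool here; its signed-subtraction comparison stays correct across 32-bit sequence-number wraparound as long as the two values are within 2^31 of each other. A standalone check of that property:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same comparison as the hunk above: survives u32 wraparound provided the
 * two sequence numbers are less than 2^31 apart. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));
	assert(!seqno_passed(3, 5));
	assert(seqno_passed(2, 0xfffffffeu));	/* wrapped: 2 is still "later" */
	return 0;
}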
@@ -1709,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
1709 | retiring_seqno = request->seqno; | 1785 | retiring_seqno = request->seqno; |
1710 | 1786 | ||
1711 | if (i915_seqno_passed(seqno, retiring_seqno) || | 1787 | if (i915_seqno_passed(seqno, retiring_seqno) || |
1712 | dev_priv->mm.wedged) { | 1788 | atomic_read(&dev_priv->mm.wedged)) { |
1713 | i915_gem_retire_request(dev, request); | 1789 | i915_gem_retire_request(dev, request); |
1714 | 1790 | ||
1715 | list_del(&request->list); | 1791 | list_del(&request->list); |
@@ -1751,6 +1827,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1751 | 1827 | ||
1752 | BUG_ON(seqno == 0); | 1828 | BUG_ON(seqno == 0); |
1753 | 1829 | ||
1830 | if (atomic_read(&dev_priv->mm.wedged)) | ||
1831 | return -EIO; | ||
1832 | |||
1754 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { | 1833 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { |
1755 | if (IS_IGDNG(dev)) | 1834 | if (IS_IGDNG(dev)) |
1756 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 1835 | ier = I915_READ(DEIER) | I915_READ(GTIER); |
@@ -1763,16 +1842,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1763 | i915_driver_irq_postinstall(dev); | 1842 | i915_driver_irq_postinstall(dev); |
1764 | } | 1843 | } |
1765 | 1844 | ||
1845 | trace_i915_gem_request_wait_begin(dev, seqno); | ||
1846 | |||
1766 | dev_priv->mm.waiting_gem_seqno = seqno; | 1847 | dev_priv->mm.waiting_gem_seqno = seqno; |
1767 | i915_user_irq_get(dev); | 1848 | i915_user_irq_get(dev); |
1768 | ret = wait_event_interruptible(dev_priv->irq_queue, | 1849 | ret = wait_event_interruptible(dev_priv->irq_queue, |
1769 | i915_seqno_passed(i915_get_gem_seqno(dev), | 1850 | i915_seqno_passed(i915_get_gem_seqno(dev), |
1770 | seqno) || | 1851 | seqno) || |
1771 | dev_priv->mm.wedged); | 1852 | atomic_read(&dev_priv->mm.wedged)); |
1772 | i915_user_irq_put(dev); | 1853 | i915_user_irq_put(dev); |
1773 | dev_priv->mm.waiting_gem_seqno = 0; | 1854 | dev_priv->mm.waiting_gem_seqno = 0; |
1855 | |||
1856 | trace_i915_gem_request_wait_end(dev, seqno); | ||
1774 | } | 1857 | } |
1775 | if (dev_priv->mm.wedged) | 1858 | if (atomic_read(&dev_priv->mm.wedged)) |
1776 | ret = -EIO; | 1859 | ret = -EIO; |
1777 | 1860 | ||
1778 | if (ret && ret != -ERESTARTSYS) | 1861 | if (ret && ret != -ERESTARTSYS) |
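The wedged flag becomes an atomic_t in these hunks so it can be tested before and during the wait without holding the struct mutex, and a wedged GPU now fails the wait up front with -EIO instead of sleeping. A minimal userspace sketch of the same flag pattern using C11 atomics (invented names):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* GPU "wedged" flag: set from the error path, read from many places
 * without taking the main lock (the point of the atomic_t conversion). */
static atomic_bool wedged;

static int wait_for_request(void)
{
	if (atomic_load(&wedged))
		return -EIO;	/* give up immediately instead of sleeping */
	/* ... otherwise sleep until the seqno passes or wedged becomes true ... */
	return 0;
}

int main(void)
{
	atomic_store(&wedged, true);
	printf("%d\n", wait_for_request());	/* prints -EIO */
	return 0;
}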
@@ -1803,6 +1886,8 @@ i915_gem_flush(struct drm_device *dev, | |||
1803 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | 1886 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, |
1804 | invalidate_domains, flush_domains); | 1887 | invalidate_domains, flush_domains); |
1805 | #endif | 1888 | #endif |
1889 | trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, | ||
1890 | invalidate_domains, flush_domains); | ||
1806 | 1891 | ||
1807 | if (flush_domains & I915_GEM_DOMAIN_CPU) | 1892 | if (flush_domains & I915_GEM_DOMAIN_CPU) |
1808 | drm_agp_chipset_flush(dev); | 1893 | drm_agp_chipset_flush(dev); |
@@ -1915,6 +2000,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1915 | return -EINVAL; | 2000 | return -EINVAL; |
1916 | } | 2001 | } |
1917 | 2002 | ||
2003 | /* blow away mappings if mapped through GTT */ | ||
2004 | i915_gem_release_mmap(obj); | ||
2005 | |||
2006 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
2007 | i915_gem_clear_fence_reg(obj); | ||
2008 | |||
1918 | /* Move the object to the CPU domain to ensure that | 2009 | /* Move the object to the CPU domain to ensure that |
1919 | * any possible CPU writes while it's not in the GTT | 2010 | * any possible CPU writes while it's not in the GTT |
1920 | * are flushed when we go to remap it. This will | 2011 | * are flushed when we go to remap it. This will |
@@ -1928,21 +2019,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1928 | return ret; | 2019 | return ret; |
1929 | } | 2020 | } |
1930 | 2021 | ||
2022 | BUG_ON(obj_priv->active); | ||
2023 | |||
1931 | if (obj_priv->agp_mem != NULL) { | 2024 | if (obj_priv->agp_mem != NULL) { |
1932 | drm_unbind_agp(obj_priv->agp_mem); | 2025 | drm_unbind_agp(obj_priv->agp_mem); |
1933 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | 2026 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); |
1934 | obj_priv->agp_mem = NULL; | 2027 | obj_priv->agp_mem = NULL; |
1935 | } | 2028 | } |
1936 | 2029 | ||
1937 | BUG_ON(obj_priv->active); | ||
1938 | |||
1939 | /* blow away mappings if mapped through GTT */ | ||
1940 | i915_gem_release_mmap(obj); | ||
1941 | |||
1942 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
1943 | i915_gem_clear_fence_reg(obj); | ||
1944 | |||
1945 | i915_gem_object_put_pages(obj); | 2030 | i915_gem_object_put_pages(obj); |
2031 | BUG_ON(obj_priv->pages_refcount); | ||
1946 | 2032 | ||
1947 | if (obj_priv->gtt_space) { | 2033 | if (obj_priv->gtt_space) { |
1948 | atomic_dec(&dev->gtt_count); | 2034 | atomic_dec(&dev->gtt_count); |
@@ -1956,40 +2042,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1956 | if (!list_empty(&obj_priv->list)) | 2042 | if (!list_empty(&obj_priv->list)) |
1957 | list_del_init(&obj_priv->list); | 2043 | list_del_init(&obj_priv->list); |
1958 | 2044 | ||
2045 | if (i915_gem_object_is_purgeable(obj_priv)) | ||
2046 | i915_gem_object_truncate(obj); | ||
2047 | |||
2048 | trace_i915_gem_object_unbind(obj); | ||
2049 | |||
1959 | return 0; | 2050 | return 0; |
1960 | } | 2051 | } |
1961 | 2052 | ||
2053 | static struct drm_gem_object * | ||
2054 | i915_gem_find_inactive_object(struct drm_device *dev, int min_size) | ||
2055 | { | ||
2056 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2057 | struct drm_i915_gem_object *obj_priv; | ||
2058 | struct drm_gem_object *best = NULL; | ||
2059 | struct drm_gem_object *first = NULL; | ||
2060 | |||
2061 | /* Try to find the smallest clean object */ | ||
2062 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
2063 | struct drm_gem_object *obj = obj_priv->obj; | ||
2064 | if (obj->size >= min_size) { | ||
2065 | if ((!obj_priv->dirty || | ||
2066 | i915_gem_object_is_purgeable(obj_priv)) && | ||
2067 | (!best || obj->size < best->size)) { | ||
2068 | best = obj; | ||
2069 | if (best->size == min_size) | ||
2070 | return best; | ||
2071 | } | ||
2072 | if (!first) | ||
2073 | first = obj; | ||
2074 | } | ||
2075 | } | ||
2076 | |||
2077 | return best ? best : first; | ||
2078 | } | ||
2079 | |||
1962 | static int | 2080 | static int |
1963 | i915_gem_evict_something(struct drm_device *dev) | 2081 | i915_gem_evict_everything(struct drm_device *dev) |
2082 | { | ||
2083 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2084 | uint32_t seqno; | ||
2085 | int ret; | ||
2086 | bool lists_empty; | ||
2087 | |||
2088 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2089 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
2090 | list_empty(&dev_priv->mm.flushing_list) && | ||
2091 | list_empty(&dev_priv->mm.active_list)); | ||
2092 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2093 | |||
2094 | if (lists_empty) | ||
2095 | return -ENOSPC; | ||
2096 | |||
2097 | /* Flush everything (on to the inactive lists) and evict */ | ||
2098 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
2099 | seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); | ||
2100 | if (seqno == 0) | ||
2101 | return -ENOMEM; | ||
2102 | |||
2103 | ret = i915_wait_request(dev, seqno); | ||
2104 | if (ret) | ||
2105 | return ret; | ||
2106 | |||
2107 | ret = i915_gem_evict_from_inactive_list(dev); | ||
2108 | if (ret) | ||
2109 | return ret; | ||
2110 | |||
2111 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2112 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
2113 | list_empty(&dev_priv->mm.flushing_list) && | ||
2114 | list_empty(&dev_priv->mm.active_list)); | ||
2115 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2116 | BUG_ON(!lists_empty); | ||
2117 | |||
2118 | return 0; | ||
2119 | } | ||
2120 | |||
2121 | static int | ||
2122 | i915_gem_evict_something(struct drm_device *dev, int min_size) | ||
1964 | { | 2123 | { |
1965 | drm_i915_private_t *dev_priv = dev->dev_private; | 2124 | drm_i915_private_t *dev_priv = dev->dev_private; |
1966 | struct drm_gem_object *obj; | 2125 | struct drm_gem_object *obj; |
1967 | struct drm_i915_gem_object *obj_priv; | 2126 | int ret; |
1968 | int ret = 0; | ||
1969 | 2127 | ||
1970 | for (;;) { | 2128 | for (;;) { |
2129 | i915_gem_retire_requests(dev); | ||
2130 | |||
1971 | /* If there's an inactive buffer available now, grab it | 2131 | /* If there's an inactive buffer available now, grab it |
1972 | * and be done. | 2132 | * and be done. |
1973 | */ | 2133 | */ |
1974 | if (!list_empty(&dev_priv->mm.inactive_list)) { | 2134 | obj = i915_gem_find_inactive_object(dev, min_size); |
1975 | obj_priv = list_first_entry(&dev_priv->mm.inactive_list, | 2135 | if (obj) { |
1976 | struct drm_i915_gem_object, | 2136 | struct drm_i915_gem_object *obj_priv; |
1977 | list); | 2137 | |
1978 | obj = obj_priv->obj; | ||
1979 | BUG_ON(obj_priv->pin_count != 0); | ||
1980 | #if WATCH_LRU | 2138 | #if WATCH_LRU |
1981 | DRM_INFO("%s: evicting %p\n", __func__, obj); | 2139 | DRM_INFO("%s: evicting %p\n", __func__, obj); |
1982 | #endif | 2140 | #endif |
2141 | obj_priv = obj->driver_private; | ||
2142 | BUG_ON(obj_priv->pin_count != 0); | ||
1983 | BUG_ON(obj_priv->active); | 2143 | BUG_ON(obj_priv->active); |
1984 | 2144 | ||
1985 | /* Wait on the rendering and unbind the buffer. */ | 2145 | /* Wait on the rendering and unbind the buffer. */ |
1986 | ret = i915_gem_object_unbind(obj); | 2146 | return i915_gem_object_unbind(obj); |
1987 | break; | ||
1988 | } | 2147 | } |
1989 | 2148 | ||
1990 | /* If we didn't get anything, but the ring is still processing | 2149 | /* If we didn't get anything, but the ring is still processing |
1991 | * things, wait for one of those things to finish and hopefully | 2150 | * things, wait for the next to finish and hopefully leave us |
1992 | * leave us a buffer to evict. | 2151 | * a buffer to evict. |
1993 | */ | 2152 | */ |
1994 | if (!list_empty(&dev_priv->mm.request_list)) { | 2153 | if (!list_empty(&dev_priv->mm.request_list)) { |
1995 | struct drm_i915_gem_request *request; | 2154 | struct drm_i915_gem_request *request; |
@@ -2000,16 +2159,9 @@ i915_gem_evict_something(struct drm_device *dev) | |||
2000 | 2159 | ||
2001 | ret = i915_wait_request(dev, request->seqno); | 2160 | ret = i915_wait_request(dev, request->seqno); |
2002 | if (ret) | 2161 | if (ret) |
2003 | break; | 2162 | return ret; |
2004 | 2163 | ||
2005 | /* if waiting caused an object to become inactive, | 2164 | continue; |
2006 | * then loop around and wait for it. Otherwise, we | ||
2007 | * assume that waiting freed and unbound something, | ||
2008 | * so there should now be some space in the GTT | ||
2009 | */ | ||
2010 | if (!list_empty(&dev_priv->mm.inactive_list)) | ||
2011 | continue; | ||
2012 | break; | ||
2013 | } | 2165 | } |
2014 | 2166 | ||
2015 | /* If we didn't have anything on the request list but there | 2167 | /* If we didn't have anything on the request list but there |
@@ -2018,46 +2170,44 @@ i915_gem_evict_something(struct drm_device *dev) | |||
2018 | * will get moved to inactive. | 2170 | * will get moved to inactive. |
2019 | */ | 2171 | */ |
2020 | if (!list_empty(&dev_priv->mm.flushing_list)) { | 2172 | if (!list_empty(&dev_priv->mm.flushing_list)) { |
2021 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 2173 | struct drm_i915_gem_object *obj_priv; |
2022 | struct drm_i915_gem_object, | ||
2023 | list); | ||
2024 | obj = obj_priv->obj; | ||
2025 | 2174 | ||
2026 | i915_gem_flush(dev, | 2175 | /* Find an object that we can immediately reuse */ |
2027 | obj->write_domain, | 2176 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { |
2028 | obj->write_domain); | 2177 | obj = obj_priv->obj; |
2029 | i915_add_request(dev, NULL, obj->write_domain); | 2178 | if (obj->size >= min_size) |
2179 | break; | ||
2030 | 2180 | ||
2031 | obj = NULL; | 2181 | obj = NULL; |
2032 | continue; | 2182 | } |
2033 | } | ||
2034 | 2183 | ||
2035 | DRM_ERROR("inactive empty %d request empty %d " | 2184 | if (obj != NULL) { |
2036 | "flushing empty %d\n", | 2185 | uint32_t seqno; |
2037 | list_empty(&dev_priv->mm.inactive_list), | ||
2038 | list_empty(&dev_priv->mm.request_list), | ||
2039 | list_empty(&dev_priv->mm.flushing_list)); | ||
2040 | /* If we didn't do any of the above, there's nothing to be done | ||
2041 | * and we just can't fit it in. | ||
2042 | */ | ||
2043 | return -ENOSPC; | ||
2044 | } | ||
2045 | return ret; | ||
2046 | } | ||
2047 | 2186 | ||
2048 | static int | 2187 | i915_gem_flush(dev, |
2049 | i915_gem_evict_everything(struct drm_device *dev) | 2188 | obj->write_domain, |
2050 | { | 2189 | obj->write_domain); |
2051 | int ret; | 2190 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2191 | if (seqno == 0) | ||
2192 | return -ENOMEM; | ||
2052 | 2193 | ||
2053 | for (;;) { | 2194 | ret = i915_wait_request(dev, seqno); |
2054 | ret = i915_gem_evict_something(dev); | 2195 | if (ret) |
2055 | if (ret != 0) | 2196 | return ret; |
2056 | break; | 2197 | |
2198 | continue; | ||
2199 | } | ||
2200 | } | ||
2201 | |||
2202 | /* If we didn't do any of the above, there's no single buffer | ||
2203 | * large enough to swap out for the new one, so just evict | ||
2204 | * everything and start again. (This should be rare.) | ||
2205 | */ | ||
2206 | if (!list_empty (&dev_priv->mm.inactive_list)) | ||
2207 | return i915_gem_evict_from_inactive_list(dev); | ||
2208 | else | ||
2209 | return i915_gem_evict_everything(dev); | ||
2057 | } | 2210 | } |
2058 | if (ret == -ENOSPC) | ||
2059 | return 0; | ||
2060 | return ret; | ||
2061 | } | 2211 | } |
2062 | 2212 | ||
2063 | int | 2213 | int |
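i915_gem_find_inactive_object() above scans the inactive LRU for the smallest clean or purgeable object of at least min_size, and falls back to the first object large enough if every candidate would need a flush. A simplified standalone version of that selection over an array, with invented types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct buf { size_t size; bool dirty; bool purgeable; };

/* Prefer the smallest buffer we can drop without flushing; otherwise fall
 * back to the first one that is big enough (it will need a flush first). */
static struct buf *find_eviction_candidate(struct buf *bufs, int n, size_t min_size)
{
	struct buf *best = NULL, *first = NULL;

	for (int i = 0; i < n; i++) {
		struct buf *b = &bufs[i];
		if (b->size < min_size)
			continue;
		if ((!b->dirty || b->purgeable) && (!best || b->size < best->size)) {
			best = b;
			if (best->size == min_size)	/* exact fit, stop early */
				return best;
		}
		if (!first)
			first = b;
	}
	return best ? best : first;
}

int main(void)
{
	struct buf bufs[] = {
		{ 4096, true,  false },
		{ 8192, false, false },
		{ 4096, false, false },
	};
	struct buf *b = find_eviction_candidate(bufs, 3, 4096);
	printf("picked size=%zu dirty=%d\n", b->size, b->dirty);	/* 4096 0 */
	return 0;
}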
@@ -2080,7 +2230,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2080 | BUG_ON(obj_priv->pages != NULL); | 2230 | BUG_ON(obj_priv->pages != NULL); |
2081 | obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); | 2231 | obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); |
2082 | if (obj_priv->pages == NULL) { | 2232 | if (obj_priv->pages == NULL) { |
2083 | DRM_ERROR("Faled to allocate page list\n"); | ||
2084 | obj_priv->pages_refcount--; | 2233 | obj_priv->pages_refcount--; |
2085 | return -ENOMEM; | 2234 | return -ENOMEM; |
2086 | } | 2235 | } |
@@ -2091,7 +2240,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2091 | page = read_mapping_page(mapping, i, NULL); | 2240 | page = read_mapping_page(mapping, i, NULL); |
2092 | if (IS_ERR(page)) { | 2241 | if (IS_ERR(page)) { |
2093 | ret = PTR_ERR(page); | 2242 | ret = PTR_ERR(page); |
2094 | DRM_ERROR("read_mapping_page failed: %d\n", ret); | ||
2095 | i915_gem_object_put_pages(obj); | 2243 | i915_gem_object_put_pages(obj); |
2096 | return ret; | 2244 | return ret; |
2097 | } | 2245 | } |
@@ -2328,6 +2476,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2328 | else | 2476 | else |
2329 | i830_write_fence_reg(reg); | 2477 | i830_write_fence_reg(reg); |
2330 | 2478 | ||
2479 | trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); | ||
2480 | |||
2331 | return 0; | 2481 | return 0; |
2332 | } | 2482 | } |
2333 | 2483 | ||
@@ -2410,10 +2560,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2410 | drm_i915_private_t *dev_priv = dev->dev_private; | 2560 | drm_i915_private_t *dev_priv = dev->dev_private; |
2411 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2561 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2412 | struct drm_mm_node *free_space; | 2562 | struct drm_mm_node *free_space; |
2413 | int page_count, ret; | 2563 | bool retry_alloc = false; |
2564 | int ret; | ||
2414 | 2565 | ||
2415 | if (dev_priv->mm.suspended) | 2566 | if (dev_priv->mm.suspended) |
2416 | return -EBUSY; | 2567 | return -EBUSY; |
2568 | |||
2569 | if (obj_priv->madv != I915_MADV_WILLNEED) { | ||
2570 | DRM_ERROR("Attempting to bind a purgeable object\n"); | ||
2571 | return -EINVAL; | ||
2572 | } | ||
2573 | |||
2417 | if (alignment == 0) | 2574 | if (alignment == 0) |
2418 | alignment = i915_gem_get_gtt_alignment(obj); | 2575 | alignment = i915_gem_get_gtt_alignment(obj); |
2419 | if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { | 2576 | if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { |
@@ -2433,30 +2590,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2433 | } | 2590 | } |
2434 | } | 2591 | } |
2435 | if (obj_priv->gtt_space == NULL) { | 2592 | if (obj_priv->gtt_space == NULL) { |
2436 | bool lists_empty; | ||
2437 | |||
2438 | /* If the gtt is empty and we're still having trouble | 2593 | /* If the gtt is empty and we're still having trouble |
2439 | * fitting our object in, we're out of memory. | 2594 | * fitting our object in, we're out of memory. |
2440 | */ | 2595 | */ |
2441 | #if WATCH_LRU | 2596 | #if WATCH_LRU |
2442 | DRM_INFO("%s: GTT full, evicting something\n", __func__); | 2597 | DRM_INFO("%s: GTT full, evicting something\n", __func__); |
2443 | #endif | 2598 | #endif |
2444 | spin_lock(&dev_priv->mm.active_list_lock); | 2599 | ret = i915_gem_evict_something(dev, obj->size); |
2445 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 2600 | if (ret) |
2446 | list_empty(&dev_priv->mm.flushing_list) && | ||
2447 | list_empty(&dev_priv->mm.active_list)); | ||
2448 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2449 | if (lists_empty) { | ||
2450 | DRM_ERROR("GTT full, but LRU list empty\n"); | ||
2451 | return -ENOSPC; | ||
2452 | } | ||
2453 | |||
2454 | ret = i915_gem_evict_something(dev); | ||
2455 | if (ret != 0) { | ||
2456 | if (ret != -ERESTARTSYS) | ||
2457 | DRM_ERROR("Failed to evict a buffer %d\n", ret); | ||
2458 | return ret; | 2601 | return ret; |
2459 | } | 2602 | |
2460 | goto search_free; | 2603 | goto search_free; |
2461 | } | 2604 | } |
2462 | 2605 | ||
@@ -2464,27 +2607,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2464 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | 2607 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2465 | obj->size, obj_priv->gtt_offset); | 2608 | obj->size, obj_priv->gtt_offset); |
2466 | #endif | 2609 | #endif |
2610 | if (retry_alloc) { | ||
2611 | i915_gem_object_set_page_gfp_mask (obj, | ||
2612 | i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); | ||
2613 | } | ||
2467 | ret = i915_gem_object_get_pages(obj); | 2614 | ret = i915_gem_object_get_pages(obj); |
2615 | if (retry_alloc) { | ||
2616 | i915_gem_object_set_page_gfp_mask (obj, | ||
2617 | i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); | ||
2618 | } | ||
2468 | if (ret) { | 2619 | if (ret) { |
2469 | drm_mm_put_block(obj_priv->gtt_space); | 2620 | drm_mm_put_block(obj_priv->gtt_space); |
2470 | obj_priv->gtt_space = NULL; | 2621 | obj_priv->gtt_space = NULL; |
2622 | |||
2623 | if (ret == -ENOMEM) { | ||
2624 | /* first try to clear up some space from the GTT */ | ||
2625 | ret = i915_gem_evict_something(dev, obj->size); | ||
2626 | if (ret) { | ||
2627 | /* now try to shrink everyone else */ | ||
2628 | if (! retry_alloc) { | ||
2629 | retry_alloc = true; | ||
2630 | goto search_free; | ||
2631 | } | ||
2632 | |||
2633 | return ret; | ||
2634 | } | ||
2635 | |||
2636 | goto search_free; | ||
2637 | } | ||
2638 | |||
2471 | return ret; | 2639 | return ret; |
2472 | } | 2640 | } |
2473 | 2641 | ||
2474 | page_count = obj->size / PAGE_SIZE; | ||
2475 | /* Create an AGP memory structure pointing at our pages, and bind it | 2642 | /* Create an AGP memory structure pointing at our pages, and bind it |
2476 | * into the GTT. | 2643 | * into the GTT. |
2477 | */ | 2644 | */ |
2478 | obj_priv->agp_mem = drm_agp_bind_pages(dev, | 2645 | obj_priv->agp_mem = drm_agp_bind_pages(dev, |
2479 | obj_priv->pages, | 2646 | obj_priv->pages, |
2480 | page_count, | 2647 | obj->size >> PAGE_SHIFT, |
2481 | obj_priv->gtt_offset, | 2648 | obj_priv->gtt_offset, |
2482 | obj_priv->agp_type); | 2649 | obj_priv->agp_type); |
2483 | if (obj_priv->agp_mem == NULL) { | 2650 | if (obj_priv->agp_mem == NULL) { |
2484 | i915_gem_object_put_pages(obj); | 2651 | i915_gem_object_put_pages(obj); |
2485 | drm_mm_put_block(obj_priv->gtt_space); | 2652 | drm_mm_put_block(obj_priv->gtt_space); |
2486 | obj_priv->gtt_space = NULL; | 2653 | obj_priv->gtt_space = NULL; |
2487 | return -ENOMEM; | 2654 | |
2655 | ret = i915_gem_evict_something(dev, obj->size); | ||
2656 | if (ret) | ||
2657 | return ret; | ||
2658 | |||
2659 | goto search_free; | ||
2488 | } | 2660 | } |
2489 | atomic_inc(&dev->gtt_count); | 2661 | atomic_inc(&dev->gtt_count); |
2490 | atomic_add(obj->size, &dev->gtt_memory); | 2662 | atomic_add(obj->size, &dev->gtt_memory); |
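The bind path above now allocates pages with __GFP_NORETRY left set for the first attempts, evicts from the GTT on -ENOMEM, and only clears __GFP_NORETRY as a last resort so the allocator may reclaim harder. A sketch of that escalation with the allocation and eviction steps stubbed out (invented helper names, not the driver's functions):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for get_pages()/evict_something(), chosen so the
 * cheap path fails and only the retry pass succeeds. */
static int try_get_pages(bool allow_retry) { return allow_retry ? 0 : -ENOMEM; }
static int evict_some(void) { return -ENOSPC; }

static int bind_with_fallback(void)
{
	bool retry_alloc = false;
	int ret;

again:
	ret = try_get_pages(retry_alloc);	/* fast attempt, or the harder one */
	if (ret == -ENOMEM) {
		if (evict_some() == 0)
			goto again;		/* freed aperture space, retry cheaply */
		if (!retry_alloc) {
			retry_alloc = true;	/* last resort: drop __GFP_NORETRY */
			goto again;
		}
	}
	return ret;
}

int main(void)
{
	printf("%d\n", bind_with_fallback());	/* 0: succeeds on the retry pass */
	return 0;
}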
@@ -2496,6 +2668,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2496 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 2668 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); |
2497 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 2669 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); |
2498 | 2670 | ||
2671 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); | ||
2672 | |||
2499 | return 0; | 2673 | return 0; |
2500 | } | 2674 | } |
2501 | 2675 | ||
@@ -2511,15 +2685,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj) | |||
2511 | if (obj_priv->pages == NULL) | 2685 | if (obj_priv->pages == NULL) |
2512 | return; | 2686 | return; |
2513 | 2687 | ||
2514 | /* XXX: The 865 in particular appears to be weird in how it handles | 2688 | trace_i915_gem_object_clflush(obj); |
2515 | * cache flushing. We haven't figured it out, but the | ||
2516 | * clflush+agp_chipset_flush doesn't appear to successfully get the | ||
2517 | * data visible to the PGU, while wbinvd + agp_chipset_flush does. | ||
2518 | */ | ||
2519 | if (IS_I865G(obj->dev)) { | ||
2520 | wbinvd(); | ||
2521 | return; | ||
2522 | } | ||
2523 | 2689 | ||
2524 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); | 2690 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); |
2525 | } | 2691 | } |
@@ -2530,21 +2696,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
2530 | { | 2696 | { |
2531 | struct drm_device *dev = obj->dev; | 2697 | struct drm_device *dev = obj->dev; |
2532 | uint32_t seqno; | 2698 | uint32_t seqno; |
2699 | uint32_t old_write_domain; | ||
2533 | 2700 | ||
2534 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2701 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2535 | return; | 2702 | return; |
2536 | 2703 | ||
2537 | /* Queue the GPU write cache flushing we need. */ | 2704 | /* Queue the GPU write cache flushing we need. */ |
2705 | old_write_domain = obj->write_domain; | ||
2538 | i915_gem_flush(dev, 0, obj->write_domain); | 2706 | i915_gem_flush(dev, 0, obj->write_domain); |
2539 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2707 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2540 | obj->write_domain = 0; | 2708 | obj->write_domain = 0; |
2541 | i915_gem_object_move_to_active(obj, seqno); | 2709 | i915_gem_object_move_to_active(obj, seqno); |
2710 | |||
2711 | trace_i915_gem_object_change_domain(obj, | ||
2712 | obj->read_domains, | ||
2713 | old_write_domain); | ||
2542 | } | 2714 | } |
2543 | 2715 | ||
2544 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2716 | /** Flushes the GTT write domain for the object if it's dirty. */ |
2545 | static void | 2717 | static void |
2546 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | 2718 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) |
2547 | { | 2719 | { |
2720 | uint32_t old_write_domain; | ||
2721 | |||
2548 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | 2722 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) |
2549 | return; | 2723 | return; |
2550 | 2724 | ||
@@ -2552,7 +2726,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | |||
2552 | * to it immediately go to main memory as far as we know, so there's | 2726 | * to it immediately go to main memory as far as we know, so there's |
2553 | * no chipset flush. It also doesn't land in render cache. | 2727 | * no chipset flush. It also doesn't land in render cache. |
2554 | */ | 2728 | */ |
2729 | old_write_domain = obj->write_domain; | ||
2555 | obj->write_domain = 0; | 2730 | obj->write_domain = 0; |
2731 | |||
2732 | trace_i915_gem_object_change_domain(obj, | ||
2733 | obj->read_domains, | ||
2734 | old_write_domain); | ||
2556 | } | 2735 | } |
2557 | 2736 | ||
2558 | /** Flushes the CPU write domain for the object if it's dirty. */ | 2737 | /** Flushes the CPU write domain for the object if it's dirty. */ |
@@ -2560,13 +2739,19 @@ static void | |||
2560 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | 2739 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) |
2561 | { | 2740 | { |
2562 | struct drm_device *dev = obj->dev; | 2741 | struct drm_device *dev = obj->dev; |
2742 | uint32_t old_write_domain; | ||
2563 | 2743 | ||
2564 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | 2744 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) |
2565 | return; | 2745 | return; |
2566 | 2746 | ||
2567 | i915_gem_clflush_object(obj); | 2747 | i915_gem_clflush_object(obj); |
2568 | drm_agp_chipset_flush(dev); | 2748 | drm_agp_chipset_flush(dev); |
2749 | old_write_domain = obj->write_domain; | ||
2569 | obj->write_domain = 0; | 2750 | obj->write_domain = 0; |
2751 | |||
2752 | trace_i915_gem_object_change_domain(obj, | ||
2753 | obj->read_domains, | ||
2754 | old_write_domain); | ||
2570 | } | 2755 | } |
2571 | 2756 | ||
2572 | /** | 2757 | /** |
@@ -2579,6 +2764,7 @@ int | |||
2579 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2764 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) |
2580 | { | 2765 | { |
2581 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2766 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2767 | uint32_t old_write_domain, old_read_domains; | ||
2582 | int ret; | 2768 | int ret; |
2583 | 2769 | ||
2584 | /* Not valid to be called on unbound objects. */ | 2770 | /* Not valid to be called on unbound objects. */ |
@@ -2591,6 +2777,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2591 | if (ret != 0) | 2777 | if (ret != 0) |
2592 | return ret; | 2778 | return ret; |
2593 | 2779 | ||
2780 | old_write_domain = obj->write_domain; | ||
2781 | old_read_domains = obj->read_domains; | ||
2782 | |||
2594 | /* If we're writing through the GTT domain, then CPU and GPU caches | 2783 | /* If we're writing through the GTT domain, then CPU and GPU caches |
2595 | * will need to be invalidated at next use. | 2784 | * will need to be invalidated at next use. |
2596 | */ | 2785 | */ |
@@ -2609,6 +2798,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2609 | obj_priv->dirty = 1; | 2798 | obj_priv->dirty = 1; |
2610 | } | 2799 | } |
2611 | 2800 | ||
2801 | trace_i915_gem_object_change_domain(obj, | ||
2802 | old_read_domains, | ||
2803 | old_write_domain); | ||
2804 | |||
2612 | return 0; | 2805 | return 0; |
2613 | } | 2806 | } |
2614 | 2807 | ||
@@ -2621,6 +2814,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2621 | static int | 2814 | static int |
2622 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 2815 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) |
2623 | { | 2816 | { |
2817 | uint32_t old_write_domain, old_read_domains; | ||
2624 | int ret; | 2818 | int ret; |
2625 | 2819 | ||
2626 | i915_gem_object_flush_gpu_write_domain(obj); | 2820 | i915_gem_object_flush_gpu_write_domain(obj); |
@@ -2636,6 +2830,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2636 | */ | 2830 | */ |
2637 | i915_gem_object_set_to_full_cpu_read_domain(obj); | 2831 | i915_gem_object_set_to_full_cpu_read_domain(obj); |
2638 | 2832 | ||
2833 | old_write_domain = obj->write_domain; | ||
2834 | old_read_domains = obj->read_domains; | ||
2835 | |||
2639 | /* Flush the CPU cache if it's still invalid. */ | 2836 | /* Flush the CPU cache if it's still invalid. */ |
2640 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 2837 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
2641 | i915_gem_clflush_object(obj); | 2838 | i915_gem_clflush_object(obj); |
@@ -2656,6 +2853,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2656 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 2853 | obj->write_domain = I915_GEM_DOMAIN_CPU; |
2657 | } | 2854 | } |
2658 | 2855 | ||
2856 | trace_i915_gem_object_change_domain(obj, | ||
2857 | old_read_domains, | ||
2858 | old_write_domain); | ||
2859 | |||
2659 | return 0; | 2860 | return 0; |
2660 | } | 2861 | } |
2661 | 2862 | ||
@@ -2777,6 +2978,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
2777 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2978 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2778 | uint32_t invalidate_domains = 0; | 2979 | uint32_t invalidate_domains = 0; |
2779 | uint32_t flush_domains = 0; | 2980 | uint32_t flush_domains = 0; |
2981 | uint32_t old_read_domains; | ||
2780 | 2982 | ||
2781 | BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); | 2983 | BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); |
2782 | BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); | 2984 | BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); |
@@ -2823,6 +3025,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
2823 | i915_gem_clflush_object(obj); | 3025 | i915_gem_clflush_object(obj); |
2824 | } | 3026 | } |
2825 | 3027 | ||
3028 | old_read_domains = obj->read_domains; | ||
3029 | |||
2826 | /* The actual obj->write_domain will be updated with | 3030 | /* The actual obj->write_domain will be updated with |
2827 | * pending_write_domain after we emit the accumulated flush for all | 3031 | * pending_write_domain after we emit the accumulated flush for all |
2828 | * of our domain changes in execbuffers (which clears objects' | 3032 | * of our domain changes in execbuffers (which clears objects' |
@@ -2841,6 +3045,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
2841 | obj->read_domains, obj->write_domain, | 3045 | obj->read_domains, obj->write_domain, |
2842 | dev->invalidate_domains, dev->flush_domains); | 3046 | dev->invalidate_domains, dev->flush_domains); |
2843 | #endif | 3047 | #endif |
3048 | |||
3049 | trace_i915_gem_object_change_domain(obj, | ||
3050 | old_read_domains, | ||
3051 | obj->write_domain); | ||
2844 | } | 3052 | } |
2845 | 3053 | ||
2846 | /** | 3054 | /** |
@@ -2893,6 +3101,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
2893 | uint64_t offset, uint64_t size) | 3101 | uint64_t offset, uint64_t size) |
2894 | { | 3102 | { |
2895 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3103 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
3104 | uint32_t old_read_domains; | ||
2896 | int i, ret; | 3105 | int i, ret; |
2897 | 3106 | ||
2898 | if (offset == 0 && size == obj->size) | 3107 | if (offset == 0 && size == obj->size) |
@@ -2939,8 +3148,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
2939 | */ | 3148 | */ |
2940 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3149 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
2941 | 3150 | ||
3151 | old_read_domains = obj->read_domains; | ||
2942 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3152 | obj->read_domains |= I915_GEM_DOMAIN_CPU; |
2943 | 3153 | ||
3154 | trace_i915_gem_object_change_domain(obj, | ||
3155 | old_read_domains, | ||
3156 | obj->write_domain); | ||
3157 | |||
2944 | return 0; | 3158 | return 0; |
2945 | } | 3159 | } |
2946 | 3160 | ||
@@ -2984,6 +3198,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2984 | } | 3198 | } |
2985 | target_obj_priv = target_obj->driver_private; | 3199 | target_obj_priv = target_obj->driver_private; |
2986 | 3200 | ||
3201 | #if WATCH_RELOC | ||
3202 | DRM_INFO("%s: obj %p offset %08x target %d " | ||
3203 | "read %08x write %08x gtt %08x " | ||
3204 | "presumed %08x delta %08x\n", | ||
3205 | __func__, | ||
3206 | obj, | ||
3207 | (int) reloc->offset, | ||
3208 | (int) reloc->target_handle, | ||
3209 | (int) reloc->read_domains, | ||
3210 | (int) reloc->write_domain, | ||
3211 | (int) target_obj_priv->gtt_offset, | ||
3212 | (int) reloc->presumed_offset, | ||
3213 | reloc->delta); | ||
3214 | #endif | ||
3215 | |||
2987 | /* The target buffer should have appeared before us in the | 3216 | /* The target buffer should have appeared before us in the |
2988 | * exec_object list, so it should have a GTT space bound by now. | 3217 | * exec_object list, so it should have a GTT space bound by now. |
2989 | */ | 3218 | */ |
@@ -2995,25 +3224,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2995 | return -EINVAL; | 3224 | return -EINVAL; |
2996 | } | 3225 | } |
2997 | 3226 | ||
2998 | if (reloc->offset > obj->size - 4) { | 3227 | /* Validate that the target is in a valid r/w GPU domain */ |
2999 | DRM_ERROR("Relocation beyond object bounds: " | ||
3000 | "obj %p target %d offset %d size %d.\n", | ||
3001 | obj, reloc->target_handle, | ||
3002 | (int) reloc->offset, (int) obj->size); | ||
3003 | drm_gem_object_unreference(target_obj); | ||
3004 | i915_gem_object_unpin(obj); | ||
3005 | return -EINVAL; | ||
3006 | } | ||
3007 | if (reloc->offset & 3) { | ||
3008 | DRM_ERROR("Relocation not 4-byte aligned: " | ||
3009 | "obj %p target %d offset %d.\n", | ||
3010 | obj, reloc->target_handle, | ||
3011 | (int) reloc->offset); | ||
3012 | drm_gem_object_unreference(target_obj); | ||
3013 | i915_gem_object_unpin(obj); | ||
3014 | return -EINVAL; | ||
3015 | } | ||
3016 | |||
3017 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 3228 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
3018 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { | 3229 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { |
3019 | DRM_ERROR("reloc with read/write CPU domains: " | 3230 | DRM_ERROR("reloc with read/write CPU domains: " |
@@ -3027,7 +3238,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3027 | i915_gem_object_unpin(obj); | 3238 | i915_gem_object_unpin(obj); |
3028 | return -EINVAL; | 3239 | return -EINVAL; |
3029 | } | 3240 | } |
3030 | |||
3031 | if (reloc->write_domain && target_obj->pending_write_domain && | 3241 | if (reloc->write_domain && target_obj->pending_write_domain && |
3032 | reloc->write_domain != target_obj->pending_write_domain) { | 3242 | reloc->write_domain != target_obj->pending_write_domain) { |
3033 | DRM_ERROR("Write domain conflict: " | 3243 | DRM_ERROR("Write domain conflict: " |
@@ -3042,21 +3252,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3042 | return -EINVAL; | 3252 | return -EINVAL; |
3043 | } | 3253 | } |
3044 | 3254 | ||
3045 | #if WATCH_RELOC | ||
3046 | DRM_INFO("%s: obj %p offset %08x target %d " | ||
3047 | "read %08x write %08x gtt %08x " | ||
3048 | "presumed %08x delta %08x\n", | ||
3049 | __func__, | ||
3050 | obj, | ||
3051 | (int) reloc->offset, | ||
3052 | (int) reloc->target_handle, | ||
3053 | (int) reloc->read_domains, | ||
3054 | (int) reloc->write_domain, | ||
3055 | (int) target_obj_priv->gtt_offset, | ||
3056 | (int) reloc->presumed_offset, | ||
3057 | reloc->delta); | ||
3058 | #endif | ||
3059 | |||
3060 | target_obj->pending_read_domains |= reloc->read_domains; | 3255 | target_obj->pending_read_domains |= reloc->read_domains; |
3061 | target_obj->pending_write_domain |= reloc->write_domain; | 3256 | target_obj->pending_write_domain |= reloc->write_domain; |
3062 | 3257 | ||
@@ -3068,6 +3263,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3068 | continue; | 3263 | continue; |
3069 | } | 3264 | } |
3070 | 3265 | ||
3266 | /* Check that the relocation address is valid... */ | ||
3267 | if (reloc->offset > obj->size - 4) { | ||
3268 | DRM_ERROR("Relocation beyond object bounds: " | ||
3269 | "obj %p target %d offset %d size %d.\n", | ||
3270 | obj, reloc->target_handle, | ||
3271 | (int) reloc->offset, (int) obj->size); | ||
3272 | drm_gem_object_unreference(target_obj); | ||
3273 | i915_gem_object_unpin(obj); | ||
3274 | return -EINVAL; | ||
3275 | } | ||
3276 | if (reloc->offset & 3) { | ||
3277 | DRM_ERROR("Relocation not 4-byte aligned: " | ||
3278 | "obj %p target %d offset %d.\n", | ||
3279 | obj, reloc->target_handle, | ||
3280 | (int) reloc->offset); | ||
3281 | drm_gem_object_unreference(target_obj); | ||
3282 | i915_gem_object_unpin(obj); | ||
3283 | return -EINVAL; | ||
3284 | } | ||
3285 | |||
3286 | /* and points to somewhere within the target object. */ | ||
3287 | if (reloc->delta >= target_obj->size) { | ||
3288 | DRM_ERROR("Relocation beyond target object bounds: " | ||
3289 | "obj %p target %d delta %d size %d.\n", | ||
3290 | obj, reloc->target_handle, | ||
3291 | (int) reloc->delta, (int) target_obj->size); | ||
3292 | drm_gem_object_unreference(target_obj); | ||
3293 | i915_gem_object_unpin(obj); | ||
3294 | return -EINVAL; | ||
3295 | } | ||
3296 | |||
3071 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 3297 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
3072 | if (ret != 0) { | 3298 | if (ret != 0) { |
3073 | drm_gem_object_unreference(target_obj); | 3299 | drm_gem_object_unreference(target_obj); |
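The relocation checks are reordered above and one is added: besides requiring the relocation offset to lie inside the source object and be 4-byte aligned, the value being written (target offset plus delta) must now land inside the target object. A standalone version of the three checks:

#include <assert.h>
#include <errno.h>
#include <stdint.h>

/* Validate one relocation entry: offset within the source object, dword
 * aligned, and delta within the target object (the new check in the hunk). */
static int check_reloc(uint64_t obj_size, uint64_t target_size,
		       uint64_t offset, uint64_t delta)
{
	if (offset > obj_size - 4)
		return -EINVAL;		/* write would run past the object */
	if (offset & 3)
		return -EINVAL;		/* GPU expects 4-byte aligned relocations */
	if (delta >= target_size)
		return -EINVAL;		/* points outside the target buffer */
	return 0;
}

int main(void)
{
	assert(check_reloc(4096, 4096, 100, 0) == 0);
	assert(check_reloc(4096, 4096, 4094, 0) == -EINVAL);	/* past the end */
	assert(check_reloc(4096, 4096, 102, 0) == -EINVAL);	/* misaligned */
	assert(check_reloc(4096, 4096, 100, 8192) == -EINVAL);	/* outside target */
	return 0;
}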
@@ -3126,6 +3352,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, | |||
3126 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 3352 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; |
3127 | exec_len = (uint32_t) exec->batch_len; | 3353 | exec_len = (uint32_t) exec->batch_len; |
3128 | 3354 | ||
3355 | trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno); | ||
3356 | |||
3129 | count = nbox ? nbox : 1; | 3357 | count = nbox ? nbox : 1; |
3130 | 3358 | ||
3131 | for (i = 0; i < count; i++) { | 3359 | for (i = 0; i < count; i++) { |
@@ -3363,7 +3591,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3363 | 3591 | ||
3364 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3592 | i915_verify_inactive(dev, __FILE__, __LINE__); |
3365 | 3593 | ||
3366 | if (dev_priv->mm.wedged) { | 3594 | if (atomic_read(&dev_priv->mm.wedged)) { |
3367 | DRM_ERROR("Execbuf while wedged\n"); | 3595 | DRM_ERROR("Execbuf while wedged\n"); |
3368 | mutex_unlock(&dev->struct_mutex); | 3596 | mutex_unlock(&dev->struct_mutex); |
3369 | ret = -EIO; | 3597 | ret = -EIO; |
@@ -3421,8 +3649,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3421 | 3649 | ||
3422 | /* error other than GTT full, or we've already tried again */ | 3650 | /* error other than GTT full, or we've already tried again */ |
3423 | if (ret != -ENOSPC || pin_tries >= 1) { | 3651 | if (ret != -ENOSPC || pin_tries >= 1) { |
3424 | if (ret != -ERESTARTSYS) | 3652 | if (ret != -ERESTARTSYS) { |
3425 | DRM_ERROR("Failed to pin buffers %d\n", ret); | 3653 | unsigned long long total_size = 0; |
3654 | for (i = 0; i < args->buffer_count; i++) | ||
3655 | total_size += object_list[i]->size; | ||
3656 | DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", | ||
3657 | pinned+1, args->buffer_count, | ||
3658 | total_size, ret); | ||
3659 | DRM_ERROR("%d objects [%d pinned], " | ||
3660 | "%d object bytes [%d pinned], " | ||
3661 | "%d/%d gtt bytes\n", | ||
3662 | atomic_read(&dev->object_count), | ||
3663 | atomic_read(&dev->pin_count), | ||
3664 | atomic_read(&dev->object_memory), | ||
3665 | atomic_read(&dev->pin_memory), | ||
3666 | atomic_read(&dev->gtt_memory), | ||
3667 | dev->gtt_total); | ||
3668 | } | ||
3426 | goto err; | 3669 | goto err; |
3427 | } | 3670 | } |
3428 | 3671 | ||
@@ -3433,7 +3676,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3433 | 3676 | ||
3434 | /* evict everyone we can from the aperture */ | 3677 | /* evict everyone we can from the aperture */ |
3435 | ret = i915_gem_evict_everything(dev); | 3678 | ret = i915_gem_evict_everything(dev); |
3436 | if (ret) | 3679 | if (ret && ret != -ENOSPC) |
3437 | goto err; | 3680 | goto err; |
3438 | } | 3681 | } |
3439 | 3682 | ||
@@ -3489,8 +3732,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3489 | 3732 | ||
3490 | for (i = 0; i < args->buffer_count; i++) { | 3733 | for (i = 0; i < args->buffer_count; i++) { |
3491 | struct drm_gem_object *obj = object_list[i]; | 3734 | struct drm_gem_object *obj = object_list[i]; |
3735 | uint32_t old_write_domain = obj->write_domain; | ||
3492 | 3736 | ||
3493 | obj->write_domain = obj->pending_write_domain; | 3737 | obj->write_domain = obj->pending_write_domain; |
3738 | trace_i915_gem_object_change_domain(obj, | ||
3739 | obj->read_domains, | ||
3740 | old_write_domain); | ||
3494 | } | 3741 | } |
3495 | 3742 | ||
3496 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3743 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -3607,11 +3854,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
3607 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3854 | i915_verify_inactive(dev, __FILE__, __LINE__); |
3608 | if (obj_priv->gtt_space == NULL) { | 3855 | if (obj_priv->gtt_space == NULL) { |
3609 | ret = i915_gem_object_bind_to_gtt(obj, alignment); | 3856 | ret = i915_gem_object_bind_to_gtt(obj, alignment); |
3610 | if (ret != 0) { | 3857 | if (ret) |
3611 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
3612 | DRM_ERROR("Failure to bind: %d\n", ret); | ||
3613 | return ret; | 3858 | return ret; |
3614 | } | ||
3615 | } | 3859 | } |
3616 | /* | 3860 | /* |
3617 | * Pre-965 chips need a fence register set up in order to | 3861 | * Pre-965 chips need a fence register set up in order to |
@@ -3691,6 +3935,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
3691 | } | 3935 | } |
3692 | obj_priv = obj->driver_private; | 3936 | obj_priv = obj->driver_private; |
3693 | 3937 | ||
3938 | if (obj_priv->madv != I915_MADV_WILLNEED) { | ||
3939 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | ||
3940 | drm_gem_object_unreference(obj); | ||
3941 | mutex_unlock(&dev->struct_mutex); | ||
3942 | return -EINVAL; | ||
3943 | } | ||
3944 | |||
3694 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 3945 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { |
3695 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 3946 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
3696 | args->handle); | 3947 | args->handle); |
@@ -3803,6 +4054,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | |||
3803 | return i915_gem_ring_throttle(dev, file_priv); | 4054 | return i915_gem_ring_throttle(dev, file_priv); |
3804 | } | 4055 | } |
3805 | 4056 | ||
4057 | int | ||
4058 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | ||
4059 | struct drm_file *file_priv) | ||
4060 | { | ||
4061 | struct drm_i915_gem_madvise *args = data; | ||
4062 | struct drm_gem_object *obj; | ||
4063 | struct drm_i915_gem_object *obj_priv; | ||
4064 | |||
4065 | switch (args->madv) { | ||
4066 | case I915_MADV_DONTNEED: | ||
4067 | case I915_MADV_WILLNEED: | ||
4068 | break; | ||
4069 | default: | ||
4070 | return -EINVAL; | ||
4071 | } | ||
4072 | |||
4073 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
4074 | if (obj == NULL) { | ||
4075 | DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", | ||
4076 | args->handle); | ||
4077 | return -EBADF; | ||
4078 | } | ||
4079 | |||
4080 | mutex_lock(&dev->struct_mutex); | ||
4081 | obj_priv = obj->driver_private; | ||
4082 | |||
4083 | if (obj_priv->pin_count) { | ||
4084 | drm_gem_object_unreference(obj); | ||
4085 | mutex_unlock(&dev->struct_mutex); | ||
4086 | |||
4087 | DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); | ||
4088 | return -EINVAL; | ||
4089 | } | ||
4090 | |||
4091 | if (obj_priv->madv != __I915_MADV_PURGED) | ||
4092 | obj_priv->madv = args->madv; | ||
4093 | |||
4094 | /* if the object is no longer bound, discard its backing storage */ | ||
4095 | if (i915_gem_object_is_purgeable(obj_priv) && | ||
4096 | obj_priv->gtt_space == NULL) | ||
4097 | i915_gem_object_truncate(obj); | ||
4098 | |||
4099 | args->retained = obj_priv->madv != __I915_MADV_PURGED; | ||
4100 | |||
4101 | drm_gem_object_unreference(obj); | ||
4102 | mutex_unlock(&dev->struct_mutex); | ||
4103 | |||
4104 | return 0; | ||
4105 | } | ||
4106 | |||
3806 | int i915_gem_init_object(struct drm_gem_object *obj) | 4107 | int i915_gem_init_object(struct drm_gem_object *obj) |
3807 | { | 4108 | { |
3808 | struct drm_i915_gem_object *obj_priv; | 4109 | struct drm_i915_gem_object *obj_priv; |
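The new madvise ioctl above lets userspace flip a buffer between I915_MADV_WILLNEED and I915_MADV_DONTNEED; pinned buffers are rejected, an already purged buffer stays purged, and retained reports whether the contents survived. A hedged userspace sketch of a typical call; the struct and ioctl names come from the i915_drm.h additions in this patch series and should be treated as assumptions if building against older headers:

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Mark a GEM buffer purgeable and report whether its contents were kept.
 * fd is an open DRM device node, handle a GEM handle owned by that fd. */
static int mark_purgeable(int fd, unsigned int handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,	/* contents may be discarded under pressure */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) != 0)
		return -1;

	/* retained == 0 means the kernel already purged the backing pages,
	 * so the buffer must be treated as empty when marked WILLNEED again. */
	printf("retained=%u\n", madv.retained);
	return 0;
}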
@@ -3827,6 +4128,9 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
3827 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 4128 | obj_priv->fence_reg = I915_FENCE_REG_NONE; |
3828 | INIT_LIST_HEAD(&obj_priv->list); | 4129 | INIT_LIST_HEAD(&obj_priv->list); |
3829 | INIT_LIST_HEAD(&obj_priv->fence_list); | 4130 | INIT_LIST_HEAD(&obj_priv->fence_list); |
4131 | obj_priv->madv = I915_MADV_WILLNEED; | ||
4132 | |||
4133 | trace_i915_gem_object_create(obj); | ||
3830 | 4134 | ||
3831 | return 0; | 4135 | return 0; |
3832 | } | 4136 | } |
@@ -3836,6 +4140,8 @@ void i915_gem_free_object(struct drm_gem_object *obj) | |||
3836 | struct drm_device *dev = obj->dev; | 4140 | struct drm_device *dev = obj->dev; |
3837 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4141 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
3838 | 4142 | ||
4143 | trace_i915_gem_object_destroy(obj); | ||
4144 | |||
3839 | while (obj_priv->pin_count > 0) | 4145 | while (obj_priv->pin_count > 0) |
3840 | i915_gem_object_unpin(obj); | 4146 | i915_gem_object_unpin(obj); |
3841 | 4147 | ||
@@ -3844,43 +4150,35 @@ void i915_gem_free_object(struct drm_gem_object *obj) | |||
3844 | 4150 | ||
3845 | i915_gem_object_unbind(obj); | 4151 | i915_gem_object_unbind(obj); |
3846 | 4152 | ||
3847 | i915_gem_free_mmap_offset(obj); | 4153 | if (obj_priv->mmap_offset) |
4154 | i915_gem_free_mmap_offset(obj); | ||
3848 | 4155 | ||
3849 | kfree(obj_priv->page_cpu_valid); | 4156 | kfree(obj_priv->page_cpu_valid); |
3850 | kfree(obj_priv->bit_17); | 4157 | kfree(obj_priv->bit_17); |
3851 | kfree(obj->driver_private); | 4158 | kfree(obj->driver_private); |
3852 | } | 4159 | } |
3853 | 4160 | ||
3854 | /** Unbinds all objects that are on the given buffer list. */ | 4161 | /** Unbinds all inactive objects. */ |
3855 | static int | 4162 | static int |
3856 | i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) | 4163 | i915_gem_evict_from_inactive_list(struct drm_device *dev) |
3857 | { | 4164 | { |
3858 | struct drm_gem_object *obj; | 4165 | drm_i915_private_t *dev_priv = dev->dev_private; |
3859 | struct drm_i915_gem_object *obj_priv; | ||
3860 | int ret; | ||
3861 | 4166 | ||
3862 | while (!list_empty(head)) { | 4167 | while (!list_empty(&dev_priv->mm.inactive_list)) { |
3863 | obj_priv = list_first_entry(head, | 4168 | struct drm_gem_object *obj; |
3864 | struct drm_i915_gem_object, | 4169 | int ret; |
3865 | list); | ||
3866 | obj = obj_priv->obj; | ||
3867 | 4170 | ||
3868 | if (obj_priv->pin_count != 0) { | 4171 | obj = list_first_entry(&dev_priv->mm.inactive_list, |
3869 | DRM_ERROR("Pinned object in unbind list\n"); | 4172 | struct drm_i915_gem_object, |
3870 | mutex_unlock(&dev->struct_mutex); | 4173 | list)->obj; |
3871 | return -EINVAL; | ||
3872 | } | ||
3873 | 4174 | ||
3874 | ret = i915_gem_object_unbind(obj); | 4175 | ret = i915_gem_object_unbind(obj); |
3875 | if (ret != 0) { | 4176 | if (ret != 0) { |
3876 | DRM_ERROR("Error unbinding object in LeaveVT: %d\n", | 4177 | DRM_ERROR("Error unbinding object: %d\n", ret); |
3877 | ret); | ||
3878 | mutex_unlock(&dev->struct_mutex); | ||
3879 | return ret; | 4178 | return ret; |
3880 | } | 4179 | } |
3881 | } | 4180 | } |
3882 | 4181 | ||
3883 | |||
3884 | return 0; | 4182 | return 0; |
3885 | } | 4183 | } |
3886 | 4184 | ||
@@ -3902,6 +4200,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3902 | * We need to replace this with a semaphore, or something. | 4200 | * We need to replace this with a semaphore, or something. |
3903 | */ | 4201 | */ |
3904 | dev_priv->mm.suspended = 1; | 4202 | dev_priv->mm.suspended = 1; |
4203 | del_timer(&dev_priv->hangcheck_timer); | ||
3905 | 4204 | ||
3906 | /* Cancel the retire work handler, wait for it to finish if running | 4205 | /* Cancel the retire work handler, wait for it to finish if running |
3907 | */ | 4206 | */ |
@@ -3931,7 +4230,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3931 | if (last_seqno == cur_seqno) { | 4230 | if (last_seqno == cur_seqno) { |
3932 | if (stuck++ > 100) { | 4231 | if (stuck++ > 100) { |
3933 | DRM_ERROR("hardware wedged\n"); | 4232 | DRM_ERROR("hardware wedged\n"); |
3934 | dev_priv->mm.wedged = 1; | 4233 | atomic_set(&dev_priv->mm.wedged, 1); |
3935 | DRM_WAKEUP(&dev_priv->irq_queue); | 4234 | DRM_WAKEUP(&dev_priv->irq_queue); |
3936 | break; | 4235 | break; |
3937 | } | 4236 | } |
@@ -3944,7 +4243,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3944 | i915_gem_retire_requests(dev); | 4243 | i915_gem_retire_requests(dev); |
3945 | 4244 | ||
3946 | spin_lock(&dev_priv->mm.active_list_lock); | 4245 | spin_lock(&dev_priv->mm.active_list_lock); |
3947 | if (!dev_priv->mm.wedged) { | 4246 | if (!atomic_read(&dev_priv->mm.wedged)) { |
3948 | /* Active and flushing should now be empty as we've | 4247 | /* Active and flushing should now be empty as we've |
3949 | * waited for a sequence higher than any pending execbuffer | 4248 | * waited for a sequence higher than any pending execbuffer |
3950 | */ | 4249 | */ |
@@ -3962,29 +4261,41 @@ i915_gem_idle(struct drm_device *dev) | |||
3962 | * the GPU domains and just stuff them onto inactive. | 4261 | * the GPU domains and just stuff them onto inactive. |
3963 | */ | 4262 | */ |
3964 | while (!list_empty(&dev_priv->mm.active_list)) { | 4263 | while (!list_empty(&dev_priv->mm.active_list)) { |
3965 | struct drm_i915_gem_object *obj_priv; | 4264 | struct drm_gem_object *obj; |
4265 | uint32_t old_write_domain; | ||
3966 | 4266 | ||
3967 | obj_priv = list_first_entry(&dev_priv->mm.active_list, | 4267 | obj = list_first_entry(&dev_priv->mm.active_list, |
3968 | struct drm_i915_gem_object, | 4268 | struct drm_i915_gem_object, |
3969 | list); | 4269 | list)->obj; |
3970 | obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | 4270 | old_write_domain = obj->write_domain; |
3971 | i915_gem_object_move_to_inactive(obj_priv->obj); | 4271 | obj->write_domain &= ~I915_GEM_GPU_DOMAINS; |
4272 | i915_gem_object_move_to_inactive(obj); | ||
4273 | |||
4274 | trace_i915_gem_object_change_domain(obj, | ||
4275 | obj->read_domains, | ||
4276 | old_write_domain); | ||
3972 | } | 4277 | } |
3973 | spin_unlock(&dev_priv->mm.active_list_lock); | 4278 | spin_unlock(&dev_priv->mm.active_list_lock); |
3974 | 4279 | ||
3975 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 4280 | while (!list_empty(&dev_priv->mm.flushing_list)) { |
3976 | struct drm_i915_gem_object *obj_priv; | 4281 | struct drm_gem_object *obj; |
4282 | uint32_t old_write_domain; | ||
3977 | 4283 | ||
3978 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 4284 | obj = list_first_entry(&dev_priv->mm.flushing_list, |
3979 | struct drm_i915_gem_object, | 4285 | struct drm_i915_gem_object, |
3980 | list); | 4286 | list)->obj; |
3981 | obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | 4287 | old_write_domain = obj->write_domain; |
3982 | i915_gem_object_move_to_inactive(obj_priv->obj); | 4288 | obj->write_domain &= ~I915_GEM_GPU_DOMAINS; |
4289 | i915_gem_object_move_to_inactive(obj); | ||
4290 | |||
4291 | trace_i915_gem_object_change_domain(obj, | ||
4292 | obj->read_domains, | ||
4293 | old_write_domain); | ||
3983 | } | 4294 | } |
3984 | 4295 | ||
3985 | 4296 | ||
3986 | /* Move all inactive buffers out of the GTT. */ | 4297 | /* Move all inactive buffers out of the GTT. */ |
3987 | ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); | 4298 | ret = i915_gem_evict_from_inactive_list(dev); |
3988 | WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); | 4299 | WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); |
3989 | if (ret) { | 4300 | if (ret) { |
3990 | mutex_unlock(&dev->struct_mutex); | 4301 | mutex_unlock(&dev->struct_mutex); |
@@ -4206,9 +4517,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4206 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 4517 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4207 | return 0; | 4518 | return 0; |
4208 | 4519 | ||
4209 | if (dev_priv->mm.wedged) { | 4520 | if (atomic_read(&dev_priv->mm.wedged)) { |
4210 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); | 4521 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); |
4211 | dev_priv->mm.wedged = 0; | 4522 | atomic_set(&dev_priv->mm.wedged, 0); |
4212 | } | 4523 | } |
4213 | 4524 | ||
4214 | mutex_lock(&dev->struct_mutex); | 4525 | mutex_lock(&dev->struct_mutex); |
@@ -4274,6 +4585,10 @@ i915_gem_load(struct drm_device *dev) | |||
4274 | i915_gem_retire_work_handler); | 4585 | i915_gem_retire_work_handler); |
4275 | dev_priv->mm.next_gem_seqno = 1; | 4586 | dev_priv->mm.next_gem_seqno = 1; |
4276 | 4587 | ||
4588 | spin_lock(&shrink_list_lock); | ||
4589 | list_add(&dev_priv->mm.shrink_list, &shrink_list); | ||
4590 | spin_unlock(&shrink_list_lock); | ||
4591 | |||
4277 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 4592 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
4278 | dev_priv->fence_reg_start = 3; | 4593 | dev_priv->fence_reg_start = 3; |
4279 | 4594 | ||
@@ -4491,3 +4806,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) | |||
4491 | list_del_init(i915_file_priv->mm.request_list.next); | 4806 | list_del_init(i915_file_priv->mm.request_list.next); |
4492 | mutex_unlock(&dev->struct_mutex); | 4807 | mutex_unlock(&dev->struct_mutex); |
4493 | } | 4808 | } |
4809 | |||
4810 | static int | ||
4811 | i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) | ||
4812 | { | ||
4813 | drm_i915_private_t *dev_priv, *next_dev; | ||
4814 | struct drm_i915_gem_object *obj_priv, *next_obj; | ||
4815 | int cnt = 0; | ||
4816 | int would_deadlock = 1; | ||
4817 | |||
4818 | /* "fast-path" to count number of available objects */ | ||
4819 | if (nr_to_scan == 0) { | ||
4820 | spin_lock(&shrink_list_lock); | ||
4821 | list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { | ||
4822 | struct drm_device *dev = dev_priv->dev; | ||
4823 | |||
4824 | if (mutex_trylock(&dev->struct_mutex)) { | ||
4825 | list_for_each_entry(obj_priv, | ||
4826 | &dev_priv->mm.inactive_list, | ||
4827 | list) | ||
4828 | cnt++; | ||
4829 | mutex_unlock(&dev->struct_mutex); | ||
4830 | } | ||
4831 | } | ||
4832 | spin_unlock(&shrink_list_lock); | ||
4833 | |||
4834 | return (cnt / 100) * sysctl_vfs_cache_pressure; | ||
4835 | } | ||
4836 | |||
4837 | spin_lock(&shrink_list_lock); | ||
4838 | |||
4839 | /* first scan for clean buffers */ | ||
4840 | list_for_each_entry_safe(dev_priv, next_dev, | ||
4841 | &shrink_list, mm.shrink_list) { | ||
4842 | struct drm_device *dev = dev_priv->dev; | ||
4843 | |||
4844 | if (!mutex_trylock(&dev->struct_mutex)) | ||
4845 | continue; | ||
4846 | |||
4847 | spin_unlock(&shrink_list_lock); | ||
4848 | |||
4849 | i915_gem_retire_requests(dev); | ||
4850 | |||
4851 | list_for_each_entry_safe(obj_priv, next_obj, | ||
4852 | &dev_priv->mm.inactive_list, | ||
4853 | list) { | ||
4854 | if (i915_gem_object_is_purgeable(obj_priv)) { | ||
4855 | i915_gem_object_unbind(obj_priv->obj); | ||
4856 | if (--nr_to_scan <= 0) | ||
4857 | break; | ||
4858 | } | ||
4859 | } | ||
4860 | |||
4861 | spin_lock(&shrink_list_lock); | ||
4862 | mutex_unlock(&dev->struct_mutex); | ||
4863 | |||
4864 | would_deadlock = 0; | ||
4865 | |||
4866 | if (nr_to_scan <= 0) | ||
4867 | break; | ||
4868 | } | ||
4869 | |||
4870 | /* second pass, evict/count anything still on the inactive list */ | ||
4871 | list_for_each_entry_safe(dev_priv, next_dev, | ||
4872 | &shrink_list, mm.shrink_list) { | ||
4873 | struct drm_device *dev = dev_priv->dev; | ||
4874 | |||
4875 | if (!mutex_trylock(&dev->struct_mutex)) | ||
4876 | continue; | ||
4877 | |||
4878 | spin_unlock(&shrink_list_lock); | ||
4879 | |||
4880 | list_for_each_entry_safe(obj_priv, next_obj, | ||
4881 | &dev_priv->mm.inactive_list, | ||
4882 | list) { | ||
4883 | if (nr_to_scan > 0) { | ||
4884 | i915_gem_object_unbind(obj_priv->obj); | ||
4885 | nr_to_scan--; | ||
4886 | } else | ||
4887 | cnt++; | ||
4888 | } | ||
4889 | |||
4890 | spin_lock(&shrink_list_lock); | ||
4891 | mutex_unlock(&dev->struct_mutex); | ||
4892 | |||
4893 | would_deadlock = 0; | ||
4894 | } | ||
4895 | |||
4896 | spin_unlock(&shrink_list_lock); | ||
4897 | |||
4898 | if (would_deadlock) | ||
4899 | return -1; | ||
4900 | else if (cnt > 0) | ||
4901 | return (cnt / 100) * sysctl_vfs_cache_pressure; | ||
4902 | else | ||
4903 | return 0; | ||
4904 | } | ||
4905 | |||
4906 | static struct shrinker shrinker = { | ||
4907 | .shrink = i915_gem_shrink, | ||
4908 | .seeks = DEFAULT_SEEKS, | ||
4909 | }; | ||
4910 | |||
4911 | __init void | ||
4912 | i915_gem_shrinker_init(void) | ||
4913 | { | ||
4914 | register_shrinker(&shrinker); | ||
4915 | } | ||
4916 | |||
4917 | __exit void | ||
4918 | i915_gem_shrinker_exit(void) | ||
4919 | { | ||
4920 | unregister_shrinker(&shrinker); | ||
4921 | } | ||
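The shrinker above is registered once per module rather than per device; a minimal sketch of how i915_gem_shrinker_init()/i915_gem_shrinker_exit() would be wired into the module entry points is shown below. The surrounding init/exit functions, the drm_init()/drm_exit() calls and the `driver` object are illustrative assumptions, not part of the hunks above.

/* Sketch only: module-level wiring for the shrinker hooks defined above. */
static int __init i915_init(void)
{
	i915_gem_shrinker_init();	/* register with the VM's shrinker list */
	return drm_init(&driver);	/* assumed: normal DRM driver registration */
}

static void __exit i915_exit(void)
{
	drm_exit(&driver);
	i915_gem_shrinker_exit();	/* unregister before the code goes away */
}

module_init(i915_init);
module_exit(i915_exit);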
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6c89f2ff2495..4dfeec7cdd42 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drm.h" | 31 | #include "drm.h" |
32 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
33 | #include "i915_drv.h" | 33 | #include "i915_drv.h" |
34 | #include "i915_trace.h" | ||
34 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
35 | 36 | ||
36 | #define MAX_NOPID ((u32)~0) | 37 | #define MAX_NOPID ((u32)~0) |
@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) | |||
279 | } | 280 | } |
280 | 281 | ||
281 | if (gt_iir & GT_USER_INTERRUPT) { | 282 | if (gt_iir & GT_USER_INTERRUPT) { |
282 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); | 283 | u32 seqno = i915_get_gem_seqno(dev); |
284 | dev_priv->mm.irq_gem_seqno = seqno; | ||
285 | trace_i915_gem_request_complete(dev, seqno); | ||
283 | DRM_WAKEUP(&dev_priv->irq_queue); | 286 | DRM_WAKEUP(&dev_priv->irq_queue); |
284 | } | 287 | } |
285 | 288 | ||
@@ -302,12 +305,25 @@ static void i915_error_work_func(struct work_struct *work) | |||
302 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 305 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
303 | error_work); | 306 | error_work); |
304 | struct drm_device *dev = dev_priv->dev; | 307 | struct drm_device *dev = dev_priv->dev; |
305 | char *event_string = "ERROR=1"; | 308 | char *error_event[] = { "ERROR=1", NULL }; |
306 | char *envp[] = { event_string, NULL }; | 309 | char *reset_event[] = { "RESET=1", NULL }; |
310 | char *reset_done_event[] = { "ERROR=0", NULL }; | ||
307 | 311 | ||
308 | DRM_DEBUG("generating error event\n"); | 312 | DRM_DEBUG("generating error event\n"); |
309 | 313 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | |
310 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); | 314 | |
315 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
316 | if (IS_I965G(dev)) { | ||
317 | DRM_DEBUG("resetting chip\n"); | ||
318 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); | ||
319 | if (!i965_reset(dev, GDRST_RENDER)) { | ||
320 | atomic_set(&dev_priv->mm.wedged, 0); | ||
321 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); | ||
322 | } | ||
323 | } else { | ||
324 | printk("reboot required\n"); | ||
325 | } | ||
326 | } | ||
311 | } | 327 | } |
312 | 328 | ||
313 | /** | 329 | /** |
@@ -372,7 +388,7 @@ out: | |||
372 | * so userspace knows something bad happened (should trigger collection | 388 | * so userspace knows something bad happened (should trigger collection |
373 | * of a ring dump etc.). | 389 | * of a ring dump etc.). |
374 | */ | 390 | */ |
375 | static void i915_handle_error(struct drm_device *dev) | 391 | static void i915_handle_error(struct drm_device *dev, bool wedged) |
376 | { | 392 | { |
377 | struct drm_i915_private *dev_priv = dev->dev_private; | 393 | struct drm_i915_private *dev_priv = dev->dev_private; |
378 | u32 eir = I915_READ(EIR); | 394 | u32 eir = I915_READ(EIR); |
@@ -482,6 +498,16 @@ static void i915_handle_error(struct drm_device *dev) | |||
482 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 498 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
483 | } | 499 | } |
484 | 500 | ||
501 | if (wedged) { | ||
502 | atomic_set(&dev_priv->mm.wedged, 1); | ||
503 | |||
504 | /* | ||
505 | * Wakeup waiting processes so they don't hang | ||
506 | */ | ||
507 | printk("i915: Waking up sleeping processes\n"); | ||
508 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
509 | } | ||
510 | |||
485 | queue_work(dev_priv->wq, &dev_priv->error_work); | 511 | queue_work(dev_priv->wq, &dev_priv->error_work); |
486 | } | 512 | } |
487 | 513 | ||
@@ -527,7 +553,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
527 | pipeb_stats = I915_READ(PIPEBSTAT); | 553 | pipeb_stats = I915_READ(PIPEBSTAT); |
528 | 554 | ||
529 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 555 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
530 | i915_handle_error(dev); | 556 | i915_handle_error(dev, false); |
531 | 557 | ||
532 | /* | 558 | /* |
533 | * Clear the PIPE(A|B)STAT regs before the IIR | 559 | * Clear the PIPE(A|B)STAT regs before the IIR |
@@ -599,8 +625,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
599 | } | 625 | } |
600 | 626 | ||
601 | if (iir & I915_USER_INTERRUPT) { | 627 | if (iir & I915_USER_INTERRUPT) { |
602 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); | 628 | u32 seqno = i915_get_gem_seqno(dev); |
629 | dev_priv->mm.irq_gem_seqno = seqno; | ||
630 | trace_i915_gem_request_complete(dev, seqno); | ||
603 | DRM_WAKEUP(&dev_priv->irq_queue); | 631 | DRM_WAKEUP(&dev_priv->irq_queue); |
632 | dev_priv->hangcheck_count = 0; | ||
633 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
604 | } | 634 | } |
605 | 635 | ||
606 | if (pipea_stats & vblank_status) { | 636 | if (pipea_stats & vblank_status) { |
@@ -880,6 +910,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
880 | return -EINVAL; | 910 | return -EINVAL; |
881 | } | 911 | } |
882 | 912 | ||
913 | struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { | ||
914 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
915 | return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); | ||
916 | } | ||
917 | |||
918 | /** | ||
919 | * This is called when the chip hasn't reported back with completed | ||
920 | * batchbuffers in a long time. The first time this is called we simply record | ||
921 | * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses | ||
922 | * again, we assume the chip is wedged and try to fix it. | ||
923 | */ | ||
924 | void i915_hangcheck_elapsed(unsigned long data) | ||
925 | { | ||
926 | struct drm_device *dev = (struct drm_device *)data; | ||
927 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
928 | uint32_t acthd; | ||
929 | |||
930 | if (!IS_I965G(dev)) | ||
931 | acthd = I915_READ(ACTHD); | ||
932 | else | ||
933 | acthd = I915_READ(ACTHD_I965); | ||
934 | |||
935 | /* If all work is done then ACTHD clearly hasn't advanced. */ | ||
936 | if (list_empty(&dev_priv->mm.request_list) || | ||
937 | i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { | ||
938 | dev_priv->hangcheck_count = 0; | ||
939 | return; | ||
940 | } | ||
941 | |||
942 | if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { | ||
943 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | ||
944 | i915_handle_error(dev, true); | ||
945 | return; | ||
946 | } | ||
947 | |||
948 | /* Reset timer in case the chip hangs without another request being added */ | ||
949 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
950 | |||
951 | if (acthd != dev_priv->last_acthd) | ||
952 | dev_priv->hangcheck_count = 0; | ||
953 | else | ||
954 | dev_priv->hangcheck_count++; | ||
955 | |||
956 | dev_priv->last_acthd = acthd; | ||
957 | } | ||
958 | |||
883 | /* drm_dma.h hooks | 959 | /* drm_dma.h hooks |
884 | */ | 960 | */ |
885 | static void igdng_irq_preinstall(struct drm_device *dev) | 961 | static void igdng_irq_preinstall(struct drm_device *dev) |
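i915_hangcheck_elapsed() only decides when the GPU looks stuck; arming the timer happens elsewhere. A hedged sketch of the expected setup follows, assuming the timer is prepared once during driver load (the exact location is not shown in these hunks), and then re-armed whenever new work is outstanding, as the IRQ-handler hunk above already does:

/* Sketch: one-time preparation of the hangcheck timer (placement assumed). */
setup_timer(&dev_priv->hangcheck_timer,
	    i915_hangcheck_elapsed, (unsigned long) dev);

/* (Re)arm it so the handler fires if no completion arrives in time. */
mod_timer(&dev_priv->hangcheck_timer,
	  jiffies + DRM_I915_HANGCHECK_PERIOD);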
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index e4b4e8898e39..2d5193556d3f 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
148 | struct drm_i915_private *dev_priv = dev->dev_private; | 148 | struct drm_i915_private *dev_priv = dev->dev_private; |
149 | struct opregion_asle *asle = dev_priv->opregion.asle; | 149 | struct opregion_asle *asle = dev_priv->opregion.asle; |
150 | u32 blc_pwm_ctl, blc_pwm_ctl2; | 150 | u32 blc_pwm_ctl, blc_pwm_ctl2; |
151 | u32 max_backlight, level, shift; | ||
151 | 152 | ||
152 | if (!(bclp & ASLE_BCLP_VALID)) | 153 | if (!(bclp & ASLE_BCLP_VALID)) |
153 | return ASLE_BACKLIGHT_FAIL; | 154 | return ASLE_BACKLIGHT_FAIL; |
@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
157 | return ASLE_BACKLIGHT_FAIL; | 158 | return ASLE_BACKLIGHT_FAIL; |
158 | 159 | ||
159 | blc_pwm_ctl = I915_READ(BLC_PWM_CTL); | 160 | blc_pwm_ctl = I915_READ(BLC_PWM_CTL); |
160 | blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
161 | blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); | 161 | blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); |
162 | 162 | ||
163 | if (blc_pwm_ctl2 & BLM_COMBINATION_MODE) | 163 | if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) |
164 | pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); | 164 | pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); |
165 | else | 165 | else { |
166 | I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1)); | 166 | if (IS_IGD(dev)) { |
167 | 167 | blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | |
168 | max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> | ||
169 | BACKLIGHT_MODULATION_FREQ_SHIFT; | ||
170 | shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1; | ||
171 | } else { | ||
172 | blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
173 | max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> | ||
174 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; | ||
175 | shift = BACKLIGHT_DUTY_CYCLE_SHIFT; | ||
176 | } | ||
177 | level = (bclp * max_backlight) / 255; | ||
178 | I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift)); | ||
179 | } | ||
168 | asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; | 180 | asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; |
169 | 181 | ||
170 | return 0; | 182 | return 0; |
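The new opregion code scales the 0-255 ASLE brightness request onto the PWM duty-cycle range derived from the modulation-frequency field, rather than writing the raw value. A small worked example of that arithmetic (the register-derived maximum is invented for illustration):

/* Illustration only -- max_backlight value is made up. */
u32 bclp = 200;				/* requested brightness, 0..255 */
u32 max_backlight = 0x1d2;		/* from the modulation-frequency field */
u32 level = (bclp * max_backlight) / 255;	/* 365 == 0x16d */
/* level is then shifted by BACKLIGHT_DUTY_CYCLE_SHIFT and OR'd into
 * BLC_PWM_CTL, exactly as the hunk above does. */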
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3f7963553464..0466ddbeba32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -86,6 +86,10 @@ | |||
86 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) | 86 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) |
87 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) | 87 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) |
88 | #define LBB 0xf4 | 88 | #define LBB 0xf4 |
89 | #define GDRST 0xc0 | ||
90 | #define GDRST_FULL (0<<2) | ||
91 | #define GDRST_RENDER (1<<2) | ||
92 | #define GDRST_MEDIA (3<<2) | ||
89 | 93 | ||
90 | /* VGA stuff */ | 94 | /* VGA stuff */ |
91 | 95 | ||
@@ -344,9 +348,37 @@ | |||
344 | #define FBC_CTL_PLANEA (0<<0) | 348 | #define FBC_CTL_PLANEA (0<<0) |
345 | #define FBC_CTL_PLANEB (1<<0) | 349 | #define FBC_CTL_PLANEB (1<<0) |
346 | #define FBC_FENCE_OFF 0x0321b | 350 | #define FBC_FENCE_OFF 0x0321b |
351 | #define FBC_TAG 0x03300 | ||
347 | 352 | ||
348 | #define FBC_LL_SIZE (1536) | 353 | #define FBC_LL_SIZE (1536) |
349 | 354 | ||
355 | /* Framebuffer compression for GM45+ */ | ||
356 | #define DPFC_CB_BASE 0x3200 | ||
357 | #define DPFC_CONTROL 0x3208 | ||
358 | #define DPFC_CTL_EN (1<<31) | ||
359 | #define DPFC_CTL_PLANEA (0<<30) | ||
360 | #define DPFC_CTL_PLANEB (1<<30) | ||
361 | #define DPFC_CTL_FENCE_EN (1<<29) | ||
362 | #define DPFC_SR_EN (1<<10) | ||
363 | #define DPFC_CTL_LIMIT_1X (0<<6) | ||
364 | #define DPFC_CTL_LIMIT_2X (1<<6) | ||
365 | #define DPFC_CTL_LIMIT_4X (2<<6) | ||
366 | #define DPFC_RECOMP_CTL 0x320c | ||
367 | #define DPFC_RECOMP_STALL_EN (1<<27) | ||
368 | #define DPFC_RECOMP_STALL_WM_SHIFT (16) | ||
369 | #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) | ||
370 | #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) | ||
371 | #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) | ||
372 | #define DPFC_STATUS 0x3210 | ||
373 | #define DPFC_INVAL_SEG_SHIFT (16) | ||
374 | #define DPFC_INVAL_SEG_MASK (0x07ff0000) | ||
375 | #define DPFC_COMP_SEG_SHIFT (0) | ||
376 | #define DPFC_COMP_SEG_MASK (0x000003ff) | ||
377 | #define DPFC_STATUS2 0x3214 | ||
378 | #define DPFC_FENCE_YOFF 0x3218 | ||
379 | #define DPFC_CHICKEN 0x3224 | ||
380 | #define DPFC_HT_MODIFY (1<<31) | ||
381 | |||
350 | /* | 382 | /* |
351 | * GPIO regs | 383 | * GPIO regs |
352 | */ | 384 | */ |
@@ -2000,6 +2032,8 @@ | |||
2000 | #define PF_ENABLE (1<<31) | 2032 | #define PF_ENABLE (1<<31) |
2001 | #define PFA_WIN_SZ 0x68074 | 2033 | #define PFA_WIN_SZ 0x68074 |
2002 | #define PFB_WIN_SZ 0x68874 | 2034 | #define PFB_WIN_SZ 0x68874 |
2035 | #define PFA_WIN_POS 0x68070 | ||
2036 | #define PFB_WIN_POS 0x68870 | ||
2003 | 2037 | ||
2004 | /* legacy palette */ | 2038 | /* legacy palette */ |
2005 | #define LGC_PALETTE_A 0x4a000 | 2039 | #define LGC_PALETTE_A 0x4a000 |
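The new DPFC definitions follow the usual SHIFT/MASK pairing for packed register fields. A short sketch of reading and updating one such field; the 200-cycle watermark mirrors the value used by g4x_enable_fbc() later in this series:

u32 val = I915_READ(DPFC_RECOMP_CTL);
u32 wm  = (val & DPFC_RECOMP_STALL_WM_MASK) >> DPFC_RECOMP_STALL_WM_SHIFT;

/* Replace the stall watermark field, leaving the other bits alone. */
val &= ~DPFC_RECOMP_STALL_WM_MASK;
val |= (200 << DPFC_RECOMP_STALL_WM_SHIFT) & DPFC_RECOMP_STALL_WM_MASK;
I915_WRITE(DPFC_RECOMP_CTL, val);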
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 20d4d19f5568..bd6d8d91ca9f 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -228,6 +228,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
228 | 228 | ||
229 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 229 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
230 | return; | 230 | return; |
231 | |||
231 | /* Pipe & plane A info */ | 232 | /* Pipe & plane A info */ |
232 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 233 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); |
233 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 234 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); |
@@ -285,6 +286,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
285 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); | 286 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); |
286 | return; | 287 | return; |
287 | } | 288 | } |
289 | |||
288 | static void i915_restore_modeset_reg(struct drm_device *dev) | 290 | static void i915_restore_modeset_reg(struct drm_device *dev) |
289 | { | 291 | { |
290 | struct drm_i915_private *dev_priv = dev->dev_private; | 292 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -379,19 +381,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
379 | 381 | ||
380 | return; | 382 | return; |
381 | } | 383 | } |
382 | int i915_save_state(struct drm_device *dev) | 384 | |
385 | void i915_save_display(struct drm_device *dev) | ||
383 | { | 386 | { |
384 | struct drm_i915_private *dev_priv = dev->dev_private; | 387 | struct drm_i915_private *dev_priv = dev->dev_private; |
385 | int i; | ||
386 | |||
387 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | ||
388 | |||
389 | /* Render Standby */ | ||
390 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
391 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
392 | |||
393 | /* Hardware status page */ | ||
394 | dev_priv->saveHWS = I915_READ(HWS_PGA); | ||
395 | 388 | ||
396 | /* Display arbitration control */ | 389 | /* Display arbitration control */ |
397 | dev_priv->saveDSPARB = I915_READ(DSPARB); | 390 | dev_priv->saveDSPARB = I915_READ(DSPARB); |
@@ -399,6 +392,7 @@ int i915_save_state(struct drm_device *dev) | |||
399 | /* This is only meaningful in non-KMS mode */ | 392 | /* This is only meaningful in non-KMS mode */ |
400 | /* Don't save them in KMS mode */ | 393 | /* Don't save them in KMS mode */ |
401 | i915_save_modeset_reg(dev); | 394 | i915_save_modeset_reg(dev); |
395 | |||
402 | /* Cursor state */ | 396 | /* Cursor state */ |
403 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); | 397 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); |
404 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); | 398 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); |
@@ -448,81 +442,22 @@ int i915_save_state(struct drm_device *dev) | |||
448 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); | 442 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); |
449 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); | 443 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); |
450 | 444 | ||
451 | /* Interrupt state */ | ||
452 | dev_priv->saveIIR = I915_READ(IIR); | ||
453 | dev_priv->saveIER = I915_READ(IER); | ||
454 | dev_priv->saveIMR = I915_READ(IMR); | ||
455 | |||
456 | /* VGA state */ | 445 | /* VGA state */ |
457 | dev_priv->saveVGA0 = I915_READ(VGA0); | 446 | dev_priv->saveVGA0 = I915_READ(VGA0); |
458 | dev_priv->saveVGA1 = I915_READ(VGA1); | 447 | dev_priv->saveVGA1 = I915_READ(VGA1); |
459 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); | 448 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); |
460 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); | 449 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); |
461 | 450 | ||
462 | /* Clock gating state */ | ||
463 | dev_priv->saveD_STATE = I915_READ(D_STATE); | ||
464 | dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); | ||
465 | |||
466 | /* Cache mode state */ | ||
467 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | ||
468 | |||
469 | /* Memory Arbitration state */ | ||
470 | dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); | ||
471 | |||
472 | /* Scratch space */ | ||
473 | for (i = 0; i < 16; i++) { | ||
474 | dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); | ||
475 | dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); | ||
476 | } | ||
477 | for (i = 0; i < 3; i++) | ||
478 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | ||
479 | |||
480 | /* Fences */ | ||
481 | if (IS_I965G(dev)) { | ||
482 | for (i = 0; i < 16; i++) | ||
483 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
484 | } else { | ||
485 | for (i = 0; i < 8; i++) | ||
486 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
487 | |||
488 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
489 | for (i = 0; i < 8; i++) | ||
490 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
491 | } | ||
492 | i915_save_vga(dev); | 451 | i915_save_vga(dev); |
493 | |||
494 | return 0; | ||
495 | } | 452 | } |
496 | 453 | ||
497 | int i915_restore_state(struct drm_device *dev) | 454 | void i915_restore_display(struct drm_device *dev) |
498 | { | 455 | { |
499 | struct drm_i915_private *dev_priv = dev->dev_private; | 456 | struct drm_i915_private *dev_priv = dev->dev_private; |
500 | int i; | ||
501 | |||
502 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | ||
503 | |||
504 | /* Render Standby */ | ||
505 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
506 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
507 | |||
508 | /* Hardware status page */ | ||
509 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | ||
510 | 457 | ||
511 | /* Display arbitration */ | 458 | /* Display arbitration */ |
512 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); | 459 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); |
513 | 460 | ||
514 | /* Fences */ | ||
515 | if (IS_I965G(dev)) { | ||
516 | for (i = 0; i < 16; i++) | ||
517 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
518 | } else { | ||
519 | for (i = 0; i < 8; i++) | ||
520 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
521 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
522 | for (i = 0; i < 8; i++) | ||
523 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | ||
524 | } | ||
525 | |||
526 | /* Display port ratios (must be done before clock is set) */ | 461 | /* Display port ratios (must be done before clock is set) */ |
527 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 462 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
528 | I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); | 463 | I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); |
@@ -534,9 +469,11 @@ int i915_restore_state(struct drm_device *dev) | |||
534 | I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); | 469 | I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); |
535 | I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); | 470 | I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); |
536 | } | 471 | } |
472 | |||
537 | /* This is only meaningful in non-KMS mode */ | 473 | /* This is only meaningful in non-KMS mode */ |
538 | /* Don't restore them in KMS mode */ | 474 | /* Don't restore them in KMS mode */ |
539 | i915_restore_modeset_reg(dev); | 475 | i915_restore_modeset_reg(dev); |
476 | |||
540 | /* Cursor state */ | 477 | /* Cursor state */ |
541 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); | 478 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); |
542 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); | 479 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); |
@@ -586,6 +523,95 @@ int i915_restore_state(struct drm_device *dev) | |||
586 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); | 523 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); |
587 | DRM_UDELAY(150); | 524 | DRM_UDELAY(150); |
588 | 525 | ||
526 | i915_restore_vga(dev); | ||
527 | } | ||
528 | |||
529 | int i915_save_state(struct drm_device *dev) | ||
530 | { | ||
531 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
532 | int i; | ||
533 | |||
534 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | ||
535 | |||
536 | /* Render Standby */ | ||
537 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
538 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
539 | |||
540 | /* Hardware status page */ | ||
541 | dev_priv->saveHWS = I915_READ(HWS_PGA); | ||
542 | |||
543 | i915_save_display(dev); | ||
544 | |||
545 | /* Interrupt state */ | ||
546 | dev_priv->saveIER = I915_READ(IER); | ||
547 | dev_priv->saveIMR = I915_READ(IMR); | ||
548 | |||
549 | /* Clock gating state */ | ||
550 | dev_priv->saveD_STATE = I915_READ(D_STATE); | ||
551 | dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */ | ||
552 | |||
553 | /* Cache mode state */ | ||
554 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | ||
555 | |||
556 | /* Memory Arbitration state */ | ||
557 | dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); | ||
558 | |||
559 | /* Scratch space */ | ||
560 | for (i = 0; i < 16; i++) { | ||
561 | dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); | ||
562 | dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); | ||
563 | } | ||
564 | for (i = 0; i < 3; i++) | ||
565 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | ||
566 | |||
567 | /* Fences */ | ||
568 | if (IS_I965G(dev)) { | ||
569 | for (i = 0; i < 16; i++) | ||
570 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
571 | } else { | ||
572 | for (i = 0; i < 8; i++) | ||
573 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
574 | |||
575 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
576 | for (i = 0; i < 8; i++) | ||
577 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
578 | } | ||
579 | |||
580 | return 0; | ||
581 | } | ||
582 | |||
583 | int i915_restore_state(struct drm_device *dev) | ||
584 | { | ||
585 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
586 | int i; | ||
587 | |||
588 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | ||
589 | |||
590 | /* Render Standby */ | ||
591 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
592 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
593 | |||
594 | /* Hardware status page */ | ||
595 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | ||
596 | |||
597 | /* Fences */ | ||
598 | if (IS_I965G(dev)) { | ||
599 | for (i = 0; i < 16; i++) | ||
600 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
601 | } else { | ||
602 | for (i = 0; i < 8; i++) | ||
603 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
604 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
605 | for (i = 0; i < 8; i++) | ||
606 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | ||
607 | } | ||
608 | |||
609 | i915_restore_display(dev); | ||
610 | |||
611 | /* Interrupt state */ | ||
612 | I915_WRITE (IER, dev_priv->saveIER); | ||
613 | I915_WRITE (IMR, dev_priv->saveIMR); | ||
614 | |||
589 | /* Clock gating state */ | 615 | /* Clock gating state */ |
590 | I915_WRITE (D_STATE, dev_priv->saveD_STATE); | 616 | I915_WRITE (D_STATE, dev_priv->saveD_STATE); |
591 | I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); | 617 | I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); |
@@ -603,8 +629,6 @@ int i915_restore_state(struct drm_device *dev) | |||
603 | for (i = 0; i < 3; i++) | 629 | for (i = 0; i < 3; i++) |
604 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); | 630 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); |
605 | 631 | ||
606 | i915_restore_vga(dev); | ||
607 | |||
608 | return 0; | 632 | return 0; |
609 | } | 633 | } |
610 | 634 | ||
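Splitting i915_save_display()/i915_restore_display() out of the full state save lets a caller refresh only the display registers without touching fences or interrupt state. A hedged sketch of such a caller; whether the render-reset path really does this is an assumption, not shown in these hunks:

/* Hypothetical caller: reprogram display-only state around a render reset. */
static int example_reset_display(struct drm_device *dev)
{
	i915_save_display(dev);		/* planes, cursor, VGA, FBC, ... */

	/* ... reset the render engine via GDRST here ... */

	i915_restore_display(dev);	/* reprogram what the reset clobbered */
	return 0;
}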
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h new file mode 100644 index 000000000000..5567a40816f3 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -0,0 +1,315 @@ | |||
1 | #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _I915_TRACE_H_ | ||
3 | |||
4 | #include <linux/stringify.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #include <drm/drmP.h> | ||
9 | |||
10 | #undef TRACE_SYSTEM | ||
11 | #define TRACE_SYSTEM i915 | ||
12 | #define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) | ||
13 | #define TRACE_INCLUDE_FILE i915_trace | ||
14 | |||
15 | /* object tracking */ | ||
16 | |||
17 | TRACE_EVENT(i915_gem_object_create, | ||
18 | |||
19 | TP_PROTO(struct drm_gem_object *obj), | ||
20 | |||
21 | TP_ARGS(obj), | ||
22 | |||
23 | TP_STRUCT__entry( | ||
24 | __field(struct drm_gem_object *, obj) | ||
25 | __field(u32, size) | ||
26 | ), | ||
27 | |||
28 | TP_fast_assign( | ||
29 | __entry->obj = obj; | ||
30 | __entry->size = obj->size; | ||
31 | ), | ||
32 | |||
33 | TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) | ||
34 | ); | ||
35 | |||
36 | TRACE_EVENT(i915_gem_object_bind, | ||
37 | |||
38 | TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), | ||
39 | |||
40 | TP_ARGS(obj, gtt_offset), | ||
41 | |||
42 | TP_STRUCT__entry( | ||
43 | __field(struct drm_gem_object *, obj) | ||
44 | __field(u32, gtt_offset) | ||
45 | ), | ||
46 | |||
47 | TP_fast_assign( | ||
48 | __entry->obj = obj; | ||
49 | __entry->gtt_offset = gtt_offset; | ||
50 | ), | ||
51 | |||
52 | TP_printk("obj=%p, gtt_offset=%08x", | ||
53 | __entry->obj, __entry->gtt_offset) | ||
54 | ); | ||
55 | |||
56 | TRACE_EVENT(i915_gem_object_clflush, | ||
57 | |||
58 | TP_PROTO(struct drm_gem_object *obj), | ||
59 | |||
60 | TP_ARGS(obj), | ||
61 | |||
62 | TP_STRUCT__entry( | ||
63 | __field(struct drm_gem_object *, obj) | ||
64 | ), | ||
65 | |||
66 | TP_fast_assign( | ||
67 | __entry->obj = obj; | ||
68 | ), | ||
69 | |||
70 | TP_printk("obj=%p", __entry->obj) | ||
71 | ); | ||
72 | |||
73 | TRACE_EVENT(i915_gem_object_change_domain, | ||
74 | |||
75 | TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), | ||
76 | |||
77 | TP_ARGS(obj, old_read_domains, old_write_domain), | ||
78 | |||
79 | TP_STRUCT__entry( | ||
80 | __field(struct drm_gem_object *, obj) | ||
81 | __field(u32, read_domains) | ||
82 | __field(u32, write_domain) | ||
83 | ), | ||
84 | |||
85 | TP_fast_assign( | ||
86 | __entry->obj = obj; | ||
87 | __entry->read_domains = obj->read_domains | (old_read_domains << 16); | ||
88 | __entry->write_domain = obj->write_domain | (old_write_domain << 16); | ||
89 | ), | ||
90 | |||
91 | TP_printk("obj=%p, read=%04x, write=%04x", | ||
92 | __entry->obj, | ||
93 | __entry->read_domains, __entry->write_domain) | ||
94 | ); | ||
95 | |||
96 | TRACE_EVENT(i915_gem_object_get_fence, | ||
97 | |||
98 | TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), | ||
99 | |||
100 | TP_ARGS(obj, fence, tiling_mode), | ||
101 | |||
102 | TP_STRUCT__entry( | ||
103 | __field(struct drm_gem_object *, obj) | ||
104 | __field(int, fence) | ||
105 | __field(int, tiling_mode) | ||
106 | ), | ||
107 | |||
108 | TP_fast_assign( | ||
109 | __entry->obj = obj; | ||
110 | __entry->fence = fence; | ||
111 | __entry->tiling_mode = tiling_mode; | ||
112 | ), | ||
113 | |||
114 | TP_printk("obj=%p, fence=%d, tiling=%d", | ||
115 | __entry->obj, __entry->fence, __entry->tiling_mode) | ||
116 | ); | ||
117 | |||
118 | TRACE_EVENT(i915_gem_object_unbind, | ||
119 | |||
120 | TP_PROTO(struct drm_gem_object *obj), | ||
121 | |||
122 | TP_ARGS(obj), | ||
123 | |||
124 | TP_STRUCT__entry( | ||
125 | __field(struct drm_gem_object *, obj) | ||
126 | ), | ||
127 | |||
128 | TP_fast_assign( | ||
129 | __entry->obj = obj; | ||
130 | ), | ||
131 | |||
132 | TP_printk("obj=%p", __entry->obj) | ||
133 | ); | ||
134 | |||
135 | TRACE_EVENT(i915_gem_object_destroy, | ||
136 | |||
137 | TP_PROTO(struct drm_gem_object *obj), | ||
138 | |||
139 | TP_ARGS(obj), | ||
140 | |||
141 | TP_STRUCT__entry( | ||
142 | __field(struct drm_gem_object *, obj) | ||
143 | ), | ||
144 | |||
145 | TP_fast_assign( | ||
146 | __entry->obj = obj; | ||
147 | ), | ||
148 | |||
149 | TP_printk("obj=%p", __entry->obj) | ||
150 | ); | ||
151 | |||
152 | /* batch tracing */ | ||
153 | |||
154 | TRACE_EVENT(i915_gem_request_submit, | ||
155 | |||
156 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
157 | |||
158 | TP_ARGS(dev, seqno), | ||
159 | |||
160 | TP_STRUCT__entry( | ||
161 | __field(struct drm_device *, dev) | ||
162 | __field(u32, seqno) | ||
163 | ), | ||
164 | |||
165 | TP_fast_assign( | ||
166 | __entry->dev = dev; | ||
167 | __entry->seqno = seqno; | ||
168 | ), | ||
169 | |||
170 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
171 | ); | ||
172 | |||
173 | TRACE_EVENT(i915_gem_request_flush, | ||
174 | |||
175 | TP_PROTO(struct drm_device *dev, u32 seqno, | ||
176 | u32 flush_domains, u32 invalidate_domains), | ||
177 | |||
178 | TP_ARGS(dev, seqno, flush_domains, invalidate_domains), | ||
179 | |||
180 | TP_STRUCT__entry( | ||
181 | __field(struct drm_device *, dev) | ||
182 | __field(u32, seqno) | ||
183 | __field(u32, flush_domains) | ||
184 | __field(u32, invalidate_domains) | ||
185 | ), | ||
186 | |||
187 | TP_fast_assign( | ||
188 | __entry->dev = dev; | ||
189 | __entry->seqno = seqno; | ||
190 | __entry->flush_domains = flush_domains; | ||
191 | __entry->invalidate_domains = invalidate_domains; | ||
192 | ), | ||
193 | |||
194 | TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x", | ||
195 | __entry->dev, __entry->seqno, | ||
196 | __entry->flush_domains, __entry->invalidate_domains) | ||
197 | ); | ||
198 | |||
199 | |||
200 | TRACE_EVENT(i915_gem_request_complete, | ||
201 | |||
202 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
203 | |||
204 | TP_ARGS(dev, seqno), | ||
205 | |||
206 | TP_STRUCT__entry( | ||
207 | __field(struct drm_device *, dev) | ||
208 | __field(u32, seqno) | ||
209 | ), | ||
210 | |||
211 | TP_fast_assign( | ||
212 | __entry->dev = dev; | ||
213 | __entry->seqno = seqno; | ||
214 | ), | ||
215 | |||
216 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
217 | ); | ||
218 | |||
219 | TRACE_EVENT(i915_gem_request_retire, | ||
220 | |||
221 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
222 | |||
223 | TP_ARGS(dev, seqno), | ||
224 | |||
225 | TP_STRUCT__entry( | ||
226 | __field(struct drm_device *, dev) | ||
227 | __field(u32, seqno) | ||
228 | ), | ||
229 | |||
230 | TP_fast_assign( | ||
231 | __entry->dev = dev; | ||
232 | __entry->seqno = seqno; | ||
233 | ), | ||
234 | |||
235 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
236 | ); | ||
237 | |||
238 | TRACE_EVENT(i915_gem_request_wait_begin, | ||
239 | |||
240 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
241 | |||
242 | TP_ARGS(dev, seqno), | ||
243 | |||
244 | TP_STRUCT__entry( | ||
245 | __field(struct drm_device *, dev) | ||
246 | __field(u32, seqno) | ||
247 | ), | ||
248 | |||
249 | TP_fast_assign( | ||
250 | __entry->dev = dev; | ||
251 | __entry->seqno = seqno; | ||
252 | ), | ||
253 | |||
254 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
255 | ); | ||
256 | |||
257 | TRACE_EVENT(i915_gem_request_wait_end, | ||
258 | |||
259 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
260 | |||
261 | TP_ARGS(dev, seqno), | ||
262 | |||
263 | TP_STRUCT__entry( | ||
264 | __field(struct drm_device *, dev) | ||
265 | __field(u32, seqno) | ||
266 | ), | ||
267 | |||
268 | TP_fast_assign( | ||
269 | __entry->dev = dev; | ||
270 | __entry->seqno = seqno; | ||
271 | ), | ||
272 | |||
273 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
274 | ); | ||
275 | |||
276 | TRACE_EVENT(i915_ring_wait_begin, | ||
277 | |||
278 | TP_PROTO(struct drm_device *dev), | ||
279 | |||
280 | TP_ARGS(dev), | ||
281 | |||
282 | TP_STRUCT__entry( | ||
283 | __field(struct drm_device *, dev) | ||
284 | ), | ||
285 | |||
286 | TP_fast_assign( | ||
287 | __entry->dev = dev; | ||
288 | ), | ||
289 | |||
290 | TP_printk("dev=%p", __entry->dev) | ||
291 | ); | ||
292 | |||
293 | TRACE_EVENT(i915_ring_wait_end, | ||
294 | |||
295 | TP_PROTO(struct drm_device *dev), | ||
296 | |||
297 | TP_ARGS(dev), | ||
298 | |||
299 | TP_STRUCT__entry( | ||
300 | __field(struct drm_device *, dev) | ||
301 | ), | ||
302 | |||
303 | TP_fast_assign( | ||
304 | __entry->dev = dev; | ||
305 | ), | ||
306 | |||
307 | TP_printk("dev=%p", __entry->dev) | ||
308 | ); | ||
309 | |||
310 | #endif /* _I915_TRACE_H_ */ | ||
311 | |||
312 | /* This part must be outside protection */ | ||
313 | #undef TRACE_INCLUDE_PATH | ||
314 | #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 | ||
315 | #include <trace/define_trace.h> | ||
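Each TRACE_EVENT() above expands to an inline trace_<name>() helper plus the ftrace plumbing; the driver calls the helper at the matching point, as the i915_irq.c hunks earlier already show for request completion. A short usage sketch:

/* Emitting the events defined above (mirrors the IRQ-handler hunks). */
u32 seqno = i915_get_gem_seqno(dev);
trace_i915_gem_request_complete(dev, seqno);

/* Object lifecycle events take the GEM object itself. */
trace_i915_gem_object_create(obj);
trace_i915_gem_object_destroy(obj);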
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c new file mode 100644 index 000000000000..ead876eb6ea0 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_trace_points.c | |||
@@ -0,0 +1,11 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 Intel Corporation | ||
3 | * | ||
4 | * Authors: | ||
5 | * Chris Wilson <chris@chris-wilson.co.uk> | ||
6 | */ | ||
7 | |||
8 | #include "i915_drv.h" | ||
9 | |||
10 | #define CREATE_TRACE_POINTS | ||
11 | #include "i915_trace.h" | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1e28c1652fd0..4337414846b6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -217,6 +217,9 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
217 | if (IS_I85X(dev_priv->dev)) | 217 | if (IS_I85X(dev_priv->dev)) |
218 | dev_priv->lvds_ssc_freq = | 218 | dev_priv->lvds_ssc_freq = |
219 | general->ssc_freq ? 66 : 48; | 219 | general->ssc_freq ? 66 : 48; |
220 | else if (IS_IGDNG(dev_priv->dev)) | ||
221 | dev_priv->lvds_ssc_freq = | ||
222 | general->ssc_freq ? 100 : 120; | ||
220 | else | 223 | else |
221 | dev_priv->lvds_ssc_freq = | 224 | dev_priv->lvds_ssc_freq = |
222 | general->ssc_freq ? 100 : 96; | 225 | general->ssc_freq ? 100 : 96; |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 88814fa2dfd2..212e22740fc1 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -179,13 +179,10 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
179 | { | 179 | { |
180 | struct drm_device *dev = connector->dev; | 180 | struct drm_device *dev = connector->dev; |
181 | struct drm_i915_private *dev_priv = dev->dev_private; | 181 | struct drm_i915_private *dev_priv = dev->dev_private; |
182 | u32 adpa, temp; | 182 | u32 adpa; |
183 | bool ret; | 183 | bool ret; |
184 | 184 | ||
185 | temp = adpa = I915_READ(PCH_ADPA); | 185 | adpa = I915_READ(PCH_ADPA); |
186 | |||
187 | adpa &= ~ADPA_DAC_ENABLE; | ||
188 | I915_WRITE(PCH_ADPA, adpa); | ||
189 | 186 | ||
190 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 187 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
191 | 188 | ||
@@ -212,8 +209,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
212 | else | 209 | else |
213 | ret = false; | 210 | ret = false; |
214 | 211 | ||
215 | /* restore origin register */ | ||
216 | I915_WRITE(PCH_ADPA, temp); | ||
217 | return ret; | 212 | return ret; |
218 | } | 213 | } |
219 | 214 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0227b1652906..93ff6c03733e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -24,6 +24,8 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/input.h> | ||
27 | #include <linux/i2c.h> | 29 | #include <linux/i2c.h> |
28 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
29 | #include "drmP.h" | 31 | #include "drmP.h" |
@@ -875,7 +877,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
875 | refclk, best_clock); | 877 | refclk, best_clock); |
876 | 878 | ||
877 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 879 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
878 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 880 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == |
879 | LVDS_CLKB_POWER_UP) | 881 | LVDS_CLKB_POWER_UP) |
880 | clock.p2 = limit->p2.p2_fast; | 882 | clock.p2 = limit->p2.p2_fast; |
881 | else | 883 | else |
@@ -952,6 +954,241 @@ intel_wait_for_vblank(struct drm_device *dev) | |||
952 | mdelay(20); | 954 | mdelay(20); |
953 | } | 955 | } |
954 | 956 | ||
957 | /* Parameters have changed, update FBC info */ | ||
958 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | ||
959 | { | ||
960 | struct drm_device *dev = crtc->dev; | ||
961 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
962 | struct drm_framebuffer *fb = crtc->fb; | ||
963 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
964 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | ||
965 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
966 | int plane, i; | ||
967 | u32 fbc_ctl, fbc_ctl2; | ||
968 | |||
969 | dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; | ||
970 | |||
971 | if (fb->pitch < dev_priv->cfb_pitch) | ||
972 | dev_priv->cfb_pitch = fb->pitch; | ||
973 | |||
974 | /* FBC_CTL wants 64B units */ | ||
975 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | ||
976 | dev_priv->cfb_fence = obj_priv->fence_reg; | ||
977 | dev_priv->cfb_plane = intel_crtc->plane; | ||
978 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | ||
979 | |||
980 | /* Clear old tags */ | ||
981 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | ||
982 | I915_WRITE(FBC_TAG + (i * 4), 0); | ||
983 | |||
984 | /* Set it up... */ | ||
985 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; | ||
986 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
987 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; | ||
988 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | ||
989 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | ||
990 | |||
991 | /* enable it... */ | ||
992 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | ||
993 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | ||
994 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | ||
995 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
996 | fbc_ctl |= dev_priv->cfb_fence; | ||
997 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
998 | |||
999 | DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ", | ||
1000 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); | ||
1001 | } | ||
1002 | |||
1003 | void i8xx_disable_fbc(struct drm_device *dev) | ||
1004 | { | ||
1005 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1006 | u32 fbc_ctl; | ||
1007 | |||
1008 | if (!I915_HAS_FBC(dev)) | ||
1009 | return; | ||
1010 | |||
1011 | /* Disable compression */ | ||
1012 | fbc_ctl = I915_READ(FBC_CONTROL); | ||
1013 | fbc_ctl &= ~FBC_CTL_EN; | ||
1014 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
1015 | |||
1016 | /* Wait for compressing bit to clear */ | ||
1017 | while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) | ||
1018 | ; /* nothing */ | ||
1019 | |||
1020 | intel_wait_for_vblank(dev); | ||
1021 | |||
1022 | DRM_DEBUG("disabled FBC\n"); | ||
1023 | } | ||
1024 | |||
1025 | static bool i8xx_fbc_enabled(struct drm_crtc *crtc) | ||
1026 | { | ||
1027 | struct drm_device *dev = crtc->dev; | ||
1028 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1029 | |||
1030 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; | ||
1031 | } | ||
1032 | |||
1033 | static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | ||
1034 | { | ||
1035 | struct drm_device *dev = crtc->dev; | ||
1036 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1037 | struct drm_framebuffer *fb = crtc->fb; | ||
1038 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
1039 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | ||
1040 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1041 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : | ||
1042 | DPFC_CTL_PLANEB); | ||
1043 | unsigned long stall_watermark = 200; | ||
1044 | u32 dpfc_ctl; | ||
1045 | |||
1046 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | ||
1047 | dev_priv->cfb_fence = obj_priv->fence_reg; | ||
1048 | dev_priv->cfb_plane = intel_crtc->plane; | ||
1049 | |||
1050 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | ||
1051 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1052 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; | ||
1053 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | ||
1054 | } else { | ||
1055 | I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); | ||
1056 | } | ||
1057 | |||
1058 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
1059 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | ||
1060 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | ||
1061 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | ||
1062 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); | ||
1063 | |||
1064 | /* enable it... */ | ||
1065 | I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); | ||
1066 | |||
1067 | DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane); | ||
1068 | } | ||
1069 | |||
1070 | void g4x_disable_fbc(struct drm_device *dev) | ||
1071 | { | ||
1072 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1073 | u32 dpfc_ctl; | ||
1074 | |||
1075 | /* Disable compression */ | ||
1076 | dpfc_ctl = I915_READ(DPFC_CONTROL); | ||
1077 | dpfc_ctl &= ~DPFC_CTL_EN; | ||
1078 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
1079 | intel_wait_for_vblank(dev); | ||
1080 | |||
1081 | DRM_DEBUG("disabled FBC\n"); | ||
1082 | } | ||
1083 | |||
1084 | static bool g4x_fbc_enabled(struct drm_crtc *crtc) | ||
1085 | { | ||
1086 | struct drm_device *dev = crtc->dev; | ||
1087 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1088 | |||
1089 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | ||
1090 | } | ||
1091 | |||
1092 | /** | ||
1093 | * intel_update_fbc - enable/disable FBC as needed | ||
1094 | * @crtc: CRTC to point the compressor at | ||
1095 | * @mode: mode in use | ||
1096 | * | ||
1097 | * Set up the framebuffer compression hardware at mode set time. We | ||
1098 | * enable it if possible: | ||
1099 | * - plane A only (on pre-965) | ||
1100 | * - no pixel multiply/line duplication | ||
1101 | * - no alpha buffer discard | ||
1102 | * - no dual wide | ||
1103 | * - framebuffer <= 2048 in width, 1536 in height | ||
1104 | * | ||
1105 | * We can't assume that any compression will take place (worst case), | ||
1106 | * so the compressed buffer has to be the same size as the uncompressed | ||
1107 | * one. It also must reside (along with the line length buffer) in | ||
1108 | * stolen memory. | ||
1109 | * | ||
1110 | * We need to enable/disable FBC on a global basis. | ||
1111 | */ | ||
1112 | static void intel_update_fbc(struct drm_crtc *crtc, | ||
1113 | struct drm_display_mode *mode) | ||
1114 | { | ||
1115 | struct drm_device *dev = crtc->dev; | ||
1116 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1117 | struct drm_framebuffer *fb = crtc->fb; | ||
1118 | struct intel_framebuffer *intel_fb; | ||
1119 | struct drm_i915_gem_object *obj_priv; | ||
1120 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1121 | int plane = intel_crtc->plane; | ||
1122 | |||
1123 | if (!i915_powersave) | ||
1124 | return; | ||
1125 | |||
1126 | if (!dev_priv->display.fbc_enabled || | ||
1127 | !dev_priv->display.enable_fbc || | ||
1128 | !dev_priv->display.disable_fbc) | ||
1129 | return; | ||
1130 | |||
1131 | if (!crtc->fb) | ||
1132 | return; | ||
1133 | |||
1134 | intel_fb = to_intel_framebuffer(fb); | ||
1135 | obj_priv = intel_fb->obj->driver_private; | ||
1136 | |||
1137 | /* | ||
1138 | * If FBC is already on, we just have to verify that we can | ||
1139 | * keep it that way... | ||
1140 | * Need to disable if: | ||
1141 | * - changing FBC params (stride, fence, mode) | ||
1142 | * - new fb is too large to fit in compressed buffer | ||
1143 | * - going to an unsupported config (interlace, pixel multiply, etc.) | ||
1144 | */ | ||
1145 | if (intel_fb->obj->size > dev_priv->cfb_size) { | ||
1146 | DRM_DEBUG("framebuffer too large, disabling compression\n"); | ||
1147 | goto out_disable; | ||
1148 | } | ||
1149 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | ||
1150 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | ||
1151 | DRM_DEBUG("mode incompatible with compression, disabling\n"); | ||
1152 | goto out_disable; | ||
1153 | } | ||
1154 | if ((mode->hdisplay > 2048) || | ||
1155 | (mode->vdisplay > 1536)) { | ||
1156 | DRM_DEBUG("mode too large for compression, disabling\n"); | ||
1157 | goto out_disable; | ||
1158 | } | ||
1159 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { | ||
1160 | DRM_DEBUG("plane not 0, disabling compression\n"); | ||
1161 | goto out_disable; | ||
1162 | } | ||
1163 | if (obj_priv->tiling_mode != I915_TILING_X) { | ||
1164 | DRM_DEBUG("framebuffer not tiled, disabling compression\n"); | ||
1165 | goto out_disable; | ||
1166 | } | ||
1167 | |||
1168 | if (dev_priv->display.fbc_enabled(crtc)) { | ||
1169 | /* We can re-enable it in this case, but need to update pitch */ | ||
1170 | if (fb->pitch > dev_priv->cfb_pitch) | ||
1171 | dev_priv->display.disable_fbc(dev); | ||
1172 | if (obj_priv->fence_reg != dev_priv->cfb_fence) | ||
1173 | dev_priv->display.disable_fbc(dev); | ||
1174 | if (plane != dev_priv->cfb_plane) | ||
1175 | dev_priv->display.disable_fbc(dev); | ||
1176 | } | ||
1177 | |||
1178 | if (!dev_priv->display.fbc_enabled(crtc)) { | ||
1179 | /* Now try to turn it back on if possible */ | ||
1180 | dev_priv->display.enable_fbc(crtc, 500); | ||
1181 | } | ||
1182 | |||
1183 | return; | ||
1184 | |||
1185 | out_disable: | ||
1186 | DRM_DEBUG("unsupported config, disabling FBC\n"); | ||
1187 | /* Multiple disables should be harmless */ | ||
1188 | if (dev_priv->display.fbc_enabled(crtc)) | ||
1189 | dev_priv->display.disable_fbc(dev); | ||
1190 | } | ||
1191 | |||
955 | static int | 1192 | static int |
956 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | 1193 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, |
957 | struct drm_framebuffer *old_fb) | 1194 | struct drm_framebuffer *old_fb) |
@@ -964,12 +1201,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
964 | struct drm_i915_gem_object *obj_priv; | 1201 | struct drm_i915_gem_object *obj_priv; |
965 | struct drm_gem_object *obj; | 1202 | struct drm_gem_object *obj; |
966 | int pipe = intel_crtc->pipe; | 1203 | int pipe = intel_crtc->pipe; |
1204 | int plane = intel_crtc->plane; | ||
967 | unsigned long Start, Offset; | 1205 | unsigned long Start, Offset; |
968 | int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); | 1206 | int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); |
969 | int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); | 1207 | int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); |
970 | int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; | 1208 | int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; |
971 | int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF); | 1209 | int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); |
972 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | 1210 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; |
973 | u32 dspcntr, alignment; | 1211 | u32 dspcntr, alignment; |
974 | int ret; | 1212 | int ret; |
975 | 1213 | ||
@@ -979,12 +1217,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
979 | return 0; | 1217 | return 0; |
980 | } | 1218 | } |
981 | 1219 | ||
982 | switch (pipe) { | 1220 | switch (plane) { |
983 | case 0: | 1221 | case 0: |
984 | case 1: | 1222 | case 1: |
985 | break; | 1223 | break; |
986 | default: | 1224 | default: |
987 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | 1225 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); |
988 | return -EINVAL; | 1226 | return -EINVAL; |
989 | } | 1227 | } |
990 | 1228 | ||
@@ -1086,6 +1324,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1086 | I915_READ(dspbase); | 1324 | I915_READ(dspbase); |
1087 | } | 1325 | } |
1088 | 1326 | ||
1327 | if ((IS_I965G(dev) || plane == 0)) | ||
1328 | intel_update_fbc(crtc, &crtc->mode); | ||
1329 | |||
1089 | intel_wait_for_vblank(dev); | 1330 | intel_wait_for_vblank(dev); |
1090 | 1331 | ||
1091 | if (old_fb) { | 1332 | if (old_fb) { |
@@ -1217,6 +1458,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1217 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | 1458 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; |
1218 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; | 1459 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; |
1219 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; | 1460 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; |
1461 | int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS; | ||
1220 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | 1462 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; |
1221 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | 1463 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; |
1222 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | 1464 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; |
@@ -1268,6 +1510,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1268 | } | 1510 | } |
1269 | } | 1511 | } |
1270 | 1512 | ||
1513 | /* Enable panel fitting for LVDS */ | ||
1514 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
1515 | temp = I915_READ(pf_ctl_reg); | ||
1516 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE); | ||
1517 | |||
1518 | /* currently full aspect */ | ||
1519 | I915_WRITE(pf_win_pos, 0); | ||
1520 | |||
1521 | I915_WRITE(pf_win_size, | ||
1522 | (dev_priv->panel_fixed_mode->hdisplay << 16) | | ||
1523 | (dev_priv->panel_fixed_mode->vdisplay)); | ||
1524 | } | ||
1525 | |||
1271 | /* Enable CPU pipe */ | 1526 | /* Enable CPU pipe */ |
1272 | temp = I915_READ(pipeconf_reg); | 1527 | temp = I915_READ(pipeconf_reg); |
1273 | if ((temp & PIPEACONF_ENABLE) == 0) { | 1528 | if ((temp & PIPEACONF_ENABLE) == 0) { |
@@ -1532,9 +1787,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1532 | struct drm_i915_private *dev_priv = dev->dev_private; | 1787 | struct drm_i915_private *dev_priv = dev->dev_private; |
1533 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1788 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1534 | int pipe = intel_crtc->pipe; | 1789 | int pipe = intel_crtc->pipe; |
1790 | int plane = intel_crtc->plane; | ||
1535 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 1791 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; |
1536 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | 1792 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; |
1537 | int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; | 1793 | int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; |
1538 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | 1794 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; |
1539 | u32 temp; | 1795 | u32 temp; |
1540 | 1796 | ||
@@ -1577,6 +1833,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1577 | 1833 | ||
1578 | intel_crtc_load_lut(crtc); | 1834 | intel_crtc_load_lut(crtc); |
1579 | 1835 | ||
1836 | if ((IS_I965G(dev) || plane == 0)) | ||
1837 | intel_update_fbc(crtc, &crtc->mode); | ||
1838 | |||
1580 | /* Give the overlay scaler a chance to enable if it's on this pipe */ | 1839 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
1581 | //intel_crtc_dpms_video(crtc, true); TODO | 1840 | //intel_crtc_dpms_video(crtc, true); TODO |
1582 | intel_update_watermarks(dev); | 1841 | intel_update_watermarks(dev); |
@@ -1586,6 +1845,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1586 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | 1845 | /* Give the overlay scaler a chance to disable if it's on this pipe */ |
1587 | //intel_crtc_dpms_video(crtc, FALSE); TODO | 1846 | //intel_crtc_dpms_video(crtc, FALSE); TODO |
1588 | 1847 | ||
1848 | if (dev_priv->cfb_plane == plane && | ||
1849 | dev_priv->display.disable_fbc) | ||
1850 | dev_priv->display.disable_fbc(dev); | ||
1851 | |||
1589 | /* Disable the VGA plane that we never use */ | 1852 | /* Disable the VGA plane that we never use */ |
1590 | i915_disable_vga(dev); | 1853 | i915_disable_vga(dev); |
1591 | 1854 | ||
@@ -1634,15 +1897,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1634 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | 1897 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) |
1635 | { | 1898 | { |
1636 | struct drm_device *dev = crtc->dev; | 1899 | struct drm_device *dev = crtc->dev; |
1900 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1637 | struct drm_i915_master_private *master_priv; | 1901 | struct drm_i915_master_private *master_priv; |
1638 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1902 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1639 | int pipe = intel_crtc->pipe; | 1903 | int pipe = intel_crtc->pipe; |
1640 | bool enabled; | 1904 | bool enabled; |
1641 | 1905 | ||
1642 | if (IS_IGDNG(dev)) | 1906 | dev_priv->display.dpms(crtc, mode); |
1643 | igdng_crtc_dpms(crtc, mode); | ||
1644 | else | ||
1645 | i9xx_crtc_dpms(crtc, mode); | ||
1646 | 1907 | ||
1647 | intel_crtc->dpms_mode = mode; | 1908 | intel_crtc->dpms_mode = mode; |
1648 | 1909 | ||
@@ -1709,56 +1970,68 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
1709 | return true; | 1970 | return true; |
1710 | } | 1971 | } |
1711 | 1972 | ||
1973 | static int i945_get_display_clock_speed(struct drm_device *dev) | ||
1974 | { | ||
1975 | return 400000; | ||
1976 | } | ||
1712 | 1977 | ||
1713 | /** Returns the core display clock speed for i830 - i945 */ | 1978 | static int i915_get_display_clock_speed(struct drm_device *dev) |
1714 | static int intel_get_core_clock_speed(struct drm_device *dev) | ||
1715 | { | 1979 | { |
1980 | return 333000; | ||
1981 | } | ||
1716 | 1982 | ||
1717 | /* Core clock values taken from the published datasheets. | 1983 | static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) |
1718 | * The 830 may go up to 166 Mhz, which we should check. | 1984 | { |
1719 | */ | 1985 | return 200000; |
1720 | if (IS_I945G(dev)) | 1986 | } |
1721 | return 400000; | ||
1722 | else if (IS_I915G(dev)) | ||
1723 | return 333000; | ||
1724 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) | ||
1725 | return 200000; | ||
1726 | else if (IS_I915GM(dev)) { | ||
1727 | u16 gcfgc = 0; | ||
1728 | 1987 | ||
1729 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | 1988 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
1989 | { | ||
1990 | u16 gcfgc = 0; | ||
1730 | 1991 | ||
1731 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) | 1992 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); |
1732 | return 133000; | 1993 | |
1733 | else { | 1994 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) |
1734 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { | 1995 | return 133000; |
1735 | case GC_DISPLAY_CLOCK_333_MHZ: | 1996 | else { |
1736 | return 333000; | 1997 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { |
1737 | default: | 1998 | case GC_DISPLAY_CLOCK_333_MHZ: |
1738 | case GC_DISPLAY_CLOCK_190_200_MHZ: | 1999 | return 333000; |
1739 | return 190000; | 2000 | default: |
1740 | } | 2001 | case GC_DISPLAY_CLOCK_190_200_MHZ: |
1741 | } | 2002 | return 190000; |
1742 | } else if (IS_I865G(dev)) | ||
1743 | return 266000; | ||
1744 | else if (IS_I855(dev)) { | ||
1745 | u16 hpllcc = 0; | ||
1746 | /* Assume that the hardware is in the high speed state. This | ||
1747 | * should be the default. | ||
1748 | */ | ||
1749 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { | ||
1750 | case GC_CLOCK_133_200: | ||
1751 | case GC_CLOCK_100_200: | ||
1752 | return 200000; | ||
1753 | case GC_CLOCK_166_250: | ||
1754 | return 250000; | ||
1755 | case GC_CLOCK_100_133: | ||
1756 | return 133000; | ||
1757 | } | 2003 | } |
1758 | } else /* 852, 830 */ | 2004 | } |
2005 | } | ||
2006 | |||
2007 | static int i865_get_display_clock_speed(struct drm_device *dev) | ||
2008 | { | ||
2009 | return 266000; | ||
2010 | } | ||
2011 | |||
2012 | static int i855_get_display_clock_speed(struct drm_device *dev) | ||
2013 | { | ||
2014 | u16 hpllcc = 0; | ||
2015 | /* Assume that the hardware is in the high speed state. This | ||
2016 | * should be the default. | ||
2017 | */ | ||
2018 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { | ||
2019 | case GC_CLOCK_133_200: | ||
2020 | case GC_CLOCK_100_200: | ||
2021 | return 200000; | ||
2022 | case GC_CLOCK_166_250: | ||
2023 | return 250000; | ||
2024 | case GC_CLOCK_100_133: | ||
1759 | return 133000; | 2025 | return 133000; |
2026 | } | ||
2027 | |||
2028 | /* Shouldn't happen */ | ||
2029 | return 0; | ||
2030 | } | ||
1760 | 2031 | ||
1761 | return 0; /* Silence gcc warning */ | 2032 | static int i830_get_display_clock_speed(struct drm_device *dev) |
2033 | { | ||
2034 | return 133000; | ||
1762 | } | 2035 | } |
1763 | 2036 | ||
1764 | /** | 2037 | /** |
@@ -1921,7 +2194,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
1921 | { | 2194 | { |
1922 | long entries_required, wm_size; | 2195 | long entries_required, wm_size; |
1923 | 2196 | ||
1924 | entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; | 2197 | /* |
2198 | * Note: we need to make sure we don't overflow for various clock & | ||
2199 | * latency values. | ||
2200 | * clocks go from a few thousand to several hundred thousand. | ||
2201 | * latency is usually a few thousand | ||
2202 | */ | ||
2203 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / | ||
2204 | 1000; | ||
1925 | entries_required /= wm->cacheline_size; | 2205 | entries_required /= wm->cacheline_size; |
1926 | 2206 | ||
1927 | DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); | 2207 | DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); |
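The reordered arithmetic matters on 32-bit builds, where entries_required is a 32-bit long: with a pixel clock of a few hundred thousand kHz, a 4-byte pixel and the driver's latency_ns constant of 5000, the old clock_in_khz * pixel_size * latency_ns product exceeds 2^31 before the final divide. Dividing the clock by 1000 first keeps every intermediate small and yields the same result. A quick standalone check, with the numbers chosen only as an example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t clock_in_khz = 400000;   /* 400 MHz pixel clock */
	int32_t pixel_size   = 4;        /* bytes per pixel */
	int32_t latency_ns   = 5000;     /* matches the driver's latency_ns constant */

	/* Old order: 400000 * 4 * 5000 = 8e9, which overflows a 32-bit long. */
	int64_t wide = (int64_t)clock_in_khz * pixel_size * latency_ns / 1000000;

	/* New order: (400000 / 1000) * 4 * 5000 / 1000 = 8000, no overflow. */
	int32_t safe = (clock_in_khz / 1000) * pixel_size * latency_ns / 1000;

	printf("64-bit result: %lld, reordered 32-bit result: %d\n",
	       (long long)wide, safe);
	return 0;
}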
@@ -1986,14 +2266,13 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, | |||
1986 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { | 2266 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { |
1987 | latency = &cxsr_latency_table[i]; | 2267 | latency = &cxsr_latency_table[i]; |
1988 | if (is_desktop == latency->is_desktop && | 2268 | if (is_desktop == latency->is_desktop && |
1989 | fsb == latency->fsb_freq && mem == latency->mem_freq) | 2269 | fsb == latency->fsb_freq && mem == latency->mem_freq) |
1990 | break; | 2270 | return latency; |
1991 | } | 2271 | } |
1992 | if (i >= ARRAY_SIZE(cxsr_latency_table)) { | 2272 | |
1993 | DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); | 2273 | DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); |
1994 | return NULL; | 2274 | |
1995 | } | 2275 | return NULL; |
1996 | return latency; | ||
1997 | } | 2276 | } |
1998 | 2277 | ||
1999 | static void igd_disable_cxsr(struct drm_device *dev) | 2278 | static void igd_disable_cxsr(struct drm_device *dev) |
@@ -2084,32 +2363,36 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
2084 | */ | 2363 | */ |
2085 | const static int latency_ns = 5000; | 2364 | const static int latency_ns = 5000; |
2086 | 2365 | ||
2087 | static int intel_get_fifo_size(struct drm_device *dev, int plane) | 2366 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
2088 | { | 2367 | { |
2089 | struct drm_i915_private *dev_priv = dev->dev_private; | 2368 | struct drm_i915_private *dev_priv = dev->dev_private; |
2090 | uint32_t dsparb = I915_READ(DSPARB); | 2369 | uint32_t dsparb = I915_READ(DSPARB); |
2091 | int size; | 2370 | int size; |
2092 | 2371 | ||
2093 | if (IS_I9XX(dev)) { | 2372 | if (plane == 0) |
2094 | if (plane == 0) | ||
2095 | size = dsparb & 0x7f; | ||
2096 | else | ||
2097 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - | ||
2098 | (dsparb & 0x7f); | ||
2099 | } else if (IS_I85X(dev)) { | ||
2100 | if (plane == 0) | ||
2101 | size = dsparb & 0x1ff; | ||
2102 | else | ||
2103 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | ||
2104 | (dsparb & 0x1ff); | ||
2105 | size >>= 1; /* Convert to cachelines */ | ||
2106 | } else if (IS_845G(dev)) { | ||
2107 | size = dsparb & 0x7f; | 2373 | size = dsparb & 0x7f; |
2108 | size >>= 2; /* Convert to cachelines */ | 2374 | else |
2109 | } else { | 2375 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - |
2110 | size = dsparb & 0x7f; | 2376 | (dsparb & 0x7f); |
2111 | size >>= 1; /* Convert to cachelines */ | 2377 | |
2112 | } | 2378 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", |
2379 | size); | ||
2380 | |||
2381 | return size; | ||
2382 | } | ||
2383 | |||
2384 | static int i85x_get_fifo_size(struct drm_device *dev, int plane) | ||
2385 | { | ||
2386 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2387 | uint32_t dsparb = I915_READ(DSPARB); | ||
2388 | int size; | ||
2389 | |||
2390 | if (plane == 0) | ||
2391 | size = dsparb & 0x1ff; | ||
2392 | else | ||
2393 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | ||
2394 | (dsparb & 0x1ff); | ||
2395 | size >>= 1; /* Convert to cachelines */ | ||
2113 | 2396 | ||
2114 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | 2397 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", |
2115 | size); | 2398 | size); |
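These helpers all carve the shared display FIFO between planes A and B out of the DSPARB register: plane A's share is the low bitfield, plane B's is the distance from A's end to the next start field, and some generations then convert the raw value to cachelines with a shift. A worked standalone example of the i9xx variant; the register value is arbitrary and the shift of 7 is an assumption taken from the 0x7f field width, not copied from the driver's headers:

#include <stdint.h>
#include <stdio.h>

#define DSPARB_CSTART_SHIFT 7   /* assumed: plane A field is 7 bits wide */

int main(void)
{
	/* Example split: B/C boundary at entry 96, plane A ends at entry 64. */
	uint32_t dsparb = (96u << DSPARB_CSTART_SHIFT) | 64u;

	int planea = dsparb & 0x7f;                                              /* 64 entries */
	int planeb = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - (dsparb & 0x7f); /* 32 entries */

	printf("plane A FIFO: %d, plane B FIFO: %d\n", planea, planeb);
	return 0;
}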
@@ -2117,7 +2400,38 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane) | |||
2117 | return size; | 2400 | return size; |
2118 | } | 2401 | } |
2119 | 2402 | ||
2120 | static void g4x_update_wm(struct drm_device *dev) | 2403 | static int i845_get_fifo_size(struct drm_device *dev, int plane) |
2404 | { | ||
2405 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2406 | uint32_t dsparb = I915_READ(DSPARB); | ||
2407 | int size; | ||
2408 | |||
2409 | size = dsparb & 0x7f; | ||
2410 | size >>= 2; /* Convert to cachelines */ | ||
2411 | |||
2412 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | ||
2413 | size); | ||
2414 | |||
2415 | return size; | ||
2416 | } | ||
2417 | |||
2418 | static int i830_get_fifo_size(struct drm_device *dev, int plane) | ||
2419 | { | ||
2420 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2421 | uint32_t dsparb = I915_READ(DSPARB); | ||
2422 | int size; | ||
2423 | |||
2424 | size = dsparb & 0x7f; | ||
2425 | size >>= 1; /* Convert to cachelines */ | ||
2426 | |||
2427 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | ||
2428 | size); | ||
2429 | |||
2430 | return size; | ||
2431 | } | ||
2432 | |||
2433 | static void g4x_update_wm(struct drm_device *dev, int unused, int unused2, | ||
2434 | int unused3, int unused4) | ||
2121 | { | 2435 | { |
2122 | struct drm_i915_private *dev_priv = dev->dev_private; | 2436 | struct drm_i915_private *dev_priv = dev->dev_private; |
2123 | u32 fw_blc_self = I915_READ(FW_BLC_SELF); | 2437 | u32 fw_blc_self = I915_READ(FW_BLC_SELF); |
@@ -2129,7 +2443,8 @@ static void g4x_update_wm(struct drm_device *dev) | |||
2129 | I915_WRITE(FW_BLC_SELF, fw_blc_self); | 2443 | I915_WRITE(FW_BLC_SELF, fw_blc_self); |
2130 | } | 2444 | } |
2131 | 2445 | ||
2132 | static void i965_update_wm(struct drm_device *dev) | 2446 | static void i965_update_wm(struct drm_device *dev, int unused, int unused2, |
2447 | int unused3, int unused4) | ||
2133 | { | 2448 | { |
2134 | struct drm_i915_private *dev_priv = dev->dev_private; | 2449 | struct drm_i915_private *dev_priv = dev->dev_private; |
2135 | 2450 | ||
@@ -2165,8 +2480,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2165 | cacheline_size = planea_params.cacheline_size; | 2480 | cacheline_size = planea_params.cacheline_size; |
2166 | 2481 | ||
2167 | /* Update per-plane FIFO sizes */ | 2482 | /* Update per-plane FIFO sizes */ |
2168 | planea_params.fifo_size = intel_get_fifo_size(dev, 0); | 2483 | planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
2169 | planeb_params.fifo_size = intel_get_fifo_size(dev, 1); | 2484 | planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1); |
2170 | 2485 | ||
2171 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, | 2486 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, |
2172 | pixel_size, latency_ns); | 2487 | pixel_size, latency_ns); |
@@ -2213,14 +2528,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2213 | I915_WRITE(FW_BLC2, fwater_hi); | 2528 | I915_WRITE(FW_BLC2, fwater_hi); |
2214 | } | 2529 | } |
2215 | 2530 | ||
2216 | static void i830_update_wm(struct drm_device *dev, int planea_clock, | 2531 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, |
2217 | int pixel_size) | 2532 | int unused2, int pixel_size) |
2218 | { | 2533 | { |
2219 | struct drm_i915_private *dev_priv = dev->dev_private; | 2534 | struct drm_i915_private *dev_priv = dev->dev_private; |
2220 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 2535 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
2221 | int planea_wm; | 2536 | int planea_wm; |
2222 | 2537 | ||
2223 | i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); | 2538 | i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
2224 | 2539 | ||
2225 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, | 2540 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, |
2226 | pixel_size, latency_ns); | 2541 | pixel_size, latency_ns); |
@@ -2264,6 +2579,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, | |||
2264 | */ | 2579 | */ |
2265 | static void intel_update_watermarks(struct drm_device *dev) | 2580 | static void intel_update_watermarks(struct drm_device *dev) |
2266 | { | 2581 | { |
2582 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2267 | struct drm_crtc *crtc; | 2583 | struct drm_crtc *crtc; |
2268 | struct intel_crtc *intel_crtc; | 2584 | struct intel_crtc *intel_crtc; |
2269 | int sr_hdisplay = 0; | 2585 | int sr_hdisplay = 0; |
@@ -2302,15 +2618,8 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
2302 | else if (IS_IGD(dev)) | 2618 | else if (IS_IGD(dev)) |
2303 | igd_disable_cxsr(dev); | 2619 | igd_disable_cxsr(dev); |
2304 | 2620 | ||
2305 | if (IS_G4X(dev)) | 2621 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, |
2306 | g4x_update_wm(dev); | 2622 | sr_hdisplay, pixel_size); |
2307 | else if (IS_I965G(dev)) | ||
2308 | i965_update_wm(dev); | ||
2309 | else if (IS_I9XX(dev) || IS_MOBILE(dev)) | ||
2310 | i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay, | ||
2311 | pixel_size); | ||
2312 | else | ||
2313 | i830_update_wm(dev, planea_clock, pixel_size); | ||
2314 | } | 2623 | } |
2315 | 2624 | ||
2316 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 2625 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
@@ -2323,10 +2632,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2323 | struct drm_i915_private *dev_priv = dev->dev_private; | 2632 | struct drm_i915_private *dev_priv = dev->dev_private; |
2324 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2633 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2325 | int pipe = intel_crtc->pipe; | 2634 | int pipe = intel_crtc->pipe; |
2635 | int plane = intel_crtc->plane; | ||
2326 | int fp_reg = (pipe == 0) ? FPA0 : FPB0; | 2636 | int fp_reg = (pipe == 0) ? FPA0 : FPB0; |
2327 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 2637 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; |
2328 | int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; | 2638 | int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; |
2329 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | 2639 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; |
2330 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | 2640 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; |
2331 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | 2641 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; |
2332 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | 2642 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; |
@@ -2334,8 +2644,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2334 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | 2644 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; |
2335 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | 2645 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; |
2336 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | 2646 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; |
2337 | int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; | 2647 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; |
2338 | int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; | 2648 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; |
2339 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | 2649 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; |
2340 | int refclk, num_outputs = 0; | 2650 | int refclk, num_outputs = 0; |
2341 | intel_clock_t clock, reduced_clock; | 2651 | intel_clock_t clock, reduced_clock; |
@@ -2568,7 +2878,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2568 | enable color space conversion */ | 2878 | enable color space conversion */ |
2569 | if (!IS_IGDNG(dev)) { | 2879 | if (!IS_IGDNG(dev)) { |
2570 | if (pipe == 0) | 2880 | if (pipe == 0) |
2571 | dspcntr |= DISPPLANE_SEL_PIPE_A; | 2881 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
2572 | else | 2882 | else |
2573 | dspcntr |= DISPPLANE_SEL_PIPE_B; | 2883 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
2574 | } | 2884 | } |
@@ -2580,7 +2890,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2580 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the | 2890 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the |
2581 | * pipe == 0 check? | 2891 | * pipe == 0 check? |
2582 | */ | 2892 | */ |
2583 | if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10) | 2893 | if (mode->clock > |
2894 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) | ||
2584 | pipeconf |= PIPEACONF_DOUBLE_WIDE; | 2895 | pipeconf |= PIPEACONF_DOUBLE_WIDE; |
2585 | else | 2896 | else |
2586 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; | 2897 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; |
@@ -2652,9 +2963,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2652 | udelay(150); | 2963 | udelay(150); |
2653 | 2964 | ||
2654 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { | 2965 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { |
2655 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | 2966 | if (is_sdvo) { |
2656 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | 2967 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; |
2968 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
2657 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | 2969 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); |
2970 | } else | ||
2971 | I915_WRITE(dpll_md_reg, 0); | ||
2658 | } else { | 2972 | } else { |
2659 | /* write it again -- the BIOS does, after all */ | 2973 | /* write it again -- the BIOS does, after all */ |
2660 | I915_WRITE(dpll_reg, dpll); | 2974 | I915_WRITE(dpll_reg, dpll); |
@@ -2734,6 +3048,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2734 | /* Flush the plane changes */ | 3048 | /* Flush the plane changes */ |
2735 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 3049 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
2736 | 3050 | ||
3051 | if ((IS_I965G(dev) || plane == 0)) | ||
3052 | intel_update_fbc(crtc, &crtc->mode); | ||
3053 | |||
2737 | intel_update_watermarks(dev); | 3054 | intel_update_watermarks(dev); |
2738 | 3055 | ||
2739 | drm_vblank_post_modeset(dev, pipe); | 3056 | drm_vblank_post_modeset(dev, pipe); |
@@ -2778,6 +3095,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
2778 | struct drm_gem_object *bo; | 3095 | struct drm_gem_object *bo; |
2779 | struct drm_i915_gem_object *obj_priv; | 3096 | struct drm_i915_gem_object *obj_priv; |
2780 | int pipe = intel_crtc->pipe; | 3097 | int pipe = intel_crtc->pipe; |
3098 | int plane = intel_crtc->plane; | ||
2781 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; | 3099 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; |
2782 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; | 3100 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; |
2783 | uint32_t temp = I915_READ(control); | 3101 | uint32_t temp = I915_READ(control); |
@@ -2863,6 +3181,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
2863 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 3181 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
2864 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 3182 | drm_gem_object_unreference(intel_crtc->cursor_bo); |
2865 | } | 3183 | } |
3184 | |||
3185 | if ((IS_I965G(dev) || plane == 0)) | ||
3186 | intel_update_fbc(crtc, &crtc->mode); | ||
3187 | |||
2866 | mutex_unlock(&dev->struct_mutex); | 3188 | mutex_unlock(&dev->struct_mutex); |
2867 | 3189 | ||
2868 | intel_crtc->cursor_addr = addr; | 3190 | intel_crtc->cursor_addr = addr; |
@@ -3544,6 +3866,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
3544 | intel_crtc->lut_b[i] = i; | 3866 | intel_crtc->lut_b[i] = i; |
3545 | } | 3867 | } |
3546 | 3868 | ||
3869 | /* Swap pipes & planes for FBC on pre-965 */ | ||
3870 | intel_crtc->pipe = pipe; | ||
3871 | intel_crtc->plane = pipe; | ||
3872 | if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { | ||
3873 | DRM_DEBUG("swapping pipes & planes for FBC\n"); | ||
3874 | intel_crtc->plane = ((pipe == 0) ? 1 : 0); | ||
3875 | } | ||
3876 | |||
3547 | intel_crtc->cursor_addr = 0; | 3877 | intel_crtc->cursor_addr = 0; |
3548 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; | 3878 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; |
3549 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 3879 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
@@ -3826,6 +4156,73 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
3826 | } | 4156 | } |
3827 | } | 4157 | } |
3828 | 4158 | ||
4159 | /* Set up chip specific display functions */ | ||
4160 | static void intel_init_display(struct drm_device *dev) | ||
4161 | { | ||
4162 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4163 | |||
4164 | /* We always want a DPMS function */ | ||
4165 | if (IS_IGDNG(dev)) | ||
4166 | dev_priv->display.dpms = igdng_crtc_dpms; | ||
4167 | else | ||
4168 | dev_priv->display.dpms = i9xx_crtc_dpms; | ||
4169 | |||
4170 | /* Only mobile has FBC, leave pointers NULL for other chips */ | ||
4171 | if (IS_MOBILE(dev)) { | ||
4172 | if (IS_GM45(dev)) { | ||
4173 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | ||
4174 | dev_priv->display.enable_fbc = g4x_enable_fbc; | ||
4175 | dev_priv->display.disable_fbc = g4x_disable_fbc; | ||
4176 | } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { | ||
4177 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | ||
4178 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | ||
4179 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | ||
4180 | } | ||
4181 | /* 855GM needs testing */ | ||
4182 | } | ||
4183 | |||
4184 | /* Returns the core display clock speed */ | ||
4185 | if (IS_I945G(dev)) | ||
4186 | dev_priv->display.get_display_clock_speed = | ||
4187 | i945_get_display_clock_speed; | ||
4188 | else if (IS_I915G(dev)) | ||
4189 | dev_priv->display.get_display_clock_speed = | ||
4190 | i915_get_display_clock_speed; | ||
4191 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) | ||
4192 | dev_priv->display.get_display_clock_speed = | ||
4193 | i9xx_misc_get_display_clock_speed; | ||
4194 | else if (IS_I915GM(dev)) | ||
4195 | dev_priv->display.get_display_clock_speed = | ||
4196 | i915gm_get_display_clock_speed; | ||
4197 | else if (IS_I865G(dev)) | ||
4198 | dev_priv->display.get_display_clock_speed = | ||
4199 | i865_get_display_clock_speed; | ||
4200 | else if (IS_I855(dev)) | ||
4201 | dev_priv->display.get_display_clock_speed = | ||
4202 | i855_get_display_clock_speed; | ||
4203 | else /* 852, 830 */ | ||
4204 | dev_priv->display.get_display_clock_speed = | ||
4205 | i830_get_display_clock_speed; | ||
4206 | |||
4207 | /* For FIFO watermark updates */ | ||
4208 | if (IS_G4X(dev)) | ||
4209 | dev_priv->display.update_wm = g4x_update_wm; | ||
4210 | else if (IS_I965G(dev)) | ||
4211 | dev_priv->display.update_wm = i965_update_wm; | ||
4212 | else if (IS_I9XX(dev) || IS_MOBILE(dev)) { | ||
4213 | dev_priv->display.update_wm = i9xx_update_wm; | ||
4214 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | ||
4215 | } else { | ||
4216 | if (IS_I85X(dev)) | ||
4217 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | ||
4218 | else if (IS_845G(dev)) | ||
4219 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | ||
4220 | else | ||
4221 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | ||
4222 | dev_priv->display.update_wm = i830_update_wm; | ||
4223 | } | ||
4224 | } | ||
4225 | |||
3829 | void intel_modeset_init(struct drm_device *dev) | 4226 | void intel_modeset_init(struct drm_device *dev) |
3830 | { | 4227 | { |
3831 | struct drm_i915_private *dev_priv = dev->dev_private; | 4228 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3839,6 +4236,8 @@ void intel_modeset_init(struct drm_device *dev) | |||
3839 | 4236 | ||
3840 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | 4237 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
3841 | 4238 | ||
4239 | intel_init_display(dev); | ||
4240 | |||
3842 | if (IS_I965G(dev)) { | 4241 | if (IS_I965G(dev)) { |
3843 | dev->mode_config.max_width = 8192; | 4242 | dev->mode_config.max_width = 8192; |
3844 | dev->mode_config.max_height = 8192; | 4243 | dev->mode_config.max_height = 8192; |
@@ -3904,6 +4303,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
3904 | 4303 | ||
3905 | mutex_unlock(&dev->struct_mutex); | 4304 | mutex_unlock(&dev->struct_mutex); |
3906 | 4305 | ||
4306 | if (dev_priv->display.disable_fbc) | ||
4307 | dev_priv->display.disable_fbc(dev); | ||
4308 | |||
3907 | drm_mode_config_cleanup(dev); | 4309 | drm_mode_config_cleanup(dev); |
3908 | } | 4310 | } |
3909 | 4311 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3ebbbabfe59b..8aa4b7f30daa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
29 | #include <linux/i2c-id.h> | 29 | #include <linux/i2c-id.h> |
30 | #include <linux/i2c-algo-bit.h> | 30 | #include <linux/i2c-algo-bit.h> |
31 | #include "i915_drv.h" | ||
31 | #include "drm_crtc.h" | 32 | #include "drm_crtc.h" |
32 | 33 | ||
33 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
@@ -111,8 +112,8 @@ struct intel_output { | |||
111 | 112 | ||
112 | struct intel_crtc { | 113 | struct intel_crtc { |
113 | struct drm_crtc base; | 114 | struct drm_crtc base; |
114 | int pipe; | 115 | enum pipe pipe; |
115 | int plane; | 116 | enum plane plane; |
116 | struct drm_gem_object *cursor_bo; | 117 | struct drm_gem_object *cursor_bo; |
117 | uint32_t cursor_addr; | 118 | uint32_t cursor_addr; |
118 | u8 lut_r[256], lut_g[256], lut_b[256]; | 119 | u8 lut_r[256], lut_g[256], lut_b[256]; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index dafc0da1c256..98ae3d73577e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -27,6 +27,7 @@ | |||
27 | * Jesse Barnes <jesse.barnes@intel.com> | 27 | * Jesse Barnes <jesse.barnes@intel.com> |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <acpi/button.h> | ||
30 | #include <linux/dmi.h> | 31 | #include <linux/dmi.h> |
31 | #include <linux/i2c.h> | 32 | #include <linux/i2c.h> |
32 | #include "drmP.h" | 33 | #include "drmP.h" |
@@ -295,6 +296,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
295 | goto out; | 296 | goto out; |
296 | } | 297 | } |
297 | 298 | ||
299 | /* full screen scale for now */ | ||
300 | if (IS_IGDNG(dev)) | ||
301 | goto out; | ||
302 | |||
298 | /* 965+ wants fuzzy fitting */ | 303 | /* 965+ wants fuzzy fitting */ |
299 | if (IS_I965G(dev)) | 304 | if (IS_I965G(dev)) |
300 | pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | | 305 | pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | |
@@ -322,8 +327,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
322 | * to register description and PRM. | 327 | * to register description and PRM. |
323 | * Change the value here to see the borders for debugging | 328 | * Change the value here to see the borders for debugging |
324 | */ | 329 | */ |
325 | I915_WRITE(BCLRPAT_A, 0); | 330 | if (!IS_IGDNG(dev)) { |
326 | I915_WRITE(BCLRPAT_B, 0); | 331 | I915_WRITE(BCLRPAT_A, 0); |
332 | I915_WRITE(BCLRPAT_B, 0); | ||
333 | } | ||
327 | 334 | ||
328 | switch (lvds_priv->fitting_mode) { | 335 | switch (lvds_priv->fitting_mode) { |
329 | case DRM_MODE_SCALE_CENTER: | 336 | case DRM_MODE_SCALE_CENTER: |
@@ -572,7 +579,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
572 | * settings. | 579 | * settings. |
573 | */ | 580 | */ |
574 | 581 | ||
575 | /* No panel fitting yet, fixme */ | ||
576 | if (IS_IGDNG(dev)) | 582 | if (IS_IGDNG(dev)) |
577 | return; | 583 | return; |
578 | 584 | ||
@@ -585,15 +591,33 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
585 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); | 591 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); |
586 | } | 592 | } |
587 | 593 | ||
594 | /* Some lid devices report incorrect lid status, assume they're connected */ | ||
595 | static const struct dmi_system_id bad_lid_status[] = { | ||
596 | { | ||
597 | .ident = "Aspire One", | ||
598 | .matches = { | ||
599 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
600 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), | ||
601 | }, | ||
602 | }, | ||
603 | { } | ||
604 | }; | ||
605 | |||
588 | /** | 606 | /** |
589 | * Detect the LVDS connection. | 607 | * Detect the LVDS connection. |
590 | * | 608 | * |
591 | * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have | 609 | * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means |
592 | * been set up if the LVDS was actually connected anyway. | 610 | * connected and closed means disconnected. We also send hotplug events as |
611 | * needed, using lid status notification from the ACPI button driver. | ||
593 | */ | 612 | */ |
594 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) | 613 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) |
595 | { | 614 | { |
596 | return connector_status_connected; | 615 | enum drm_connector_status status = connector_status_connected; |
616 | |||
617 | if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) | ||
618 | status = connector_status_disconnected; | ||
619 | |||
620 | return status; | ||
597 | } | 621 | } |
598 | 622 | ||
599 | /** | 623 | /** |
@@ -632,6 +656,24 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
632 | return 0; | 656 | return 0; |
633 | } | 657 | } |
634 | 658 | ||
659 | static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | ||
660 | void *unused) | ||
661 | { | ||
662 | struct drm_i915_private *dev_priv = | ||
663 | container_of(nb, struct drm_i915_private, lid_notifier); | ||
664 | struct drm_device *dev = dev_priv->dev; | ||
665 | |||
666 | if (acpi_lid_open() && !dev_priv->suspended) { | ||
667 | mutex_lock(&dev->mode_config.mutex); | ||
668 | drm_helper_resume_force_mode(dev); | ||
669 | mutex_unlock(&dev->mode_config.mutex); | ||
670 | } | ||
671 | |||
672 | drm_sysfs_hotplug_event(dev_priv->dev); | ||
673 | |||
674 | return NOTIFY_OK; | ||
675 | } | ||
676 | |||
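The callback above runs whenever the ACPI lid notifier chain fires: if the lid is now open and the device is not suspended it forces a modeset to restore the configuration, and in every case it raises a hotplug event so userspace re-probes outputs. As a rough illustration of the register-callback-notify pattern only, here is a plain userspace sketch with invented names; it is not the kernel's notifier API:

#include <stdio.h>

/* Simplified stand-in for struct notifier_block: one callback pointer. */
struct lid_listener {
	int (*notify)(struct lid_listener *self, unsigned long lid_open);
};

static struct lid_listener *registered;   /* single-slot "chain" for the sketch */

static void lid_listener_register(struct lid_listener *l) { registered = l; }

static void lid_event(unsigned long lid_open)
{
	if (registered)
		registered->notify(registered, lid_open);
}

static int intel_like_notify(struct lid_listener *self, unsigned long lid_open)
{
	if (lid_open)
		printf("lid open: force a modeset, then send a hotplug event\n");
	else
		printf("lid closed: send a hotplug event only\n");
	return 0;
}

int main(void)
{
	struct lid_listener l = { .notify = intel_like_notify };
	lid_listener_register(&l);
	lid_event(1);   /* lid opened */
	lid_event(0);   /* lid closed */
	return 0;
}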
635 | /** | 677 | /** |
636 | * intel_lvds_destroy - unregister and free LVDS structures | 678 | * intel_lvds_destroy - unregister and free LVDS structures |
637 | * @connector: connector to free | 679 | * @connector: connector to free |
@@ -641,10 +683,14 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
641 | */ | 683 | */ |
642 | static void intel_lvds_destroy(struct drm_connector *connector) | 684 | static void intel_lvds_destroy(struct drm_connector *connector) |
643 | { | 685 | { |
686 | struct drm_device *dev = connector->dev; | ||
644 | struct intel_output *intel_output = to_intel_output(connector); | 687 | struct intel_output *intel_output = to_intel_output(connector); |
688 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
645 | 689 | ||
646 | if (intel_output->ddc_bus) | 690 | if (intel_output->ddc_bus) |
647 | intel_i2c_destroy(intel_output->ddc_bus); | 691 | intel_i2c_destroy(intel_output->ddc_bus); |
692 | if (dev_priv->lid_notifier.notifier_call) | ||
693 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | ||
648 | drm_sysfs_connector_remove(connector); | 694 | drm_sysfs_connector_remove(connector); |
649 | drm_connector_cleanup(connector); | 695 | drm_connector_cleanup(connector); |
650 | kfree(connector); | 696 | kfree(connector); |
@@ -1011,6 +1057,11 @@ out: | |||
1011 | pwm |= PWM_PCH_ENABLE; | 1057 | pwm |= PWM_PCH_ENABLE; |
1012 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); | 1058 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); |
1013 | } | 1059 | } |
1060 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; | ||
1061 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { | ||
1062 | DRM_DEBUG("lid notifier registration failed\n"); | ||
1063 | dev_priv->lid_notifier.notifier_call = NULL; | ||
1064 | } | ||
1014 | drm_sysfs_connector_add(connector); | 1065 | drm_sysfs_connector_add(connector); |
1015 | return; | 1066 | return; |
1016 | 1067 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 0bf28efcf2c1..083bec2e50f9 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -135,6 +135,30 @@ struct intel_sdvo_priv { | |||
135 | struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; | 135 | struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; |
136 | struct intel_sdvo_dtd save_output_dtd[16]; | 136 | struct intel_sdvo_dtd save_output_dtd[16]; |
137 | u32 save_SDVOX; | 137 | u32 save_SDVOX; |
138 | /* add the property for the SDVO-TV */ | ||
139 | struct drm_property *left_property; | ||
140 | struct drm_property *right_property; | ||
141 | struct drm_property *top_property; | ||
142 | struct drm_property *bottom_property; | ||
143 | struct drm_property *hpos_property; | ||
144 | struct drm_property *vpos_property; | ||
145 | |||
146 | /* add the property for the SDVO-TV/LVDS */ | ||
147 | struct drm_property *brightness_property; | ||
148 | struct drm_property *contrast_property; | ||
149 | struct drm_property *saturation_property; | ||
150 | struct drm_property *hue_property; | ||
151 | |||
152 | /* Add variable to record current setting for the above property */ | ||
153 | u32 left_margin, right_margin, top_margin, bottom_margin; | ||
154 | /* this is to get the range of margin.*/ | ||
155 | u32 max_hscan, max_vscan; | ||
156 | u32 max_hpos, cur_hpos; | ||
157 | u32 max_vpos, cur_vpos; | ||
158 | u32 cur_brightness, max_brightness; | ||
159 | u32 cur_contrast, max_contrast; | ||
160 | u32 cur_saturation, max_saturation; | ||
161 | u32 cur_hue, max_hue; | ||
138 | }; | 162 | }; |
139 | 163 | ||
140 | static bool | 164 | static bool |
@@ -281,6 +305,31 @@ static const struct _sdvo_cmd_name { | |||
281 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), | 305 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), |
282 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), | 306 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), |
283 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), | 307 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), |
308 | /* Add the op code for SDVO enhancements */ | ||
309 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H), | ||
310 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H), | ||
311 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H), | ||
312 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V), | ||
313 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V), | ||
314 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V), | ||
315 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), | ||
316 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), | ||
317 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), | ||
318 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), | ||
319 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), | ||
320 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), | ||
321 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), | ||
322 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), | ||
323 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), | ||
324 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), | ||
325 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), | ||
326 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), | ||
327 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), | ||
328 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), | ||
329 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), | ||
330 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), | ||
331 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), | ||
332 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), | ||
284 | /* HDMI op code */ | 333 | /* HDMI op code */ |
285 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), | 334 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), |
286 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), | 335 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), |
@@ -981,7 +1030,7 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) | |||
981 | 1030 | ||
982 | status = intel_sdvo_read_response(output, NULL, 0); | 1031 | status = intel_sdvo_read_response(output, NULL, 0); |
983 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1032 | if (status != SDVO_CMD_STATUS_SUCCESS) |
984 | DRM_DEBUG("%s: Failed to set TV format\n", | 1033 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", |
985 | SDVO_NAME(sdvo_priv)); | 1034 | SDVO_NAME(sdvo_priv)); |
986 | } | 1035 | } |
987 | 1036 | ||
@@ -1792,6 +1841,45 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) | |||
1792 | return 1; | 1841 | return 1; |
1793 | } | 1842 | } |
1794 | 1843 | ||
1844 | static | ||
1845 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | ||
1846 | { | ||
1847 | struct intel_output *intel_output = to_intel_output(connector); | ||
1848 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1849 | struct drm_device *dev = connector->dev; | ||
1850 | |||
1851 | if (sdvo_priv->is_tv) { | ||
1852 | if (sdvo_priv->left_property) | ||
1853 | drm_property_destroy(dev, sdvo_priv->left_property); | ||
1854 | if (sdvo_priv->right_property) | ||
1855 | drm_property_destroy(dev, sdvo_priv->right_property); | ||
1856 | if (sdvo_priv->top_property) | ||
1857 | drm_property_destroy(dev, sdvo_priv->top_property); | ||
1858 | if (sdvo_priv->bottom_property) | ||
1859 | drm_property_destroy(dev, sdvo_priv->bottom_property); | ||
1860 | if (sdvo_priv->hpos_property) | ||
1861 | drm_property_destroy(dev, sdvo_priv->hpos_property); | ||
1862 | if (sdvo_priv->vpos_property) | ||
1863 | drm_property_destroy(dev, sdvo_priv->vpos_property); | ||
1864 | } | ||
1865 | if (sdvo_priv->is_tv) { | ||
1866 | if (sdvo_priv->saturation_property) | ||
1867 | drm_property_destroy(dev, | ||
1868 | sdvo_priv->saturation_property); | ||
1869 | if (sdvo_priv->contrast_property) | ||
1870 | drm_property_destroy(dev, | ||
1871 | sdvo_priv->contrast_property); | ||
1872 | if (sdvo_priv->hue_property) | ||
1873 | drm_property_destroy(dev, sdvo_priv->hue_property); | ||
1874 | } | ||
1875 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | ||
1876 | if (sdvo_priv->brightness_property) | ||
1877 | drm_property_destroy(dev, | ||
1878 | sdvo_priv->brightness_property); | ||
1879 | } | ||
1880 | return; | ||
1881 | } | ||
1882 | |||
1795 | static void intel_sdvo_destroy(struct drm_connector *connector) | 1883 | static void intel_sdvo_destroy(struct drm_connector *connector) |
1796 | { | 1884 | { |
1797 | struct intel_output *intel_output = to_intel_output(connector); | 1885 | struct intel_output *intel_output = to_intel_output(connector); |
@@ -1812,6 +1900,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector) | |||
1812 | drm_property_destroy(connector->dev, | 1900 | drm_property_destroy(connector->dev, |
1813 | sdvo_priv->tv_format_property); | 1901 | sdvo_priv->tv_format_property); |
1814 | 1902 | ||
1903 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) | ||
1904 | intel_sdvo_destroy_enhance_property(connector); | ||
1905 | |||
1815 | drm_sysfs_connector_remove(connector); | 1906 | drm_sysfs_connector_remove(connector); |
1816 | drm_connector_cleanup(connector); | 1907 | drm_connector_cleanup(connector); |
1817 | 1908 | ||
@@ -1829,6 +1920,8 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1829 | struct drm_crtc *crtc = encoder->crtc; | 1920 | struct drm_crtc *crtc = encoder->crtc; |
1830 | int ret = 0; | 1921 | int ret = 0; |
1831 | bool changed = false; | 1922 | bool changed = false; |
1923 | uint8_t cmd, status; | ||
1924 | uint16_t temp_value; | ||
1832 | 1925 | ||
1833 | ret = drm_connector_property_set_value(connector, property, val); | 1926 | ret = drm_connector_property_set_value(connector, property, val); |
1834 | if (ret < 0) | 1927 | if (ret < 0) |
@@ -1845,11 +1938,102 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1845 | 1938 | ||
1846 | sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; | 1939 | sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; |
1847 | changed = true; | 1940 | changed = true; |
1848 | } else { | ||
1849 | ret = -EINVAL; | ||
1850 | goto out; | ||
1851 | } | 1941 | } |
1852 | 1942 | ||
1943 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | ||
1944 | cmd = 0; | ||
1945 | temp_value = val; | ||
1946 | if (sdvo_priv->left_property == property) { | ||
1947 | drm_connector_property_set_value(connector, | ||
1948 | sdvo_priv->right_property, val); | ||
1949 | if (sdvo_priv->left_margin == temp_value) | ||
1950 | goto out; | ||
1951 | |||
1952 | sdvo_priv->left_margin = temp_value; | ||
1953 | sdvo_priv->right_margin = temp_value; | ||
1954 | temp_value = sdvo_priv->max_hscan - | ||
1955 | sdvo_priv->left_margin; | ||
1956 | cmd = SDVO_CMD_SET_OVERSCAN_H; | ||
1957 | } else if (sdvo_priv->right_property == property) { | ||
1958 | drm_connector_property_set_value(connector, | ||
1959 | sdvo_priv->left_property, val); | ||
1960 | if (sdvo_priv->right_margin == temp_value) | ||
1961 | goto out; | ||
1962 | |||
1963 | sdvo_priv->left_margin = temp_value; | ||
1964 | sdvo_priv->right_margin = temp_value; | ||
1965 | temp_value = sdvo_priv->max_hscan - | ||
1966 | sdvo_priv->left_margin; | ||
1967 | cmd = SDVO_CMD_SET_OVERSCAN_H; | ||
1968 | } else if (sdvo_priv->top_property == property) { | ||
1969 | drm_connector_property_set_value(connector, | ||
1970 | sdvo_priv->bottom_property, val); | ||
1971 | if (sdvo_priv->top_margin == temp_value) | ||
1972 | goto out; | ||
1973 | |||
1974 | sdvo_priv->top_margin = temp_value; | ||
1975 | sdvo_priv->bottom_margin = temp_value; | ||
1976 | temp_value = sdvo_priv->max_vscan - | ||
1977 | sdvo_priv->top_margin; | ||
1978 | cmd = SDVO_CMD_SET_OVERSCAN_V; | ||
1979 | } else if (sdvo_priv->bottom_property == property) { | ||
1980 | drm_connector_property_set_value(connector, | ||
1981 | sdvo_priv->top_property, val); | ||
1982 | if (sdvo_priv->bottom_margin == temp_value) | ||
1983 | goto out; | ||
1984 | sdvo_priv->top_margin = temp_value; | ||
1985 | sdvo_priv->bottom_margin = temp_value; | ||
1986 | temp_value = sdvo_priv->max_vscan - | ||
1987 | sdvo_priv->top_margin; | ||
1988 | cmd = SDVO_CMD_SET_OVERSCAN_V; | ||
1989 | } else if (sdvo_priv->hpos_property == property) { | ||
1990 | if (sdvo_priv->cur_hpos == temp_value) | ||
1991 | goto out; | ||
1992 | |||
1993 | cmd = SDVO_CMD_SET_POSITION_H; | ||
1994 | sdvo_priv->cur_hpos = temp_value; | ||
1995 | } else if (sdvo_priv->vpos_property == property) { | ||
1996 | if (sdvo_priv->cur_vpos == temp_value) | ||
1997 | goto out; | ||
1998 | |||
1999 | cmd = SDVO_CMD_SET_POSITION_V; | ||
2000 | sdvo_priv->cur_vpos = temp_value; | ||
2001 | } else if (sdvo_priv->saturation_property == property) { | ||
2002 | if (sdvo_priv->cur_saturation == temp_value) | ||
2003 | goto out; | ||
2004 | |||
2005 | cmd = SDVO_CMD_SET_SATURATION; | ||
2006 | sdvo_priv->cur_saturation = temp_value; | ||
2007 | } else if (sdvo_priv->contrast_property == property) { | ||
2008 | if (sdvo_priv->cur_contrast == temp_value) | ||
2009 | goto out; | ||
2010 | |||
2011 | cmd = SDVO_CMD_SET_CONTRAST; | ||
2012 | sdvo_priv->cur_contrast = temp_value; | ||
2013 | } else if (sdvo_priv->hue_property == property) { | ||
2014 | if (sdvo_priv->cur_hue == temp_value) | ||
2015 | goto out; | ||
2016 | |||
2017 | cmd = SDVO_CMD_SET_HUE; | ||
2018 | sdvo_priv->cur_hue = temp_value; | ||
2019 | } else if (sdvo_priv->brightness_property == property) { | ||
2020 | if (sdvo_priv->cur_brightness == temp_value) | ||
2021 | goto out; | ||
2022 | |||
2023 | cmd = SDVO_CMD_SET_BRIGHTNESS; | ||
2024 | sdvo_priv->cur_brightness = temp_value; | ||
2025 | } | ||
2026 | if (cmd) { | ||
2027 | intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); | ||
2028 | status = intel_sdvo_read_response(intel_output, | ||
2029 | NULL, 0); | ||
2030 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2031 | DRM_DEBUG_KMS("Incorrect SDVO command\n"); | ||
2032 | return -EINVAL; | ||
2033 | } | ||
2034 | changed = true; | ||
2035 | } | ||
2036 | } | ||
1853 | if (changed && crtc) | 2037 | if (changed && crtc) |
1854 | drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, | 2038 | drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, |
1855 | crtc->y, crtc->fb); | 2039 | crtc->y, crtc->fb); |
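The margin handling above is symmetric: left and right (and top and bottom) always track each other, and what is actually sent to the encoder is an overscan value computed as the reported maximum minus the requested margin. A worked example of that conversion; the numbers and the helper name are made up for illustration:

#include <stdio.h>

/* Convert a user-visible margin back into the overscan value the encoder wants. */
static unsigned int margin_to_overscan(unsigned int max_scan, unsigned int margin)
{
	return max_scan - margin;
}

int main(void)
{
	unsigned int max_hscan = 100;          /* reported by GET_MAX_OVERSCAN_H */
	unsigned int current_overscan = 80;    /* reported by GET_OVERSCAN_H */
	unsigned int left_margin = max_hscan - current_overscan;   /* = 20, exposed as the property */

	/* User raises the left margin to 30; the right margin follows automatically. */
	left_margin = 30;
	printf("overscan value sent with SET_OVERSCAN_H: %u\n",
	       margin_to_overscan(max_hscan, left_margin));        /* = 70 */
	return 0;
}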
@@ -2090,6 +2274,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2090 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | 2274 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; |
2091 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2275 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2092 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2276 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2277 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
2278 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
2093 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2279 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
2094 | 2280 | ||
2095 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2281 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
@@ -2176,6 +2362,310 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) | |||
2176 | 2362 | ||
2177 | } | 2363 | } |
2178 | 2364 | ||
2365 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | ||
2366 | { | ||
2367 | struct intel_output *intel_output = to_intel_output(connector); | ||
2368 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
2369 | struct intel_sdvo_enhancements_reply sdvo_data; | ||
2370 | struct drm_device *dev = connector->dev; | ||
2371 | uint8_t status; | ||
2372 | uint16_t response, data_value[2]; | ||
2373 | |||
2374 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | ||
2375 | NULL, 0); | ||
2376 | status = intel_sdvo_read_response(intel_output, &sdvo_data, | ||
2377 | sizeof(sdvo_data)); | ||
2378 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2379 | DRM_DEBUG_KMS(" incorrect response is returned\n"); | ||
2380 | return; | ||
2381 | } | ||
2382 | response = *((uint16_t *)&sdvo_data); | ||
2383 | if (!response) { | ||
2384 | DRM_DEBUG_KMS("No enhancement is supported\n"); | ||
2385 | return; | ||
2386 | } | ||
2387 | if (sdvo_priv->is_tv) { | ||
2388 | /* when horizontal overscan is supported, Add the left/right | ||
2389 | * property | ||
2390 | */ | ||
2391 | if (sdvo_data.overscan_h) { | ||
2392 | intel_sdvo_write_cmd(intel_output, | ||
2393 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); | ||
2394 | status = intel_sdvo_read_response(intel_output, | ||
2395 | &data_value, 4); | ||
2396 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2397 | DRM_DEBUG_KMS("Incorrect SDVO max " | ||
2398 | "h_overscan\n"); | ||
2399 | return; | ||
2400 | } | ||
2401 | intel_sdvo_write_cmd(intel_output, | ||
2402 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); | ||
2403 | status = intel_sdvo_read_response(intel_output, | ||
2404 | &response, 2); | ||
2405 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2406 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); | ||
2407 | return; | ||
2408 | } | ||
2409 | sdvo_priv->max_hscan = data_value[0]; | ||
2410 | sdvo_priv->left_margin = data_value[0] - response; | ||
2411 | sdvo_priv->right_margin = sdvo_priv->left_margin; | ||
2412 | sdvo_priv->left_property = | ||
2413 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2414 | "left_margin", 2); | ||
2415 | sdvo_priv->left_property->values[0] = 0; | ||
2416 | sdvo_priv->left_property->values[1] = data_value[0]; | ||
2417 | drm_connector_attach_property(connector, | ||
2418 | sdvo_priv->left_property, | ||
2419 | sdvo_priv->left_margin); | ||
2420 | sdvo_priv->right_property = | ||
2421 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2422 | "right_margin", 2); | ||
2423 | sdvo_priv->right_property->values[0] = 0; | ||
2424 | sdvo_priv->right_property->values[1] = data_value[0]; | ||
2425 | drm_connector_attach_property(connector, | ||
2426 | sdvo_priv->right_property, | ||
2427 | sdvo_priv->right_margin); | ||
2428 | DRM_DEBUG_KMS("h_overscan: max %d, " | ||
2429 | "default %d, current %d\n", | ||
2430 | data_value[0], data_value[1], response); | ||
2431 | } | ||
2432 | if (sdvo_data.overscan_v) { | ||
2433 | intel_sdvo_write_cmd(intel_output, | ||
2434 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); | ||
2435 | status = intel_sdvo_read_response(intel_output, | ||
2436 | &data_value, 4); | ||
2437 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2438 | DRM_DEBUG_KMS("Incorrect SDVO max " | ||
2439 | "v_overscan\n"); | ||
2440 | return; | ||
2441 | } | ||
2442 | intel_sdvo_write_cmd(intel_output, | ||
2443 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); | ||
2444 | status = intel_sdvo_read_response(intel_output, | ||
2445 | &response, 2); | ||
2446 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2447 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); | ||
2448 | return; | ||
2449 | } | ||
2450 | sdvo_priv->max_vscan = data_value[0]; | ||
2451 | sdvo_priv->top_margin = data_value[0] - response; | ||
2452 | sdvo_priv->bottom_margin = sdvo_priv->top_margin; | ||
2453 | sdvo_priv->top_property = | ||
2454 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2455 | "top_margin", 2); | ||
2456 | sdvo_priv->top_property->values[0] = 0; | ||
2457 | sdvo_priv->top_property->values[1] = data_value[0]; | ||
2458 | drm_connector_attach_property(connector, | ||
2459 | sdvo_priv->top_property, | ||
2460 | sdvo_priv->top_margin); | ||
2461 | sdvo_priv->bottom_property = | ||
2462 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2463 | "bottom_margin", 2); | ||
2464 | sdvo_priv->bottom_property->values[0] = 0; | ||
2465 | sdvo_priv->bottom_property->values[1] = data_value[0]; | ||
2466 | drm_connector_attach_property(connector, | ||
2467 | sdvo_priv->bottom_property, | ||
2468 | sdvo_priv->bottom_margin); | ||
2469 | DRM_DEBUG_KMS("v_overscan: max %d, " | ||
2470 | "default %d, current %d\n", | ||
2471 | data_value[0], data_value[1], response); | ||
2472 | } | ||
2473 | if (sdvo_data.position_h) { | ||
2474 | intel_sdvo_write_cmd(intel_output, | ||
2475 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); | ||
2476 | status = intel_sdvo_read_response(intel_output, | ||
2477 | &data_value, 4); | ||
2478 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2479 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); | ||
2480 | return; | ||
2481 | } | ||
2482 | intel_sdvo_write_cmd(intel_output, | ||
2483 | SDVO_CMD_GET_POSITION_H, NULL, 0); | ||
2484 | status = intel_sdvo_read_response(intel_output, | ||
2485 | &response, 2); | ||
2486 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2487 | DRM_DEBUG_KMS("Incorrect SDVO get h_position\n"); | ||
2488 | return; | ||
2489 | } | ||
2490 | sdvo_priv->max_hpos = data_value[0]; | ||
2491 | sdvo_priv->cur_hpos = response; | ||
2492 | sdvo_priv->hpos_property = | ||
2493 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2494 | "hpos", 2); | ||
2495 | sdvo_priv->hpos_property->values[0] = 0; | ||
2496 | sdvo_priv->hpos_property->values[1] = data_value[0]; | ||
2497 | drm_connector_attach_property(connector, | ||
2498 | sdvo_priv->hpos_property, | ||
2499 | sdvo_priv->cur_hpos); | ||
2500 | DRM_DEBUG_KMS("h_position: max %d, " | ||
2501 | "default %d, current %d\n", | ||
2502 | data_value[0], data_value[1], response); | ||
2503 | } | ||
2504 | if (sdvo_data.position_v) { | ||
2505 | intel_sdvo_write_cmd(intel_output, | ||
2506 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); | ||
2507 | status = intel_sdvo_read_response(intel_output, | ||
2508 | &data_value, 4); | ||
2509 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2510 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); | ||
2511 | return; | ||
2512 | } | ||
2513 | intel_sdvo_write_cmd(intel_output, | ||
2514 | SDVO_CMD_GET_POSITION_V, NULL, 0); | ||
2515 | status = intel_sdvo_read_response(intel_output, | ||
2516 | &response, 2); | ||
2517 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2518 | DRM_DEBUG_KMS("Incorrect SDVO get v_position\n"); | ||
2519 | return; | ||
2520 | } | ||
2521 | sdvo_priv->max_vpos = data_value[0]; | ||
2522 | sdvo_priv->cur_vpos = response; | ||
2523 | sdvo_priv->vpos_property = | ||
2524 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2525 | "vpos", 2); | ||
2526 | sdvo_priv->vpos_property->values[0] = 0; | ||
2527 | sdvo_priv->vpos_property->values[1] = data_value[0]; | ||
2528 | drm_connector_attach_property(connector, | ||
2529 | sdvo_priv->vpos_property, | ||
2530 | sdvo_priv->cur_vpos); | ||
2531 | DRM_DEBUG_KMS("v_position: max %d, " | ||
2532 | "default %d, current %d\n", | ||
2533 | data_value[0], data_value[1], response); | ||
2534 | } | ||
2535 | } | ||
2536 | if (sdvo_priv->is_tv) { | ||
2537 | if (sdvo_data.saturation) { | ||
2538 | intel_sdvo_write_cmd(intel_output, | ||
2539 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); | ||
2540 | status = intel_sdvo_read_response(intel_output, | ||
2541 | &data_value, 4); | ||
2542 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2543 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); | ||
2544 | return; | ||
2545 | } | ||
2546 | intel_sdvo_write_cmd(intel_output, | ||
2547 | SDVO_CMD_GET_SATURATION, NULL, 0); | ||
2548 | status = intel_sdvo_read_response(intel_output, | ||
2549 | &response, 2); | ||
2550 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2551 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); | ||
2552 | return; | ||
2553 | } | ||
2554 | sdvo_priv->max_saturation = data_value[0]; | ||
2555 | sdvo_priv->cur_saturation = response; | ||
2556 | sdvo_priv->saturation_property = | ||
2557 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2558 | "saturation", 2); | ||
2559 | sdvo_priv->saturation_property->values[0] = 0; | ||
2560 | sdvo_priv->saturation_property->values[1] = | ||
2561 | data_value[0]; | ||
2562 | drm_connector_attach_property(connector, | ||
2563 | sdvo_priv->saturation_property, | ||
2564 | sdvo_priv->cur_saturation); | ||
2565 | DRM_DEBUG_KMS("saturation: max %d, " | ||
2566 | "default %d, current %d\n", | ||
2567 | data_value[0], data_value[1], response); | ||
2568 | } | ||
2569 | if (sdvo_data.contrast) { | ||
2570 | intel_sdvo_write_cmd(intel_output, | ||
2571 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); | ||
2572 | status = intel_sdvo_read_response(intel_output, | ||
2573 | &data_value, 4); | ||
2574 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2575 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); | ||
2576 | return; | ||
2577 | } | ||
2578 | intel_sdvo_write_cmd(intel_output, | ||
2579 | SDVO_CMD_GET_CONTRAST, NULL, 0); | ||
2580 | status = intel_sdvo_read_response(intel_output, | ||
2581 | &response, 2); | ||
2582 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2583 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); | ||
2584 | return; | ||
2585 | } | ||
2586 | sdvo_priv->max_contrast = data_value[0]; | ||
2587 | sdvo_priv->cur_contrast = response; | ||
2588 | sdvo_priv->contrast_property = | ||
2589 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2590 | "contrast", 2); | ||
2591 | sdvo_priv->contrast_property->values[0] = 0; | ||
2592 | sdvo_priv->contrast_property->values[1] = data_value[0]; | ||
2593 | drm_connector_attach_property(connector, | ||
2594 | sdvo_priv->contrast_property, | ||
2595 | sdvo_priv->cur_contrast); | ||
2596 | DRM_DEBUG_KMS("contrast: max %d, " | ||
2597 | "default %d, current %d\n", | ||
2598 | data_value[0], data_value[1], response); | ||
2599 | } | ||
2600 | if (sdvo_data.hue) { | ||
2601 | intel_sdvo_write_cmd(intel_output, | ||
2602 | SDVO_CMD_GET_MAX_HUE, NULL, 0); | ||
2603 | status = intel_sdvo_read_response(intel_output, | ||
2604 | &data_value, 4); | ||
2605 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2606 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); | ||
2607 | return; | ||
2608 | } | ||
2609 | intel_sdvo_write_cmd(intel_output, | ||
2610 | SDVO_CMD_GET_HUE, NULL, 0); | ||
2611 | status = intel_sdvo_read_response(intel_output, | ||
2612 | &response, 2); | ||
2613 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2614 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); | ||
2615 | return; | ||
2616 | } | ||
2617 | sdvo_priv->max_hue = data_value[0]; | ||
2618 | sdvo_priv->cur_hue = response; | ||
2619 | sdvo_priv->hue_property = | ||
2620 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2621 | "hue", 2); | ||
2622 | sdvo_priv->hue_property->values[0] = 0; | ||
2623 | sdvo_priv->hue_property->values[1] = | ||
2624 | data_value[0]; | ||
2625 | drm_connector_attach_property(connector, | ||
2626 | sdvo_priv->hue_property, | ||
2627 | sdvo_priv->cur_hue); | ||
2628 | DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n", | ||
2629 | data_value[0], data_value[1], response); | ||
2630 | } | ||
2631 | } | ||
2632 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | ||
2633 | if (sdvo_data.brightness) { | ||
2634 | intel_sdvo_write_cmd(intel_output, | ||
2635 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); | ||
2636 | status = intel_sdvo_read_response(intel_output, | ||
2637 | &data_value, 4); | ||
2638 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2639 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); | ||
2640 | return; | ||
2641 | } | ||
2642 | intel_sdvo_write_cmd(intel_output, | ||
2643 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); | ||
2644 | status = intel_sdvo_read_response(intel_output, | ||
2645 | &response, 2); | ||
2646 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2647 | DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); | ||
2648 | return; | ||
2649 | } | ||
2650 | sdvo_priv->max_brightness = data_value[0]; | ||
2651 | sdvo_priv->cur_brightness = response; | ||
2652 | sdvo_priv->brightness_property = | ||
2653 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2654 | "brightness", 2); | ||
2655 | sdvo_priv->brightness_property->values[0] = 0; | ||
2656 | sdvo_priv->brightness_property->values[1] = | ||
2657 | data_value[0]; | ||
2658 | drm_connector_attach_property(connector, | ||
2659 | sdvo_priv->brightness_property, | ||
2660 | sdvo_priv->cur_brightness); | ||
2661 | DRM_DEBUG_KMS("brightness: max %d, " | ||
2662 | "default %d, current %d\n", | ||
2663 | data_value[0], data_value[1], response); | ||
2664 | } | ||
2665 | } | ||
2666 | return; | ||
2667 | } | ||
2668 | |||
2179 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2669 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
2180 | { | 2670 | { |
2181 | struct drm_connector *connector; | 2671 | struct drm_connector *connector; |
@@ -2264,6 +2754,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2264 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 2754 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); |
2265 | if (sdvo_priv->is_tv) | 2755 | if (sdvo_priv->is_tv) |
2266 | intel_sdvo_tv_create_property(connector); | 2756 | intel_sdvo_tv_create_property(connector); |
2757 | |||
2758 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) | ||
2759 | intel_sdvo_create_enhance_property(connector); | ||
2760 | |||
2267 | drm_sysfs_connector_add(connector); | 2761 | drm_sysfs_connector_add(connector); |
2268 | 2762 | ||
2269 | intel_sdvo_select_ddc_bus(sdvo_priv); | 2763 | intel_sdvo_select_ddc_bus(sdvo_priv); |
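
Every control added by intel_sdvo_create_enhance_property() above repeats the same three steps: create a DRM_MODE_PROP_RANGE property with two values, fill in 0 and the device-reported maximum, and attach it to the connector with the current value. A condensed sketch of that pattern follows; the helper name and the NULL check are illustrative only and are not part of the committed code.

    /* Hypothetical helper illustrating the create-and-attach pattern used
     * for the margin, position, saturation, contrast, hue and brightness
     * properties above. */
    static struct drm_property *
    sdvo_attach_range_property(struct drm_device *dev,
                               struct drm_connector *connector,
                               const char *name, uint64_t max, uint64_t cur)
    {
            struct drm_property *prop;

            prop = drm_property_create(dev, DRM_MODE_PROP_RANGE, name, 2);
            if (!prop)
                    return NULL;        /* the committed code does not check this */

            prop->values[0] = 0;        /* minimum */
            prop->values[1] = max;      /* device-reported maximum */
            drm_connector_attach_property(connector, prop, cur);
            return prop;
    }

With such a helper, the hue block above would reduce to something like sdvo_priv->hue_property = sdvo_attach_range_property(dev, connector, "hue", data_value[0], response);
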
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 6bedd2fcfc15..737335ff2b21 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -477,8 +477,8 @@ config I2C_PNX | |||
477 | will be called i2c-pnx. | 477 | will be called i2c-pnx. |
478 | 478 | ||
479 | config I2C_PXA | 479 | config I2C_PXA |
480 | tristate "Intel PXA2XX I2C adapter (EXPERIMENTAL)" | 480 | tristate "Intel PXA2XX I2C adapter" |
481 | depends on EXPERIMENTAL && ARCH_PXA | 481 | depends on ARCH_PXA || ARCH_MMP |
482 | help | 482 | help |
483 | If you have devices in the PXA I2C bus, say yes to this option. | 483 | If you have devices in the PXA I2C bus, say yes to this option. |
484 | This driver can also be built as a module. If so, the module | 484 | This driver can also be built as a module. If so, the module |
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c index 949c97ff57e3..1f20a042a4f5 100644 --- a/drivers/idle/i7300_idle.c +++ b/drivers/idle/i7300_idle.c | |||
@@ -29,8 +29,8 @@ | |||
29 | 29 | ||
30 | #include <asm/idle.h> | 30 | #include <asm/idle.h> |
31 | 31 | ||
32 | #include "../dma/ioatdma_hw.h" | 32 | #include "../dma/ioat/hw.h" |
33 | #include "../dma/ioatdma_registers.h" | 33 | #include "../dma/ioat/registers.h" |
34 | 34 | ||
35 | #define I7300_IDLE_DRIVER_VERSION "1.55" | 35 | #define I7300_IDLE_DRIVER_VERSION "1.55" |
36 | #define I7300_PRINT "i7300_idle:" | 36 | #define I7300_PRINT "i7300_idle:" |
@@ -126,9 +126,9 @@ static void i7300_idle_ioat_stop(void) | |||
126 | udelay(10); | 126 | udelay(10); |
127 | 127 | ||
128 | sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & | 128 | sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & |
129 | IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 129 | IOAT_CHANSTS_STATUS; |
130 | 130 | ||
131 | if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) | 131 | if (sts != IOAT_CHANSTS_ACTIVE) |
132 | break; | 132 | break; |
133 | 133 | ||
134 | } | 134 | } |
@@ -160,9 +160,9 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl, | |||
160 | udelay(1000); | 160 | udelay(1000); |
161 | 161 | ||
162 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & | 162 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & |
163 | IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 163 | IOAT_CHANSTS_STATUS; |
164 | 164 | ||
165 | if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) { | 165 | if (chan_sts != IOAT_CHANSTS_DONE) { |
166 | /* Not complete, reset the channel */ | 166 | /* Not complete, reset the channel */ |
167 | writeb(IOAT_CHANCMD_RESET, | 167 | writeb(IOAT_CHANCMD_RESET, |
168 | ioat_chanbase + IOAT1_CHANCMD_OFFSET); | 168 | ioat_chanbase + IOAT1_CHANCMD_OFFSET); |
@@ -288,9 +288,9 @@ static void __exit i7300_idle_ioat_exit(void) | |||
288 | ioat_chanbase + IOAT1_CHANCMD_OFFSET); | 288 | ioat_chanbase + IOAT1_CHANCMD_OFFSET); |
289 | 289 | ||
290 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & | 290 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & |
291 | IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 291 | IOAT_CHANSTS_STATUS; |
292 | 292 | ||
293 | if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) { | 293 | if (chan_sts != IOAT_CHANSTS_ACTIVE) { |
294 | writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET); | 294 | writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET); |
295 | break; | 295 | break; |
296 | } | 296 | } |
@@ -298,14 +298,14 @@ static void __exit i7300_idle_ioat_exit(void) | |||
298 | } | 298 | } |
299 | 299 | ||
300 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & | 300 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & |
301 | IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 301 | IOAT_CHANSTS_STATUS; |
302 | 302 | ||
303 | /* | 303 | /* |
304 | * We tried to reset multiple times. If IO A/T channel is still active | 304 | * We tried to reset multiple times. If IO A/T channel is still active |
305 | * flag an error and return without cleanup. Memory leak is better | 305 | * flag an error and return without cleanup. Memory leak is better |
306 | * than random corruption in that extreme error situation. | 306 | * than random corruption in that extreme error situation. |
307 | */ | 307 | */ |
308 | if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) { | 308 | if (chan_sts == IOAT_CHANSTS_ACTIVE) { |
309 | printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels." | 309 | printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels." |
310 | " Not freeing resources\n"); | 310 | " Not freeing resources\n"); |
311 | return; | 311 | return; |
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index 57a3c6f947b2..4e0f2829e0e5 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c | |||
@@ -37,7 +37,8 @@ | |||
37 | enum rmpp_state { | 37 | enum rmpp_state { |
38 | RMPP_STATE_ACTIVE, | 38 | RMPP_STATE_ACTIVE, |
39 | RMPP_STATE_TIMEOUT, | 39 | RMPP_STATE_TIMEOUT, |
40 | RMPP_STATE_COMPLETE | 40 | RMPP_STATE_COMPLETE, |
41 | RMPP_STATE_CANCELING | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | struct mad_rmpp_recv { | 44 | struct mad_rmpp_recv { |
@@ -87,18 +88,22 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent) | |||
87 | 88 | ||
88 | spin_lock_irqsave(&agent->lock, flags); | 89 | spin_lock_irqsave(&agent->lock, flags); |
89 | list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { | 90 | list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { |
91 | if (rmpp_recv->state != RMPP_STATE_COMPLETE) | ||
92 | ib_free_recv_mad(rmpp_recv->rmpp_wc); | ||
93 | rmpp_recv->state = RMPP_STATE_CANCELING; | ||
94 | } | ||
95 | spin_unlock_irqrestore(&agent->lock, flags); | ||
96 | |||
97 | list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { | ||
90 | cancel_delayed_work(&rmpp_recv->timeout_work); | 98 | cancel_delayed_work(&rmpp_recv->timeout_work); |
91 | cancel_delayed_work(&rmpp_recv->cleanup_work); | 99 | cancel_delayed_work(&rmpp_recv->cleanup_work); |
92 | } | 100 | } |
93 | spin_unlock_irqrestore(&agent->lock, flags); | ||
94 | 101 | ||
95 | flush_workqueue(agent->qp_info->port_priv->wq); | 102 | flush_workqueue(agent->qp_info->port_priv->wq); |
96 | 103 | ||
97 | list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv, | 104 | list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv, |
98 | &agent->rmpp_list, list) { | 105 | &agent->rmpp_list, list) { |
99 | list_del(&rmpp_recv->list); | 106 | list_del(&rmpp_recv->list); |
100 | if (rmpp_recv->state != RMPP_STATE_COMPLETE) | ||
101 | ib_free_recv_mad(rmpp_recv->rmpp_wc); | ||
102 | destroy_rmpp_recv(rmpp_recv); | 107 | destroy_rmpp_recv(rmpp_recv); |
103 | } | 108 | } |
104 | } | 109 | } |
@@ -260,6 +265,10 @@ static void recv_cleanup_handler(struct work_struct *work) | |||
260 | unsigned long flags; | 265 | unsigned long flags; |
261 | 266 | ||
262 | spin_lock_irqsave(&rmpp_recv->agent->lock, flags); | 267 | spin_lock_irqsave(&rmpp_recv->agent->lock, flags); |
268 | if (rmpp_recv->state == RMPP_STATE_CANCELING) { | ||
269 | spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); | ||
270 | return; | ||
271 | } | ||
263 | list_del(&rmpp_recv->list); | 272 | list_del(&rmpp_recv->list); |
264 | spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); | 273 | spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); |
265 | destroy_rmpp_recv(rmpp_recv); | 274 | destroy_rmpp_recv(rmpp_recv); |
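
Because the side-by-side view interleaves the moved lines, here is a condensed reconstruction of ib_cancel_rmpp_recvs() as this hunk leaves it. It is a sketch assembled from the added and removed lines above, not the verbatim file: receives are freed and marked RMPP_STATE_CANCELING under the agent lock first, so recv_cleanup_handler() backs off, and only then is the delayed work cancelled and flushed before the list is torn down.

    void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
    {
            struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
            unsigned long flags;

            /* Pass 1: under the lock, free undelivered MADs and mark every
             * receive as CANCELING so recv_cleanup_handler() returns early. */
            spin_lock_irqsave(&agent->lock, flags);
            list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                    if (rmpp_recv->state != RMPP_STATE_COMPLETE)
                            ib_free_recv_mad(rmpp_recv->rmpp_wc);
                    rmpp_recv->state = RMPP_STATE_CANCELING;
            }
            spin_unlock_irqrestore(&agent->lock, flags);

            /* Pass 2: cancel the timers without the lock, then wait for any
             * handler that is already running. */
            list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                    cancel_delayed_work(&rmpp_recv->timeout_work);
                    cancel_delayed_work(&rmpp_recv->cleanup_work);
            }
            flush_workqueue(agent->qp_info->port_priv->wq);

            /* Pass 3: with everything quiesced, tear the list down. */
            list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
                                     &agent->rmpp_list, list) {
                    list_del(&rmpp_recv->list);
                    destroy_rmpp_recv(rmpp_recv);
            }
    }
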
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c index 056b2a4c6970..0aa0110e4b6c 100644 --- a/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/drivers/infiniband/hw/mthca/mthca_catas.c | |||
@@ -68,11 +68,16 @@ static void catas_reset(struct work_struct *work) | |||
68 | spin_unlock_irq(&catas_lock); | 68 | spin_unlock_irq(&catas_lock); |
69 | 69 | ||
70 | list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) { | 70 | list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) { |
71 | struct pci_dev *pdev = dev->pdev; | ||
71 | ret = __mthca_restart_one(dev->pdev); | 72 | ret = __mthca_restart_one(dev->pdev); |
73 | /* 'dev' now is not valid */ | ||
72 | if (ret) | 74 | if (ret) |
73 | mthca_err(dev, "Reset failed (%d)\n", ret); | 75 | printk(KERN_ERR "mthca %s: Reset failed (%d)\n", |
74 | else | 76 | pci_name(pdev), ret); |
75 | mthca_dbg(dev, "Reset succeeded\n"); | 77 | else { |
78 | struct mthca_dev *d = pci_get_drvdata(pdev); | ||
79 | mthca_dbg(d, "Reset succeeded\n"); | ||
80 | } | ||
76 | } | 81 | } |
77 | 82 | ||
78 | mutex_unlock(&mthca_device_mutex); | 83 | mutex_unlock(&mthca_device_mutex); |
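
The reason for caching pdev is that __mthca_restart_one() frees and re-creates the mthca_dev, so 'dev' must not be dereferenced after the call. Reconstructed from the lines above, the resulting loop body looks roughly like this (a sketch, not the verbatim file):

    list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) {
            struct pci_dev *pdev = dev->pdev;   /* save before 'dev' goes away */

            ret = __mthca_restart_one(dev->pdev);
            /* 'dev' is no longer valid here */
            if (ret)
                    printk(KERN_ERR "mthca %s: Reset failed (%d)\n",
                           pci_name(pdev), ret);
            else {
                    struct mthca_dev *d = pci_get_drvdata(pdev);

                    mthca_dbg(d, "Reset succeeded\n");
            }
    }
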
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 538e409d4515..e593af3354b8 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -1566,7 +1566,6 @@ static const struct net_device_ops nes_netdev_ops = { | |||
1566 | .ndo_set_mac_address = nes_netdev_set_mac_address, | 1566 | .ndo_set_mac_address = nes_netdev_set_mac_address, |
1567 | .ndo_set_multicast_list = nes_netdev_set_multicast_list, | 1567 | .ndo_set_multicast_list = nes_netdev_set_multicast_list, |
1568 | .ndo_change_mtu = nes_netdev_change_mtu, | 1568 | .ndo_change_mtu = nes_netdev_change_mtu, |
1569 | .ndo_set_mac_address = eth_mac_addr, | ||
1570 | .ndo_validate_addr = eth_validate_addr, | 1569 | .ndo_validate_addr = eth_validate_addr, |
1571 | .ndo_vlan_rx_register = nes_netdev_vlan_rx_register, | 1570 | .ndo_vlan_rx_register = nes_netdev_vlan_rx_register, |
1572 | }; | 1571 | }; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 25874fc680c9..8763c1ea5eb4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -362,12 +362,19 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work) | |||
362 | { | 362 | { |
363 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | 363 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, |
364 | carrier_on_task); | 364 | carrier_on_task); |
365 | struct ib_port_attr attr; | ||
365 | 366 | ||
366 | /* | 367 | /* |
367 | * Take rtnl_lock to avoid racing with ipoib_stop() and | 368 | * Take rtnl_lock to avoid racing with ipoib_stop() and |
368 | * turning the carrier back on while a device is being | 369 | * turning the carrier back on while a device is being |
369 | * removed. | 370 | * removed. |
370 | */ | 371 | */ |
372 | if (ib_query_port(priv->ca, priv->port, &attr) || | ||
373 | attr.state != IB_PORT_ACTIVE) { | ||
374 | ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); | ||
375 | return; | ||
376 | } | ||
377 | |||
371 | rtnl_lock(); | 378 | rtnl_lock(); |
372 | netif_carrier_on(priv->dev); | 379 | netif_carrier_on(priv->dev); |
373 | rtnl_unlock(); | 380 | rtnl_unlock(); |
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 76d6751f89a7..02f4f8f1db6f 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
@@ -225,6 +225,7 @@ config INPUT_SGI_BTNS | |||
225 | config INPUT_WINBOND_CIR | 225 | config INPUT_WINBOND_CIR |
226 | tristate "Winbond IR remote control" | 226 | tristate "Winbond IR remote control" |
227 | depends on X86 && PNP | 227 | depends on X86 && PNP |
228 | select NEW_LEDS | ||
228 | select LEDS_CLASS | 229 | select LEDS_CLASS |
229 | select BITREVERSE | 230 | select BITREVERSE |
230 | help | 231 | help |
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 020f9573fd82..2158377a1359 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
@@ -124,6 +124,8 @@ config MD_RAID456 | |||
124 | select MD_RAID6_PQ | 124 | select MD_RAID6_PQ |
125 | select ASYNC_MEMCPY | 125 | select ASYNC_MEMCPY |
126 | select ASYNC_XOR | 126 | select ASYNC_XOR |
127 | select ASYNC_PQ | ||
128 | select ASYNC_RAID6_RECOV | ||
127 | ---help--- | 129 | ---help--- |
128 | A RAID-5 set of N drives with a capacity of C MB per drive provides | 130 | A RAID-5 set of N drives with a capacity of C MB per drive provides |
129 | the capacity of C * (N - 1) MB, and protects against a failure | 131 | the capacity of C * (N - 1) MB, and protects against a failure |
@@ -152,9 +154,33 @@ config MD_RAID456 | |||
152 | 154 | ||
153 | If unsure, say Y. | 155 | If unsure, say Y. |
154 | 156 | ||
157 | config MULTICORE_RAID456 | ||
158 | bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)" | ||
159 | depends on MD_RAID456 | ||
160 | depends on SMP | ||
161 | depends on EXPERIMENTAL | ||
162 | ---help--- | ||
163 | Enable the raid456 module to dispatch per-stripe raid operations to a | ||
164 | thread pool. | ||
165 | |||
166 | If unsure, say N. | ||
167 | |||
155 | config MD_RAID6_PQ | 168 | config MD_RAID6_PQ |
156 | tristate | 169 | tristate |
157 | 170 | ||
171 | config ASYNC_RAID6_TEST | ||
172 | tristate "Self test for hardware accelerated raid6 recovery" | ||
173 | depends on MD_RAID6_PQ | ||
174 | select ASYNC_RAID6_RECOV | ||
175 | ---help--- | ||
176 | This is a one-shot self test that permutes through the | ||
177 | recovery of all the possible two disk failure scenarios for an | ||
178 | N-disk array. Recovery is performed with the asynchronous | ||
179 | raid6 recovery routines, and will optionally use an offload | ||
180 | engine if one is available. | ||
181 | |||
182 | If unsure, say N. | ||
183 | |||
158 | config MD_MULTIPATH | 184 | config MD_MULTIPATH |
159 | tristate "Multipath I/O support" | 185 | tristate "Multipath I/O support" |
160 | depends on BLK_DEV_MD | 186 | depends on BLK_DEV_MD |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 3319c2fec28e..6986b0059d23 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -108,6 +108,8 @@ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page) | |||
108 | * allocated while we're using it | 108 | * allocated while we're using it |
109 | */ | 109 | */ |
110 | static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create) | 110 | static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create) |
111 | __releases(bitmap->lock) | ||
112 | __acquires(bitmap->lock) | ||
111 | { | 113 | { |
112 | unsigned char *mappage; | 114 | unsigned char *mappage; |
113 | 115 | ||
@@ -325,7 +327,6 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) | |||
325 | return 0; | 327 | return 0; |
326 | 328 | ||
327 | bad_alignment: | 329 | bad_alignment: |
328 | rcu_read_unlock(); | ||
329 | return -EINVAL; | 330 | return -EINVAL; |
330 | } | 331 | } |
331 | 332 | ||
@@ -1207,6 +1208,8 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1207 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, | 1208 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, |
1208 | sector_t offset, int *blocks, | 1209 | sector_t offset, int *blocks, |
1209 | int create) | 1210 | int create) |
1211 | __releases(bitmap->lock) | ||
1212 | __acquires(bitmap->lock) | ||
1210 | { | 1213 | { |
1211 | /* If 'create', we might release the lock and reclaim it. | 1214 | /* If 'create', we might release the lock and reclaim it. |
1212 | * The lock must have been taken with interrupts enabled. | 1215 | * The lock must have been taken with interrupts enabled. |
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index ea4842905444..1ceceb334d5e 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -108,6 +108,9 @@ static int linear_congested(void *data, int bits) | |||
108 | linear_conf_t *conf; | 108 | linear_conf_t *conf; |
109 | int i, ret = 0; | 109 | int i, ret = 0; |
110 | 110 | ||
111 | if (mddev_congested(mddev, bits)) | ||
112 | return 1; | ||
113 | |||
111 | rcu_read_lock(); | 114 | rcu_read_lock(); |
112 | conf = rcu_dereference(mddev->private); | 115 | conf = rcu_dereference(mddev->private); |
113 | 116 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index 6aa497e4baf8..26ba42a79129 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -262,6 +262,12 @@ static void mddev_resume(mddev_t *mddev) | |||
262 | mddev->pers->quiesce(mddev, 0); | 262 | mddev->pers->quiesce(mddev, 0); |
263 | } | 263 | } |
264 | 264 | ||
265 | int mddev_congested(mddev_t *mddev, int bits) | ||
266 | { | ||
267 | return mddev->suspended; | ||
268 | } | ||
269 | EXPORT_SYMBOL(mddev_congested); | ||
270 | |||
265 | 271 | ||
266 | static inline mddev_t *mddev_get(mddev_t *mddev) | 272 | static inline mddev_t *mddev_get(mddev_t *mddev) |
267 | { | 273 | { |
@@ -4218,7 +4224,7 @@ static int do_md_run(mddev_t * mddev) | |||
4218 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 4224 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
4219 | mddev->sync_thread = md_register_thread(md_do_sync, | 4225 | mddev->sync_thread = md_register_thread(md_do_sync, |
4220 | mddev, | 4226 | mddev, |
4221 | "%s_resync"); | 4227 | "resync"); |
4222 | if (!mddev->sync_thread) { | 4228 | if (!mddev->sync_thread) { |
4223 | printk(KERN_ERR "%s: could not start resync" | 4229 | printk(KERN_ERR "%s: could not start resync" |
4224 | " thread...\n", | 4230 | " thread...\n", |
@@ -4575,10 +4581,10 @@ static int get_version(void __user * arg) | |||
4575 | static int get_array_info(mddev_t * mddev, void __user * arg) | 4581 | static int get_array_info(mddev_t * mddev, void __user * arg) |
4576 | { | 4582 | { |
4577 | mdu_array_info_t info; | 4583 | mdu_array_info_t info; |
4578 | int nr,working,active,failed,spare; | 4584 | int nr,working,insync,failed,spare; |
4579 | mdk_rdev_t *rdev; | 4585 | mdk_rdev_t *rdev; |
4580 | 4586 | ||
4581 | nr=working=active=failed=spare=0; | 4587 | nr=working=insync=failed=spare=0; |
4582 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 4588 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
4583 | nr++; | 4589 | nr++; |
4584 | if (test_bit(Faulty, &rdev->flags)) | 4590 | if (test_bit(Faulty, &rdev->flags)) |
@@ -4586,7 +4592,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg) | |||
4586 | else { | 4592 | else { |
4587 | working++; | 4593 | working++; |
4588 | if (test_bit(In_sync, &rdev->flags)) | 4594 | if (test_bit(In_sync, &rdev->flags)) |
4589 | active++; | 4595 | insync++; |
4590 | else | 4596 | else |
4591 | spare++; | 4597 | spare++; |
4592 | } | 4598 | } |
@@ -4611,7 +4617,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg) | |||
4611 | info.state = (1<<MD_SB_CLEAN); | 4617 | info.state = (1<<MD_SB_CLEAN); |
4612 | if (mddev->bitmap && mddev->bitmap_offset) | 4618 | if (mddev->bitmap && mddev->bitmap_offset) |
4613 | info.state = (1<<MD_SB_BITMAP_PRESENT); | 4619 | info.state = (1<<MD_SB_BITMAP_PRESENT); |
4614 | info.active_disks = active; | 4620 | info.active_disks = insync; |
4615 | info.working_disks = working; | 4621 | info.working_disks = working; |
4616 | info.failed_disks = failed; | 4622 | info.failed_disks = failed; |
4617 | info.spare_disks = spare; | 4623 | info.spare_disks = spare; |
@@ -4721,7 +4727,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) | |||
4721 | if (!list_empty(&mddev->disks)) { | 4727 | if (!list_empty(&mddev->disks)) { |
4722 | mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, | 4728 | mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, |
4723 | mdk_rdev_t, same_set); | 4729 | mdk_rdev_t, same_set); |
4724 | int err = super_types[mddev->major_version] | 4730 | err = super_types[mddev->major_version] |
4725 | .load_super(rdev, rdev0, mddev->minor_version); | 4731 | .load_super(rdev, rdev0, mddev->minor_version); |
4726 | if (err < 0) { | 4732 | if (err < 0) { |
4727 | printk(KERN_WARNING | 4733 | printk(KERN_WARNING |
@@ -5631,7 +5637,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, | |||
5631 | thread->run = run; | 5637 | thread->run = run; |
5632 | thread->mddev = mddev; | 5638 | thread->mddev = mddev; |
5633 | thread->timeout = MAX_SCHEDULE_TIMEOUT; | 5639 | thread->timeout = MAX_SCHEDULE_TIMEOUT; |
5634 | thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); | 5640 | thread->tsk = kthread_run(md_thread, thread, |
5641 | "%s_%s", | ||
5642 | mdname(thread->mddev), | ||
5643 | name ?: mddev->pers->name); | ||
5635 | if (IS_ERR(thread->tsk)) { | 5644 | if (IS_ERR(thread->tsk)) { |
5636 | kfree(thread); | 5645 | kfree(thread); |
5637 | return NULL; | 5646 | return NULL; |
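
With the format string moved inside md_register_thread(), callers now pass either a short suffix or NULL to fall back to the personality name, and the resulting kthread names keep the "<mdname>_<suffix>" shape. Both call styles appear in this merge; the first is from do_md_run() above, the second is the raid1 hunk further down:

    /* resync helper thread, named "<mdname>_resync" */
    mddev->sync_thread = md_register_thread(md_do_sync, mddev, "resync");

    /* personality thread: NULL falls back to mddev->pers->name,
     * e.g. "<mdname>_raid1" for raid1 */
    mddev->thread = md_register_thread(raid1d, mddev, NULL);
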
@@ -6745,7 +6754,7 @@ void md_check_recovery(mddev_t *mddev) | |||
6745 | } | 6754 | } |
6746 | mddev->sync_thread = md_register_thread(md_do_sync, | 6755 | mddev->sync_thread = md_register_thread(md_do_sync, |
6747 | mddev, | 6756 | mddev, |
6748 | "%s_resync"); | 6757 | "resync"); |
6749 | if (!mddev->sync_thread) { | 6758 | if (!mddev->sync_thread) { |
6750 | printk(KERN_ERR "%s: could not start resync" | 6759 | printk(KERN_ERR "%s: could not start resync" |
6751 | " thread...\n", | 6760 | " thread...\n", |
diff --git a/drivers/md/md.h b/drivers/md/md.h index f55d2ff95133..f184b69ef337 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -430,6 +430,7 @@ extern void md_write_end(mddev_t *mddev); | |||
430 | extern void md_done_sync(mddev_t *mddev, int blocks, int ok); | 430 | extern void md_done_sync(mddev_t *mddev, int blocks, int ok); |
431 | extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); | 431 | extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); |
432 | 432 | ||
433 | extern int mddev_congested(mddev_t *mddev, int bits); | ||
433 | extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | 434 | extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, |
434 | sector_t sector, int size, struct page *page); | 435 | sector_t sector, int size, struct page *page); |
435 | extern void md_super_wait(mddev_t *mddev); | 436 | extern void md_super_wait(mddev_t *mddev); |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d2d3fd54cc68..ee7646f974a0 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -150,7 +150,6 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio) | |||
150 | } | 150 | } |
151 | 151 | ||
152 | mp_bh = mempool_alloc(conf->pool, GFP_NOIO); | 152 | mp_bh = mempool_alloc(conf->pool, GFP_NOIO); |
153 | memset(mp_bh, 0, sizeof(*mp_bh)); | ||
154 | 153 | ||
155 | mp_bh->master_bio = bio; | 154 | mp_bh->master_bio = bio; |
156 | mp_bh->mddev = mddev; | 155 | mp_bh->mddev = mddev; |
@@ -199,6 +198,9 @@ static int multipath_congested(void *data, int bits) | |||
199 | multipath_conf_t *conf = mddev->private; | 198 | multipath_conf_t *conf = mddev->private; |
200 | int i, ret = 0; | 199 | int i, ret = 0; |
201 | 200 | ||
201 | if (mddev_congested(mddev, bits)) | ||
202 | return 1; | ||
203 | |||
202 | rcu_read_lock(); | 204 | rcu_read_lock(); |
203 | for (i = 0; i < mddev->raid_disks ; i++) { | 205 | for (i = 0; i < mddev->raid_disks ; i++) { |
204 | mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); | 206 | mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); |
@@ -504,7 +506,7 @@ static int multipath_run (mddev_t *mddev) | |||
504 | } | 506 | } |
505 | 507 | ||
506 | { | 508 | { |
507 | mddev->thread = md_register_thread(multipathd, mddev, "%s_multipath"); | 509 | mddev->thread = md_register_thread(multipathd, mddev, NULL); |
508 | if (!mddev->thread) { | 510 | if (!mddev->thread) { |
509 | printk(KERN_ERR "multipath: couldn't allocate thread" | 511 | printk(KERN_ERR "multipath: couldn't allocate thread" |
510 | " for %s\n", mdname(mddev)); | 512 | " for %s\n", mdname(mddev)); |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index f845ed98fec9..d3a4ce06015a 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -44,6 +44,9 @@ static int raid0_congested(void *data, int bits) | |||
44 | mdk_rdev_t **devlist = conf->devlist; | 44 | mdk_rdev_t **devlist = conf->devlist; |
45 | int i, ret = 0; | 45 | int i, ret = 0; |
46 | 46 | ||
47 | if (mddev_congested(mddev, bits)) | ||
48 | return 1; | ||
49 | |||
47 | for (i = 0; i < mddev->raid_disks && !ret ; i++) { | 50 | for (i = 0; i < mddev->raid_disks && !ret ; i++) { |
48 | struct request_queue *q = bdev_get_queue(devlist[i]->bdev); | 51 | struct request_queue *q = bdev_get_queue(devlist[i]->bdev); |
49 | 52 | ||
@@ -86,7 +89,7 @@ static void dump_zones(mddev_t *mddev) | |||
86 | 89 | ||
87 | static int create_strip_zones(mddev_t *mddev) | 90 | static int create_strip_zones(mddev_t *mddev) |
88 | { | 91 | { |
89 | int i, c, j, err; | 92 | int i, c, err; |
90 | sector_t curr_zone_end, sectors; | 93 | sector_t curr_zone_end, sectors; |
91 | mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev; | 94 | mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev; |
92 | struct strip_zone *zone; | 95 | struct strip_zone *zone; |
@@ -198,6 +201,8 @@ static int create_strip_zones(mddev_t *mddev) | |||
198 | /* now do the other zones */ | 201 | /* now do the other zones */ |
199 | for (i = 1; i < conf->nr_strip_zones; i++) | 202 | for (i = 1; i < conf->nr_strip_zones; i++) |
200 | { | 203 | { |
204 | int j; | ||
205 | |||
201 | zone = conf->strip_zone + i; | 206 | zone = conf->strip_zone + i; |
202 | dev = conf->devlist + i * mddev->raid_disks; | 207 | dev = conf->devlist + i * mddev->raid_disks; |
203 | 208 | ||
@@ -207,7 +212,6 @@ static int create_strip_zones(mddev_t *mddev) | |||
207 | c = 0; | 212 | c = 0; |
208 | 213 | ||
209 | for (j=0; j<cnt; j++) { | 214 | for (j=0; j<cnt; j++) { |
210 | char b[BDEVNAME_SIZE]; | ||
211 | rdev = conf->devlist[j]; | 215 | rdev = conf->devlist[j]; |
212 | printk(KERN_INFO "raid0: checking %s ...", | 216 | printk(KERN_INFO "raid0: checking %s ...", |
213 | bdevname(rdev->bdev, b)); | 217 | bdevname(rdev->bdev, b)); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index ff7ed3335995..d1b9bd5fd4f6 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -576,6 +576,9 @@ static int raid1_congested(void *data, int bits) | |||
576 | conf_t *conf = mddev->private; | 576 | conf_t *conf = mddev->private; |
577 | int i, ret = 0; | 577 | int i, ret = 0; |
578 | 578 | ||
579 | if (mddev_congested(mddev, bits)) | ||
580 | return 1; | ||
581 | |||
579 | rcu_read_lock(); | 582 | rcu_read_lock(); |
580 | for (i = 0; i < mddev->raid_disks; i++) { | 583 | for (i = 0; i < mddev->raid_disks; i++) { |
581 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 584 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); |
@@ -851,7 +854,7 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
851 | read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; | 854 | read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; |
852 | read_bio->bi_bdev = mirror->rdev->bdev; | 855 | read_bio->bi_bdev = mirror->rdev->bdev; |
853 | read_bio->bi_end_io = raid1_end_read_request; | 856 | read_bio->bi_end_io = raid1_end_read_request; |
854 | read_bio->bi_rw = READ | do_sync; | 857 | read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); |
855 | read_bio->bi_private = r1_bio; | 858 | read_bio->bi_private = r1_bio; |
856 | 859 | ||
857 | generic_make_request(read_bio); | 860 | generic_make_request(read_bio); |
@@ -943,7 +946,8 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
943 | mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; | 946 | mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; |
944 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; | 947 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; |
945 | mbio->bi_end_io = raid1_end_write_request; | 948 | mbio->bi_end_io = raid1_end_write_request; |
946 | mbio->bi_rw = WRITE | do_barriers | do_sync; | 949 | mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) | |
950 | (do_sync << BIO_RW_SYNCIO); | ||
947 | mbio->bi_private = r1_bio; | 951 | mbio->bi_private = r1_bio; |
948 | 952 | ||
949 | if (behind_pages) { | 953 | if (behind_pages) { |
@@ -1623,7 +1627,8 @@ static void raid1d(mddev_t *mddev) | |||
1623 | conf->mirrors[i].rdev->data_offset; | 1627 | conf->mirrors[i].rdev->data_offset; |
1624 | bio->bi_bdev = conf->mirrors[i].rdev->bdev; | 1628 | bio->bi_bdev = conf->mirrors[i].rdev->bdev; |
1625 | bio->bi_end_io = raid1_end_write_request; | 1629 | bio->bi_end_io = raid1_end_write_request; |
1626 | bio->bi_rw = WRITE | do_sync; | 1630 | bio->bi_rw = WRITE | |
1631 | (do_sync << BIO_RW_SYNCIO); | ||
1627 | bio->bi_private = r1_bio; | 1632 | bio->bi_private = r1_bio; |
1628 | r1_bio->bios[i] = bio; | 1633 | r1_bio->bios[i] = bio; |
1629 | generic_make_request(bio); | 1634 | generic_make_request(bio); |
@@ -1672,7 +1677,7 @@ static void raid1d(mddev_t *mddev) | |||
1672 | bio->bi_sector = r1_bio->sector + rdev->data_offset; | 1677 | bio->bi_sector = r1_bio->sector + rdev->data_offset; |
1673 | bio->bi_bdev = rdev->bdev; | 1678 | bio->bi_bdev = rdev->bdev; |
1674 | bio->bi_end_io = raid1_end_read_request; | 1679 | bio->bi_end_io = raid1_end_read_request; |
1675 | bio->bi_rw = READ | do_sync; | 1680 | bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); |
1676 | bio->bi_private = r1_bio; | 1681 | bio->bi_private = r1_bio; |
1677 | unplug = 1; | 1682 | unplug = 1; |
1678 | generic_make_request(bio); | 1683 | generic_make_request(bio); |
@@ -2047,7 +2052,7 @@ static int run(mddev_t *mddev) | |||
2047 | conf->last_used = j; | 2052 | conf->last_used = j; |
2048 | 2053 | ||
2049 | 2054 | ||
2050 | mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1"); | 2055 | mddev->thread = md_register_thread(raid1d, mddev, NULL); |
2051 | if (!mddev->thread) { | 2056 | if (!mddev->thread) { |
2052 | printk(KERN_ERR | 2057 | printk(KERN_ERR |
2053 | "raid1: couldn't allocate thread for %s\n", | 2058 | "raid1: couldn't allocate thread for %s\n", |
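
The bi_rw changes in raid1 (and in raid10 below) all have the same shape: do_sync is a 0/1 flag computed from the incoming bio by the callers (not shown in these hunks), so OR-ing it in directly would set bit 0 (the data-direction bit) on a read instead of the sync hint. The shift places it at the BIO_RW_SYNCIO position:

    /* do_sync is 0 or 1 here */
    read_bio->bi_rw = READ | do_sync;                     /* old: sets the wrong bit */
    read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);  /* new: sets the sync hint */
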
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d0a2152e064f..51c4c5c4d87a 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -631,6 +631,8 @@ static int raid10_congested(void *data, int bits) | |||
631 | conf_t *conf = mddev->private; | 631 | conf_t *conf = mddev->private; |
632 | int i, ret = 0; | 632 | int i, ret = 0; |
633 | 633 | ||
634 | if (mddev_congested(mddev, bits)) | ||
635 | return 1; | ||
634 | rcu_read_lock(); | 636 | rcu_read_lock(); |
635 | for (i = 0; i < mddev->raid_disks && ret == 0; i++) { | 637 | for (i = 0; i < mddev->raid_disks && ret == 0; i++) { |
636 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 638 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); |
@@ -882,7 +884,7 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
882 | mirror->rdev->data_offset; | 884 | mirror->rdev->data_offset; |
883 | read_bio->bi_bdev = mirror->rdev->bdev; | 885 | read_bio->bi_bdev = mirror->rdev->bdev; |
884 | read_bio->bi_end_io = raid10_end_read_request; | 886 | read_bio->bi_end_io = raid10_end_read_request; |
885 | read_bio->bi_rw = READ | do_sync; | 887 | read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); |
886 | read_bio->bi_private = r10_bio; | 888 | read_bio->bi_private = r10_bio; |
887 | 889 | ||
888 | generic_make_request(read_bio); | 890 | generic_make_request(read_bio); |
@@ -950,7 +952,7 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
950 | conf->mirrors[d].rdev->data_offset; | 952 | conf->mirrors[d].rdev->data_offset; |
951 | mbio->bi_bdev = conf->mirrors[d].rdev->bdev; | 953 | mbio->bi_bdev = conf->mirrors[d].rdev->bdev; |
952 | mbio->bi_end_io = raid10_end_write_request; | 954 | mbio->bi_end_io = raid10_end_write_request; |
953 | mbio->bi_rw = WRITE | do_sync; | 955 | mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO); |
954 | mbio->bi_private = r10_bio; | 956 | mbio->bi_private = r10_bio; |
955 | 957 | ||
956 | atomic_inc(&r10_bio->remaining); | 958 | atomic_inc(&r10_bio->remaining); |
@@ -1623,7 +1625,7 @@ static void raid10d(mddev_t *mddev) | |||
1623 | bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr | 1625 | bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr |
1624 | + rdev->data_offset; | 1626 | + rdev->data_offset; |
1625 | bio->bi_bdev = rdev->bdev; | 1627 | bio->bi_bdev = rdev->bdev; |
1626 | bio->bi_rw = READ | do_sync; | 1628 | bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); |
1627 | bio->bi_private = r10_bio; | 1629 | bio->bi_private = r10_bio; |
1628 | bio->bi_end_io = raid10_end_read_request; | 1630 | bio->bi_end_io = raid10_end_read_request; |
1629 | unplug = 1; | 1631 | unplug = 1; |
@@ -1773,7 +1775,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1773 | max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); | 1775 | max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); |
1774 | if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | 1776 | if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { |
1775 | /* recovery... the complicated one */ | 1777 | /* recovery... the complicated one */ |
1776 | int i, j, k; | 1778 | int j, k; |
1777 | r10_bio = NULL; | 1779 | r10_bio = NULL; |
1778 | 1780 | ||
1779 | for (i=0 ; i<conf->raid_disks; i++) | 1781 | for (i=0 ; i<conf->raid_disks; i++) |
@@ -2188,7 +2190,7 @@ static int run(mddev_t *mddev) | |||
2188 | } | 2190 | } |
2189 | 2191 | ||
2190 | 2192 | ||
2191 | mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10"); | 2193 | mddev->thread = md_register_thread(raid10d, mddev, NULL); |
2192 | if (!mddev->thread) { | 2194 | if (!mddev->thread) { |
2193 | printk(KERN_ERR | 2195 | printk(KERN_ERR |
2194 | "raid10: couldn't allocate thread for %s\n", | 2196 | "raid10: couldn't allocate thread for %s\n", |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 826eb3467357..94829804ab7f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -47,7 +47,9 @@ | |||
47 | #include <linux/kthread.h> | 47 | #include <linux/kthread.h> |
48 | #include <linux/raid/pq.h> | 48 | #include <linux/raid/pq.h> |
49 | #include <linux/async_tx.h> | 49 | #include <linux/async_tx.h> |
50 | #include <linux/async.h> | ||
50 | #include <linux/seq_file.h> | 51 | #include <linux/seq_file.h> |
52 | #include <linux/cpu.h> | ||
51 | #include "md.h" | 53 | #include "md.h" |
52 | #include "raid5.h" | 54 | #include "raid5.h" |
53 | #include "bitmap.h" | 55 | #include "bitmap.h" |
@@ -499,11 +501,18 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, | |||
499 | struct page *bio_page; | 501 | struct page *bio_page; |
500 | int i; | 502 | int i; |
501 | int page_offset; | 503 | int page_offset; |
504 | struct async_submit_ctl submit; | ||
505 | enum async_tx_flags flags = 0; | ||
502 | 506 | ||
503 | if (bio->bi_sector >= sector) | 507 | if (bio->bi_sector >= sector) |
504 | page_offset = (signed)(bio->bi_sector - sector) * 512; | 508 | page_offset = (signed)(bio->bi_sector - sector) * 512; |
505 | else | 509 | else |
506 | page_offset = (signed)(sector - bio->bi_sector) * -512; | 510 | page_offset = (signed)(sector - bio->bi_sector) * -512; |
511 | |||
512 | if (frombio) | ||
513 | flags |= ASYNC_TX_FENCE; | ||
514 | init_async_submit(&submit, flags, tx, NULL, NULL, NULL); | ||
515 | |||
507 | bio_for_each_segment(bvl, bio, i) { | 516 | bio_for_each_segment(bvl, bio, i) { |
508 | int len = bio_iovec_idx(bio, i)->bv_len; | 517 | int len = bio_iovec_idx(bio, i)->bv_len; |
509 | int clen; | 518 | int clen; |
@@ -525,15 +534,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, | |||
525 | bio_page = bio_iovec_idx(bio, i)->bv_page; | 534 | bio_page = bio_iovec_idx(bio, i)->bv_page; |
526 | if (frombio) | 535 | if (frombio) |
527 | tx = async_memcpy(page, bio_page, page_offset, | 536 | tx = async_memcpy(page, bio_page, page_offset, |
528 | b_offset, clen, | 537 | b_offset, clen, &submit); |
529 | ASYNC_TX_DEP_ACK, | ||
530 | tx, NULL, NULL); | ||
531 | else | 538 | else |
532 | tx = async_memcpy(bio_page, page, b_offset, | 539 | tx = async_memcpy(bio_page, page, b_offset, |
533 | page_offset, clen, | 540 | page_offset, clen, &submit); |
534 | ASYNC_TX_DEP_ACK, | ||
535 | tx, NULL, NULL); | ||
536 | } | 541 | } |
542 | /* chain the operations */ | ||
543 | submit.depend_tx = tx; | ||
544 | |||
537 | if (clen < len) /* hit end of page */ | 545 | if (clen < len) /* hit end of page */ |
538 | break; | 546 | break; |
539 | page_offset += len; | 547 | page_offset += len; |
@@ -592,6 +600,7 @@ static void ops_run_biofill(struct stripe_head *sh) | |||
592 | { | 600 | { |
593 | struct dma_async_tx_descriptor *tx = NULL; | 601 | struct dma_async_tx_descriptor *tx = NULL; |
594 | raid5_conf_t *conf = sh->raid_conf; | 602 | raid5_conf_t *conf = sh->raid_conf; |
603 | struct async_submit_ctl submit; | ||
595 | int i; | 604 | int i; |
596 | 605 | ||
597 | pr_debug("%s: stripe %llu\n", __func__, | 606 | pr_debug("%s: stripe %llu\n", __func__, |
@@ -615,22 +624,34 @@ static void ops_run_biofill(struct stripe_head *sh) | |||
615 | } | 624 | } |
616 | 625 | ||
617 | atomic_inc(&sh->count); | 626 | atomic_inc(&sh->count); |
618 | async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, | 627 | init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); |
619 | ops_complete_biofill, sh); | 628 | async_trigger_callback(&submit); |
620 | } | 629 | } |
621 | 630 | ||
622 | static void ops_complete_compute5(void *stripe_head_ref) | 631 | static void mark_target_uptodate(struct stripe_head *sh, int target) |
623 | { | 632 | { |
624 | struct stripe_head *sh = stripe_head_ref; | 633 | struct r5dev *tgt; |
625 | int target = sh->ops.target; | ||
626 | struct r5dev *tgt = &sh->dev[target]; | ||
627 | 634 | ||
628 | pr_debug("%s: stripe %llu\n", __func__, | 635 | if (target < 0) |
629 | (unsigned long long)sh->sector); | 636 | return; |
630 | 637 | ||
638 | tgt = &sh->dev[target]; | ||
631 | set_bit(R5_UPTODATE, &tgt->flags); | 639 | set_bit(R5_UPTODATE, &tgt->flags); |
632 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); | 640 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); |
633 | clear_bit(R5_Wantcompute, &tgt->flags); | 641 | clear_bit(R5_Wantcompute, &tgt->flags); |
642 | } | ||
643 | |||
644 | static void ops_complete_compute(void *stripe_head_ref) | ||
645 | { | ||
646 | struct stripe_head *sh = stripe_head_ref; | ||
647 | |||
648 | pr_debug("%s: stripe %llu\n", __func__, | ||
649 | (unsigned long long)sh->sector); | ||
650 | |||
651 | /* mark the computed target(s) as uptodate */ | ||
652 | mark_target_uptodate(sh, sh->ops.target); | ||
653 | mark_target_uptodate(sh, sh->ops.target2); | ||
654 | |||
634 | clear_bit(STRIPE_COMPUTE_RUN, &sh->state); | 655 | clear_bit(STRIPE_COMPUTE_RUN, &sh->state); |
635 | if (sh->check_state == check_state_compute_run) | 656 | if (sh->check_state == check_state_compute_run) |
636 | sh->check_state = check_state_compute_result; | 657 | sh->check_state = check_state_compute_result; |
@@ -638,16 +659,24 @@ static void ops_complete_compute5(void *stripe_head_ref) | |||
638 | release_stripe(sh); | 659 | release_stripe(sh); |
639 | } | 660 | } |
640 | 661 | ||
641 | static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) | 662 | /* return a pointer to the address conversion region of the scribble buffer */ |
663 | static addr_conv_t *to_addr_conv(struct stripe_head *sh, | ||
664 | struct raid5_percpu *percpu) | ||
665 | { | ||
666 | return percpu->scribble + sizeof(struct page *) * (sh->disks + 2); | ||
667 | } | ||
668 | |||
669 | static struct dma_async_tx_descriptor * | ||
670 | ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) | ||
642 | { | 671 | { |
643 | /* kernel stack size limits the total number of disks */ | ||
644 | int disks = sh->disks; | 672 | int disks = sh->disks; |
645 | struct page *xor_srcs[disks]; | 673 | struct page **xor_srcs = percpu->scribble; |
646 | int target = sh->ops.target; | 674 | int target = sh->ops.target; |
647 | struct r5dev *tgt = &sh->dev[target]; | 675 | struct r5dev *tgt = &sh->dev[target]; |
648 | struct page *xor_dest = tgt->page; | 676 | struct page *xor_dest = tgt->page; |
649 | int count = 0; | 677 | int count = 0; |
650 | struct dma_async_tx_descriptor *tx; | 678 | struct dma_async_tx_descriptor *tx; |
679 | struct async_submit_ctl submit; | ||
651 | int i; | 680 | int i; |
652 | 681 | ||
653 | pr_debug("%s: stripe %llu block: %d\n", | 682 | pr_debug("%s: stripe %llu block: %d\n", |
@@ -660,17 +689,215 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) | |||
660 | 689 | ||
661 | atomic_inc(&sh->count); | 690 | atomic_inc(&sh->count); |
662 | 691 | ||
692 | init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, | ||
693 | ops_complete_compute, sh, to_addr_conv(sh, percpu)); | ||
663 | if (unlikely(count == 1)) | 694 | if (unlikely(count == 1)) |
664 | tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, | 695 | tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); |
665 | 0, NULL, ops_complete_compute5, sh); | ||
666 | else | 696 | else |
667 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, | 697 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); |
668 | ASYNC_TX_XOR_ZERO_DST, NULL, | ||
669 | ops_complete_compute5, sh); | ||
670 | 698 | ||
671 | return tx; | 699 | return tx; |
672 | } | 700 | } |
673 | 701 | ||
702 | /* set_syndrome_sources - populate source buffers for gen_syndrome | ||
703 | * @srcs - (struct page *) array of size sh->disks | ||
704 | * @sh - stripe_head to parse | ||
705 | * | ||
706 | * Populates srcs in proper layout order for the stripe and returns the | ||
707 | * 'count' of sources to be used in a call to async_gen_syndrome. The P | ||
708 | * destination buffer is recorded in srcs[count] and the Q destination | ||
709 | * is recorded in srcs[count+1]]. | ||
710 | */ | ||
711 | static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) | ||
712 | { | ||
713 | int disks = sh->disks; | ||
714 | int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); | ||
715 | int d0_idx = raid6_d0(sh); | ||
716 | int count; | ||
717 | int i; | ||
718 | |||
719 | for (i = 0; i < disks; i++) | ||
720 | srcs[i] = (void *)raid6_empty_zero_page; | ||
721 | |||
722 | count = 0; | ||
723 | i = d0_idx; | ||
724 | do { | ||
725 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); | ||
726 | |||
727 | srcs[slot] = sh->dev[i].page; | ||
728 | i = raid6_next_disk(i, disks); | ||
729 | } while (i != d0_idx); | ||
730 | BUG_ON(count != syndrome_disks); | ||
731 | |||
732 | return count; | ||
733 | } | ||
734 | |||
735 | static struct dma_async_tx_descriptor * | ||
736 | ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) | ||
737 | { | ||
738 | int disks = sh->disks; | ||
739 | struct page **blocks = percpu->scribble; | ||
740 | int target; | ||
741 | int qd_idx = sh->qd_idx; | ||
742 | struct dma_async_tx_descriptor *tx; | ||
743 | struct async_submit_ctl submit; | ||
744 | struct r5dev *tgt; | ||
745 | struct page *dest; | ||
746 | int i; | ||
747 | int count; | ||
748 | |||
749 | if (sh->ops.target < 0) | ||
750 | target = sh->ops.target2; | ||
751 | else if (sh->ops.target2 < 0) | ||
752 | target = sh->ops.target; | ||
753 | else | ||
754 | /* we should only have one valid target */ | ||
755 | BUG(); | ||
756 | BUG_ON(target < 0); | ||
757 | pr_debug("%s: stripe %llu block: %d\n", | ||
758 | __func__, (unsigned long long)sh->sector, target); | ||
759 | |||
760 | tgt = &sh->dev[target]; | ||
761 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); | ||
762 | dest = tgt->page; | ||
763 | |||
764 | atomic_inc(&sh->count); | ||
765 | |||
766 | if (target == qd_idx) { | ||
767 | count = set_syndrome_sources(blocks, sh); | ||
768 | blocks[count] = NULL; /* regenerating p is not necessary */ | ||
769 | BUG_ON(blocks[count+1] != dest); /* q should already be set */ | ||
770 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, | ||
771 | ops_complete_compute, sh, | ||
772 | to_addr_conv(sh, percpu)); | ||
773 | tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); | ||
774 | } else { | ||
775 | /* Compute any data- or p-drive using XOR */ | ||
776 | count = 0; | ||
777 | for (i = disks; i-- ; ) { | ||
778 | if (i == target || i == qd_idx) | ||
779 | continue; | ||
780 | blocks[count++] = sh->dev[i].page; | ||
781 | } | ||
782 | |||
783 | init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, | ||
784 | NULL, ops_complete_compute, sh, | ||
785 | to_addr_conv(sh, percpu)); | ||
786 | tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); | ||
787 | } | ||
788 | |||
789 | return tx; | ||
790 | } | ||
791 | |||
792 | static struct dma_async_tx_descriptor * | ||
793 | ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) | ||
794 | { | ||
795 | int i, count, disks = sh->disks; | ||
796 | int syndrome_disks = sh->ddf_layout ? disks : disks-2; | ||
797 | int d0_idx = raid6_d0(sh); | ||
798 | int faila = -1, failb = -1; | ||
799 | int target = sh->ops.target; | ||
800 | int target2 = sh->ops.target2; | ||
801 | struct r5dev *tgt = &sh->dev[target]; | ||
802 | struct r5dev *tgt2 = &sh->dev[target2]; | ||
803 | struct dma_async_tx_descriptor *tx; | ||
804 | struct page **blocks = percpu->scribble; | ||
805 | struct async_submit_ctl submit; | ||
806 | |||
807 | pr_debug("%s: stripe %llu block1: %d block2: %d\n", | ||
808 | __func__, (unsigned long long)sh->sector, target, target2); | ||
809 | BUG_ON(target < 0 || target2 < 0); | ||
810 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); | ||
811 | BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); | ||
812 | |||
813 | /* we need to open-code set_syndrome_sources to handle the | ||
814 | * slot number conversion for 'faila' and 'failb' | ||
815 | */ | ||
816 | for (i = 0; i < disks ; i++) | ||
817 | blocks[i] = (void *)raid6_empty_zero_page; | ||
818 | count = 0; | ||
819 | i = d0_idx; | ||
820 | do { | ||
821 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); | ||
822 | |||
823 | blocks[slot] = sh->dev[i].page; | ||
824 | |||
825 | if (i == target) | ||
826 | faila = slot; | ||
827 | if (i == target2) | ||
828 | failb = slot; | ||
829 | i = raid6_next_disk(i, disks); | ||
830 | } while (i != d0_idx); | ||
831 | BUG_ON(count != syndrome_disks); | ||
832 | |||
833 | BUG_ON(faila == failb); | ||
834 | if (failb < faila) | ||
835 | swap(faila, failb); | ||
836 | pr_debug("%s: stripe: %llu faila: %d failb: %d\n", | ||
837 | __func__, (unsigned long long)sh->sector, faila, failb); | ||
838 | |||
839 | atomic_inc(&sh->count); | ||
840 | |||
841 | if (failb == syndrome_disks+1) { | ||
842 | /* Q disk is one of the missing disks */ | ||
843 | if (faila == syndrome_disks) { | ||
844 | /* Missing P+Q, just recompute */ | ||
845 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, | ||
846 | ops_complete_compute, sh, | ||
847 | to_addr_conv(sh, percpu)); | ||
848 | return async_gen_syndrome(blocks, 0, count+2, | ||
849 | STRIPE_SIZE, &submit); | ||
850 | } else { | ||
851 | struct page *dest; | ||
852 | int data_target; | ||
853 | int qd_idx = sh->qd_idx; | ||
854 | |||
855 | /* Missing D+Q: recompute D from P, then recompute Q */ | ||
856 | if (target == qd_idx) | ||
857 | data_target = target2; | ||
858 | else | ||
859 | data_target = target; | ||
860 | |||
861 | count = 0; | ||
862 | for (i = disks; i-- ; ) { | ||
863 | if (i == data_target || i == qd_idx) | ||
864 | continue; | ||
865 | blocks[count++] = sh->dev[i].page; | ||
866 | } | ||
867 | dest = sh->dev[data_target].page; | ||
868 | init_async_submit(&submit, | ||
869 | ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, | ||
870 | NULL, NULL, NULL, | ||
871 | to_addr_conv(sh, percpu)); | ||
872 | tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, | ||
873 | &submit); | ||
874 | |||
875 | count = set_syndrome_sources(blocks, sh); | ||
876 | init_async_submit(&submit, ASYNC_TX_FENCE, tx, | ||
877 | ops_complete_compute, sh, | ||
878 | to_addr_conv(sh, percpu)); | ||
879 | return async_gen_syndrome(blocks, 0, count+2, | ||
880 | STRIPE_SIZE, &submit); | ||
881 | } | ||
882 | } else { | ||
883 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, | ||
884 | ops_complete_compute, sh, | ||
885 | to_addr_conv(sh, percpu)); | ||
886 | if (failb == syndrome_disks) { | ||
887 | /* We're missing D+P. */ | ||
888 | return async_raid6_datap_recov(syndrome_disks+2, | ||
889 | STRIPE_SIZE, faila, | ||
890 | blocks, &submit); | ||
891 | } else { | ||
892 | /* We're missing D+D. */ | ||
893 | return async_raid6_2data_recov(syndrome_disks+2, | ||
894 | STRIPE_SIZE, faila, failb, | ||
895 | blocks, &submit); | ||
896 | } | ||
897 | } | ||
898 | } | ||
899 | |||
900 | |||
674 | static void ops_complete_prexor(void *stripe_head_ref) | 901 | static void ops_complete_prexor(void *stripe_head_ref) |
675 | { | 902 | { |
676 | struct stripe_head *sh = stripe_head_ref; | 903 | struct stripe_head *sh = stripe_head_ref; |
@@ -680,12 +907,13 @@ static void ops_complete_prexor(void *stripe_head_ref) | |||
680 | } | 907 | } |
681 | 908 | ||
682 | static struct dma_async_tx_descriptor * | 909 | static struct dma_async_tx_descriptor * |
683 | ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) | 910 | ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, |
911 | struct dma_async_tx_descriptor *tx) | ||
684 | { | 912 | { |
685 | /* kernel stack size limits the total number of disks */ | ||
686 | int disks = sh->disks; | 913 | int disks = sh->disks; |
687 | struct page *xor_srcs[disks]; | 914 | struct page **xor_srcs = percpu->scribble; |
688 | int count = 0, pd_idx = sh->pd_idx, i; | 915 | int count = 0, pd_idx = sh->pd_idx, i; |
916 | struct async_submit_ctl submit; | ||
689 | 917 | ||
690 | /* existing parity data subtracted */ | 918 | /* existing parity data subtracted */ |
691 | struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; | 919 | struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; |
@@ -700,9 +928,9 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) | |||
700 | xor_srcs[count++] = dev->page; | 928 | xor_srcs[count++] = dev->page; |
701 | } | 929 | } |
702 | 930 | ||
703 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, | 931 | init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, |
704 | ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, | 932 | ops_complete_prexor, sh, to_addr_conv(sh, percpu)); |
705 | ops_complete_prexor, sh); | 933 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); |
706 | 934 | ||
707 | return tx; | 935 | return tx; |
708 | } | 936 | } |
@@ -742,17 +970,21 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) | |||
742 | return tx; | 970 | return tx; |
743 | } | 971 | } |
744 | 972 | ||
745 | static void ops_complete_postxor(void *stripe_head_ref) | 973 | static void ops_complete_reconstruct(void *stripe_head_ref) |
746 | { | 974 | { |
747 | struct stripe_head *sh = stripe_head_ref; | 975 | struct stripe_head *sh = stripe_head_ref; |
748 | int disks = sh->disks, i, pd_idx = sh->pd_idx; | 976 | int disks = sh->disks; |
977 | int pd_idx = sh->pd_idx; | ||
978 | int qd_idx = sh->qd_idx; | ||
979 | int i; | ||
749 | 980 | ||
750 | pr_debug("%s: stripe %llu\n", __func__, | 981 | pr_debug("%s: stripe %llu\n", __func__, |
751 | (unsigned long long)sh->sector); | 982 | (unsigned long long)sh->sector); |
752 | 983 | ||
753 | for (i = disks; i--; ) { | 984 | for (i = disks; i--; ) { |
754 | struct r5dev *dev = &sh->dev[i]; | 985 | struct r5dev *dev = &sh->dev[i]; |
755 | if (dev->written || i == pd_idx) | 986 | |
987 | if (dev->written || i == pd_idx || i == qd_idx) | ||
756 | set_bit(R5_UPTODATE, &dev->flags); | 988 | set_bit(R5_UPTODATE, &dev->flags); |
757 | } | 989 | } |
758 | 990 | ||
@@ -770,12 +1002,12 @@ static void ops_complete_postxor(void *stripe_head_ref) | |||
770 | } | 1002 | } |
771 | 1003 | ||
772 | static void | 1004 | static void |
773 | ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) | 1005 | ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, |
1006 | struct dma_async_tx_descriptor *tx) | ||
774 | { | 1007 | { |
775 | /* kernel stack size limits the total number of disks */ | ||
776 | int disks = sh->disks; | 1008 | int disks = sh->disks; |
777 | struct page *xor_srcs[disks]; | 1009 | struct page **xor_srcs = percpu->scribble; |
778 | 1010 | struct async_submit_ctl submit; | |
779 | int count = 0, pd_idx = sh->pd_idx, i; | 1011 | int count = 0, pd_idx = sh->pd_idx, i; |
780 | struct page *xor_dest; | 1012 | struct page *xor_dest; |
781 | int prexor = 0; | 1013 | int prexor = 0; |
@@ -809,18 +1041,36 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) | |||
809 | * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST | 1041 | * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST |
810 | * for the synchronous xor case | 1042 | * for the synchronous xor case |
811 | */ | 1043 | */ |
812 | flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | | 1044 | flags = ASYNC_TX_ACK | |
813 | (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); | 1045 | (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); |
814 | 1046 | ||
815 | atomic_inc(&sh->count); | 1047 | atomic_inc(&sh->count); |
816 | 1048 | ||
817 | if (unlikely(count == 1)) { | 1049 | init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh, |
818 | flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); | 1050 | to_addr_conv(sh, percpu)); |
819 | tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, | 1051 | if (unlikely(count == 1)) |
820 | flags, tx, ops_complete_postxor, sh); | 1052 | tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); |
821 | } else | 1053 | else |
822 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, | 1054 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); |
823 | flags, tx, ops_complete_postxor, sh); | 1055 | } |
1056 | |||
1057 | static void | ||
1058 | ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, | ||
1059 | struct dma_async_tx_descriptor *tx) | ||
1060 | { | ||
1061 | struct async_submit_ctl submit; | ||
1062 | struct page **blocks = percpu->scribble; | ||
1063 | int count; | ||
1064 | |||
1065 | pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); | ||
1066 | |||
1067 | count = set_syndrome_sources(blocks, sh); | ||
1068 | |||
1069 | atomic_inc(&sh->count); | ||
1070 | |||
1071 | init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct, | ||
1072 | sh, to_addr_conv(sh, percpu)); | ||
1073 | async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); | ||
824 | } | 1074 | } |
825 | 1075 | ||
826 | static void ops_complete_check(void *stripe_head_ref) | 1076 | static void ops_complete_check(void *stripe_head_ref) |
@@ -835,63 +1085,115 @@ static void ops_complete_check(void *stripe_head_ref) | |||
835 | release_stripe(sh); | 1085 | release_stripe(sh); |
836 | } | 1086 | } |
837 | 1087 | ||
838 | static void ops_run_check(struct stripe_head *sh) | 1088 | static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) |
839 | { | 1089 | { |
840 | /* kernel stack size limits the total number of disks */ | ||
841 | int disks = sh->disks; | 1090 | int disks = sh->disks; |
842 | struct page *xor_srcs[disks]; | 1091 | int pd_idx = sh->pd_idx; |
1092 | int qd_idx = sh->qd_idx; | ||
1093 | struct page *xor_dest; | ||
1094 | struct page **xor_srcs = percpu->scribble; | ||
843 | struct dma_async_tx_descriptor *tx; | 1095 | struct dma_async_tx_descriptor *tx; |
844 | 1096 | struct async_submit_ctl submit; | |
845 | int count = 0, pd_idx = sh->pd_idx, i; | 1097 | int count; |
846 | struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; | 1098 | int i; |
847 | 1099 | ||
848 | pr_debug("%s: stripe %llu\n", __func__, | 1100 | pr_debug("%s: stripe %llu\n", __func__, |
849 | (unsigned long long)sh->sector); | 1101 | (unsigned long long)sh->sector); |
850 | 1102 | ||
1103 | count = 0; | ||
1104 | xor_dest = sh->dev[pd_idx].page; | ||
1105 | xor_srcs[count++] = xor_dest; | ||
851 | for (i = disks; i--; ) { | 1106 | for (i = disks; i--; ) { |
852 | struct r5dev *dev = &sh->dev[i]; | 1107 | if (i == pd_idx || i == qd_idx) |
853 | if (i != pd_idx) | 1108 | continue; |
854 | xor_srcs[count++] = dev->page; | 1109 | xor_srcs[count++] = sh->dev[i].page; |
855 | } | 1110 | } |
856 | 1111 | ||
857 | tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, | 1112 | init_async_submit(&submit, 0, NULL, NULL, NULL, |
858 | &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); | 1113 | to_addr_conv(sh, percpu)); |
1114 | tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, | ||
1115 | &sh->ops.zero_sum_result, &submit); | ||
1116 | |||
1117 | atomic_inc(&sh->count); | ||
1118 | init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); | ||
1119 | tx = async_trigger_callback(&submit); | ||
1120 | } | ||
1121 | |||
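ops_run_check_p above still validates P with a plain XOR zero-sum: the parity page XORed with every remaining data page must come out all zeroes, and the outcome lands in sh->ops.zero_sum_result. A small userspace illustration of that check (block count and size are arbitrary placeholders):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NBLKS 4
    #define BLKSZ 16

    /* 0 when P equals the XOR of all data blocks, non-zero otherwise */
    static int xor_zero_sum(uint8_t p[BLKSZ], uint8_t data[NBLKS][BLKSZ])
    {
            uint8_t acc[BLKSZ];
            int i, j, result = 0;

            memcpy(acc, p, BLKSZ);
            for (i = 0; i < NBLKS; i++)
                    for (j = 0; j < BLKSZ; j++)
                            acc[j] ^= data[i][j];
            for (j = 0; j < BLKSZ; j++)
                    result |= acc[j];
            return result;
    }

    int main(void)
    {
            uint8_t data[NBLKS][BLKSZ] = { { 1, 2 }, { 3, 4 }, { 5 }, { 7 } };
            uint8_t p[BLKSZ] = { 0 };
            int i, j;

            for (i = 0; i < NBLKS; i++)             /* build correct parity */
                    for (j = 0; j < BLKSZ; j++)
                            p[j] ^= data[i][j];
            printf("zero_sum_result: %d\n", xor_zero_sum(p, data)); /* 0 */
            return 0;
    }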
1122 | static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) | ||
1123 | { | ||
1124 | struct page **srcs = percpu->scribble; | ||
1125 | struct async_submit_ctl submit; | ||
1126 | int count; | ||
1127 | |||
1128 | pr_debug("%s: stripe %llu checkp: %d\n", __func__, | ||
1129 | (unsigned long long)sh->sector, checkp); | ||
1130 | |||
1131 | count = set_syndrome_sources(srcs, sh); | ||
1132 | if (!checkp) | ||
1133 | srcs[count] = NULL; | ||
859 | 1134 | ||
860 | atomic_inc(&sh->count); | 1135 | atomic_inc(&sh->count); |
861 | tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, | 1136 | init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, |
862 | ops_complete_check, sh); | 1137 | sh, to_addr_conv(sh, percpu)); |
1138 | async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, | ||
1139 | &sh->ops.zero_sum_result, percpu->spare_page, &submit); | ||
863 | } | 1140 | } |
864 | 1141 | ||
865 | static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) | 1142 | static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) |
866 | { | 1143 | { |
867 | int overlap_clear = 0, i, disks = sh->disks; | 1144 | int overlap_clear = 0, i, disks = sh->disks; |
868 | struct dma_async_tx_descriptor *tx = NULL; | 1145 | struct dma_async_tx_descriptor *tx = NULL; |
1146 | raid5_conf_t *conf = sh->raid_conf; | ||
1147 | int level = conf->level; | ||
1148 | struct raid5_percpu *percpu; | ||
1149 | unsigned long cpu; | ||
869 | 1150 | ||
1151 | cpu = get_cpu(); | ||
1152 | percpu = per_cpu_ptr(conf->percpu, cpu); | ||
870 | if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { | 1153 | if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { |
871 | ops_run_biofill(sh); | 1154 | ops_run_biofill(sh); |
872 | overlap_clear++; | 1155 | overlap_clear++; |
873 | } | 1156 | } |
874 | 1157 | ||
875 | if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { | 1158 | if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { |
876 | tx = ops_run_compute5(sh); | 1159 | if (level < 6) |
877 | /* terminate the chain if postxor is not set to be run */ | 1160 | tx = ops_run_compute5(sh, percpu); |
878 | if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request)) | 1161 | else { |
1162 | if (sh->ops.target2 < 0 || sh->ops.target < 0) | ||
1163 | tx = ops_run_compute6_1(sh, percpu); | ||
1164 | else | ||
1165 | tx = ops_run_compute6_2(sh, percpu); | ||
1166 | } | ||
1167 | /* terminate the chain if reconstruct is not set to be run */ | ||
1168 | if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) | ||
879 | async_tx_ack(tx); | 1169 | async_tx_ack(tx); |
880 | } | 1170 | } |
881 | 1171 | ||
882 | if (test_bit(STRIPE_OP_PREXOR, &ops_request)) | 1172 | if (test_bit(STRIPE_OP_PREXOR, &ops_request)) |
883 | tx = ops_run_prexor(sh, tx); | 1173 | tx = ops_run_prexor(sh, percpu, tx); |
884 | 1174 | ||
885 | if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { | 1175 | if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { |
886 | tx = ops_run_biodrain(sh, tx); | 1176 | tx = ops_run_biodrain(sh, tx); |
887 | overlap_clear++; | 1177 | overlap_clear++; |
888 | } | 1178 | } |
889 | 1179 | ||
890 | if (test_bit(STRIPE_OP_POSTXOR, &ops_request)) | 1180 | if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { |
891 | ops_run_postxor(sh, tx); | 1181 | if (level < 6) |
1182 | ops_run_reconstruct5(sh, percpu, tx); | ||
1183 | else | ||
1184 | ops_run_reconstruct6(sh, percpu, tx); | ||
1185 | } | ||
892 | 1186 | ||
893 | if (test_bit(STRIPE_OP_CHECK, &ops_request)) | 1187 | if (test_bit(STRIPE_OP_CHECK, &ops_request)) { |
894 | ops_run_check(sh); | 1188 | if (sh->check_state == check_state_run) |
1189 | ops_run_check_p(sh, percpu); | ||
1190 | else if (sh->check_state == check_state_run_q) | ||
1191 | ops_run_check_pq(sh, percpu, 0); | ||
1192 | else if (sh->check_state == check_state_run_pq) | ||
1193 | ops_run_check_pq(sh, percpu, 1); | ||
1194 | else | ||
1195 | BUG(); | ||
1196 | } | ||
895 | 1197 | ||
896 | if (overlap_clear) | 1198 | if (overlap_clear) |
897 | for (i = disks; i--; ) { | 1199 | for (i = disks; i--; ) { |
@@ -899,6 +1201,7 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) | |||
899 | if (test_and_clear_bit(R5_Overlap, &dev->flags)) | 1201 | if (test_and_clear_bit(R5_Overlap, &dev->flags)) |
900 | wake_up(&sh->raid_conf->wait_for_overlap); | 1202 | wake_up(&sh->raid_conf->wait_for_overlap); |
901 | } | 1203 | } |
1204 | put_cpu(); | ||
902 | } | 1205 | } |
903 | 1206 | ||
904 | static int grow_one_stripe(raid5_conf_t *conf) | 1207 | static int grow_one_stripe(raid5_conf_t *conf) |
@@ -948,6 +1251,28 @@ static int grow_stripes(raid5_conf_t *conf, int num) | |||
948 | return 0; | 1251 | return 0; |
949 | } | 1252 | } |
950 | 1253 | ||
1254 | /** | ||
1255 | * scribble_len - return the required size of the scribble region | ||
1256 | * @num - total number of disks in the array | ||
1257 | * | ||
1258 | * The size must be enough to contain: | ||
1259 | * 1/ a struct page pointer for each device in the array +2 | ||
1260 | * 2/ room to convert each entry in (1) to its corresponding dma | ||
1261 | * (dma_map_page()) or page (page_address()) address. | ||
1262 | * | ||
1263 | * Note: the +2 is for the destination buffers of the ddf/raid6 case where we | ||
1264 | * calculate over all devices (not just the data blocks), using zeros in place | ||
1265 | * of the P and Q blocks. | ||
1266 | */ | ||
1267 | static size_t scribble_len(int num) | ||
1268 | { | ||
1269 | size_t len; | ||
1270 | |||
1271 | len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); | ||
1272 | |||
1273 | return len; | ||
1274 | } | ||
1275 | |||
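As the comment explains, each per-CPU scribble buffer must hold num+2 page pointers plus num+2 address-conversion entries. A worked case, assuming a hypothetical 8-drive array on a 64-bit build where struct page * and addr_conv_t are both 8 bytes:

    len = sizeof(struct page *) * (8 + 2) + sizeof(addr_conv_t) * (8 + 2)
        = 8 * 10 + 8 * 10
        = 160 bytes per CPU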
951 | static int resize_stripes(raid5_conf_t *conf, int newsize) | 1276 | static int resize_stripes(raid5_conf_t *conf, int newsize) |
952 | { | 1277 | { |
953 | /* Make all the stripes able to hold 'newsize' devices. | 1278 | /* Make all the stripes able to hold 'newsize' devices. |
@@ -976,6 +1301,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) | |||
976 | struct stripe_head *osh, *nsh; | 1301 | struct stripe_head *osh, *nsh; |
977 | LIST_HEAD(newstripes); | 1302 | LIST_HEAD(newstripes); |
978 | struct disk_info *ndisks; | 1303 | struct disk_info *ndisks; |
1304 | unsigned long cpu; | ||
979 | int err; | 1305 | int err; |
980 | struct kmem_cache *sc; | 1306 | struct kmem_cache *sc; |
981 | int i; | 1307 | int i; |
@@ -1041,7 +1367,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) | |||
1041 | /* Step 3. | 1367 | /* Step 3. |
1042 | * At this point, we are holding all the stripes so the array | 1368 | * At this point, we are holding all the stripes so the array |
1043 | * is completely stalled, so now is a good time to resize | 1369 | * is completely stalled, so now is a good time to resize |
1044 | * conf->disks. | 1370 | * conf->disks and the scribble region |
1045 | */ | 1371 | */ |
1046 | ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); | 1372 | ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); |
1047 | if (ndisks) { | 1373 | if (ndisks) { |
@@ -1052,10 +1378,30 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) | |||
1052 | } else | 1378 | } else |
1053 | err = -ENOMEM; | 1379 | err = -ENOMEM; |
1054 | 1380 | ||
1381 | get_online_cpus(); | ||
1382 | conf->scribble_len = scribble_len(newsize); | ||
1383 | for_each_present_cpu(cpu) { | ||
1384 | struct raid5_percpu *percpu; | ||
1385 | void *scribble; | ||
1386 | |||
1387 | percpu = per_cpu_ptr(conf->percpu, cpu); | ||
1388 | scribble = kmalloc(conf->scribble_len, GFP_NOIO); | ||
1389 | |||
1390 | if (scribble) { | ||
1391 | kfree(percpu->scribble); | ||
1392 | percpu->scribble = scribble; | ||
1393 | } else { | ||
1394 | err = -ENOMEM; | ||
1395 | break; | ||
1396 | } | ||
1397 | } | ||
1398 | put_online_cpus(); | ||
1399 | |||
1055 | /* Step 4, return new stripes to service */ | 1400 | /* Step 4, return new stripes to service */ |
1056 | while(!list_empty(&newstripes)) { | 1401 | while(!list_empty(&newstripes)) { |
1057 | nsh = list_entry(newstripes.next, struct stripe_head, lru); | 1402 | nsh = list_entry(newstripes.next, struct stripe_head, lru); |
1058 | list_del_init(&nsh->lru); | 1403 | list_del_init(&nsh->lru); |
1404 | |||
1059 | for (i=conf->raid_disks; i < newsize; i++) | 1405 | for (i=conf->raid_disks; i < newsize; i++) |
1060 | if (nsh->dev[i].page == NULL) { | 1406 | if (nsh->dev[i].page == NULL) { |
1061 | struct page *p = alloc_page(GFP_NOIO); | 1407 | struct page *p = alloc_page(GFP_NOIO); |
@@ -1594,258 +1940,13 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
1594 | } | 1940 | } |
1595 | 1941 | ||
1596 | 1942 | ||
1597 | |||
1598 | /* | ||
1599 | * Copy data between a page in the stripe cache, and one or more bion | ||
1600 | * The page could align with the middle of the bio, or there could be | ||
1601 | * several bion, each with several bio_vecs, which cover part of the page | ||
1602 | * Multiple bion are linked together on bi_next. There may be extras | ||
1603 | * at the end of this list. We ignore them. | ||
1604 | */ | ||
1605 | static void copy_data(int frombio, struct bio *bio, | ||
1606 | struct page *page, | ||
1607 | sector_t sector) | ||
1608 | { | ||
1609 | char *pa = page_address(page); | ||
1610 | struct bio_vec *bvl; | ||
1611 | int i; | ||
1612 | int page_offset; | ||
1613 | |||
1614 | if (bio->bi_sector >= sector) | ||
1615 | page_offset = (signed)(bio->bi_sector - sector) * 512; | ||
1616 | else | ||
1617 | page_offset = (signed)(sector - bio->bi_sector) * -512; | ||
1618 | bio_for_each_segment(bvl, bio, i) { | ||
1619 | int len = bio_iovec_idx(bio,i)->bv_len; | ||
1620 | int clen; | ||
1621 | int b_offset = 0; | ||
1622 | |||
1623 | if (page_offset < 0) { | ||
1624 | b_offset = -page_offset; | ||
1625 | page_offset += b_offset; | ||
1626 | len -= b_offset; | ||
1627 | } | ||
1628 | |||
1629 | if (len > 0 && page_offset + len > STRIPE_SIZE) | ||
1630 | clen = STRIPE_SIZE - page_offset; | ||
1631 | else clen = len; | ||
1632 | |||
1633 | if (clen > 0) { | ||
1634 | char *ba = __bio_kmap_atomic(bio, i, KM_USER0); | ||
1635 | if (frombio) | ||
1636 | memcpy(pa+page_offset, ba+b_offset, clen); | ||
1637 | else | ||
1638 | memcpy(ba+b_offset, pa+page_offset, clen); | ||
1639 | __bio_kunmap_atomic(ba, KM_USER0); | ||
1640 | } | ||
1641 | if (clen < len) /* hit end of page */ | ||
1642 | break; | ||
1643 | page_offset += len; | ||
1644 | } | ||
1645 | } | ||
1646 | |||
1647 | #define check_xor() do { \ | ||
1648 | if (count == MAX_XOR_BLOCKS) { \ | ||
1649 | xor_blocks(count, STRIPE_SIZE, dest, ptr);\ | ||
1650 | count = 0; \ | ||
1651 | } \ | ||
1652 | } while(0) | ||
1653 | |||
1654 | static void compute_parity6(struct stripe_head *sh, int method) | ||
1655 | { | ||
1656 | raid5_conf_t *conf = sh->raid_conf; | ||
1657 | int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count; | ||
1658 | int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); | ||
1659 | struct bio *chosen; | ||
1660 | /**** FIX THIS: This could be very bad if disks is close to 256 ****/ | ||
1661 | void *ptrs[syndrome_disks+2]; | ||
1662 | |||
1663 | pd_idx = sh->pd_idx; | ||
1664 | qd_idx = sh->qd_idx; | ||
1665 | d0_idx = raid6_d0(sh); | ||
1666 | |||
1667 | pr_debug("compute_parity, stripe %llu, method %d\n", | ||
1668 | (unsigned long long)sh->sector, method); | ||
1669 | |||
1670 | switch(method) { | ||
1671 | case READ_MODIFY_WRITE: | ||
1672 | BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ | ||
1673 | case RECONSTRUCT_WRITE: | ||
1674 | for (i= disks; i-- ;) | ||
1675 | if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { | ||
1676 | chosen = sh->dev[i].towrite; | ||
1677 | sh->dev[i].towrite = NULL; | ||
1678 | |||
1679 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | ||
1680 | wake_up(&conf->wait_for_overlap); | ||
1681 | |||
1682 | BUG_ON(sh->dev[i].written); | ||
1683 | sh->dev[i].written = chosen; | ||
1684 | } | ||
1685 | break; | ||
1686 | case CHECK_PARITY: | ||
1687 | BUG(); /* Not implemented yet */ | ||
1688 | } | ||
1689 | |||
1690 | for (i = disks; i--;) | ||
1691 | if (sh->dev[i].written) { | ||
1692 | sector_t sector = sh->dev[i].sector; | ||
1693 | struct bio *wbi = sh->dev[i].written; | ||
1694 | while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { | ||
1695 | copy_data(1, wbi, sh->dev[i].page, sector); | ||
1696 | wbi = r5_next_bio(wbi, sector); | ||
1697 | } | ||
1698 | |||
1699 | set_bit(R5_LOCKED, &sh->dev[i].flags); | ||
1700 | set_bit(R5_UPTODATE, &sh->dev[i].flags); | ||
1701 | } | ||
1702 | |||
1703 | /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/ | ||
1704 | |||
1705 | for (i = 0; i < disks; i++) | ||
1706 | ptrs[i] = (void *)raid6_empty_zero_page; | ||
1707 | |||
1708 | count = 0; | ||
1709 | i = d0_idx; | ||
1710 | do { | ||
1711 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); | ||
1712 | |||
1713 | ptrs[slot] = page_address(sh->dev[i].page); | ||
1714 | if (slot < syndrome_disks && | ||
1715 | !test_bit(R5_UPTODATE, &sh->dev[i].flags)) { | ||
1716 | printk(KERN_ERR "block %d/%d not uptodate " | ||
1717 | "on parity calc\n", i, count); | ||
1718 | BUG(); | ||
1719 | } | ||
1720 | |||
1721 | i = raid6_next_disk(i, disks); | ||
1722 | } while (i != d0_idx); | ||
1723 | BUG_ON(count != syndrome_disks); | ||
1724 | |||
1725 | raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs); | ||
1726 | |||
1727 | switch(method) { | ||
1728 | case RECONSTRUCT_WRITE: | ||
1729 | set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); | ||
1730 | set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); | ||
1731 | set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); | ||
1732 | set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); | ||
1733 | break; | ||
1734 | case UPDATE_PARITY: | ||
1735 | set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); | ||
1736 | set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); | ||
1737 | break; | ||
1738 | } | ||
1739 | } | ||
1740 | |||
1741 | |||
1742 | /* Compute one missing block */ | ||
1743 | static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) | ||
1744 | { | ||
1745 | int i, count, disks = sh->disks; | ||
1746 | void *ptr[MAX_XOR_BLOCKS], *dest, *p; | ||
1747 | int qd_idx = sh->qd_idx; | ||
1748 | |||
1749 | pr_debug("compute_block_1, stripe %llu, idx %d\n", | ||
1750 | (unsigned long long)sh->sector, dd_idx); | ||
1751 | |||
1752 | if ( dd_idx == qd_idx ) { | ||
1753 | /* We're actually computing the Q drive */ | ||
1754 | compute_parity6(sh, UPDATE_PARITY); | ||
1755 | } else { | ||
1756 | dest = page_address(sh->dev[dd_idx].page); | ||
1757 | if (!nozero) memset(dest, 0, STRIPE_SIZE); | ||
1758 | count = 0; | ||
1759 | for (i = disks ; i--; ) { | ||
1760 | if (i == dd_idx || i == qd_idx) | ||
1761 | continue; | ||
1762 | p = page_address(sh->dev[i].page); | ||
1763 | if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) | ||
1764 | ptr[count++] = p; | ||
1765 | else | ||
1766 | printk("compute_block() %d, stripe %llu, %d" | ||
1767 | " not present\n", dd_idx, | ||
1768 | (unsigned long long)sh->sector, i); | ||
1769 | |||
1770 | check_xor(); | ||
1771 | } | ||
1772 | if (count) | ||
1773 | xor_blocks(count, STRIPE_SIZE, dest, ptr); | ||
1774 | if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); | ||
1775 | else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); | ||
1776 | } | ||
1777 | } | ||
1778 | |||
1779 | /* Compute two missing blocks */ | ||
1780 | static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) | ||
1781 | { | ||
1782 | int i, count, disks = sh->disks; | ||
1783 | int syndrome_disks = sh->ddf_layout ? disks : disks-2; | ||
1784 | int d0_idx = raid6_d0(sh); | ||
1785 | int faila = -1, failb = -1; | ||
1786 | /**** FIX THIS: This could be very bad if disks is close to 256 ****/ | ||
1787 | void *ptrs[syndrome_disks+2]; | ||
1788 | |||
1789 | for (i = 0; i < disks ; i++) | ||
1790 | ptrs[i] = (void *)raid6_empty_zero_page; | ||
1791 | count = 0; | ||
1792 | i = d0_idx; | ||
1793 | do { | ||
1794 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); | ||
1795 | |||
1796 | ptrs[slot] = page_address(sh->dev[i].page); | ||
1797 | |||
1798 | if (i == dd_idx1) | ||
1799 | faila = slot; | ||
1800 | if (i == dd_idx2) | ||
1801 | failb = slot; | ||
1802 | i = raid6_next_disk(i, disks); | ||
1803 | } while (i != d0_idx); | ||
1804 | BUG_ON(count != syndrome_disks); | ||
1805 | |||
1806 | BUG_ON(faila == failb); | ||
1807 | if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } | ||
1808 | |||
1809 | pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", | ||
1810 | (unsigned long long)sh->sector, dd_idx1, dd_idx2, | ||
1811 | faila, failb); | ||
1812 | |||
1813 | if (failb == syndrome_disks+1) { | ||
1814 | /* Q disk is one of the missing disks */ | ||
1815 | if (faila == syndrome_disks) { | ||
1816 | /* Missing P+Q, just recompute */ | ||
1817 | compute_parity6(sh, UPDATE_PARITY); | ||
1818 | return; | ||
1819 | } else { | ||
1820 | /* We're missing D+Q; recompute D from P */ | ||
1821 | compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ? | ||
1822 | dd_idx2 : dd_idx1), | ||
1823 | 0); | ||
1824 | compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ | ||
1825 | return; | ||
1826 | } | ||
1827 | } | ||
1828 | |||
1829 | /* We're missing D+P or D+D; */ | ||
1830 | if (failb == syndrome_disks) { | ||
1831 | /* We're missing D+P. */ | ||
1832 | raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs); | ||
1833 | } else { | ||
1834 | /* We're missing D+D. */ | ||
1835 | raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb, | ||
1836 | ptrs); | ||
1837 | } | ||
1838 | |||
1839 | /* Both the above update both missing blocks */ | ||
1840 | set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); | ||
1841 | set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); | ||
1842 | } | ||
1843 | |||
1844 | static void | 1943 | static void |
1845 | schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, | 1944 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, |
1846 | int rcw, int expand) | 1945 | int rcw, int expand) |
1847 | { | 1946 | { |
1848 | int i, pd_idx = sh->pd_idx, disks = sh->disks; | 1947 | int i, pd_idx = sh->pd_idx, disks = sh->disks; |
1948 | raid5_conf_t *conf = sh->raid_conf; | ||
1949 | int level = conf->level; | ||
1849 | 1950 | ||
1850 | if (rcw) { | 1951 | if (rcw) { |
1851 | /* if we are not expanding this is a proper write request, and | 1952 | /* if we are not expanding this is a proper write request, and |
@@ -1858,7 +1959,7 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, | |||
1858 | } else | 1959 | } else |
1859 | sh->reconstruct_state = reconstruct_state_run; | 1960 | sh->reconstruct_state = reconstruct_state_run; |
1860 | 1961 | ||
1861 | set_bit(STRIPE_OP_POSTXOR, &s->ops_request); | 1962 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); |
1862 | 1963 | ||
1863 | for (i = disks; i--; ) { | 1964 | for (i = disks; i--; ) { |
1864 | struct r5dev *dev = &sh->dev[i]; | 1965 | struct r5dev *dev = &sh->dev[i]; |
@@ -1871,17 +1972,18 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, | |||
1871 | s->locked++; | 1972 | s->locked++; |
1872 | } | 1973 | } |
1873 | } | 1974 | } |
1874 | if (s->locked + 1 == disks) | 1975 | if (s->locked + conf->max_degraded == disks) |
1875 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) | 1976 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) |
1876 | atomic_inc(&sh->raid_conf->pending_full_writes); | 1977 | atomic_inc(&conf->pending_full_writes); |
1877 | } else { | 1978 | } else { |
1979 | BUG_ON(level == 6); | ||
1878 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || | 1980 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || |
1879 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); | 1981 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); |
1880 | 1982 | ||
1881 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; | 1983 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; |
1882 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); | 1984 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); |
1883 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | 1985 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); |
1884 | set_bit(STRIPE_OP_POSTXOR, &s->ops_request); | 1986 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); |
1885 | 1987 | ||
1886 | for (i = disks; i--; ) { | 1988 | for (i = disks; i--; ) { |
1887 | struct r5dev *dev = &sh->dev[i]; | 1989 | struct r5dev *dev = &sh->dev[i]; |
@@ -1899,13 +2001,22 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, | |||
1899 | } | 2001 | } |
1900 | } | 2002 | } |
1901 | 2003 | ||
1902 | /* keep the parity disk locked while asynchronous operations | 2004 | /* keep the parity disk(s) locked while asynchronous operations |
1903 | * are in flight | 2005 | * are in flight |
1904 | */ | 2006 | */ |
1905 | set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); | 2007 | set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); |
1906 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); | 2008 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
1907 | s->locked++; | 2009 | s->locked++; |
1908 | 2010 | ||
2011 | if (level == 6) { | ||
2012 | int qd_idx = sh->qd_idx; | ||
2013 | struct r5dev *dev = &sh->dev[qd_idx]; | ||
2014 | |||
2015 | set_bit(R5_LOCKED, &dev->flags); | ||
2016 | clear_bit(R5_UPTODATE, &dev->flags); | ||
2017 | s->locked++; | ||
2018 | } | ||
2019 | |||
1909 | pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", | 2020 | pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", |
1910 | __func__, (unsigned long long)sh->sector, | 2021 | __func__, (unsigned long long)sh->sector, |
1911 | s->locked, s->ops_request); | 2022 | s->locked, s->ops_request); |
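The full-write test earlier in this function, s->locked + conf->max_degraded == disks, generalizes the old RAID-5-only check: at that point s->locked counts only the data blocks queued for writing, so the equality holds exactly when every data block in the stripe is being overwritten. A worked case, assuming a hypothetical 10-drive RAID-6 array (8 data blocks plus P and Q):

    disks = 10, max_degraded = 2
    all 8 data blocks locked for write  =>  s->locked = 8
    8 + 2 == 10                         =>  STRIPE_FULL_WRITE is set

For RAID-5, max_degraded is 1 and the test reduces to the previous s->locked + 1 == disks form.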
@@ -1986,13 +2097,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in | |||
1986 | 2097 | ||
1987 | static void end_reshape(raid5_conf_t *conf); | 2098 | static void end_reshape(raid5_conf_t *conf); |
1988 | 2099 | ||
1989 | static int page_is_zero(struct page *p) | ||
1990 | { | ||
1991 | char *a = page_address(p); | ||
1992 | return ((*(u32*)a) == 0 && | ||
1993 | memcmp(a, a+4, STRIPE_SIZE-4)==0); | ||
1994 | } | ||
1995 | |||
1996 | static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, | 2100 | static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, |
1997 | struct stripe_head *sh) | 2101 | struct stripe_head *sh) |
1998 | { | 2102 | { |
@@ -2132,9 +2236,10 @@ static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, | |||
2132 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); | 2236 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
2133 | set_bit(R5_Wantcompute, &dev->flags); | 2237 | set_bit(R5_Wantcompute, &dev->flags); |
2134 | sh->ops.target = disk_idx; | 2238 | sh->ops.target = disk_idx; |
2239 | sh->ops.target2 = -1; | ||
2135 | s->req_compute = 1; | 2240 | s->req_compute = 1; |
2136 | /* Careful: from this point on 'uptodate' is in the eye | 2241 | /* Careful: from this point on 'uptodate' is in the eye |
2137 | * of raid5_run_ops which services 'compute' operations | 2242 | * of raid_run_ops which services 'compute' operations |
2138 | * before writes. R5_Wantcompute flags a block that will | 2243 | * before writes. R5_Wantcompute flags a block that will |
2139 | * be R5_UPTODATE by the time it is needed for a | 2244 | * be R5_UPTODATE by the time it is needed for a |
2140 | * subsequent operation. | 2245 | * subsequent operation. |
@@ -2173,61 +2278,104 @@ static void handle_stripe_fill5(struct stripe_head *sh, | |||
2173 | set_bit(STRIPE_HANDLE, &sh->state); | 2278 | set_bit(STRIPE_HANDLE, &sh->state); |
2174 | } | 2279 | } |
2175 | 2280 | ||
2176 | static void handle_stripe_fill6(struct stripe_head *sh, | 2281 | /* fetch_block6 - checks the given member device to see if its data needs |
2177 | struct stripe_head_state *s, struct r6_state *r6s, | 2282 | * to be read or computed to satisfy a request. |
2178 | int disks) | 2283 | * |
2284 | * Returns 1 when no more member devices need to be checked, otherwise returns | ||
2285 | * 0 to tell the loop in handle_stripe_fill6 to continue | ||
2286 | */ | ||
2287 | static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, | ||
2288 | struct r6_state *r6s, int disk_idx, int disks) | ||
2179 | { | 2289 | { |
2180 | int i; | 2290 | struct r5dev *dev = &sh->dev[disk_idx]; |
2181 | for (i = disks; i--; ) { | 2291 | struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]], |
2182 | struct r5dev *dev = &sh->dev[i]; | 2292 | &sh->dev[r6s->failed_num[1]] }; |
2183 | if (!test_bit(R5_LOCKED, &dev->flags) && | 2293 | |
2184 | !test_bit(R5_UPTODATE, &dev->flags) && | 2294 | if (!test_bit(R5_LOCKED, &dev->flags) && |
2185 | (dev->toread || (dev->towrite && | 2295 | !test_bit(R5_UPTODATE, &dev->flags) && |
2186 | !test_bit(R5_OVERWRITE, &dev->flags)) || | 2296 | (dev->toread || |
2187 | s->syncing || s->expanding || | 2297 | (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || |
2188 | (s->failed >= 1 && | 2298 | s->syncing || s->expanding || |
2189 | (sh->dev[r6s->failed_num[0]].toread || | 2299 | (s->failed >= 1 && |
2190 | s->to_write)) || | 2300 | (fdev[0]->toread || s->to_write)) || |
2191 | (s->failed >= 2 && | 2301 | (s->failed >= 2 && |
2192 | (sh->dev[r6s->failed_num[1]].toread || | 2302 | (fdev[1]->toread || s->to_write)))) { |
2193 | s->to_write)))) { | 2303 | /* we would like to get this block, possibly by computing it, |
2194 | /* we would like to get this block, possibly | 2304 | * otherwise read it if the backing disk is insync |
2195 | * by computing it, but we might not be able to | 2305 | */ |
2306 | BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); | ||
2307 | BUG_ON(test_bit(R5_Wantread, &dev->flags)); | ||
2308 | if ((s->uptodate == disks - 1) && | ||
2309 | (s->failed && (disk_idx == r6s->failed_num[0] || | ||
2310 | disk_idx == r6s->failed_num[1]))) { | ||
2311 | /* have disk failed, and we're requested to fetch it; | ||
2312 | /* the disk has failed and we've been asked to fetch its data; | ||
2313 | * compute it instead | ||
2197 | if ((s->uptodate == disks - 1) && | 2314 | pr_debug("Computing stripe %llu block %d\n", |
2198 | (s->failed && (i == r6s->failed_num[0] || | 2315 | (unsigned long long)sh->sector, disk_idx); |
2199 | i == r6s->failed_num[1]))) { | 2316 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
2200 | pr_debug("Computing stripe %llu block %d\n", | 2317 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
2201 | (unsigned long long)sh->sector, i); | 2318 | set_bit(R5_Wantcompute, &dev->flags); |
2202 | compute_block_1(sh, i, 0); | 2319 | sh->ops.target = disk_idx; |
2203 | s->uptodate++; | 2320 | sh->ops.target2 = -1; /* no 2nd target */ |
2204 | } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { | 2321 | s->req_compute = 1; |
2205 | /* Computing 2-failure is *very* expensive; only | 2322 | s->uptodate++; |
2206 | * do it if failed >= 2 | 2323 | return 1; |
2207 | */ | 2324 | } else if (s->uptodate == disks-2 && s->failed >= 2) { |
2208 | int other; | 2325 | /* Computing 2-failure is *very* expensive; only |
2209 | for (other = disks; other--; ) { | 2326 | * do it if failed >= 2 |
2210 | if (other == i) | 2327 | */ |
2211 | continue; | 2328 | int other; |
2212 | if (!test_bit(R5_UPTODATE, | 2329 | for (other = disks; other--; ) { |
2213 | &sh->dev[other].flags)) | 2330 | if (other == disk_idx) |
2214 | break; | 2331 | continue; |
2215 | } | 2332 | if (!test_bit(R5_UPTODATE, |
2216 | BUG_ON(other < 0); | 2333 | &sh->dev[other].flags)) |
2217 | pr_debug("Computing stripe %llu blocks %d,%d\n", | 2334 | break; |
2218 | (unsigned long long)sh->sector, | ||
2219 | i, other); | ||
2220 | compute_block_2(sh, i, other); | ||
2221 | s->uptodate += 2; | ||
2222 | } else if (test_bit(R5_Insync, &dev->flags)) { | ||
2223 | set_bit(R5_LOCKED, &dev->flags); | ||
2224 | set_bit(R5_Wantread, &dev->flags); | ||
2225 | s->locked++; | ||
2226 | pr_debug("Reading block %d (sync=%d)\n", | ||
2227 | i, s->syncing); | ||
2228 | } | 2335 | } |
2336 | BUG_ON(other < 0); | ||
2337 | pr_debug("Computing stripe %llu blocks %d,%d\n", | ||
2338 | (unsigned long long)sh->sector, | ||
2339 | disk_idx, other); | ||
2340 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); | ||
2341 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); | ||
2342 | set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); | ||
2343 | set_bit(R5_Wantcompute, &sh->dev[other].flags); | ||
2344 | sh->ops.target = disk_idx; | ||
2345 | sh->ops.target2 = other; | ||
2346 | s->uptodate += 2; | ||
2347 | s->req_compute = 1; | ||
2348 | return 1; | ||
2349 | } else if (test_bit(R5_Insync, &dev->flags)) { | ||
2350 | set_bit(R5_LOCKED, &dev->flags); | ||
2351 | set_bit(R5_Wantread, &dev->flags); | ||
2352 | s->locked++; | ||
2353 | pr_debug("Reading block %d (sync=%d)\n", | ||
2354 | disk_idx, s->syncing); | ||
2229 | } | 2355 | } |
2230 | } | 2356 | } |
2357 | |||
2358 | return 0; | ||
2359 | } | ||
2360 | |||
2361 | /** | ||
2362 | * handle_stripe_fill6 - read or compute data to satisfy pending requests. | ||
2363 | */ | ||
2364 | static void handle_stripe_fill6(struct stripe_head *sh, | ||
2365 | struct stripe_head_state *s, struct r6_state *r6s, | ||
2366 | int disks) | ||
2367 | { | ||
2368 | int i; | ||
2369 | |||
2370 | /* look for blocks to read/compute, skip this if a compute | ||
2371 | * is already in flight, or if the stripe contents are in the | ||
2372 | * midst of changing due to a write | ||
2373 | */ | ||
2374 | if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && | ||
2375 | !sh->reconstruct_state) | ||
2376 | for (i = disks; i--; ) | ||
2377 | if (fetch_block6(sh, s, r6s, i, disks)) | ||
2378 | break; | ||
2231 | set_bit(STRIPE_HANDLE, &sh->state); | 2379 | set_bit(STRIPE_HANDLE, &sh->state); |
2232 | } | 2380 | } |
2233 | 2381 | ||
@@ -2361,114 +2509,61 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf, | |||
2361 | */ | 2509 | */ |
2362 | /* since handle_stripe can be called at any time we need to handle the | 2510 | /* since handle_stripe can be called at any time we need to handle the |
2363 | * case where a compute block operation has been submitted and then a | 2511 | * case where a compute block operation has been submitted and then a |
2364 | * subsequent call wants to start a write request. raid5_run_ops only | 2512 | * subsequent call wants to start a write request. raid_run_ops only |
2365 | * handles the case where compute block and postxor are requested | 2513 | * handles the case where compute block and reconstruct are requested |
2366 | * simultaneously. If this is not the case then new writes need to be | 2514 | * simultaneously. If this is not the case then new writes need to be |
2367 | * held off until the compute completes. | 2515 | * held off until the compute completes. |
2368 | */ | 2516 | */ |
2369 | if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && | 2517 | if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && |
2370 | (s->locked == 0 && (rcw == 0 || rmw == 0) && | 2518 | (s->locked == 0 && (rcw == 0 || rmw == 0) && |
2371 | !test_bit(STRIPE_BIT_DELAY, &sh->state))) | 2519 | !test_bit(STRIPE_BIT_DELAY, &sh->state))) |
2372 | schedule_reconstruction5(sh, s, rcw == 0, 0); | 2520 | schedule_reconstruction(sh, s, rcw == 0, 0); |
2373 | } | 2521 | } |
2374 | 2522 | ||
2375 | static void handle_stripe_dirtying6(raid5_conf_t *conf, | 2523 | static void handle_stripe_dirtying6(raid5_conf_t *conf, |
2376 | struct stripe_head *sh, struct stripe_head_state *s, | 2524 | struct stripe_head *sh, struct stripe_head_state *s, |
2377 | struct r6_state *r6s, int disks) | 2525 | struct r6_state *r6s, int disks) |
2378 | { | 2526 | { |
2379 | int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; | 2527 | int rcw = 0, pd_idx = sh->pd_idx, i; |
2380 | int qd_idx = sh->qd_idx; | 2528 | int qd_idx = sh->qd_idx; |
2529 | |||
2530 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2381 | for (i = disks; i--; ) { | 2531 | for (i = disks; i--; ) { |
2382 | struct r5dev *dev = &sh->dev[i]; | 2532 | struct r5dev *dev = &sh->dev[i]; |
2383 | /* Would I have to read this buffer for reconstruct_write */ | 2533 | /* check if we haven't enough data */ |
2384 | if (!test_bit(R5_OVERWRITE, &dev->flags) | 2534 | if (!test_bit(R5_OVERWRITE, &dev->flags) && |
2385 | && i != pd_idx && i != qd_idx | 2535 | i != pd_idx && i != qd_idx && |
2386 | && (!test_bit(R5_LOCKED, &dev->flags) | 2536 | !test_bit(R5_LOCKED, &dev->flags) && |
2387 | ) && | 2537 | !(test_bit(R5_UPTODATE, &dev->flags) || |
2388 | !test_bit(R5_UPTODATE, &dev->flags)) { | 2538 | test_bit(R5_Wantcompute, &dev->flags))) { |
2389 | if (test_bit(R5_Insync, &dev->flags)) rcw++; | 2539 | rcw++; |
2390 | else { | 2540 | if (!test_bit(R5_Insync, &dev->flags)) |
2391 | pr_debug("raid6: must_compute: " | 2541 | continue; /* it's a failed drive */ |
2392 | "disk %d flags=%#lx\n", i, dev->flags); | 2542 | |
2393 | must_compute++; | 2543 | if ( |
2544 | test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { | ||
2545 | pr_debug("Read_old stripe %llu " | ||
2546 | "block %d for Reconstruct\n", | ||
2547 | (unsigned long long)sh->sector, i); | ||
2548 | set_bit(R5_LOCKED, &dev->flags); | ||
2549 | set_bit(R5_Wantread, &dev->flags); | ||
2550 | s->locked++; | ||
2551 | } else { | ||
2552 | pr_debug("Request delayed stripe %llu " | ||
2553 | "block %d for Reconstruct\n", | ||
2554 | (unsigned long long)sh->sector, i); | ||
2555 | set_bit(STRIPE_DELAYED, &sh->state); | ||
2556 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2394 | } | 2557 | } |
2395 | } | 2558 | } |
2396 | } | 2559 | } |
2397 | pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", | ||
2398 | (unsigned long long)sh->sector, rcw, must_compute); | ||
2399 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2400 | |||
2401 | if (rcw > 0) | ||
2402 | /* want reconstruct write, but need to get some data */ | ||
2403 | for (i = disks; i--; ) { | ||
2404 | struct r5dev *dev = &sh->dev[i]; | ||
2405 | if (!test_bit(R5_OVERWRITE, &dev->flags) | ||
2406 | && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) | ||
2407 | && !test_bit(R5_LOCKED, &dev->flags) && | ||
2408 | !test_bit(R5_UPTODATE, &dev->flags) && | ||
2409 | test_bit(R5_Insync, &dev->flags)) { | ||
2410 | if ( | ||
2411 | test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { | ||
2412 | pr_debug("Read_old stripe %llu " | ||
2413 | "block %d for Reconstruct\n", | ||
2414 | (unsigned long long)sh->sector, i); | ||
2415 | set_bit(R5_LOCKED, &dev->flags); | ||
2416 | set_bit(R5_Wantread, &dev->flags); | ||
2417 | s->locked++; | ||
2418 | } else { | ||
2419 | pr_debug("Request delayed stripe %llu " | ||
2420 | "block %d for Reconstruct\n", | ||
2421 | (unsigned long long)sh->sector, i); | ||
2422 | set_bit(STRIPE_DELAYED, &sh->state); | ||
2423 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2424 | } | ||
2425 | } | ||
2426 | } | ||
2427 | /* now if nothing is locked, and if we have enough data, we can start a | 2560 | /* now if nothing is locked, and if we have enough data, we can start a |
2428 | * write request | 2561 | * write request |
2429 | */ | 2562 | */ |
2430 | if (s->locked == 0 && rcw == 0 && | 2563 | if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && |
2564 | s->locked == 0 && rcw == 0 && | ||
2431 | !test_bit(STRIPE_BIT_DELAY, &sh->state)) { | 2565 | !test_bit(STRIPE_BIT_DELAY, &sh->state)) { |
2432 | if (must_compute > 0) { | 2566 | schedule_reconstruction(sh, s, 1, 0); |
2433 | /* We have failed blocks and need to compute them */ | ||
2434 | switch (s->failed) { | ||
2435 | case 0: | ||
2436 | BUG(); | ||
2437 | case 1: | ||
2438 | compute_block_1(sh, r6s->failed_num[0], 0); | ||
2439 | break; | ||
2440 | case 2: | ||
2441 | compute_block_2(sh, r6s->failed_num[0], | ||
2442 | r6s->failed_num[1]); | ||
2443 | break; | ||
2444 | default: /* This request should have been failed? */ | ||
2445 | BUG(); | ||
2446 | } | ||
2447 | } | ||
2448 | |||
2449 | pr_debug("Computing parity for stripe %llu\n", | ||
2450 | (unsigned long long)sh->sector); | ||
2451 | compute_parity6(sh, RECONSTRUCT_WRITE); | ||
2452 | /* now every locked buffer is ready to be written */ | ||
2453 | for (i = disks; i--; ) | ||
2454 | if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { | ||
2455 | pr_debug("Writing stripe %llu block %d\n", | ||
2456 | (unsigned long long)sh->sector, i); | ||
2457 | s->locked++; | ||
2458 | set_bit(R5_Wantwrite, &sh->dev[i].flags); | ||
2459 | } | ||
2460 | if (s->locked == disks) | ||
2461 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) | ||
2462 | atomic_inc(&conf->pending_full_writes); | ||
2463 | /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ | ||
2464 | set_bit(STRIPE_INSYNC, &sh->state); | ||
2465 | |||
2466 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { | ||
2467 | atomic_dec(&conf->preread_active_stripes); | ||
2468 | if (atomic_read(&conf->preread_active_stripes) < | ||
2469 | IO_THRESHOLD) | ||
2470 | md_wakeup_thread(conf->mddev->thread); | ||
2471 | } | ||
2472 | } | 2567 | } |
2473 | } | 2568 | } |
2474 | 2569 | ||
@@ -2527,7 +2622,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, | |||
2527 | * we are done. Otherwise update the mismatch count and repair | 2622 | * we are done. Otherwise update the mismatch count and repair |
2528 | * parity if !MD_RECOVERY_CHECK | 2623 | * parity if !MD_RECOVERY_CHECK |
2529 | */ | 2624 | */ |
2530 | if (sh->ops.zero_sum_result == 0) | 2625 | if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) |
2531 | /* parity is correct (on disc, | 2626 | /* parity is correct (on disc, |
2532 | * not in buffer any more) | 2627 | * not in buffer any more) |
2533 | */ | 2628 | */ |
@@ -2544,6 +2639,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, | |||
2544 | set_bit(R5_Wantcompute, | 2639 | set_bit(R5_Wantcompute, |
2545 | &sh->dev[sh->pd_idx].flags); | 2640 | &sh->dev[sh->pd_idx].flags); |
2546 | sh->ops.target = sh->pd_idx; | 2641 | sh->ops.target = sh->pd_idx; |
2642 | sh->ops.target2 = -1; | ||
2547 | s->uptodate++; | 2643 | s->uptodate++; |
2548 | } | 2644 | } |
2549 | } | 2645 | } |
@@ -2560,67 +2656,74 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, | |||
2560 | 2656 | ||
2561 | 2657 | ||
2562 | static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, | 2658 | static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, |
2563 | struct stripe_head_state *s, | 2659 | struct stripe_head_state *s, |
2564 | struct r6_state *r6s, struct page *tmp_page, | 2660 | struct r6_state *r6s, int disks) |
2565 | int disks) | ||
2566 | { | 2661 | { |
2567 | int update_p = 0, update_q = 0; | ||
2568 | struct r5dev *dev; | ||
2569 | int pd_idx = sh->pd_idx; | 2662 | int pd_idx = sh->pd_idx; |
2570 | int qd_idx = sh->qd_idx; | 2663 | int qd_idx = sh->qd_idx; |
2664 | struct r5dev *dev; | ||
2571 | 2665 | ||
2572 | set_bit(STRIPE_HANDLE, &sh->state); | 2666 | set_bit(STRIPE_HANDLE, &sh->state); |
2573 | 2667 | ||
2574 | BUG_ON(s->failed > 2); | 2668 | BUG_ON(s->failed > 2); |
2575 | BUG_ON(s->uptodate < disks); | 2669 | |
2576 | /* Want to check and possibly repair P and Q. | 2670 | /* Want to check and possibly repair P and Q. |
2577 | * However there could be one 'failed' device, in which | 2671 | * However there could be one 'failed' device, in which |
2578 | * case we can only check one of them, possibly using the | 2672 | * case we can only check one of them, possibly using the |
2579 | * other to generate missing data | 2673 | * other to generate missing data |
2580 | */ | 2674 | */ |
2581 | 2675 | ||
2582 | /* If !tmp_page, we cannot do the calculations, | 2676 | switch (sh->check_state) { |
2583 | * but as we have set STRIPE_HANDLE, we will soon be called | 2677 | case check_state_idle: |
2584 | * by stripe_handle with a tmp_page - just wait until then. | 2678 | /* start a new check operation if there are < 2 failures */ |
2585 | */ | ||
2586 | if (tmp_page) { | ||
2587 | if (s->failed == r6s->q_failed) { | 2679 | if (s->failed == r6s->q_failed) { |
2588 | /* The only possible failed device holds 'Q', so it | 2680 | /* The only possible failed device holds Q, so it |
2589 | * makes sense to check P (If anything else were failed, | 2681 | * makes sense to check P (If anything else were failed, |
2590 | * we would have used P to recreate it). | 2682 | * we would have used P to recreate it). |
2591 | */ | 2683 | */ |
2592 | compute_block_1(sh, pd_idx, 1); | 2684 | sh->check_state = check_state_run; |
2593 | if (!page_is_zero(sh->dev[pd_idx].page)) { | ||
2594 | compute_block_1(sh, pd_idx, 0); | ||
2595 | update_p = 1; | ||
2596 | } | ||
2597 | } | 2685 | } |
2598 | if (!r6s->q_failed && s->failed < 2) { | 2686 | if (!r6s->q_failed && s->failed < 2) { |
2599 | /* q is not failed, and we didn't use it to generate | 2687 | /* Q is not failed, and we didn't use it to generate |
2600 | * anything, so it makes sense to check it | 2688 | * anything, so it makes sense to check it |
2601 | */ | 2689 | */ |
2602 | memcpy(page_address(tmp_page), | 2690 | if (sh->check_state == check_state_run) |
2603 | page_address(sh->dev[qd_idx].page), | 2691 | sh->check_state = check_state_run_pq; |
2604 | STRIPE_SIZE); | 2692 | else |
2605 | compute_parity6(sh, UPDATE_PARITY); | 2693 | sh->check_state = check_state_run_q; |
2606 | if (memcmp(page_address(tmp_page), | ||
2607 | page_address(sh->dev[qd_idx].page), | ||
2608 | STRIPE_SIZE) != 0) { | ||
2609 | clear_bit(STRIPE_INSYNC, &sh->state); | ||
2610 | update_q = 1; | ||
2611 | } | ||
2612 | } | 2694 | } |
2613 | if (update_p || update_q) { | 2695 | |
2614 | conf->mddev->resync_mismatches += STRIPE_SECTORS; | 2696 | /* discard potentially stale zero_sum_result */ |
2615 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | 2697 | sh->ops.zero_sum_result = 0; |
2616 | /* don't try to repair!! */ | 2698 | |
2617 | update_p = update_q = 0; | 2699 | if (sh->check_state == check_state_run) { |
2700 | /* async_xor_zero_sum destroys the contents of P */ | ||
2701 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); | ||
2702 | s->uptodate--; | ||
2703 | } | ||
2704 | if (sh->check_state >= check_state_run && | ||
2705 | sh->check_state <= check_state_run_pq) { | ||
2706 | /* async_syndrome_zero_sum preserves P and Q, so | ||
2707 | * no need to mark them !uptodate here | ||
2708 | */ | ||
2709 | set_bit(STRIPE_OP_CHECK, &s->ops_request); | ||
2710 | break; | ||
2618 | } | 2711 | } |
2619 | 2712 | ||
2713 | /* we have 2-disk failure */ | ||
2714 | BUG_ON(s->failed != 2); | ||
2715 | /* fall through */ | ||
2716 | case check_state_compute_result: | ||
2717 | sh->check_state = check_state_idle; | ||
2718 | |||
2719 | /* check that a write has not made the stripe insync */ | ||
2720 | if (test_bit(STRIPE_INSYNC, &sh->state)) | ||
2721 | break; | ||
2722 | |||
2620 | /* now write out any block on a failed drive, | 2723 | /* now write out any block on a failed drive, |
2621 | * or P or Q if they need it | 2724 | * or P or Q if they were recomputed |
2622 | */ | 2725 | */ |
2623 | 2726 | BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ | |
2624 | if (s->failed == 2) { | 2727 | if (s->failed == 2) { |
2625 | dev = &sh->dev[r6s->failed_num[1]]; | 2728 | dev = &sh->dev[r6s->failed_num[1]]; |
2626 | s->locked++; | 2729 | s->locked++; |
@@ -2633,14 +2736,13 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, | |||
2633 | set_bit(R5_LOCKED, &dev->flags); | 2736 | set_bit(R5_LOCKED, &dev->flags); |
2634 | set_bit(R5_Wantwrite, &dev->flags); | 2737 | set_bit(R5_Wantwrite, &dev->flags); |
2635 | } | 2738 | } |
2636 | 2739 | if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { | |
2637 | if (update_p) { | ||
2638 | dev = &sh->dev[pd_idx]; | 2740 | dev = &sh->dev[pd_idx]; |
2639 | s->locked++; | 2741 | s->locked++; |
2640 | set_bit(R5_LOCKED, &dev->flags); | 2742 | set_bit(R5_LOCKED, &dev->flags); |
2641 | set_bit(R5_Wantwrite, &dev->flags); | 2743 | set_bit(R5_Wantwrite, &dev->flags); |
2642 | } | 2744 | } |
2643 | if (update_q) { | 2745 | if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { |
2644 | dev = &sh->dev[qd_idx]; | 2746 | dev = &sh->dev[qd_idx]; |
2645 | s->locked++; | 2747 | s->locked++; |
2646 | set_bit(R5_LOCKED, &dev->flags); | 2748 | set_bit(R5_LOCKED, &dev->flags); |
@@ -2649,6 +2751,70 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, | |||
2649 | clear_bit(STRIPE_DEGRADED, &sh->state); | 2751 | clear_bit(STRIPE_DEGRADED, &sh->state); |
2650 | 2752 | ||
2651 | set_bit(STRIPE_INSYNC, &sh->state); | 2753 | set_bit(STRIPE_INSYNC, &sh->state); |
2754 | break; | ||
2755 | case check_state_run: | ||
2756 | case check_state_run_q: | ||
2757 | case check_state_run_pq: | ||
2758 | break; /* we will be called again upon completion */ | ||
2759 | case check_state_check_result: | ||
2760 | sh->check_state = check_state_idle; | ||
2761 | |||
2762 | /* handle a successful check operation, if parity is correct | ||
2763 | * we are done. Otherwise update the mismatch count and repair | ||
2764 | * parity if !MD_RECOVERY_CHECK | ||
2765 | */ | ||
2766 | if (sh->ops.zero_sum_result == 0) { | ||
2767 | /* both parities are correct */ | ||
2768 | if (!s->failed) | ||
2769 | set_bit(STRIPE_INSYNC, &sh->state); | ||
2770 | else { | ||
2771 | /* in contrast to the raid5 case we can validate | ||
2772 | * parity, but still have a failure to write | ||
2773 | * back | ||
2774 | */ | ||
2775 | sh->check_state = check_state_compute_result; | ||
2776 | /* Returning at this point means that we may go | ||
2777 | * off and bring p and/or q uptodate again so | ||
2778 | * we make sure to check zero_sum_result again | ||
2779 | * to verify if p or q need writeback | ||
2780 | */ | ||
2781 | } | ||
2782 | } else { | ||
2783 | conf->mddev->resync_mismatches += STRIPE_SECTORS; | ||
2784 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | ||
2785 | /* don't try to repair!! */ | ||
2786 | set_bit(STRIPE_INSYNC, &sh->state); | ||
2787 | else { | ||
2788 | int *target = &sh->ops.target; | ||
2789 | |||
2790 | sh->ops.target = -1; | ||
2791 | sh->ops.target2 = -1; | ||
2792 | sh->check_state = check_state_compute_run; | ||
2793 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); | ||
2794 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); | ||
2795 | if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { | ||
2796 | set_bit(R5_Wantcompute, | ||
2797 | &sh->dev[pd_idx].flags); | ||
2798 | *target = pd_idx; | ||
2799 | target = &sh->ops.target2; | ||
2800 | s->uptodate++; | ||
2801 | } | ||
2802 | if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { | ||
2803 | set_bit(R5_Wantcompute, | ||
2804 | &sh->dev[qd_idx].flags); | ||
2805 | *target = qd_idx; | ||
2806 | s->uptodate++; | ||
2807 | } | ||
2808 | } | ||
2809 | } | ||
2810 | break; | ||
2811 | case check_state_compute_run: | ||
2812 | break; | ||
2813 | default: | ||
2814 | printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", | ||
2815 | __func__, sh->check_state, | ||
2816 | (unsigned long long) sh->sector); | ||
2817 | BUG(); | ||
2652 | } | 2818 | } |
2653 | } | 2819 | } |
2654 | 2820 | ||
@@ -2666,6 +2832,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, | |||
2666 | if (i != sh->pd_idx && i != sh->qd_idx) { | 2832 | if (i != sh->pd_idx && i != sh->qd_idx) { |
2667 | int dd_idx, j; | 2833 | int dd_idx, j; |
2668 | struct stripe_head *sh2; | 2834 | struct stripe_head *sh2; |
2835 | struct async_submit_ctl submit; | ||
2669 | 2836 | ||
2670 | sector_t bn = compute_blocknr(sh, i, 1); | 2837 | sector_t bn = compute_blocknr(sh, i, 1); |
2671 | sector_t s = raid5_compute_sector(conf, bn, 0, | 2838 | sector_t s = raid5_compute_sector(conf, bn, 0, |
@@ -2685,9 +2852,10 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, | |||
2685 | } | 2852 | } |
2686 | 2853 | ||
2687 | /* place all the copies on one channel */ | 2854 | /* place all the copies on one channel */ |
2855 | init_async_submit(&submit, 0, tx, NULL, NULL, NULL); | ||
2688 | tx = async_memcpy(sh2->dev[dd_idx].page, | 2856 | tx = async_memcpy(sh2->dev[dd_idx].page, |
2689 | sh->dev[i].page, 0, 0, STRIPE_SIZE, | 2857 | sh->dev[i].page, 0, 0, STRIPE_SIZE, |
2690 | ASYNC_TX_DEP_ACK, tx, NULL, NULL); | 2858 | &submit); |
2691 | 2859 | ||
2692 | set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); | 2860 | set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); |
2693 | set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); | 2861 | set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); |
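This hunk shows the async_tx API change the rest of the series relies on: the flags, dependency descriptor and completion callback that used to be trailing arguments of async_memcpy() are now gathered in a struct async_submit_ctl initialised by init_async_submit(). Roughly (the page names here are placeholders):

    struct async_submit_ctl submit;
    struct dma_async_tx_descriptor *tx = NULL;

    /* flags, dependent descriptor, callback, callback arg, scribble buffer */
    init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
    tx = async_memcpy(dest_page, src_page, 0, 0, STRIPE_SIZE, &submit);

Chaining works as before: the descriptor returned by one call is passed as the dependency of the next init_async_submit().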
@@ -2756,7 +2924,8 @@ static bool handle_stripe5(struct stripe_head *sh) | |||
2756 | rcu_read_lock(); | 2924 | rcu_read_lock(); |
2757 | for (i=disks; i--; ) { | 2925 | for (i=disks; i--; ) { |
2758 | mdk_rdev_t *rdev; | 2926 | mdk_rdev_t *rdev; |
2759 | struct r5dev *dev = &sh->dev[i]; | 2927 | |
2928 | dev = &sh->dev[i]; | ||
2760 | clear_bit(R5_Insync, &dev->flags); | 2929 | clear_bit(R5_Insync, &dev->flags); |
2761 | 2930 | ||
2762 | pr_debug("check %d: state 0x%lx toread %p read %p write %p " | 2931 | pr_debug("check %d: state 0x%lx toread %p read %p write %p " |
@@ -2973,7 +3142,7 @@ static bool handle_stripe5(struct stripe_head *sh) | |||
2973 | /* Need to write out all blocks after computing parity */ | 3142 | /* Need to write out all blocks after computing parity */ |
2974 | sh->disks = conf->raid_disks; | 3143 | sh->disks = conf->raid_disks; |
2975 | stripe_set_idx(sh->sector, conf, 0, sh); | 3144 | stripe_set_idx(sh->sector, conf, 0, sh); |
2976 | schedule_reconstruction5(sh, &s, 1, 1); | 3145 | schedule_reconstruction(sh, &s, 1, 1); |
2977 | } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { | 3146 | } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { |
2978 | clear_bit(STRIPE_EXPAND_READY, &sh->state); | 3147 | clear_bit(STRIPE_EXPAND_READY, &sh->state); |
2979 | atomic_dec(&conf->reshape_stripes); | 3148 | atomic_dec(&conf->reshape_stripes); |
@@ -2993,7 +3162,7 @@ static bool handle_stripe5(struct stripe_head *sh) | |||
2993 | md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); | 3162 | md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); |
2994 | 3163 | ||
2995 | if (s.ops_request) | 3164 | if (s.ops_request) |
2996 | raid5_run_ops(sh, s.ops_request); | 3165 | raid_run_ops(sh, s.ops_request); |
2997 | 3166 | ||
2998 | ops_run_io(sh, &s); | 3167 | ops_run_io(sh, &s); |
2999 | 3168 | ||
@@ -3002,7 +3171,7 @@ static bool handle_stripe5(struct stripe_head *sh) | |||
3002 | return blocked_rdev == NULL; | 3171 | return blocked_rdev == NULL; |
3003 | } | 3172 | } |
3004 | 3173 | ||
3005 | static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | 3174 | static bool handle_stripe6(struct stripe_head *sh) |
3006 | { | 3175 | { |
3007 | raid5_conf_t *conf = sh->raid_conf; | 3176 | raid5_conf_t *conf = sh->raid_conf; |
3008 | int disks = sh->disks; | 3177 | int disks = sh->disks; |
@@ -3014,9 +3183,10 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
3014 | mdk_rdev_t *blocked_rdev = NULL; | 3183 | mdk_rdev_t *blocked_rdev = NULL; |
3015 | 3184 | ||
3016 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, " | 3185 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, " |
3017 | "pd_idx=%d, qd_idx=%d\n", | 3186 | "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n", |
3018 | (unsigned long long)sh->sector, sh->state, | 3187 | (unsigned long long)sh->sector, sh->state, |
3019 | atomic_read(&sh->count), pd_idx, qd_idx); | 3188 | atomic_read(&sh->count), pd_idx, qd_idx, |
3189 | sh->check_state, sh->reconstruct_state); | ||
3020 | memset(&s, 0, sizeof(s)); | 3190 | memset(&s, 0, sizeof(s)); |
3021 | 3191 | ||
3022 | spin_lock(&sh->lock); | 3192 | spin_lock(&sh->lock); |
@@ -3036,35 +3206,26 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
3036 | 3206 | ||
3037 | pr_debug("check %d: state 0x%lx read %p write %p written %p\n", | 3207 | pr_debug("check %d: state 0x%lx read %p write %p written %p\n", |
3038 | i, dev->flags, dev->toread, dev->towrite, dev->written); | 3208 | i, dev->flags, dev->toread, dev->towrite, dev->written); |
3039 | /* maybe we can reply to a read */ | 3209 | /* maybe we can reply to a read |
3040 | if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { | 3210 | * |
3041 | struct bio *rbi, *rbi2; | 3211 | * new wantfill requests are only permitted while |
3042 | pr_debug("Return read for disc %d\n", i); | 3212 | * ops_complete_biofill is guaranteed to be inactive |
3043 | spin_lock_irq(&conf->device_lock); | 3213 | */ |
3044 | rbi = dev->toread; | 3214 | if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && |
3045 | dev->toread = NULL; | 3215 | !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) |
3046 | if (test_and_clear_bit(R5_Overlap, &dev->flags)) | 3216 | set_bit(R5_Wantfill, &dev->flags); |
3047 | wake_up(&conf->wait_for_overlap); | ||
3048 | spin_unlock_irq(&conf->device_lock); | ||
3049 | while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { | ||
3050 | copy_data(0, rbi, dev->page, dev->sector); | ||
3051 | rbi2 = r5_next_bio(rbi, dev->sector); | ||
3052 | spin_lock_irq(&conf->device_lock); | ||
3053 | if (!raid5_dec_bi_phys_segments(rbi)) { | ||
3054 | rbi->bi_next = return_bi; | ||
3055 | return_bi = rbi; | ||
3056 | } | ||
3057 | spin_unlock_irq(&conf->device_lock); | ||
3058 | rbi = rbi2; | ||
3059 | } | ||
3060 | } | ||
3061 | 3217 | ||
3062 | /* now count some things */ | 3218 | /* now count some things */ |
3063 | if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; | 3219 | if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; |
3064 | if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; | 3220 | if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; |
3221 | if (test_bit(R5_Wantcompute, &dev->flags)) { | ||
3222 | s.compute++; | ||
3223 | BUG_ON(s.compute > 2); | ||
3224 | } | ||
3065 | 3225 | ||
3066 | 3226 | if (test_bit(R5_Wantfill, &dev->flags)) { | |
3067 | if (dev->toread) | 3227 | s.to_fill++; |
3228 | } else if (dev->toread) | ||
3068 | s.to_read++; | 3229 | s.to_read++; |
3069 | if (dev->towrite) { | 3230 | if (dev->towrite) { |
3070 | s.to_write++; | 3231 | s.to_write++; |
@@ -3105,6 +3266,11 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
3105 | blocked_rdev = NULL; | 3266 | blocked_rdev = NULL; |
3106 | } | 3267 | } |
3107 | 3268 | ||
3269 | if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { | ||
3270 | set_bit(STRIPE_OP_BIOFILL, &s.ops_request); | ||
3271 | set_bit(STRIPE_BIOFILL_RUN, &sh->state); | ||
3272 | } | ||
3273 | |||
3108 | pr_debug("locked=%d uptodate=%d to_read=%d" | 3274 | pr_debug("locked=%d uptodate=%d to_read=%d" |
3109 | " to_write=%d failed=%d failed_num=%d,%d\n", | 3275 | " to_write=%d failed=%d failed_num=%d,%d\n", |
3110 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, | 3276 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, |
@@ -3145,19 +3311,62 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
3145 | * or to load a block that is being partially written. | 3311 | * or to load a block that is being partially written. |
3146 | */ | 3312 | */ |
3147 | if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || | 3313 | if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || |
3148 | (s.syncing && (s.uptodate < disks)) || s.expanding) | 3314 | (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) |
3149 | handle_stripe_fill6(sh, &s, &r6s, disks); | 3315 | handle_stripe_fill6(sh, &s, &r6s, disks); |
3150 | 3316 | ||
3151 | /* now to consider writing and what else, if anything should be read */ | 3317 | /* Now we check to see if any write operations have recently |
3152 | if (s.to_write) | 3318 | * completed |
3319 | */ | ||
3320 | if (sh->reconstruct_state == reconstruct_state_drain_result) { | ||
3321 | int qd_idx = sh->qd_idx; | ||
3322 | |||
3323 | sh->reconstruct_state = reconstruct_state_idle; | ||
3324 | /* All the 'written' buffers and the parity blocks are ready to | ||
3325 | * be written back to disk | ||
3326 | */ | ||
3327 | BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); | ||
3328 | BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags)); | ||
3329 | for (i = disks; i--; ) { | ||
3330 | dev = &sh->dev[i]; | ||
3331 | if (test_bit(R5_LOCKED, &dev->flags) && | ||
3332 | (i == sh->pd_idx || i == qd_idx || | ||
3333 | dev->written)) { | ||
3334 | pr_debug("Writing block %d\n", i); | ||
3335 | BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); | ||
3336 | set_bit(R5_Wantwrite, &dev->flags); | ||
3337 | if (!test_bit(R5_Insync, &dev->flags) || | ||
3338 | ((i == sh->pd_idx || i == qd_idx) && | ||
3339 | s.failed == 0)) | ||
3340 | set_bit(STRIPE_INSYNC, &sh->state); | ||
3341 | } | ||
3342 | } | ||
3343 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { | ||
3344 | atomic_dec(&conf->preread_active_stripes); | ||
3345 | if (atomic_read(&conf->preread_active_stripes) < | ||
3346 | IO_THRESHOLD) | ||
3347 | md_wakeup_thread(conf->mddev->thread); | ||
3348 | } | ||
3349 | } | ||
3350 | |||
3351 | /* Now to consider new write requests and what else, if anything | ||
3352 | * should be read. We do not handle new writes when: | ||
3353 | * 1/ A 'write' operation (copy+gen_syndrome) is already in flight. | ||
3354 | * 2/ A 'check' operation is in flight, as it may clobber the parity | ||
3355 | * block. | ||
3356 | */ | ||
3357 | if (s.to_write && !sh->reconstruct_state && !sh->check_state) | ||
3153 | handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); | 3358 | handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); |
3154 | 3359 | ||
3155 | /* maybe we need to check and possibly fix the parity for this stripe | 3360 | /* maybe we need to check and possibly fix the parity for this stripe |
3156 | * Any reads will already have been scheduled, so we just see if enough | 3361 | * Any reads will already have been scheduled, so we just see if enough |
3157 | * data is available | 3362 | * data is available. The parity check is held off while parity |
3363 | * dependent operations are in flight. | ||
3158 | */ | 3364 | */ |
3159 | if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) | 3365 | if (sh->check_state || |
3160 | handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); | 3366 | (s.syncing && s.locked == 0 && |
3367 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && | ||
3368 | !test_bit(STRIPE_INSYNC, &sh->state))) | ||
3369 | handle_parity_checks6(conf, sh, &s, &r6s, disks); | ||
3161 | 3370 | ||
3162 | if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { | 3371 | if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { |
3163 | md_done_sync(conf->mddev, STRIPE_SECTORS,1); | 3372 | md_done_sync(conf->mddev, STRIPE_SECTORS,1); |
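As the new comment spells out, handle_stripe6() now defers new writes while a reconstruct (copy + gen_syndrome) or a parity check is in flight, since either could clobber the parity blocks being produced. Condensed into a predicate (illustrative only, not a helper in the patch):

    /* May we schedule a new write on this stripe right now? */
    static inline int can_start_new_write(const struct stripe_head *sh,
                                          const struct stripe_head_state *s)
    {
            return s->to_write &&                   /* something queued to write */
                   !sh->reconstruct_state &&        /* no drain/gen_syndrome running */
                   !sh->check_state;                /* no check that could race */
    }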
@@ -3178,15 +3387,29 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
3178 | set_bit(R5_Wantwrite, &dev->flags); | 3387 | set_bit(R5_Wantwrite, &dev->flags); |
3179 | set_bit(R5_ReWrite, &dev->flags); | 3388 | set_bit(R5_ReWrite, &dev->flags); |
3180 | set_bit(R5_LOCKED, &dev->flags); | 3389 | set_bit(R5_LOCKED, &dev->flags); |
3390 | s.locked++; | ||
3181 | } else { | 3391 | } else { |
3182 | /* let's read it back */ | 3392 | /* let's read it back */ |
3183 | set_bit(R5_Wantread, &dev->flags); | 3393 | set_bit(R5_Wantread, &dev->flags); |
3184 | set_bit(R5_LOCKED, &dev->flags); | 3394 | set_bit(R5_LOCKED, &dev->flags); |
3395 | s.locked++; | ||
3185 | } | 3396 | } |
3186 | } | 3397 | } |
3187 | } | 3398 | } |
3188 | 3399 | ||
3189 | if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { | 3400 | /* Finish reconstruct operations initiated by the expansion process */ |
3401 | if (sh->reconstruct_state == reconstruct_state_result) { | ||
3402 | sh->reconstruct_state = reconstruct_state_idle; | ||
3403 | clear_bit(STRIPE_EXPANDING, &sh->state); | ||
3404 | for (i = conf->raid_disks; i--; ) { | ||
3405 | set_bit(R5_Wantwrite, &sh->dev[i].flags); | ||
3406 | set_bit(R5_LOCKED, &sh->dev[i].flags); | ||
3407 | s.locked++; | ||
3408 | } | ||
3409 | } | ||
3410 | |||
3411 | if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && | ||
3412 | !sh->reconstruct_state) { | ||
3190 | struct stripe_head *sh2 | 3413 | struct stripe_head *sh2 |
3191 | = get_active_stripe(conf, sh->sector, 1, 1, 1); | 3414 | = get_active_stripe(conf, sh->sector, 1, 1, 1); |
3192 | if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { | 3415 | if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { |
@@ -3207,14 +3430,8 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
3207 | /* Need to write out all blocks after computing P&Q */ | 3430 | /* Need to write out all blocks after computing P&Q */ |
3208 | sh->disks = conf->raid_disks; | 3431 | sh->disks = conf->raid_disks; |
3209 | stripe_set_idx(sh->sector, conf, 0, sh); | 3432 | stripe_set_idx(sh->sector, conf, 0, sh); |
3210 | compute_parity6(sh, RECONSTRUCT_WRITE); | 3433 | schedule_reconstruction(sh, &s, 1, 1); |
3211 | for (i = conf->raid_disks ; i-- ; ) { | 3434 | } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { |
3212 | set_bit(R5_LOCKED, &sh->dev[i].flags); | ||
3213 | s.locked++; | ||
3214 | set_bit(R5_Wantwrite, &sh->dev[i].flags); | ||
3215 | } | ||
3216 | clear_bit(STRIPE_EXPANDING, &sh->state); | ||
3217 | } else if (s.expanded) { | ||
3218 | clear_bit(STRIPE_EXPAND_READY, &sh->state); | 3435 | clear_bit(STRIPE_EXPAND_READY, &sh->state); |
3219 | atomic_dec(&conf->reshape_stripes); | 3436 | atomic_dec(&conf->reshape_stripes); |
3220 | wake_up(&conf->wait_for_overlap); | 3437 | wake_up(&conf->wait_for_overlap); |
@@ -3232,6 +3449,9 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
3232 | if (unlikely(blocked_rdev)) | 3449 | if (unlikely(blocked_rdev)) |
3233 | md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); | 3450 | md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); |
3234 | 3451 | ||
3452 | if (s.ops_request) | ||
3453 | raid_run_ops(sh, s.ops_request); | ||
3454 | |||
3235 | ops_run_io(sh, &s); | 3455 | ops_run_io(sh, &s); |
3236 | 3456 | ||
3237 | return_io(return_bi); | 3457 | return_io(return_bi); |
@@ -3240,16 +3460,14 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
3240 | } | 3460 | } |
3241 | 3461 | ||
3242 | /* returns true if the stripe was handled */ | 3462 | /* returns true if the stripe was handled */ |
3243 | static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page) | 3463 | static bool handle_stripe(struct stripe_head *sh) |
3244 | { | 3464 | { |
3245 | if (sh->raid_conf->level == 6) | 3465 | if (sh->raid_conf->level == 6) |
3246 | return handle_stripe6(sh, tmp_page); | 3466 | return handle_stripe6(sh); |
3247 | else | 3467 | else |
3248 | return handle_stripe5(sh); | 3468 | return handle_stripe5(sh); |
3249 | } | 3469 | } |
3250 | 3470 | ||
3251 | |||
3252 | |||
3253 | static void raid5_activate_delayed(raid5_conf_t *conf) | 3471 | static void raid5_activate_delayed(raid5_conf_t *conf) |
3254 | { | 3472 | { |
3255 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { | 3473 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { |
@@ -3331,6 +3549,9 @@ static int raid5_congested(void *data, int bits) | |||
3331 | /* No difference between reads and writes. Just check | 3549 | /* No difference between reads and writes. Just check |
3332 | * how busy the stripe_cache is | 3550 | * how busy the stripe_cache is |
3333 | */ | 3551 | */ |
3552 | |||
3553 | if (mddev_congested(mddev, bits)) | ||
3554 | return 1; | ||
3334 | if (conf->inactive_blocked) | 3555 | if (conf->inactive_blocked) |
3335 | return 1; | 3556 | return 1; |
3336 | if (conf->quiesce) | 3557 | if (conf->quiesce) |
@@ -3880,7 +4101,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3880 | INIT_LIST_HEAD(&stripes); | 4101 | INIT_LIST_HEAD(&stripes); |
3881 | for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { | 4102 | for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { |
3882 | int j; | 4103 | int j; |
3883 | int skipped = 0; | 4104 | int skipped_disk = 0; |
3884 | sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); | 4105 | sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); |
3885 | set_bit(STRIPE_EXPANDING, &sh->state); | 4106 | set_bit(STRIPE_EXPANDING, &sh->state); |
3886 | atomic_inc(&conf->reshape_stripes); | 4107 | atomic_inc(&conf->reshape_stripes); |
@@ -3896,14 +4117,14 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3896 | continue; | 4117 | continue; |
3897 | s = compute_blocknr(sh, j, 0); | 4118 | s = compute_blocknr(sh, j, 0); |
3898 | if (s < raid5_size(mddev, 0, 0)) { | 4119 | if (s < raid5_size(mddev, 0, 0)) { |
3899 | skipped = 1; | 4120 | skipped_disk = 1; |
3900 | continue; | 4121 | continue; |
3901 | } | 4122 | } |
3902 | memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); | 4123 | memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); |
3903 | set_bit(R5_Expanded, &sh->dev[j].flags); | 4124 | set_bit(R5_Expanded, &sh->dev[j].flags); |
3904 | set_bit(R5_UPTODATE, &sh->dev[j].flags); | 4125 | set_bit(R5_UPTODATE, &sh->dev[j].flags); |
3905 | } | 4126 | } |
3906 | if (!skipped) { | 4127 | if (!skipped_disk) { |
3907 | set_bit(STRIPE_EXPAND_READY, &sh->state); | 4128 | set_bit(STRIPE_EXPAND_READY, &sh->state); |
3908 | set_bit(STRIPE_HANDLE, &sh->state); | 4129 | set_bit(STRIPE_HANDLE, &sh->state); |
3909 | } | 4130 | } |
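Renaming the local from skipped to skipped_disk removes the shadowing of the int *skipped argument visible in the hunk header above, so the per-disk flag can no longer be confused with the caller's skip count.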
@@ -4057,7 +4278,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
4057 | spin_unlock(&sh->lock); | 4278 | spin_unlock(&sh->lock); |
4058 | 4279 | ||
4059 | /* wait for any blocked device to be handled */ | 4280 | /* wait for any blocked device to be handled */ |
4060 | while(unlikely(!handle_stripe(sh, NULL))) | 4281 | while (unlikely(!handle_stripe(sh))) |
4061 | ; | 4282 | ; |
4062 | release_stripe(sh); | 4283 | release_stripe(sh); |
4063 | 4284 | ||
@@ -4114,7 +4335,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) | |||
4114 | return handled; | 4335 | return handled; |
4115 | } | 4336 | } |
4116 | 4337 | ||
4117 | handle_stripe(sh, NULL); | 4338 | handle_stripe(sh); |
4118 | release_stripe(sh); | 4339 | release_stripe(sh); |
4119 | handled++; | 4340 | handled++; |
4120 | } | 4341 | } |
@@ -4128,6 +4349,36 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) | |||
4128 | return handled; | 4349 | return handled; |
4129 | } | 4350 | } |
4130 | 4351 | ||
4352 | #ifdef CONFIG_MULTICORE_RAID456 | ||
4353 | static void __process_stripe(void *param, async_cookie_t cookie) | ||
4354 | { | ||
4355 | struct stripe_head *sh = param; | ||
4356 | |||
4357 | handle_stripe(sh); | ||
4358 | release_stripe(sh); | ||
4359 | } | ||
4360 | |||
4361 | static void process_stripe(struct stripe_head *sh, struct list_head *domain) | ||
4362 | { | ||
4363 | async_schedule_domain(__process_stripe, sh, domain); | ||
4364 | } | ||
4365 | |||
4366 | static void synchronize_stripe_processing(struct list_head *domain) | ||
4367 | { | ||
4368 | async_synchronize_full_domain(domain); | ||
4369 | } | ||
4370 | #else | ||
4371 | static void process_stripe(struct stripe_head *sh, struct list_head *domain) | ||
4372 | { | ||
4373 | handle_stripe(sh); | ||
4374 | release_stripe(sh); | ||
4375 | cond_resched(); | ||
4376 | } | ||
4377 | |||
4378 | static void synchronize_stripe_processing(struct list_head *domain) | ||
4379 | { | ||
4380 | } | ||
4381 | #endif | ||
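Under CONFIG_MULTICORE_RAID456 each stripe is handed to the async machinery so several stripes can be processed on different CPUs, and raid5d() later waits for exactly that batch via a per-call domain list. The two calls come from include/linux/async.h; usage as in the hunk above:

    LIST_HEAD(raid_domain);         /* one synchronization domain per raid5d() pass */

    /* queue the work item; it runs __process_stripe(sh, cookie) on some CPU */
    async_schedule_domain(__process_stripe, sh, &raid_domain);

    /* wait only for items queued on this domain, not for all async work */
    async_synchronize_full_domain(&raid_domain);

The non-multicore fallback keeps the old synchronous behaviour, with a cond_resched() so long stripe lists do not hog the CPU.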
4131 | 4382 | ||
4132 | 4383 | ||
4133 | /* | 4384 | /* |
@@ -4142,6 +4393,7 @@ static void raid5d(mddev_t *mddev) | |||
4142 | struct stripe_head *sh; | 4393 | struct stripe_head *sh; |
4143 | raid5_conf_t *conf = mddev->private; | 4394 | raid5_conf_t *conf = mddev->private; |
4144 | int handled; | 4395 | int handled; |
4396 | LIST_HEAD(raid_domain); | ||
4145 | 4397 | ||
4146 | pr_debug("+++ raid5d active\n"); | 4398 | pr_debug("+++ raid5d active\n"); |
4147 | 4399 | ||
@@ -4178,8 +4430,7 @@ static void raid5d(mddev_t *mddev) | |||
4178 | spin_unlock_irq(&conf->device_lock); | 4430 | spin_unlock_irq(&conf->device_lock); |
4179 | 4431 | ||
4180 | handled++; | 4432 | handled++; |
4181 | handle_stripe(sh, conf->spare_page); | 4433 | process_stripe(sh, &raid_domain); |
4182 | release_stripe(sh); | ||
4183 | 4434 | ||
4184 | spin_lock_irq(&conf->device_lock); | 4435 | spin_lock_irq(&conf->device_lock); |
4185 | } | 4436 | } |
@@ -4187,6 +4438,7 @@ static void raid5d(mddev_t *mddev) | |||
4187 | 4438 | ||
4188 | spin_unlock_irq(&conf->device_lock); | 4439 | spin_unlock_irq(&conf->device_lock); |
4189 | 4440 | ||
4441 | synchronize_stripe_processing(&raid_domain); | ||
4190 | async_tx_issue_pending_all(); | 4442 | async_tx_issue_pending_all(); |
4191 | unplug_slaves(mddev); | 4443 | unplug_slaves(mddev); |
4192 | 4444 | ||
@@ -4319,15 +4571,118 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
4319 | return sectors * (raid_disks - conf->max_degraded); | 4571 | return sectors * (raid_disks - conf->max_degraded); |
4320 | } | 4572 | } |
4321 | 4573 | ||
4574 | static void raid5_free_percpu(raid5_conf_t *conf) | ||
4575 | { | ||
4576 | struct raid5_percpu *percpu; | ||
4577 | unsigned long cpu; | ||
4578 | |||
4579 | if (!conf->percpu) | ||
4580 | return; | ||
4581 | |||
4582 | get_online_cpus(); | ||
4583 | for_each_possible_cpu(cpu) { | ||
4584 | percpu = per_cpu_ptr(conf->percpu, cpu); | ||
4585 | safe_put_page(percpu->spare_page); | ||
4586 | kfree(percpu->scribble); | ||
4587 | } | ||
4588 | #ifdef CONFIG_HOTPLUG_CPU | ||
4589 | unregister_cpu_notifier(&conf->cpu_notify); | ||
4590 | #endif | ||
4591 | put_online_cpus(); | ||
4592 | |||
4593 | free_percpu(conf->percpu); | ||
4594 | } | ||
4595 | |||
4322 | static void free_conf(raid5_conf_t *conf) | 4596 | static void free_conf(raid5_conf_t *conf) |
4323 | { | 4597 | { |
4324 | shrink_stripes(conf); | 4598 | shrink_stripes(conf); |
4325 | safe_put_page(conf->spare_page); | 4599 | raid5_free_percpu(conf); |
4326 | kfree(conf->disks); | 4600 | kfree(conf->disks); |
4327 | kfree(conf->stripe_hashtbl); | 4601 | kfree(conf->stripe_hashtbl); |
4328 | kfree(conf); | 4602 | kfree(conf); |
4329 | } | 4603 | } |
4330 | 4604 | ||
4605 | #ifdef CONFIG_HOTPLUG_CPU | ||
4606 | static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | ||
4607 | void *hcpu) | ||
4608 | { | ||
4609 | raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify); | ||
4610 | long cpu = (long)hcpu; | ||
4611 | struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); | ||
4612 | |||
4613 | switch (action) { | ||
4614 | case CPU_UP_PREPARE: | ||
4615 | case CPU_UP_PREPARE_FROZEN: | ||
4616 | if (conf->level == 6 && !percpu->spare_page) | ||
4617 | percpu->spare_page = alloc_page(GFP_KERNEL); | ||
4618 | if (!percpu->scribble) | ||
4619 | percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); | ||
4620 | |||
4621 | if (!percpu->scribble || | ||
4622 | (conf->level == 6 && !percpu->spare_page)) { | ||
4623 | safe_put_page(percpu->spare_page); | ||
4624 | kfree(percpu->scribble); | ||
4625 | pr_err("%s: failed memory allocation for cpu%ld\n", | ||
4626 | __func__, cpu); | ||
4627 | return NOTIFY_BAD; | ||
4628 | } | ||
4629 | break; | ||
4630 | case CPU_DEAD: | ||
4631 | case CPU_DEAD_FROZEN: | ||
4632 | safe_put_page(percpu->spare_page); | ||
4633 | kfree(percpu->scribble); | ||
4634 | percpu->spare_page = NULL; | ||
4635 | percpu->scribble = NULL; | ||
4636 | break; | ||
4637 | default: | ||
4638 | break; | ||
4639 | } | ||
4640 | return NOTIFY_OK; | ||
4641 | } | ||
4642 | #endif | ||
4643 | |||
4644 | static int raid5_alloc_percpu(raid5_conf_t *conf) | ||
4645 | { | ||
4646 | unsigned long cpu; | ||
4647 | struct page *spare_page; | ||
4648 | struct raid5_percpu *allcpus; | ||
4649 | void *scribble; | ||
4650 | int err; | ||
4651 | |||
4652 | allcpus = alloc_percpu(struct raid5_percpu); | ||
4653 | if (!allcpus) | ||
4654 | return -ENOMEM; | ||
4655 | conf->percpu = allcpus; | ||
4656 | |||
4657 | get_online_cpus(); | ||
4658 | err = 0; | ||
4659 | for_each_present_cpu(cpu) { | ||
4660 | if (conf->level == 6) { | ||
4661 | spare_page = alloc_page(GFP_KERNEL); | ||
4662 | if (!spare_page) { | ||
4663 | err = -ENOMEM; | ||
4664 | break; | ||
4665 | } | ||
4666 | per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; | ||
4667 | } | ||
4668 | scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL); | ||
4669 | if (!scribble) { | ||
4670 | err = -ENOMEM; | ||
4671 | break; | ||
4672 | } | ||
4673 | per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; | ||
4674 | } | ||
4675 | #ifdef CONFIG_HOTPLUG_CPU | ||
4676 | conf->cpu_notify.notifier_call = raid456_cpu_notify; | ||
4677 | conf->cpu_notify.priority = 0; | ||
4678 | if (err == 0) | ||
4679 | err = register_cpu_notifier(&conf->cpu_notify); | ||
4680 | #endif | ||
4681 | put_online_cpus(); | ||
4682 | |||
4683 | return err; | ||
4684 | } | ||
4685 | |||
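raid5_alloc_percpu() pairs a sweep over the CPUs present at setup time with a hotplug notifier, so CPUs that come online later also get a spare page (raid6 only) and a scribble buffer, and CPUs that go offline release theirs. Stripped to its skeleton (setup_one_cpu() is an illustrative placeholder for the per-CPU allocations done above):

    conf->percpu = alloc_percpu(struct raid5_percpu);

    get_online_cpus();                      /* hold off hotplug while walking CPUs */
    for_each_present_cpu(cpu)
            setup_one_cpu(per_cpu_ptr(conf->percpu, cpu));

    conf->cpu_notify.notifier_call = raid456_cpu_notify;
    register_cpu_notifier(&conf->cpu_notify);       /* covers CPUs added later */
    put_online_cpus();

raid5_free_percpu() undoes the same in reverse: unregister the notifier, then free every CPU's resources and the percpu area itself.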
4331 | static raid5_conf_t *setup_conf(mddev_t *mddev) | 4686 | static raid5_conf_t *setup_conf(mddev_t *mddev) |
4332 | { | 4687 | { |
4333 | raid5_conf_t *conf; | 4688 | raid5_conf_t *conf; |
@@ -4369,6 +4724,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4369 | goto abort; | 4724 | goto abort; |
4370 | 4725 | ||
4371 | conf->raid_disks = mddev->raid_disks; | 4726 | conf->raid_disks = mddev->raid_disks; |
4727 | conf->scribble_len = scribble_len(conf->raid_disks); | ||
4372 | if (mddev->reshape_position == MaxSector) | 4728 | if (mddev->reshape_position == MaxSector) |
4373 | conf->previous_raid_disks = mddev->raid_disks; | 4729 | conf->previous_raid_disks = mddev->raid_disks; |
4374 | else | 4730 | else |
@@ -4384,11 +4740,10 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4384 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) | 4740 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) |
4385 | goto abort; | 4741 | goto abort; |
4386 | 4742 | ||
4387 | if (mddev->new_level == 6) { | 4743 | conf->level = mddev->new_level; |
4388 | conf->spare_page = alloc_page(GFP_KERNEL); | 4744 | if (raid5_alloc_percpu(conf) != 0) |
4389 | if (!conf->spare_page) | 4745 | goto abort; |
4390 | goto abort; | 4746 | |
4391 | } | ||
4392 | spin_lock_init(&conf->device_lock); | 4747 | spin_lock_init(&conf->device_lock); |
4393 | init_waitqueue_head(&conf->wait_for_stripe); | 4748 | init_waitqueue_head(&conf->wait_for_stripe); |
4394 | init_waitqueue_head(&conf->wait_for_overlap); | 4749 | init_waitqueue_head(&conf->wait_for_overlap); |
@@ -4447,7 +4802,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4447 | printk(KERN_INFO "raid5: allocated %dkB for %s\n", | 4802 | printk(KERN_INFO "raid5: allocated %dkB for %s\n", |
4448 | memory, mdname(mddev)); | 4803 | memory, mdname(mddev)); |
4449 | 4804 | ||
4450 | conf->thread = md_register_thread(raid5d, mddev, "%s_raid5"); | 4805 | conf->thread = md_register_thread(raid5d, mddev, NULL); |
4451 | if (!conf->thread) { | 4806 | if (!conf->thread) { |
4452 | printk(KERN_ERR | 4807 | printk(KERN_ERR |
4453 | "raid5: couldn't allocate thread for %s\n", | 4808 | "raid5: couldn't allocate thread for %s\n", |
@@ -4613,7 +4968,7 @@ static int run(mddev_t *mddev) | |||
4613 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | 4968 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
4614 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 4969 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
4615 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, | 4970 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
4616 | "%s_reshape"); | 4971 | "reshape"); |
4617 | } | 4972 | } |
4618 | 4973 | ||
4619 | /* read-ahead size must cover two whole stripes, which is | 4974 | /* read-ahead size must cover two whole stripes, which is |
@@ -5031,7 +5386,7 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
5031 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | 5386 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
5032 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 5387 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
5033 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, | 5388 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
5034 | "%s_reshape"); | 5389 | "reshape"); |
5035 | if (!mddev->sync_thread) { | 5390 | if (!mddev->sync_thread) { |
5036 | mddev->recovery = 0; | 5391 | mddev->recovery = 0; |
5037 | spin_lock_irq(&conf->device_lock); | 5392 | spin_lock_irq(&conf->device_lock); |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 9459689c4ea0..2390e0e83daf 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _RAID5_H | 2 | #define _RAID5_H |
3 | 3 | ||
4 | #include <linux/raid/xor.h> | 4 | #include <linux/raid/xor.h> |
5 | #include <linux/dmaengine.h> | ||
5 | 6 | ||
6 | /* | 7 | /* |
7 | * | 8 | * |
@@ -175,7 +176,9 @@ | |||
175 | */ | 176 | */ |
176 | enum check_states { | 177 | enum check_states { |
177 | check_state_idle = 0, | 178 | check_state_idle = 0, |
178 | check_state_run, /* parity check */ | 179 | check_state_run, /* xor parity check */ |
180 | check_state_run_q, /* q-parity check */ | ||
181 | check_state_run_pq, /* pq dual parity check */ | ||
179 | check_state_check_result, | 182 | check_state_check_result, |
180 | check_state_compute_run, /* parity repair */ | 183 | check_state_compute_run, /* parity repair */ |
181 | check_state_compute_result, | 184 | check_state_compute_result, |
@@ -215,8 +218,8 @@ struct stripe_head { | |||
215 | * @target - STRIPE_OP_COMPUTE_BLK target | 218 | * @target - STRIPE_OP_COMPUTE_BLK target |
216 | */ | 219 | */ |
217 | struct stripe_operations { | 220 | struct stripe_operations { |
218 | int target; | 221 | int target, target2; |
219 | u32 zero_sum_result; | 222 | enum sum_check_flags zero_sum_result; |
220 | } ops; | 223 | } ops; |
221 | struct r5dev { | 224 | struct r5dev { |
222 | struct bio req; | 225 | struct bio req; |
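Switching zero_sum_result from u32 to enum sum_check_flags lets one field report P and Q mismatches independently, which is what the raid6 check path tests bit by bit. The flags come from the async_tx layer this series builds on, roughly as below (see include/linux/async_tx.h for the authoritative definition):

    enum sum_check_bits {
            SUM_CHECK_P = 0,        /* xor parity (P) mismatch */
            SUM_CHECK_Q = 1,        /* reed-solomon syndrome (Q) mismatch */
    };

    enum sum_check_flags {
            SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
            SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
    };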
@@ -298,7 +301,7 @@ struct r6_state { | |||
298 | #define STRIPE_OP_COMPUTE_BLK 1 | 301 | #define STRIPE_OP_COMPUTE_BLK 1 |
299 | #define STRIPE_OP_PREXOR 2 | 302 | #define STRIPE_OP_PREXOR 2 |
300 | #define STRIPE_OP_BIODRAIN 3 | 303 | #define STRIPE_OP_BIODRAIN 3 |
301 | #define STRIPE_OP_POSTXOR 4 | 304 | #define STRIPE_OP_RECONSTRUCT 4 |
302 | #define STRIPE_OP_CHECK 5 | 305 | #define STRIPE_OP_CHECK 5 |
303 | 306 | ||
304 | /* | 307 | /* |
@@ -385,8 +388,21 @@ struct raid5_private_data { | |||
385 | * (fresh device added). | 388 | * (fresh device added). |
386 | * Cleared when a sync completes. | 389 | * Cleared when a sync completes. |
387 | */ | 390 | */ |
388 | 391 | /* per cpu variables */ | |
389 | struct page *spare_page; /* Used when checking P/Q in raid6 */ | 392 | struct raid5_percpu { |
393 | struct page *spare_page; /* Used when checking P/Q in raid6 */ | ||
394 | void *scribble; /* space for constructing buffer | ||
395 | * lists and performing address | ||
396 | * conversions | ||
397 | */ | ||
398 | } *percpu; | ||
399 | size_t scribble_len; /* size of scribble region must be | ||
400 | * associated with conf to handle | ||
401 | * cpu hotplug while reshaping | ||
402 | */ | ||
403 | #ifdef CONFIG_HOTPLUG_CPU | ||
404 | struct notifier_block cpu_notify; | ||
405 | #endif | ||
390 | 406 | ||
391 | /* | 407 | /* |
392 | * Free stripes pool | 408 | * Free stripes pool |
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h index 895e2efca8a9..01fc70484743 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.h +++ b/drivers/media/dvb/dvb-core/dvbdev.h | |||
@@ -31,10 +31,9 @@ | |||
31 | #define DVB_MAJOR 212 | 31 | #define DVB_MAJOR 212 |
32 | 32 | ||
33 | #if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0 | 33 | #if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0 |
34 | #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS | 34 | #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS |
35 | #else | 35 | #else |
36 | #warning invalid CONFIG_DVB_MAX_ADAPTERS value | 36 | #define DVB_MAX_ADAPTERS 8 |
37 | #define DVB_MAX_ADAPTERS 8 | ||
38 | #endif | 37 | #endif |
39 | 38 | ||
40 | #define DVB_UNSET (-1) | 39 | #define DVB_UNSET (-1) |
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig index 0e4b97fba384..9744b0692417 100644 --- a/drivers/media/dvb/dvb-usb/Kconfig +++ b/drivers/media/dvb/dvb-usb/Kconfig | |||
@@ -75,7 +75,7 @@ config DVB_USB_DIB0700 | |||
75 | select DVB_DIB3000MC if !DVB_FE_CUSTOMISE | 75 | select DVB_DIB3000MC if !DVB_FE_CUSTOMISE |
76 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE | 76 | select DVB_S5H1411 if !DVB_FE_CUSTOMISE |
77 | select DVB_LGDT3305 if !DVB_FE_CUSTOMISE | 77 | select DVB_LGDT3305 if !DVB_FE_CUSTOMISE |
78 | select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE | 78 | select DVB_TUNER_DIB0070 |
79 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMISE | 79 | select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMISE |
80 | select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE | 80 | select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE |
81 | select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE | 81 | select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE |
diff --git a/drivers/media/video/saa7164/saa7164-api.c b/drivers/media/video/saa7164/saa7164-api.c index bb6df1b276be..6f094a96ac81 100644 --- a/drivers/media/video/saa7164/saa7164-api.c +++ b/drivers/media/video/saa7164/saa7164-api.c | |||
@@ -415,7 +415,7 @@ int saa7164_api_enum_subdevs(struct saa7164_dev *dev) | |||
415 | goto out; | 415 | goto out; |
416 | } | 416 | } |
417 | 417 | ||
418 | if (debug & DBGLVL_API) | 418 | if (saa_debug & DBGLVL_API) |
419 | saa7164_dumphex16(dev, buf, (buflen/16)*16); | 419 | saa7164_dumphex16(dev, buf, (buflen/16)*16); |
420 | 420 | ||
421 | saa7164_api_dump_subdevs(dev, buf, buflen); | 421 | saa7164_api_dump_subdevs(dev, buf, buflen); |
@@ -480,7 +480,7 @@ int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg, | |||
480 | 480 | ||
481 | dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len); | 481 | dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len); |
482 | 482 | ||
483 | if (debug & DBGLVL_I2C) | 483 | if (saa_debug & DBGLVL_I2C) |
484 | saa7164_dumphex16(dev, buf, 2 * 16); | 484 | saa7164_dumphex16(dev, buf, 2 * 16); |
485 | 485 | ||
486 | ret = saa7164_cmd_send(bus->dev, unitid, GET_CUR, | 486 | ret = saa7164_cmd_send(bus->dev, unitid, GET_CUR, |
@@ -488,7 +488,7 @@ int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg, | |||
488 | if (ret != SAA_OK) | 488 | if (ret != SAA_OK) |
489 | printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret); | 489 | printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret); |
490 | else { | 490 | else { |
491 | if (debug & DBGLVL_I2C) | 491 | if (saa_debug & DBGLVL_I2C) |
492 | saa7164_dumphex16(dev, buf, sizeof(buf)); | 492 | saa7164_dumphex16(dev, buf, sizeof(buf)); |
493 | memcpy(data, (buf + 2 * sizeof(u32) + reglen), datalen); | 493 | memcpy(data, (buf + 2 * sizeof(u32) + reglen), datalen); |
494 | } | 494 | } |
@@ -548,7 +548,7 @@ int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen, | |||
548 | *((u32 *)(buf + 1 * sizeof(u32))) = datalen - reglen; | 548 | *((u32 *)(buf + 1 * sizeof(u32))) = datalen - reglen; |
549 | memcpy((buf + 2 * sizeof(u32)), data, datalen); | 549 | memcpy((buf + 2 * sizeof(u32)), data, datalen); |
550 | 550 | ||
551 | if (debug & DBGLVL_I2C) | 551 | if (saa_debug & DBGLVL_I2C) |
552 | saa7164_dumphex16(dev, buf, sizeof(buf)); | 552 | saa7164_dumphex16(dev, buf, sizeof(buf)); |
553 | 553 | ||
554 | ret = saa7164_cmd_send(bus->dev, unitid, SET_CUR, | 554 | ret = saa7164_cmd_send(bus->dev, unitid, SET_CUR, |
diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c index e097f1a0969a..c45966edc0cf 100644 --- a/drivers/media/video/saa7164/saa7164-cmd.c +++ b/drivers/media/video/saa7164/saa7164-cmd.c | |||
@@ -250,7 +250,7 @@ int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno) | |||
250 | unsigned long stamp; | 250 | unsigned long stamp; |
251 | int r; | 251 | int r; |
252 | 252 | ||
253 | if (debug >= 4) | 253 | if (saa_debug >= 4) |
254 | saa7164_bus_dump(dev); | 254 | saa7164_bus_dump(dev); |
255 | 255 | ||
256 | dprintk(DBGLVL_CMD, "%s(seqno=%d)\n", __func__, seqno); | 256 | dprintk(DBGLVL_CMD, "%s(seqno=%d)\n", __func__, seqno); |
diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c index f0dbead188c8..709affc31042 100644 --- a/drivers/media/video/saa7164/saa7164-core.c +++ b/drivers/media/video/saa7164/saa7164-core.c | |||
@@ -45,8 +45,8 @@ MODULE_LICENSE("GPL"); | |||
45 | 32 bus | 45 | 32 bus |
46 | */ | 46 | */ |
47 | 47 | ||
48 | unsigned int debug; | 48 | unsigned int saa_debug; |
49 | module_param(debug, int, 0644); | 49 | module_param_named(debug, saa_debug, int, 0644); |
50 | MODULE_PARM_DESC(debug, "enable debug messages"); | 50 | MODULE_PARM_DESC(debug, "enable debug messages"); |
51 | 51 | ||
52 | unsigned int waitsecs = 10; | 52 | unsigned int waitsecs = 10; |
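Renaming the variable to saa_debug avoids colliding with other globals named debug, while module_param_named() keeps the user-visible name, so for example `modprobe saa7164 debug=4` and /sys/module/saa7164/parameters/debug continue to work unchanged.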
@@ -653,7 +653,7 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev, | |||
653 | printk(KERN_ERR "%s() Unsupported board detected, " | 653 | printk(KERN_ERR "%s() Unsupported board detected, " |
654 | "registering without firmware\n", __func__); | 654 | "registering without firmware\n", __func__); |
655 | 655 | ||
656 | dprintk(1, "%s() parameter debug = %d\n", __func__, debug); | 656 | dprintk(1, "%s() parameter debug = %d\n", __func__, saa_debug); |
657 | dprintk(1, "%s() parameter waitsecs = %d\n", __func__, waitsecs); | 657 | dprintk(1, "%s() parameter waitsecs = %d\n", __func__, waitsecs); |
658 | 658 | ||
659 | fail_fw: | 659 | fail_fw: |
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h index 6753008a9c9b..42660b546f0e 100644 --- a/drivers/media/video/saa7164/saa7164.h +++ b/drivers/media/video/saa7164/saa7164.h | |||
@@ -375,9 +375,9 @@ extern int saa7164_buffer_dealloc(struct saa7164_tsport *port, | |||
375 | 375 | ||
376 | /* ----------------------------------------------------------- */ | 376 | /* ----------------------------------------------------------- */ |
377 | 377 | ||
378 | extern unsigned int debug; | 378 | extern unsigned int saa_debug; |
379 | #define dprintk(level, fmt, arg...)\ | 379 | #define dprintk(level, fmt, arg...)\ |
380 | do { if (debug & level)\ | 380 | do { if (saa_debug & level)\ |
381 | printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\ | 381 | printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\ |
382 | } while (0) | 382 | } while (0) |
383 | 383 | ||
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index a5b448ea4eab..b3bf1c44d74d 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c | |||
@@ -339,9 +339,9 @@ static int h_memstick_read_dev_id(struct memstick_dev *card, | |||
339 | card->id.type = id_reg.type; | 339 | card->id.type = id_reg.type; |
340 | card->id.category = id_reg.category; | 340 | card->id.category = id_reg.category; |
341 | card->id.class = id_reg.class; | 341 | card->id.class = id_reg.class; |
342 | dev_dbg(&card->dev, "if_mode = %02x\n", id_reg.if_mode); | ||
342 | } | 343 | } |
343 | complete(&card->mrq_complete); | 344 | complete(&card->mrq_complete); |
344 | dev_dbg(&card->dev, "if_mode = %02x\n", id_reg.if_mode); | ||
345 | return -EAGAIN; | 345 | return -EAGAIN; |
346 | } | 346 | } |
347 | } | 347 | } |
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index 79689b10f937..766e21e15574 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c | |||
@@ -937,6 +937,8 @@ static int quicktest1(unsigned long arg) | |||
937 | 937 | ||
938 | /* Need 1K cacheline aligned that does not cross page boundary */ | 938 | /* Need 1K cacheline aligned that does not cross page boundary */ |
939 | p = kmalloc(4096, 0); | 939 | p = kmalloc(4096, 0); |
940 | if (p == NULL) | ||
941 | return -ENOMEM; | ||
940 | mq = ALIGNUP(p, 1024); | 942 | mq = ALIGNUP(p, 1024); |
941 | memset(mes, 0xee, sizeof(mes)); | 943 | memset(mes, 0xee, sizeof(mes)); |
942 | dw = mq; | 944 | dw = mq; |
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index 9cbf95bedce6..ccd4408a26c7 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c | |||
@@ -340,10 +340,9 @@ static struct proc_dir_entry *proc_gru __read_mostly; | |||
340 | 340 | ||
341 | static int create_proc_file(struct proc_entry *p) | 341 | static int create_proc_file(struct proc_entry *p) |
342 | { | 342 | { |
343 | p->entry = create_proc_entry(p->name, p->mode, proc_gru); | 343 | p->entry = proc_create(p->name, p->mode, proc_gru, p->fops); |
344 | if (!p->entry) | 344 | if (!p->entry) |
345 | return -1; | 345 | return -1; |
346 | p->entry->proc_fops = p->fops; | ||
347 | return 0; | 346 | return 0; |
348 | } | 347 | } |
349 | 348 | ||
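proc_create() registers the entry together with its file_operations in a single step, closing the window in which the old create_proc_entry() plus manual proc_fops assignment briefly exposed a /proc file with no operations attached. The prototype at this point in time is approximately:

    struct proc_dir_entry *proc_create(const char *name, mode_t mode,
                                       struct proc_dir_entry *parent,
                                       const struct file_operations *proc_fops);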
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 065fa818be57..fc25586b7ee1 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -599,6 +599,7 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
599 | struct scatterlist *sg; | 599 | struct scatterlist *sg; |
600 | unsigned int i; | 600 | unsigned int i; |
601 | enum dma_data_direction direction; | 601 | enum dma_data_direction direction; |
602 | unsigned int sglen; | ||
602 | 603 | ||
603 | /* | 604 | /* |
604 | * We don't do DMA on "complex" transfers, i.e. with | 605 | * We don't do DMA on "complex" transfers, i.e. with |
@@ -628,11 +629,14 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
628 | else | 629 | else |
629 | direction = DMA_TO_DEVICE; | 630 | direction = DMA_TO_DEVICE; |
630 | 631 | ||
632 | sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); | ||
633 | if (sglen != data->sg_len) | ||
634 | goto unmap_exit; | ||
631 | desc = chan->device->device_prep_slave_sg(chan, | 635 | desc = chan->device->device_prep_slave_sg(chan, |
632 | data->sg, data->sg_len, direction, | 636 | data->sg, data->sg_len, direction, |
633 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 637 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
634 | if (!desc) | 638 | if (!desc) |
635 | return -ENOMEM; | 639 | goto unmap_exit; |
636 | 640 | ||
637 | host->dma.data_desc = desc; | 641 | host->dma.data_desc = desc; |
638 | desc->callback = atmci_dma_complete; | 642 | desc->callback = atmci_dma_complete; |
@@ -643,6 +647,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
643 | chan->device->device_issue_pending(chan); | 647 | chan->device->device_issue_pending(chan); |
644 | 648 | ||
645 | return 0; | 649 | return 0; |
650 | unmap_exit: | ||
651 | dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); | ||
652 | return -ENOMEM; | ||
646 | } | 653 | } |
647 | 654 | ||
648 | #else /* CONFIG_MMC_ATMELMCI_DMA */ | 655 | #else /* CONFIG_MMC_ATMELMCI_DMA */ |
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 8741d0f5146a..3d1e5329da12 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -22,12 +22,13 @@ | |||
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/amba/mmci.h> | ||
26 | #include <linux/regulator/consumer.h> | ||
25 | 27 | ||
26 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
27 | #include <asm/div64.h> | 29 | #include <asm/div64.h> |
28 | #include <asm/io.h> | 30 | #include <asm/io.h> |
29 | #include <asm/sizes.h> | 31 | #include <asm/sizes.h> |
30 | #include <asm/mach/mmc.h> | ||
31 | 32 | ||
32 | #include "mmci.h" | 33 | #include "mmci.h" |
33 | 34 | ||
@@ -38,6 +39,36 @@ | |||
38 | 39 | ||
39 | static unsigned int fmax = 515633; | 40 | static unsigned int fmax = 515633; |
40 | 41 | ||
42 | /* | ||
43 | * This must be called with host->lock held | ||
44 | */ | ||
45 | static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) | ||
46 | { | ||
47 | u32 clk = 0; | ||
48 | |||
49 | if (desired) { | ||
50 | if (desired >= host->mclk) { | ||
51 | clk = MCI_CLK_BYPASS; | ||
52 | host->cclk = host->mclk; | ||
53 | } else { | ||
54 | clk = host->mclk / (2 * desired) - 1; | ||
55 | if (clk >= 256) | ||
56 | clk = 255; | ||
57 | host->cclk = host->mclk / (2 * (clk + 1)); | ||
58 | } | ||
59 | if (host->hw_designer == 0x80) | ||
60 | clk |= MCI_FCEN; /* Bug fix in ST IP block */ | ||
61 | clk |= MCI_CLK_ENABLE; | ||
62 | /* This hasn't proven to be worthwhile */ | ||
63 | /* clk |= MCI_CLK_PWRSAVE; */ | ||
64 | } | ||
65 | |||
66 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) | ||
67 | clk |= MCI_WIDE_BUS; | ||
68 | |||
69 | writel(clk, host->base + MMCICLOCK); | ||
70 | } | ||
71 | |||
41 | static void | 72 | static void |
42 | mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) | 73 | mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) |
43 | { | 74 | { |
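The divider math in mmci_set_clkreg() is unchanged from the old mmci_set_ios() body, only moved under host->lock. For illustration, assuming an mclk of 48 MHz: a request for 400 kHz gives clk = 48000000 / (2 * 400000) - 1 = 59 and cclk = 48000000 / (2 * (59 + 1)) = 400 kHz exactly; a request at or above mclk selects MCI_CLK_BYPASS and cclk = mclk; and because the divider field is clamped to 255, the slowest achievable card clock is mclk / 512, which is why mmc->f_min is set to (mclk + 511) / 512 further down.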
@@ -419,30 +450,31 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
419 | static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 450 | static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
420 | { | 451 | { |
421 | struct mmci_host *host = mmc_priv(mmc); | 452 | struct mmci_host *host = mmc_priv(mmc); |
422 | u32 clk = 0, pwr = 0; | 453 | u32 pwr = 0; |
423 | 454 | unsigned long flags; | |
424 | if (ios->clock) { | ||
425 | if (ios->clock >= host->mclk) { | ||
426 | clk = MCI_CLK_BYPASS; | ||
427 | host->cclk = host->mclk; | ||
428 | } else { | ||
429 | clk = host->mclk / (2 * ios->clock) - 1; | ||
430 | if (clk >= 256) | ||
431 | clk = 255; | ||
432 | host->cclk = host->mclk / (2 * (clk + 1)); | ||
433 | } | ||
434 | if (host->hw_designer == AMBA_VENDOR_ST) | ||
435 | clk |= MCI_FCEN; /* Bug fix in ST IP block */ | ||
436 | clk |= MCI_CLK_ENABLE; | ||
437 | } | ||
438 | |||
439 | if (host->plat->translate_vdd) | ||
440 | pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd); | ||
441 | 455 | ||
442 | switch (ios->power_mode) { | 456 | switch (ios->power_mode) { |
443 | case MMC_POWER_OFF: | 457 | case MMC_POWER_OFF: |
458 | if (host->vcc && | ||
459 | regulator_is_enabled(host->vcc)) | ||
460 | regulator_disable(host->vcc); | ||
444 | break; | 461 | break; |
445 | case MMC_POWER_UP: | 462 | case MMC_POWER_UP: |
463 | #ifdef CONFIG_REGULATOR | ||
464 | if (host->vcc) | ||
465 | /* This implicitly enables the regulator */ | ||
466 | mmc_regulator_set_ocr(host->vcc, ios->vdd); | ||
467 | #endif | ||
468 | /* | ||
469 | * The translate_vdd function is not used if you have | ||
470 | * an external regulator, or your design is really weird. | ||
471 | * Using it would mean sending in power control BOTH using | ||
472 | * a regulator AND the 4 MMCIPWR bits. If we don't have | ||
473 | * a regulator, we might have some other platform specific | ||
474 | * power control behind this translate function. | ||
475 | */ | ||
476 | if (!host->vcc && host->plat->translate_vdd) | ||
477 | pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd); | ||
446 | /* The ST version does not have this, fall through to POWER_ON */ | 478 | /* The ST version does not have this, fall through to POWER_ON */ |
447 | if (host->hw_designer != AMBA_VENDOR_ST) { | 479 | if (host->hw_designer != AMBA_VENDOR_ST) { |
448 | pwr |= MCI_PWR_UP; | 480 | pwr |= MCI_PWR_UP; |
@@ -465,12 +497,16 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
465 | } | 497 | } |
466 | } | 498 | } |
467 | 499 | ||
468 | writel(clk, host->base + MMCICLOCK); | 500 | spin_lock_irqsave(&host->lock, flags); |
501 | |||
502 | mmci_set_clkreg(host, ios->clock); | ||
469 | 503 | ||
470 | if (host->pwr != pwr) { | 504 | if (host->pwr != pwr) { |
471 | host->pwr = pwr; | 505 | host->pwr = pwr; |
472 | writel(pwr, host->base + MMCIPOWER); | 506 | writel(pwr, host->base + MMCIPOWER); |
473 | } | 507 | } |
508 | |||
509 | spin_unlock_irqrestore(&host->lock, flags); | ||
474 | } | 510 | } |
475 | 511 | ||
476 | static int mmci_get_ro(struct mmc_host *mmc) | 512 | static int mmci_get_ro(struct mmc_host *mmc) |
@@ -517,7 +553,7 @@ static void mmci_check_status(unsigned long data) | |||
517 | 553 | ||
518 | static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) | 554 | static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) |
519 | { | 555 | { |
520 | struct mmc_platform_data *plat = dev->dev.platform_data; | 556 | struct mmci_platform_data *plat = dev->dev.platform_data; |
521 | struct mmci_host *host; | 557 | struct mmci_host *host; |
522 | struct mmc_host *mmc; | 558 | struct mmc_host *mmc; |
523 | int ret; | 559 | int ret; |
@@ -583,7 +619,30 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) | |||
583 | mmc->ops = &mmci_ops; | 619 | mmc->ops = &mmci_ops; |
584 | mmc->f_min = (host->mclk + 511) / 512; | 620 | mmc->f_min = (host->mclk + 511) / 512; |
585 | mmc->f_max = min(host->mclk, fmax); | 621 | mmc->f_max = min(host->mclk, fmax); |
586 | mmc->ocr_avail = plat->ocr_mask; | 622 | #ifdef CONFIG_REGULATOR |
623 | /* If we're using the regulator framework, try to fetch a regulator */ | ||
624 | host->vcc = regulator_get(&dev->dev, "vmmc"); | ||
625 | if (IS_ERR(host->vcc)) | ||
626 | host->vcc = NULL; | ||
627 | else { | ||
628 | int mask = mmc_regulator_get_ocrmask(host->vcc); | ||
629 | |||
630 | if (mask < 0) | ||
631 | dev_err(&dev->dev, "error getting OCR mask (%d)\n", | ||
632 | mask); | ||
633 | else { | ||
634 | host->mmc->ocr_avail = (u32) mask; | ||
635 | if (plat->ocr_mask) | ||
636 | dev_warn(&dev->dev, | ||
637 | "Provided ocr_mask/setpower will not be used " | ||
638 | "(using regulator instead)\n"); | ||
639 | } | ||
640 | } | ||
641 | #endif | ||
642 | /* Fall back to platform data if no regulator is found */ | ||
643 | if (host->vcc == NULL) | ||
644 | mmc->ocr_avail = plat->ocr_mask; | ||
645 | mmc->caps = plat->capabilities; | ||
587 | 646 | ||
588 | /* | 647 | /* |
589 | * We can do SGIO | 648 | * We can do SGIO |
@@ -720,6 +779,10 @@ static int __devexit mmci_remove(struct amba_device *dev) | |||
720 | clk_disable(host->clk); | 779 | clk_disable(host->clk); |
721 | clk_put(host->clk); | 780 | clk_put(host->clk); |
722 | 781 | ||
782 | if (host->vcc && regulator_is_enabled(host->vcc)) | ||
783 | regulator_disable(host->vcc); | ||
784 | regulator_put(host->vcc); | ||
785 | |||
723 | mmc_free_host(mmc); | 786 | mmc_free_host(mmc); |
724 | 787 | ||
725 | amba_release_regions(dev); | 788 | amba_release_regions(dev); |
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 839f264c9725..1ceb9a90f59b 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h | |||
@@ -161,7 +161,7 @@ struct mmci_host { | |||
161 | unsigned int mclk; | 161 | unsigned int mclk; |
162 | unsigned int cclk; | 162 | unsigned int cclk; |
163 | u32 pwr; | 163 | u32 pwr; |
164 | struct mmc_platform_data *plat; | 164 | struct mmci_platform_data *plat; |
165 | 165 | ||
166 | u8 hw_designer; | 166 | u8 hw_designer; |
167 | u8 hw_revision:4; | 167 | u8 hw_revision:4; |
@@ -175,6 +175,7 @@ struct mmci_host { | |||
175 | struct scatterlist *sg_ptr; | 175 | struct scatterlist *sg_ptr; |
176 | unsigned int sg_off; | 176 | unsigned int sg_off; |
177 | unsigned int size; | 177 | unsigned int size; |
178 | struct regulator *vcc; | ||
178 | }; | 179 | }; |
179 | 180 | ||
180 | static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) | 181 | static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) |
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index e55ac792d68c..5e0b1529964d 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/mmc/host.h> | 28 | #include <linux/mmc/host.h> |
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | #include <linux/regulator/consumer.h> | 30 | #include <linux/regulator/consumer.h> |
31 | #include <linux/gpio.h> | ||
31 | 32 | ||
32 | #include <asm/sizes.h> | 33 | #include <asm/sizes.h> |
33 | 34 | ||
@@ -96,10 +97,18 @@ static inline void pxamci_init_ocr(struct pxamci_host *host) | |||
96 | 97 | ||
97 | static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd) | 98 | static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd) |
98 | { | 99 | { |
100 | int on; | ||
101 | |||
99 | #ifdef CONFIG_REGULATOR | 102 | #ifdef CONFIG_REGULATOR |
100 | if (host->vcc) | 103 | if (host->vcc) |
101 | mmc_regulator_set_ocr(host->vcc, vdd); | 104 | mmc_regulator_set_ocr(host->vcc, vdd); |
102 | #endif | 105 | #endif |
106 | if (!host->vcc && host->pdata && | ||
107 | gpio_is_valid(host->pdata->gpio_power)) { | ||
108 | on = ((1 << vdd) & host->pdata->ocr_mask); | ||
109 | gpio_set_value(host->pdata->gpio_power, | ||
110 | !!on ^ host->pdata->gpio_power_invert); | ||
111 | } | ||
103 | if (!host->vcc && host->pdata && host->pdata->setpower) | 112 | if (!host->vcc && host->pdata && host->pdata->setpower) |
104 | host->pdata->setpower(mmc_dev(host->mmc), vdd); | 113 | host->pdata->setpower(mmc_dev(host->mmc), vdd); |
105 | } | 114 | } |
@@ -421,6 +430,12 @@ static int pxamci_get_ro(struct mmc_host *mmc) | |||
421 | { | 430 | { |
422 | struct pxamci_host *host = mmc_priv(mmc); | 431 | struct pxamci_host *host = mmc_priv(mmc); |
423 | 432 | ||
433 | if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { | ||
434 | if (host->pdata->gpio_card_ro_invert) | ||
435 | return !gpio_get_value(host->pdata->gpio_card_ro); | ||
436 | else | ||
437 | return gpio_get_value(host->pdata->gpio_card_ro); | ||
438 | } | ||
424 | if (host->pdata && host->pdata->get_ro) | 439 | if (host->pdata && host->pdata->get_ro) |
425 | return !!host->pdata->get_ro(mmc_dev(mmc)); | 440 | return !!host->pdata->get_ro(mmc_dev(mmc)); |
426 | /* | 441 | /* |
@@ -534,7 +549,7 @@ static int pxamci_probe(struct platform_device *pdev) | |||
534 | struct mmc_host *mmc; | 549 | struct mmc_host *mmc; |
535 | struct pxamci_host *host = NULL; | 550 | struct pxamci_host *host = NULL; |
536 | struct resource *r, *dmarx, *dmatx; | 551 | struct resource *r, *dmarx, *dmatx; |
537 | int ret, irq; | 552 | int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; |
538 | 553 | ||
539 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 554 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
540 | irq = platform_get_irq(pdev, 0); | 555 | irq = platform_get_irq(pdev, 0); |
@@ -661,13 +676,63 @@ static int pxamci_probe(struct platform_device *pdev) | |||
661 | } | 676 | } |
662 | host->dma_drcmrtx = dmatx->start; | 677 | host->dma_drcmrtx = dmatx->start; |
663 | 678 | ||
679 | if (host->pdata) { | ||
680 | gpio_cd = host->pdata->gpio_card_detect; | ||
681 | gpio_ro = host->pdata->gpio_card_ro; | ||
682 | gpio_power = host->pdata->gpio_power; | ||
683 | } | ||
684 | if (gpio_is_valid(gpio_power)) { | ||
685 | ret = gpio_request(gpio_power, "mmc card power"); | ||
686 | if (ret) { | ||
687 | dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); | ||
688 | goto out; | ||
689 | } | ||
690 | gpio_direction_output(gpio_power, | ||
691 | host->pdata->gpio_power_invert); | ||
692 | } | ||
693 | if (gpio_is_valid(gpio_ro)) { | ||
694 | ret = gpio_request(gpio_ro, "mmc card read only"); | ||
695 | if (ret) { | ||
696 | dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); | ||
697 | goto err_gpio_ro; | ||
698 | } | ||
699 | gpio_direction_input(gpio_ro); | ||
700 | } | ||
701 | if (gpio_is_valid(gpio_cd)) { | ||
702 | ret = gpio_request(gpio_cd, "mmc card detect"); | ||
703 | if (ret) { | ||
704 | dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); | ||
705 | goto err_gpio_cd; | ||
706 | } | ||
707 | gpio_direction_input(gpio_cd); | ||
708 | |||
709 | ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, | ||
710 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
711 | "mmc card detect", mmc); | ||
712 | if (ret) { | ||
713 | dev_err(&pdev->dev, "failed to request card detect IRQ\n"); | ||
714 | goto err_request_irq; | ||
715 | } | ||
716 | } | ||
717 | |||
664 | if (host->pdata && host->pdata->init) | 718 | if (host->pdata && host->pdata->init) |
665 | host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc); | 719 | host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc); |
666 | 720 | ||
721 | if (gpio_is_valid(gpio_power) && host->pdata->setpower) | ||
722 | dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n"); | ||
723 | if (gpio_is_valid(gpio_ro) && host->pdata->get_ro) | ||
724 | dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n"); | ||
725 | |||
667 | mmc_add_host(mmc); | 726 | mmc_add_host(mmc); |
668 | 727 | ||
669 | return 0; | 728 | return 0; |
670 | 729 | ||
730 | err_request_irq: | ||
731 | gpio_free(gpio_cd); | ||
732 | err_gpio_cd: | ||
733 | gpio_free(gpio_ro); | ||
734 | err_gpio_ro: | ||
735 | gpio_free(gpio_power); | ||
671 | out: | 736 | out: |
672 | if (host) { | 737 | if (host) { |
673 | if (host->dma >= 0) | 738 | if (host->dma >= 0) |
@@ -688,12 +753,26 @@ static int pxamci_probe(struct platform_device *pdev) | |||
688 | static int pxamci_remove(struct platform_device *pdev) | 753 | static int pxamci_remove(struct platform_device *pdev) |
689 | { | 754 | { |
690 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 755 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
756 | int gpio_cd = -1, gpio_ro = -1, gpio_power = -1; | ||
691 | 757 | ||
692 | platform_set_drvdata(pdev, NULL); | 758 | platform_set_drvdata(pdev, NULL); |
693 | 759 | ||
694 | if (mmc) { | 760 | if (mmc) { |
695 | struct pxamci_host *host = mmc_priv(mmc); | 761 | struct pxamci_host *host = mmc_priv(mmc); |
696 | 762 | ||
763 | if (host->pdata) { | ||
764 | gpio_cd = host->pdata->gpio_card_detect; | ||
765 | gpio_ro = host->pdata->gpio_card_ro; | ||
766 | gpio_power = host->pdata->gpio_power; | ||
767 | } | ||
768 | if (gpio_is_valid(gpio_cd)) { | ||
769 | free_irq(gpio_to_irq(gpio_cd), mmc); | ||
770 | gpio_free(gpio_cd); | ||
771 | } | ||
772 | if (gpio_is_valid(gpio_ro)) | ||
773 | gpio_free(gpio_ro); | ||
774 | if (gpio_is_valid(gpio_power)) | ||
775 | gpio_free(gpio_power); | ||
697 | if (host->vcc) | 776 | if (host->vcc) |
698 | regulator_put(host->vcc); | 777 | regulator_put(host->vcc); |
699 | 778 | ||
@@ -725,20 +804,20 @@ static int pxamci_remove(struct platform_device *pdev) | |||
725 | } | 804 | } |
726 | 805 | ||
727 | #ifdef CONFIG_PM | 806 | #ifdef CONFIG_PM |
728 | static int pxamci_suspend(struct platform_device *dev, pm_message_t state) | 807 | static int pxamci_suspend(struct device *dev) |
729 | { | 808 | { |
730 | struct mmc_host *mmc = platform_get_drvdata(dev); | 809 | struct mmc_host *mmc = dev_get_drvdata(dev); |
731 | int ret = 0; | 810 | int ret = 0; |
732 | 811 | ||
733 | if (mmc) | 812 | if (mmc) |
734 | ret = mmc_suspend_host(mmc, state); | 813 | ret = mmc_suspend_host(mmc, PMSG_SUSPEND); |
735 | 814 | ||
736 | return ret; | 815 | return ret; |
737 | } | 816 | } |
738 | 817 | ||
739 | static int pxamci_resume(struct platform_device *dev) | 818 | static int pxamci_resume(struct device *dev) |
740 | { | 819 | { |
741 | struct mmc_host *mmc = platform_get_drvdata(dev); | 820 | struct mmc_host *mmc = dev_get_drvdata(dev); |
742 | int ret = 0; | 821 | int ret = 0; |
743 | 822 | ||
744 | if (mmc) | 823 | if (mmc) |
@@ -746,19 +825,22 @@ static int pxamci_resume(struct platform_device *dev) | |||
746 | 825 | ||
747 | return ret; | 826 | return ret; |
748 | } | 827 | } |
749 | #else | 828 | |
750 | #define pxamci_suspend NULL | 829 | static struct dev_pm_ops pxamci_pm_ops = { |
751 | #define pxamci_resume NULL | 830 | .suspend = pxamci_suspend, |
831 | .resume = pxamci_resume, | ||
832 | }; | ||
752 | #endif | 833 | #endif |
753 | 834 | ||
754 | static struct platform_driver pxamci_driver = { | 835 | static struct platform_driver pxamci_driver = { |
755 | .probe = pxamci_probe, | 836 | .probe = pxamci_probe, |
756 | .remove = pxamci_remove, | 837 | .remove = pxamci_remove, |
757 | .suspend = pxamci_suspend, | ||
758 | .resume = pxamci_resume, | ||
759 | .driver = { | 838 | .driver = { |
760 | .name = DRIVER_NAME, | 839 | .name = DRIVER_NAME, |
761 | .owner = THIS_MODULE, | 840 | .owner = THIS_MODULE, |
841 | #ifdef CONFIG_PM | ||
842 | .pm = &pxamci_pm_ops, | ||
843 | #endif | ||
762 | }, | 844 | }, |
763 | }; | 845 | }; |
764 | 846 | ||
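Editor's note: the pxamci hunks above teach the driver to use GPIOs named in platform data for card detect, write protect and slot power, falling back to the old get_ro()/setpower() callbacks, and move suspend/resume over to dev_pm_ops. A minimal board-file sketch of the new platform data, with invented GPIO numbers (only the gpio_* fields visible in the hunk are taken from the patch):

/*
 * Illustrative sketch only: GPIO numbers are hypothetical; the gpio_*
 * fields are the ones the pxamci driver now consumes above.
 */
#include <linux/mmc/host.h>
#include <mach/mmc.h>

static struct pxamci_platform_data example_mci_info = {
	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
	.gpio_card_detect	= 53,	/* hypothetical card-detect line */
	.gpio_card_ro		= 54,	/* hypothetical write-protect line */
	.gpio_card_ro_invert	= 1,	/* WP line is active-low on this board */
	.gpio_power		= 55,	/* hypothetical slot-power line */
	.gpio_power_invert	= 0,
	/* .get_ro and .setpower left unset: the driver warns when both a
	 * GPIO and the equivalent callback are supplied. */
};

In a typical PXA board file this structure would be handed to the driver with pxa_set_mci_info(&example_mci_info).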
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index e4ec3659759a..ecf90f5c97c2 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -159,7 +159,7 @@ config MTD_AFS_PARTS | |||
159 | 159 | ||
160 | config MTD_OF_PARTS | 160 | config MTD_OF_PARTS |
161 | tristate "Flash partition map based on OF description" | 161 | tristate "Flash partition map based on OF description" |
162 | depends on PPC_OF && MTD_PARTITIONS | 162 | depends on (MICROBLAZE || PPC_OF) && MTD_PARTITIONS |
163 | help | 163 | help |
164 | This provides a partition parsing function which derives | 164 | This provides a partition parsing function which derives |
165 | the partition map from the children of the flash node, | 165 | the partition map from the children of the flash node, |
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 3a9a960644b6..841e085ab74a 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -74,7 +74,7 @@ config MTD_PHYSMAP_BANKWIDTH | |||
74 | 74 | ||
75 | config MTD_PHYSMAP_OF | 75 | config MTD_PHYSMAP_OF |
76 | tristate "Flash device in physical memory map based on OF description" | 76 | tristate "Flash device in physical memory map based on OF description" |
77 | depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM) | 77 | depends on (MICROBLAZE || PPC_OF) && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM) |
78 | help | 78 | help |
79 | This provides a 'mapping' driver which allows the NOR Flash and | 79 | This provides a 'mapping' driver which allows the NOR Flash and |
80 | ROM driver code to communicate with chips which are mapped | 80 | ROM driver code to communicate with chips which are mapped |
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 15c0195ebd31..a24be34a3f7a 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c | |||
@@ -768,10 +768,24 @@ e100_negotiate(struct net_device* dev) | |||
768 | 768 | ||
769 | e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); | 769 | e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); |
770 | 770 | ||
771 | /* Renegotiate with link partner */ | 771 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); |
772 | if (autoneg_normal) { | 772 | if (autoneg_normal) { |
773 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); | 773 | /* Renegotiate with link partner */ |
774 | data |= BMCR_ANENABLE | BMCR_ANRESTART; | 774 | data |= BMCR_ANENABLE | BMCR_ANRESTART; |
775 | } else { | ||
776 | /* Don't negotiate speed or duplex */ | ||
777 | data &= ~(BMCR_ANENABLE | BMCR_ANRESTART); | ||
778 | |||
779 | /* Set speed and duplex static */ | ||
780 | if (current_speed_selection == 10) | ||
781 | data &= ~BMCR_SPEED100; | ||
782 | else | ||
783 | data |= BMCR_SPEED100; | ||
784 | |||
785 | if (current_duplex != full) | ||
786 | data &= ~BMCR_FULLDPLX; | ||
787 | else | ||
788 | data |= BMCR_FULLDPLX; | ||
775 | } | 789 | } |
776 | e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); | 790 | e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); |
777 | } | 791 | } |
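Editor's note: with autoneg_normal cleared, the hunk above no longer restarts autonegotiation; it clears BMCR_ANENABLE/BMCR_ANRESTART and forces the speed and duplex bits from current_speed_selection and current_duplex. The same bit manipulation as a standalone helper (illustrative sketch, not part of the driver):

#include <linux/mii.h>
#include <linux/types.h>

/* Return a BMCR value that forces 10/100 Mbit/s and half/full duplex,
 * with autonegotiation disabled; mirrors the logic added above. */
static u16 forced_bmcr(u16 bmcr, unsigned int speed, bool full_duplex)
{
	bmcr &= ~(BMCR_ANENABLE | BMCR_ANRESTART);

	if (speed == 100)
		bmcr |= BMCR_SPEED100;
	else
		bmcr &= ~BMCR_SPEED100;		/* 10 Mbit/s */

	if (full_duplex)
		bmcr |= BMCR_FULLDPLX;
	else
		bmcr &= ~BMCR_FULLDPLX;

	return bmcr;
}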
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 1445e5865196..84db145d2b59 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/etherdevice.h> | 17 | #include <linux/etherdevice.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/gpio.h> | ||
20 | 21 | ||
21 | #include <net/irda/irda.h> | 22 | #include <net/irda/irda.h> |
22 | #include <net/irda/irmod.h> | 23 | #include <net/irda/irmod.h> |
@@ -163,6 +164,22 @@ inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si) | |||
163 | } | 164 | } |
164 | 165 | ||
165 | /* | 166 | /* |
167 | * Set the IrDA communications mode. | ||
168 | */ | ||
169 | static void pxa_irda_set_mode(struct pxa_irda *si, int mode) | ||
170 | { | ||
171 | if (si->pdata->transceiver_mode) | ||
172 | si->pdata->transceiver_mode(si->dev, mode); | ||
173 | else { | ||
174 | if (gpio_is_valid(si->pdata->gpio_pwdown)) | ||
175 | gpio_set_value(si->pdata->gpio_pwdown, | ||
176 | !(mode & IR_OFF) ^ | ||
177 | !si->pdata->gpio_pwdown_inverted); | ||
178 | pxa2xx_transceiver_mode(si->dev, mode); | ||
179 | } | ||
180 | } | ||
181 | |||
182 | /* | ||
166 | * Set the IrDA communications speed. | 183 | * Set the IrDA communications speed. |
167 | */ | 184 | */ |
168 | static int pxa_irda_set_speed(struct pxa_irda *si, int speed) | 185 | static int pxa_irda_set_speed(struct pxa_irda *si, int speed) |
@@ -188,7 +205,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed) | |||
188 | pxa_irda_disable_clk(si); | 205 | pxa_irda_disable_clk(si); |
189 | 206 | ||
190 | /* set board transceiver to SIR mode */ | 207 | /* set board transceiver to SIR mode */ |
191 | si->pdata->transceiver_mode(si->dev, IR_SIRMODE); | 208 | pxa_irda_set_mode(si, IR_SIRMODE); |
192 | 209 | ||
193 | /* enable the STUART clock */ | 210 | /* enable the STUART clock */ |
194 | pxa_irda_enable_sirclk(si); | 211 | pxa_irda_enable_sirclk(si); |
@@ -222,7 +239,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed) | |||
222 | ICCR0 = 0; | 239 | ICCR0 = 0; |
223 | 240 | ||
224 | /* set board transceiver to FIR mode */ | 241 | /* set board transceiver to FIR mode */ |
225 | si->pdata->transceiver_mode(si->dev, IR_FIRMODE); | 242 | pxa_irda_set_mode(si, IR_FIRMODE); |
226 | 243 | ||
227 | /* enable the FICP clock */ | 244 | /* enable the FICP clock */ |
228 | pxa_irda_enable_firclk(si); | 245 | pxa_irda_enable_firclk(si); |
@@ -641,7 +658,7 @@ static void pxa_irda_shutdown(struct pxa_irda *si) | |||
641 | local_irq_restore(flags); | 658 | local_irq_restore(flags); |
642 | 659 | ||
643 | /* power off board transceiver */ | 660 | /* power off board transceiver */ |
644 | si->pdata->transceiver_mode(si->dev, IR_OFF); | 661 | pxa_irda_set_mode(si, IR_OFF); |
645 | 662 | ||
646 | printk(KERN_DEBUG "pxa_ir: irda shutdown\n"); | 663 | printk(KERN_DEBUG "pxa_ir: irda shutdown\n"); |
647 | } | 664 | } |
@@ -849,10 +866,26 @@ static int pxa_irda_probe(struct platform_device *pdev) | |||
849 | if (err) | 866 | if (err) |
850 | goto err_mem_5; | 867 | goto err_mem_5; |
851 | 868 | ||
852 | if (si->pdata->startup) | 869 | if (gpio_is_valid(si->pdata->gpio_pwdown)) { |
870 | err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch"); | ||
871 | if (err) | ||
872 | goto err_startup; | ||
873 | err = gpio_direction_output(si->pdata->gpio_pwdown, | ||
874 | !si->pdata->gpio_pwdown_inverted); | ||
875 | if (err) { | ||
876 | gpio_free(si->pdata->gpio_pwdown); | ||
877 | goto err_startup; | ||
878 | } | ||
879 | } | ||
880 | |||
881 | if (si->pdata->startup) { | ||
853 | err = si->pdata->startup(si->dev); | 882 | err = si->pdata->startup(si->dev); |
854 | if (err) | 883 | if (err) |
855 | goto err_startup; | 884 | goto err_startup; |
885 | } | ||
886 | |||
887 | if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup) | ||
888 | dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n"); | ||
856 | 889 | ||
857 | dev->netdev_ops = &pxa_irda_netdev_ops; | 890 | dev->netdev_ops = &pxa_irda_netdev_ops; |
858 | 891 | ||
@@ -903,6 +936,8 @@ static int pxa_irda_remove(struct platform_device *_dev) | |||
903 | if (dev) { | 936 | if (dev) { |
904 | struct pxa_irda *si = netdev_priv(dev); | 937 | struct pxa_irda *si = netdev_priv(dev); |
905 | unregister_netdev(dev); | 938 | unregister_netdev(dev); |
939 | if (gpio_is_valid(si->pdata->gpio_pwdown)) | ||
940 | gpio_free(si->pdata->gpio_pwdown); | ||
906 | if (si->pdata->shutdown) | 941 | if (si->pdata->shutdown) |
907 | si->pdata->shutdown(si->dev); | 942 | si->pdata->shutdown(si->dev); |
908 | kfree(si->tx_buff.head); | 943 | kfree(si->tx_buff.head); |
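Editor's note: pxaficp_ir can now drive the transceiver power-down line itself through pxa_irda_set_mode() when the board supplies a GPIO instead of a transceiver_mode() hook. A hedged board-file sketch using the gpio_pwdown/gpio_pwdown_inverted fields referenced above (the GPIO number is invented):

#include <mach/irda.h>

static struct pxaficp_platform_data example_ficp_info = {
	.transceiver_cap	= IR_SIRMODE | IR_FIRMODE | IR_OFF,
	.gpio_pwdown		= 78,	/* hypothetical transceiver power-down GPIO */
	.gpio_pwdown_inverted	= 1,	/* active-low: driving the line low powers the part up */
	/* no .transceiver_mode hook, so the driver falls back to the GPIO
	 * plus pxa2xx_transceiver_mode(), as in pxa_irda_set_mode() above */
};

On PXA boards this would normally be registered with pxa_set_ficp_info(&example_ficp_info).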
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index cee199ceba2f..3c16602172fc 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
@@ -33,6 +33,7 @@ | |||
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/mlx4/cmd.h> | 35 | #include <linux/mlx4/cmd.h> |
36 | #include <linux/cache.h> | ||
36 | 37 | ||
37 | #include "fw.h" | 38 | #include "fw.h" |
38 | #include "icm.h" | 39 | #include "icm.h" |
@@ -698,6 +699,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) | |||
698 | #define INIT_HCA_IN_SIZE 0x200 | 699 | #define INIT_HCA_IN_SIZE 0x200 |
699 | #define INIT_HCA_VERSION_OFFSET 0x000 | 700 | #define INIT_HCA_VERSION_OFFSET 0x000 |
700 | #define INIT_HCA_VERSION 2 | 701 | #define INIT_HCA_VERSION 2 |
702 | #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e | ||
701 | #define INIT_HCA_FLAGS_OFFSET 0x014 | 703 | #define INIT_HCA_FLAGS_OFFSET 0x014 |
702 | #define INIT_HCA_QPC_OFFSET 0x020 | 704 | #define INIT_HCA_QPC_OFFSET 0x020 |
703 | #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) | 705 | #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) |
@@ -735,6 +737,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) | |||
735 | 737 | ||
736 | *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; | 738 | *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; |
737 | 739 | ||
740 | *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = | ||
741 | (ilog2(cache_line_size()) - 4) << 5; | ||
742 | |||
738 | #if defined(__LITTLE_ENDIAN) | 743 | #if defined(__LITTLE_ENDIAN) |
739 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); | 744 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); |
740 | #elif defined(__BIG_ENDIAN) | 745 | #elif defined(__BIG_ENDIAN) |
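Editor's note: the INIT_HCA change above reports the host cache line size to the HCA, encoded as (log2(line size) - 4) shifted into the upper bits of the byte at INIT_HCA_CACHELINE_SZ_OFFSET. A worked example of the encoding as a small userspace-style C sketch (assumes power-of-two line sizes, which is what cache_line_size() returns in practice):

#include <stdio.h>

/* Mirrors the in-kernel expression (ilog2(cache_line_size()) - 4) << 5.
 * For a 64-byte line: ilog2(64) = 6, 6 - 4 = 2, 2 << 5 = 0x40. */
static unsigned char encode_cacheline(unsigned int line_size)
{
	unsigned int log2sz = 0;

	while ((1u << (log2sz + 1)) <= line_size)
		log2sz++;			/* integer log2 */

	return (unsigned char)((log2sz - 4) << 5);
}

int main(void)
{
	printf("64-byte line  -> 0x%02x\n", encode_cacheline(64));	/* 0x40 */
	printf("128-byte line -> 0x%02x\n", encode_cacheline(128));	/* 0x60 */
	return 0;
}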
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c index 2ab1d59870f4..a8b689635a3b 100644 --- a/drivers/net/wireless/arlan-proc.c +++ b/drivers/net/wireless/arlan-proc.c | |||
@@ -402,7 +402,7 @@ static int arlan_setup_card_by_book(struct net_device *dev) | |||
402 | 402 | ||
403 | static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0"; | 403 | static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0"; |
404 | 404 | ||
405 | static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp, | 405 | static int arlan_sysctl_info(ctl_table * ctl, int write, |
406 | void __user *buffer, size_t * lenp, loff_t *ppos) | 406 | void __user *buffer, size_t * lenp, loff_t *ppos) |
407 | { | 407 | { |
408 | int i; | 408 | int i; |
@@ -629,7 +629,7 @@ final: | |||
629 | *lenp = pos; | 629 | *lenp = pos; |
630 | 630 | ||
631 | if (!write) | 631 | if (!write) |
632 | retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); | 632 | retv = proc_dostring(ctl, write, buffer, lenp, ppos); |
633 | else | 633 | else |
634 | { | 634 | { |
635 | *lenp = 0; | 635 | *lenp = 0; |
@@ -639,7 +639,7 @@ final: | |||
639 | } | 639 | } |
640 | 640 | ||
641 | 641 | ||
642 | static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp, | 642 | static int arlan_sysctl_info161719(ctl_table * ctl, int write, |
643 | void __user *buffer, size_t * lenp, loff_t *ppos) | 643 | void __user *buffer, size_t * lenp, loff_t *ppos) |
644 | { | 644 | { |
645 | int i; | 645 | int i; |
@@ -669,11 +669,11 @@ static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp | |||
669 | 669 | ||
670 | final: | 670 | final: |
671 | *lenp = pos; | 671 | *lenp = pos; |
672 | retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); | 672 | retv = proc_dostring(ctl, write, buffer, lenp, ppos); |
673 | return retv; | 673 | return retv; |
674 | } | 674 | } |
675 | 675 | ||
676 | static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp, | 676 | static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, |
677 | void __user *buffer, size_t * lenp, loff_t *ppos) | 677 | void __user *buffer, size_t * lenp, loff_t *ppos) |
678 | { | 678 | { |
679 | int i; | 679 | int i; |
@@ -698,11 +698,11 @@ static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp | |||
698 | SARLBNpln(u_char, txBuffer, 0x800); | 698 | SARLBNpln(u_char, txBuffer, 0x800); |
699 | final: | 699 | final: |
700 | *lenp = pos; | 700 | *lenp = pos; |
701 | retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); | 701 | retv = proc_dostring(ctl, write, buffer, lenp, ppos); |
702 | return retv; | 702 | return retv; |
703 | } | 703 | } |
704 | 704 | ||
705 | static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp, | 705 | static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, |
706 | void __user *buffer, size_t * lenp, loff_t *ppos) | 706 | void __user *buffer, size_t * lenp, loff_t *ppos) |
707 | { | 707 | { |
708 | int i; | 708 | int i; |
@@ -726,11 +726,11 @@ static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp | |||
726 | SARLBNpln(u_char, rxBuffer, 0x800); | 726 | SARLBNpln(u_char, rxBuffer, 0x800); |
727 | final: | 727 | final: |
728 | *lenp = pos; | 728 | *lenp = pos; |
729 | retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); | 729 | retv = proc_dostring(ctl, write, buffer, lenp, ppos); |
730 | return retv; | 730 | return retv; |
731 | } | 731 | } |
732 | 732 | ||
733 | static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp, | 733 | static int arlan_sysctl_info18(ctl_table * ctl, int write, |
734 | void __user *buffer, size_t * lenp, loff_t *ppos) | 734 | void __user *buffer, size_t * lenp, loff_t *ppos) |
735 | { | 735 | { |
736 | int i; | 736 | int i; |
@@ -756,7 +756,7 @@ static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp, | |||
756 | 756 | ||
757 | final: | 757 | final: |
758 | *lenp = pos; | 758 | *lenp = pos; |
759 | retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); | 759 | retv = proc_dostring(ctl, write, buffer, lenp, ppos); |
760 | return retv; | 760 | return retv; |
761 | } | 761 | } |
762 | 762 | ||
@@ -766,7 +766,7 @@ final: | |||
766 | 766 | ||
767 | static char conf_reset_result[200]; | 767 | static char conf_reset_result[200]; |
768 | 768 | ||
769 | static int arlan_configure(ctl_table * ctl, int write, struct file *filp, | 769 | static int arlan_configure(ctl_table * ctl, int write, |
770 | void __user *buffer, size_t * lenp, loff_t *ppos) | 770 | void __user *buffer, size_t * lenp, loff_t *ppos) |
771 | { | 771 | { |
772 | int pos = 0; | 772 | int pos = 0; |
@@ -788,10 +788,10 @@ static int arlan_configure(ctl_table * ctl, int write, struct file *filp, | |||
788 | return -1; | 788 | return -1; |
789 | 789 | ||
790 | *lenp = pos; | 790 | *lenp = pos; |
791 | return proc_dostring(ctl, write, filp, buffer, lenp, ppos); | 791 | return proc_dostring(ctl, write, buffer, lenp, ppos); |
792 | } | 792 | } |
793 | 793 | ||
794 | static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp, | 794 | static int arlan_sysctl_reset(ctl_table * ctl, int write, |
795 | void __user *buffer, size_t * lenp, loff_t *ppos) | 795 | void __user *buffer, size_t * lenp, loff_t *ppos) |
796 | { | 796 | { |
797 | int pos = 0; | 797 | int pos = 0; |
@@ -811,7 +811,7 @@ static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp, | |||
811 | } else | 811 | } else |
812 | return -1; | 812 | return -1; |
813 | *lenp = pos + 3; | 813 | *lenp = pos + 3; |
814 | return proc_dostring(ctl, write, filp, buffer, lenp, ppos); | 814 | return proc_dostring(ctl, write, buffer, lenp, ppos); |
815 | } | 815 | } |
816 | 816 | ||
817 | 817 | ||
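Editor's note: these arlan-proc hunks (and the parport ones below) track the tree-wide removal of the struct file * argument from sysctl proc handlers; proc_dostring() now takes only (table, write, buffer, lenp, ppos). A minimal custom handler in the new form (illustrative, not taken from either driver):

#include <linux/sysctl.h>

/* Sketch of a proc handler using the post-change prototype. */
static int example_proc_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (write)
		return -EPERM;			/* read-only in this sketch */

	return proc_dostring(table, write, buffer, lenp, ppos);
}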
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index 554e11f9e1ce..8eefe56f1cbe 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #define PARPORT_MIN_SPINTIME_VALUE 1 | 31 | #define PARPORT_MIN_SPINTIME_VALUE 1 |
32 | #define PARPORT_MAX_SPINTIME_VALUE 1000 | 32 | #define PARPORT_MAX_SPINTIME_VALUE 1000 |
33 | 33 | ||
34 | static int do_active_device(ctl_table *table, int write, struct file *filp, | 34 | static int do_active_device(ctl_table *table, int write, |
35 | void __user *result, size_t *lenp, loff_t *ppos) | 35 | void __user *result, size_t *lenp, loff_t *ppos) |
36 | { | 36 | { |
37 | struct parport *port = (struct parport *)table->extra1; | 37 | struct parport *port = (struct parport *)table->extra1; |
@@ -68,7 +68,7 @@ static int do_active_device(ctl_table *table, int write, struct file *filp, | |||
68 | } | 68 | } |
69 | 69 | ||
70 | #ifdef CONFIG_PARPORT_1284 | 70 | #ifdef CONFIG_PARPORT_1284 |
71 | static int do_autoprobe(ctl_table *table, int write, struct file *filp, | 71 | static int do_autoprobe(ctl_table *table, int write, |
72 | void __user *result, size_t *lenp, loff_t *ppos) | 72 | void __user *result, size_t *lenp, loff_t *ppos) |
73 | { | 73 | { |
74 | struct parport_device_info *info = table->extra2; | 74 | struct parport_device_info *info = table->extra2; |
@@ -111,7 +111,7 @@ static int do_autoprobe(ctl_table *table, int write, struct file *filp, | |||
111 | #endif /* IEEE1284.3 support. */ | 111 | #endif /* IEEE1284.3 support. */ |
112 | 112 | ||
113 | static int do_hardware_base_addr (ctl_table *table, int write, | 113 | static int do_hardware_base_addr (ctl_table *table, int write, |
114 | struct file *filp, void __user *result, | 114 | void __user *result, |
115 | size_t *lenp, loff_t *ppos) | 115 | size_t *lenp, loff_t *ppos) |
116 | { | 116 | { |
117 | struct parport *port = (struct parport *)table->extra1; | 117 | struct parport *port = (struct parport *)table->extra1; |
@@ -139,7 +139,7 @@ static int do_hardware_base_addr (ctl_table *table, int write, | |||
139 | } | 139 | } |
140 | 140 | ||
141 | static int do_hardware_irq (ctl_table *table, int write, | 141 | static int do_hardware_irq (ctl_table *table, int write, |
142 | struct file *filp, void __user *result, | 142 | void __user *result, |
143 | size_t *lenp, loff_t *ppos) | 143 | size_t *lenp, loff_t *ppos) |
144 | { | 144 | { |
145 | struct parport *port = (struct parport *)table->extra1; | 145 | struct parport *port = (struct parport *)table->extra1; |
@@ -167,7 +167,7 @@ static int do_hardware_irq (ctl_table *table, int write, | |||
167 | } | 167 | } |
168 | 168 | ||
169 | static int do_hardware_dma (ctl_table *table, int write, | 169 | static int do_hardware_dma (ctl_table *table, int write, |
170 | struct file *filp, void __user *result, | 170 | void __user *result, |
171 | size_t *lenp, loff_t *ppos) | 171 | size_t *lenp, loff_t *ppos) |
172 | { | 172 | { |
173 | struct parport *port = (struct parport *)table->extra1; | 173 | struct parport *port = (struct parport *)table->extra1; |
@@ -195,7 +195,7 @@ static int do_hardware_dma (ctl_table *table, int write, | |||
195 | } | 195 | } |
196 | 196 | ||
197 | static int do_hardware_modes (ctl_table *table, int write, | 197 | static int do_hardware_modes (ctl_table *table, int write, |
198 | struct file *filp, void __user *result, | 198 | void __user *result, |
199 | size_t *lenp, loff_t *ppos) | 199 | size_t *lenp, loff_t *ppos) |
200 | { | 200 | { |
201 | struct parport *port = (struct parport *)table->extra1; | 201 | struct parport *port = (struct parport *)table->extra1; |
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 36faa9a8e18f..3070f77eb56a 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -72,15 +72,9 @@ do { \ | |||
72 | 72 | ||
73 | #define SLOT_NAME_SIZE 10 | 73 | #define SLOT_NAME_SIZE 10 |
74 | struct slot { | 74 | struct slot { |
75 | u8 bus; | ||
76 | u8 device; | ||
77 | u8 state; | 75 | u8 state; |
78 | u8 hp_slot; | ||
79 | u32 number; | ||
80 | struct controller *ctrl; | 76 | struct controller *ctrl; |
81 | struct hpc_ops *hpc_ops; | ||
82 | struct hotplug_slot *hotplug_slot; | 77 | struct hotplug_slot *hotplug_slot; |
83 | struct list_head slot_list; | ||
84 | struct delayed_work work; /* work for button event */ | 78 | struct delayed_work work; /* work for button event */ |
85 | struct mutex lock; | 79 | struct mutex lock; |
86 | }; | 80 | }; |
@@ -92,18 +86,10 @@ struct event_info { | |||
92 | }; | 86 | }; |
93 | 87 | ||
94 | struct controller { | 88 | struct controller { |
95 | struct mutex crit_sect; /* critical section mutex */ | ||
96 | struct mutex ctrl_lock; /* controller lock */ | 89 | struct mutex ctrl_lock; /* controller lock */ |
97 | int num_slots; /* Number of slots on ctlr */ | ||
98 | int slot_num_inc; /* 1 or -1 */ | ||
99 | struct pci_dev *pci_dev; | ||
100 | struct pcie_device *pcie; /* PCI Express port service */ | 90 | struct pcie_device *pcie; /* PCI Express port service */ |
101 | struct list_head slot_list; | 91 | struct slot *slot; |
102 | struct hpc_ops *hpc_ops; | ||
103 | wait_queue_head_t queue; /* sleep & wake process */ | 92 | wait_queue_head_t queue; /* sleep & wake process */ |
104 | u8 slot_device_offset; | ||
105 | u32 first_slot; /* First physical slot number */ /* PCIE only has 1 slot */ | ||
106 | u8 slot_bus; /* Bus where the slots handled by this controller sit */ | ||
107 | u32 slot_cap; | 93 | u32 slot_cap; |
108 | u8 cap_base; | 94 | u8 cap_base; |
109 | struct timer_list poll_timer; | 95 | struct timer_list poll_timer; |
@@ -131,40 +117,20 @@ struct controller { | |||
131 | #define POWERON_STATE 3 | 117 | #define POWERON_STATE 3 |
132 | #define POWEROFF_STATE 4 | 118 | #define POWEROFF_STATE 4 |
133 | 119 | ||
134 | /* Error messages */ | 120 | #define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP) |
135 | #define INTERLOCK_OPEN 0x00000002 | 121 | #define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP) |
136 | #define ADD_NOT_SUPPORTED 0x00000003 | 122 | #define MRL_SENS(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_MRLSP) |
137 | #define CARD_FUNCTIONING 0x00000005 | 123 | #define ATTN_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_AIP) |
138 | #define ADAPTER_NOT_SAME 0x00000006 | 124 | #define PWR_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PIP) |
139 | #define NO_ADAPTER_PRESENT 0x00000009 | 125 | #define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS) |
140 | #define NOT_ENOUGH_RESOURCES 0x0000000B | 126 | #define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP) |
141 | #define DEVICE_TYPE_NOT_SUPPORTED 0x0000000C | 127 | #define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS) |
142 | #define WRONG_BUS_FREQUENCY 0x0000000D | 128 | #define PSN(ctrl) ((ctrl)->slot_cap >> 19) |
143 | #define POWER_FAILURE 0x0000000E | ||
144 | |||
145 | /* Field definitions in Slot Capabilities Register */ | ||
146 | #define ATTN_BUTTN_PRSN 0x00000001 | ||
147 | #define PWR_CTRL_PRSN 0x00000002 | ||
148 | #define MRL_SENS_PRSN 0x00000004 | ||
149 | #define ATTN_LED_PRSN 0x00000008 | ||
150 | #define PWR_LED_PRSN 0x00000010 | ||
151 | #define HP_SUPR_RM_SUP 0x00000020 | ||
152 | #define EMI_PRSN 0x00020000 | ||
153 | #define NO_CMD_CMPL_SUP 0x00040000 | ||
154 | |||
155 | #define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & ATTN_BUTTN_PRSN) | ||
156 | #define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PWR_CTRL_PRSN) | ||
157 | #define MRL_SENS(ctrl) ((ctrl)->slot_cap & MRL_SENS_PRSN) | ||
158 | #define ATTN_LED(ctrl) ((ctrl)->slot_cap & ATTN_LED_PRSN) | ||
159 | #define PWR_LED(ctrl) ((ctrl)->slot_cap & PWR_LED_PRSN) | ||
160 | #define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & HP_SUPR_RM_SUP) | ||
161 | #define EMI(ctrl) ((ctrl)->slot_cap & EMI_PRSN) | ||
162 | #define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & NO_CMD_CMPL_SUP) | ||
163 | 129 | ||
164 | extern int pciehp_sysfs_enable_slot(struct slot *slot); | 130 | extern int pciehp_sysfs_enable_slot(struct slot *slot); |
165 | extern int pciehp_sysfs_disable_slot(struct slot *slot); | 131 | extern int pciehp_sysfs_disable_slot(struct slot *slot); |
166 | extern u8 pciehp_handle_attention_button(struct slot *p_slot); | 132 | extern u8 pciehp_handle_attention_button(struct slot *p_slot); |
167 | extern u8 pciehp_handle_switch_change(struct slot *p_slot); | 133 | extern u8 pciehp_handle_switch_change(struct slot *p_slot); |
168 | extern u8 pciehp_handle_presence_change(struct slot *p_slot); | 134 | extern u8 pciehp_handle_presence_change(struct slot *p_slot); |
169 | extern u8 pciehp_handle_power_fault(struct slot *p_slot); | 135 | extern u8 pciehp_handle_power_fault(struct slot *p_slot); |
170 | extern int pciehp_configure_device(struct slot *p_slot); | 136 | extern int pciehp_configure_device(struct slot *p_slot); |
@@ -175,45 +141,30 @@ int pcie_init_notification(struct controller *ctrl); | |||
175 | int pciehp_enable_slot(struct slot *p_slot); | 141 | int pciehp_enable_slot(struct slot *p_slot); |
176 | int pciehp_disable_slot(struct slot *p_slot); | 142 | int pciehp_disable_slot(struct slot *p_slot); |
177 | int pcie_enable_notification(struct controller *ctrl); | 143 | int pcie_enable_notification(struct controller *ctrl); |
144 | int pciehp_power_on_slot(struct slot *slot); | ||
145 | int pciehp_power_off_slot(struct slot *slot); | ||
146 | int pciehp_get_power_status(struct slot *slot, u8 *status); | ||
147 | int pciehp_get_attention_status(struct slot *slot, u8 *status); | ||
148 | |||
149 | int pciehp_set_attention_status(struct slot *slot, u8 status); | ||
150 | int pciehp_get_latch_status(struct slot *slot, u8 *status); | ||
151 | int pciehp_get_adapter_status(struct slot *slot, u8 *status); | ||
152 | int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *speed); | ||
153 | int pciehp_get_max_link_width(struct slot *slot, enum pcie_link_width *val); | ||
154 | int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *speed); | ||
155 | int pciehp_get_cur_link_width(struct slot *slot, enum pcie_link_width *val); | ||
156 | int pciehp_query_power_fault(struct slot *slot); | ||
157 | void pciehp_green_led_on(struct slot *slot); | ||
158 | void pciehp_green_led_off(struct slot *slot); | ||
159 | void pciehp_green_led_blink(struct slot *slot); | ||
160 | int pciehp_check_link_status(struct controller *ctrl); | ||
161 | void pciehp_release_ctrl(struct controller *ctrl); | ||
178 | 162 | ||
179 | static inline const char *slot_name(struct slot *slot) | 163 | static inline const char *slot_name(struct slot *slot) |
180 | { | 164 | { |
181 | return hotplug_slot_name(slot->hotplug_slot); | 165 | return hotplug_slot_name(slot->hotplug_slot); |
182 | } | 166 | } |
183 | 167 | ||
184 | static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) | ||
185 | { | ||
186 | struct slot *slot; | ||
187 | |||
188 | list_for_each_entry(slot, &ctrl->slot_list, slot_list) { | ||
189 | if (slot->device == device) | ||
190 | return slot; | ||
191 | } | ||
192 | |||
193 | ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device); | ||
194 | return NULL; | ||
195 | } | ||
196 | |||
197 | struct hpc_ops { | ||
198 | int (*power_on_slot)(struct slot *slot); | ||
199 | int (*power_off_slot)(struct slot *slot); | ||
200 | int (*get_power_status)(struct slot *slot, u8 *status); | ||
201 | int (*get_attention_status)(struct slot *slot, u8 *status); | ||
202 | int (*set_attention_status)(struct slot *slot, u8 status); | ||
203 | int (*get_latch_status)(struct slot *slot, u8 *status); | ||
204 | int (*get_adapter_status)(struct slot *slot, u8 *status); | ||
205 | int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); | ||
206 | int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); | ||
207 | int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val); | ||
208 | int (*get_cur_lnk_width)(struct slot *slot, enum pcie_link_width *val); | ||
209 | int (*query_power_fault)(struct slot *slot); | ||
210 | void (*green_led_on)(struct slot *slot); | ||
211 | void (*green_led_off)(struct slot *slot); | ||
212 | void (*green_led_blink)(struct slot *slot); | ||
213 | void (*release_ctlr)(struct controller *ctrl); | ||
214 | int (*check_lnk_status)(struct controller *ctrl); | ||
215 | }; | ||
216 | |||
217 | #ifdef CONFIG_ACPI | 168 | #ifdef CONFIG_ACPI |
218 | #include <acpi/acpi.h> | 169 | #include <acpi/acpi.h> |
219 | #include <acpi/acpi_bus.h> | 170 | #include <acpi/acpi_bus.h> |
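Editor's note: the pciehp.h rewrite above drops the driver-private Slot Capabilities bit masks in favour of the generic PCI_EXP_SLTCAP_* definitions, and PSN() pulls the Physical Slot Number out of bits 31:19 of the cached register. A short sketch of how the cached slot_cap and these macros fit together (illustrative only; assumes the struct controller and macros from the header above are in scope, error handling omitted):

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Cache the Slot Capabilities register once, then test it with the macros. */
static void example_read_slot_cap(struct pci_dev *pdev, struct controller *ctrl)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);

	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &ctrl->slot_cap);

	if (ATTN_BUTTN(ctrl))
		dev_info(&pdev->dev, "attention button present\n");
	dev_info(&pdev->dev, "physical slot number %u\n", PSN(ctrl));
}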
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index 7163e6a6cfae..37c8d3d0323e 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c | |||
@@ -33,6 +33,11 @@ | |||
33 | #define PCIEHP_DETECT_AUTO (2) | 33 | #define PCIEHP_DETECT_AUTO (2) |
34 | #define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO | 34 | #define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO |
35 | 35 | ||
36 | struct dummy_slot { | ||
37 | u32 number; | ||
38 | struct list_head list; | ||
39 | }; | ||
40 | |||
36 | static int slot_detection_mode; | 41 | static int slot_detection_mode; |
37 | static char *pciehp_detect_mode; | 42 | static char *pciehp_detect_mode; |
38 | module_param(pciehp_detect_mode, charp, 0444); | 43 | module_param(pciehp_detect_mode, charp, 0444); |
@@ -77,7 +82,7 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
77 | int pos; | 82 | int pos; |
78 | u32 slot_cap; | 83 | u32 slot_cap; |
79 | acpi_handle handle; | 84 | acpi_handle handle; |
80 | struct slot *slot, *tmp; | 85 | struct dummy_slot *slot, *tmp; |
81 | struct pci_dev *pdev = dev->port; | 86 | struct pci_dev *pdev = dev->port; |
82 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ | 87 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ |
83 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) | 88 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) |
@@ -89,11 +94,11 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
89 | if (!slot) | 94 | if (!slot) |
90 | return -ENOMEM; | 95 | return -ENOMEM; |
91 | slot->number = slot_cap >> 19; | 96 | slot->number = slot_cap >> 19; |
92 | list_for_each_entry(tmp, &dummy_slots, slot_list) { | 97 | list_for_each_entry(tmp, &dummy_slots, list) { |
93 | if (tmp->number == slot->number) | 98 | if (tmp->number == slot->number) |
94 | dup_slot_id++; | 99 | dup_slot_id++; |
95 | } | 100 | } |
96 | list_add_tail(&slot->slot_list, &dummy_slots); | 101 | list_add_tail(&slot->list, &dummy_slots); |
97 | handle = DEVICE_ACPI_HANDLE(&pdev->dev); | 102 | handle = DEVICE_ACPI_HANDLE(&pdev->dev); |
98 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) | 103 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) |
99 | acpi_slot_detected = 1; | 104 | acpi_slot_detected = 1; |
@@ -109,11 +114,11 @@ static struct pcie_port_service_driver __initdata dummy_driver = { | |||
109 | 114 | ||
110 | static int __init select_detection_mode(void) | 115 | static int __init select_detection_mode(void) |
111 | { | 116 | { |
112 | struct slot *slot, *tmp; | 117 | struct dummy_slot *slot, *tmp; |
113 | pcie_port_service_register(&dummy_driver); | 118 | pcie_port_service_register(&dummy_driver); |
114 | pcie_port_service_unregister(&dummy_driver); | 119 | pcie_port_service_unregister(&dummy_driver); |
115 | list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) { | 120 | list_for_each_entry_safe(slot, tmp, &dummy_slots, list) { |
116 | list_del(&slot->slot_list); | 121 | list_del(&slot->list); |
117 | kfree(slot); | 122 | kfree(slot); |
118 | } | 123 | } |
119 | if (acpi_slot_detected && dup_slot_id) | 124 | if (acpi_slot_detected && dup_slot_id) |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 2317557fdee6..bc234719b1df 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -99,65 +99,55 @@ static void release_slot(struct hotplug_slot *hotplug_slot) | |||
99 | kfree(hotplug_slot); | 99 | kfree(hotplug_slot); |
100 | } | 100 | } |
101 | 101 | ||
102 | static int init_slots(struct controller *ctrl) | 102 | static int init_slot(struct controller *ctrl) |
103 | { | 103 | { |
104 | struct slot *slot; | 104 | struct slot *slot = ctrl->slot; |
105 | struct hotplug_slot *hotplug_slot; | 105 | struct hotplug_slot *hotplug = NULL; |
106 | struct hotplug_slot_info *info; | 106 | struct hotplug_slot_info *info = NULL; |
107 | char name[SLOT_NAME_SIZE]; | 107 | char name[SLOT_NAME_SIZE]; |
108 | int retval = -ENOMEM; | 108 | int retval = -ENOMEM; |
109 | 109 | ||
110 | list_for_each_entry(slot, &ctrl->slot_list, slot_list) { | 110 | hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL); |
111 | hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); | 111 | if (!hotplug) |
112 | if (!hotplug_slot) | 112 | goto out; |
113 | goto error; | 113 | |
114 | 114 | info = kzalloc(sizeof(*info), GFP_KERNEL); | |
115 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 115 | if (!info) |
116 | if (!info) | 116 | goto out; |
117 | goto error_hpslot; | 117 | |
118 | 118 | /* register this slot with the hotplug pci core */ | |
119 | /* register this slot with the hotplug pci core */ | 119 | hotplug->info = info; |
120 | hotplug_slot->info = info; | 120 | hotplug->private = slot; |
121 | hotplug_slot->private = slot; | 121 | hotplug->release = &release_slot; |
122 | hotplug_slot->release = &release_slot; | 122 | hotplug->ops = &pciehp_hotplug_slot_ops; |
123 | hotplug_slot->ops = &pciehp_hotplug_slot_ops; | 123 | slot->hotplug_slot = hotplug; |
124 | slot->hotplug_slot = hotplug_slot; | 124 | snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); |
125 | snprintf(name, SLOT_NAME_SIZE, "%u", slot->number); | 125 | |
126 | 126 | ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n", | |
127 | ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " | 127 | pci_domain_nr(ctrl->pcie->port->subordinate), |
128 | "hp_slot=%x sun=%x slot_device_offset=%x\n", | 128 | ctrl->pcie->port->subordinate->number, PSN(ctrl)); |
129 | pci_domain_nr(ctrl->pci_dev->subordinate), | 129 | retval = pci_hp_register(hotplug, |
130 | slot->bus, slot->device, slot->hp_slot, slot->number, | 130 | ctrl->pcie->port->subordinate, 0, name); |
131 | ctrl->slot_device_offset); | 131 | if (retval) { |
132 | retval = pci_hp_register(hotplug_slot, | 132 | ctrl_err(ctrl, |
133 | ctrl->pci_dev->subordinate, | 133 | "pci_hp_register failed with error %d\n", retval); |
134 | slot->device, | 134 | goto out; |
135 | name); | 135 | } |
136 | if (retval) { | 136 | get_power_status(hotplug, &info->power_status); |
137 | ctrl_err(ctrl, "pci_hp_register failed with error %d\n", | 137 | get_attention_status(hotplug, &info->attention_status); |
138 | retval); | 138 | get_latch_status(hotplug, &info->latch_status); |
139 | goto error_info; | 139 | get_adapter_status(hotplug, &info->adapter_status); |
140 | } | 140 | out: |
141 | get_power_status(hotplug_slot, &info->power_status); | 141 | if (retval) { |
142 | get_attention_status(hotplug_slot, &info->attention_status); | 142 | kfree(info); |
143 | get_latch_status(hotplug_slot, &info->latch_status); | 143 | kfree(hotplug); |
144 | get_adapter_status(hotplug_slot, &info->adapter_status); | ||
145 | } | 144 | } |
146 | |||
147 | return 0; | ||
148 | error_info: | ||
149 | kfree(info); | ||
150 | error_hpslot: | ||
151 | kfree(hotplug_slot); | ||
152 | error: | ||
153 | return retval; | 145 | return retval; |
154 | } | 146 | } |
155 | 147 | ||
156 | static void cleanup_slots(struct controller *ctrl) | 148 | static void cleanup_slot(struct controller *ctrl) |
157 | { | 149 | { |
158 | struct slot *slot; | 150 | pci_hp_deregister(ctrl->slot->hotplug_slot); |
159 | list_for_each_entry(slot, &ctrl->slot_list, slot_list) | ||
160 | pci_hp_deregister(slot->hotplug_slot); | ||
161 | } | 151 | } |
162 | 152 | ||
163 | /* | 153 | /* |
@@ -173,7 +163,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) | |||
173 | hotplug_slot->info->attention_status = status; | 163 | hotplug_slot->info->attention_status = status; |
174 | 164 | ||
175 | if (ATTN_LED(slot->ctrl)) | 165 | if (ATTN_LED(slot->ctrl)) |
176 | slot->hpc_ops->set_attention_status(slot, status); | 166 | pciehp_set_attention_status(slot, status); |
177 | 167 | ||
178 | return 0; | 168 | return 0; |
179 | } | 169 | } |
@@ -208,7 +198,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
208 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 198 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
209 | __func__, slot_name(slot)); | 199 | __func__, slot_name(slot)); |
210 | 200 | ||
211 | retval = slot->hpc_ops->get_power_status(slot, value); | 201 | retval = pciehp_get_power_status(slot, value); |
212 | if (retval < 0) | 202 | if (retval < 0) |
213 | *value = hotplug_slot->info->power_status; | 203 | *value = hotplug_slot->info->power_status; |
214 | 204 | ||
@@ -223,7 +213,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
223 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 213 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
224 | __func__, slot_name(slot)); | 214 | __func__, slot_name(slot)); |
225 | 215 | ||
226 | retval = slot->hpc_ops->get_attention_status(slot, value); | 216 | retval = pciehp_get_attention_status(slot, value); |
227 | if (retval < 0) | 217 | if (retval < 0) |
228 | *value = hotplug_slot->info->attention_status; | 218 | *value = hotplug_slot->info->attention_status; |
229 | 219 | ||
@@ -238,7 +228,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
238 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 228 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
239 | __func__, slot_name(slot)); | 229 | __func__, slot_name(slot)); |
240 | 230 | ||
241 | retval = slot->hpc_ops->get_latch_status(slot, value); | 231 | retval = pciehp_get_latch_status(slot, value); |
242 | if (retval < 0) | 232 | if (retval < 0) |
243 | *value = hotplug_slot->info->latch_status; | 233 | *value = hotplug_slot->info->latch_status; |
244 | 234 | ||
@@ -253,7 +243,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
253 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 243 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
254 | __func__, slot_name(slot)); | 244 | __func__, slot_name(slot)); |
255 | 245 | ||
256 | retval = slot->hpc_ops->get_adapter_status(slot, value); | 246 | retval = pciehp_get_adapter_status(slot, value); |
257 | if (retval < 0) | 247 | if (retval < 0) |
258 | *value = hotplug_slot->info->adapter_status; | 248 | *value = hotplug_slot->info->adapter_status; |
259 | 249 | ||
@@ -269,7 +259,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | |||
269 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 259 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
270 | __func__, slot_name(slot)); | 260 | __func__, slot_name(slot)); |
271 | 261 | ||
272 | retval = slot->hpc_ops->get_max_bus_speed(slot, value); | 262 | retval = pciehp_get_max_link_speed(slot, value); |
273 | if (retval < 0) | 263 | if (retval < 0) |
274 | *value = PCI_SPEED_UNKNOWN; | 264 | *value = PCI_SPEED_UNKNOWN; |
275 | 265 | ||
@@ -284,7 +274,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe | |||
284 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 274 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
285 | __func__, slot_name(slot)); | 275 | __func__, slot_name(slot)); |
286 | 276 | ||
287 | retval = slot->hpc_ops->get_cur_bus_speed(slot, value); | 277 | retval = pciehp_get_cur_link_speed(slot, value); |
288 | if (retval < 0) | 278 | if (retval < 0) |
289 | *value = PCI_SPEED_UNKNOWN; | 279 | *value = PCI_SPEED_UNKNOWN; |
290 | 280 | ||
@@ -295,7 +285,7 @@ static int pciehp_probe(struct pcie_device *dev) | |||
295 | { | 285 | { |
296 | int rc; | 286 | int rc; |
297 | struct controller *ctrl; | 287 | struct controller *ctrl; |
298 | struct slot *t_slot; | 288 | struct slot *slot; |
299 | u8 value; | 289 | u8 value; |
300 | struct pci_dev *pdev = dev->port; | 290 | struct pci_dev *pdev = dev->port; |
301 | 291 | ||
@@ -314,7 +304,7 @@ static int pciehp_probe(struct pcie_device *dev) | |||
314 | set_service_data(dev, ctrl); | 304 | set_service_data(dev, ctrl); |
315 | 305 | ||
316 | /* Setup the slot information structures */ | 306 | /* Setup the slot information structures */ |
317 | rc = init_slots(ctrl); | 307 | rc = init_slot(ctrl); |
318 | if (rc) { | 308 | if (rc) { |
319 | if (rc == -EBUSY) | 309 | if (rc == -EBUSY) |
320 | ctrl_warn(ctrl, "Slot already registered by another " | 310 | ctrl_warn(ctrl, "Slot already registered by another " |
@@ -332,15 +322,15 @@ static int pciehp_probe(struct pcie_device *dev) | |||
332 | } | 322 | } |
333 | 323 | ||
334 | /* Check if slot is occupied */ | 324 | /* Check if slot is occupied */ |
335 | t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); | 325 | slot = ctrl->slot; |
336 | t_slot->hpc_ops->get_adapter_status(t_slot, &value); | 326 | pciehp_get_adapter_status(slot, &value); |
337 | if (value) { | 327 | if (value) { |
338 | if (pciehp_force) | 328 | if (pciehp_force) |
339 | pciehp_enable_slot(t_slot); | 329 | pciehp_enable_slot(slot); |
340 | } else { | 330 | } else { |
341 | /* Power off slot if not occupied */ | 331 | /* Power off slot if not occupied */ |
342 | if (POWER_CTRL(ctrl)) { | 332 | if (POWER_CTRL(ctrl)) { |
343 | rc = t_slot->hpc_ops->power_off_slot(t_slot); | 333 | rc = pciehp_power_off_slot(slot); |
344 | if (rc) | 334 | if (rc) |
345 | goto err_out_free_ctrl_slot; | 335 | goto err_out_free_ctrl_slot; |
346 | } | 336 | } |
@@ -349,19 +339,19 @@ static int pciehp_probe(struct pcie_device *dev) | |||
349 | return 0; | 339 | return 0; |
350 | 340 | ||
351 | err_out_free_ctrl_slot: | 341 | err_out_free_ctrl_slot: |
352 | cleanup_slots(ctrl); | 342 | cleanup_slot(ctrl); |
353 | err_out_release_ctlr: | 343 | err_out_release_ctlr: |
354 | ctrl->hpc_ops->release_ctlr(ctrl); | 344 | pciehp_release_ctrl(ctrl); |
355 | err_out_none: | 345 | err_out_none: |
356 | return -ENODEV; | 346 | return -ENODEV; |
357 | } | 347 | } |
358 | 348 | ||
359 | static void pciehp_remove (struct pcie_device *dev) | 349 | static void pciehp_remove(struct pcie_device *dev) |
360 | { | 350 | { |
361 | struct controller *ctrl = get_service_data(dev); | 351 | struct controller *ctrl = get_service_data(dev); |
362 | 352 | ||
363 | cleanup_slots(ctrl); | 353 | cleanup_slot(ctrl); |
364 | ctrl->hpc_ops->release_ctlr(ctrl); | 354 | pciehp_release_ctrl(ctrl); |
365 | } | 355 | } |
366 | 356 | ||
367 | #ifdef CONFIG_PM | 357 | #ifdef CONFIG_PM |
@@ -376,20 +366,20 @@ static int pciehp_resume (struct pcie_device *dev) | |||
376 | dev_info(&dev->device, "%s ENTRY\n", __func__); | 366 | dev_info(&dev->device, "%s ENTRY\n", __func__); |
377 | if (pciehp_force) { | 367 | if (pciehp_force) { |
378 | struct controller *ctrl = get_service_data(dev); | 368 | struct controller *ctrl = get_service_data(dev); |
379 | struct slot *t_slot; | 369 | struct slot *slot; |
380 | u8 status; | 370 | u8 status; |
381 | 371 | ||
382 | /* reinitialize the chipset's event detection logic */ | 372 | /* reinitialize the chipset's event detection logic */ |
383 | pcie_enable_notification(ctrl); | 373 | pcie_enable_notification(ctrl); |
384 | 374 | ||
385 | t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); | 375 | slot = ctrl->slot; |
386 | 376 | ||
387 | /* Check if slot is occupied */ | 377 | /* Check if slot is occupied */ |
388 | t_slot->hpc_ops->get_adapter_status(t_slot, &status); | 378 | pciehp_get_adapter_status(slot, &status); |
389 | if (status) | 379 | if (status) |
390 | pciehp_enable_slot(t_slot); | 380 | pciehp_enable_slot(slot); |
391 | else | 381 | else |
392 | pciehp_disable_slot(t_slot); | 382 | pciehp_disable_slot(slot); |
393 | } | 383 | } |
394 | return 0; | 384 | return 0; |
395 | } | 385 | } |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index b97cb4c3e0fe..84487d126e4d 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -82,7 +82,7 @@ u8 pciehp_handle_switch_change(struct slot *p_slot) | |||
82 | /* Switch Change */ | 82 | /* Switch Change */ |
83 | ctrl_dbg(ctrl, "Switch interrupt received\n"); | 83 | ctrl_dbg(ctrl, "Switch interrupt received\n"); |
84 | 84 | ||
85 | p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); | 85 | pciehp_get_latch_status(p_slot, &getstatus); |
86 | if (getstatus) { | 86 | if (getstatus) { |
87 | /* | 87 | /* |
88 | * Switch opened | 88 | * Switch opened |
@@ -114,7 +114,7 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) | |||
114 | /* Switch is open, assume a presence change | 114 | /* Switch is open, assume a presence change |
115 | * Save the presence state | 115 | * Save the presence state |
116 | */ | 116 | */ |
117 | p_slot->hpc_ops->get_adapter_status(p_slot, &presence_save); | 117 | pciehp_get_adapter_status(p_slot, &presence_save); |
118 | if (presence_save) { | 118 | if (presence_save) { |
119 | /* | 119 | /* |
120 | * Card Present | 120 | * Card Present |
@@ -143,7 +143,7 @@ u8 pciehp_handle_power_fault(struct slot *p_slot) | |||
143 | /* power fault */ | 143 | /* power fault */ |
144 | ctrl_dbg(ctrl, "Power fault interrupt received\n"); | 144 | ctrl_dbg(ctrl, "Power fault interrupt received\n"); |
145 | 145 | ||
146 | if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { | 146 | if (!pciehp_query_power_fault(p_slot)) { |
147 | /* | 147 | /* |
148 | * power fault Cleared | 148 | * power fault Cleared |
149 | */ | 149 | */ |
@@ -172,7 +172,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) | |||
172 | { | 172 | { |
173 | /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ | 173 | /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ |
174 | if (POWER_CTRL(ctrl)) { | 174 | if (POWER_CTRL(ctrl)) { |
175 | if (pslot->hpc_ops->power_off_slot(pslot)) { | 175 | if (pciehp_power_off_slot(pslot)) { |
176 | ctrl_err(ctrl, | 176 | ctrl_err(ctrl, |
177 | "Issue of Slot Power Off command failed\n"); | 177 | "Issue of Slot Power Off command failed\n"); |
178 | return; | 178 | return; |
@@ -186,10 +186,10 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) | |||
186 | } | 186 | } |
187 | 187 | ||
188 | if (PWR_LED(ctrl)) | 188 | if (PWR_LED(ctrl)) |
189 | pslot->hpc_ops->green_led_off(pslot); | 189 | pciehp_green_led_off(pslot); |
190 | 190 | ||
191 | if (ATTN_LED(ctrl)) { | 191 | if (ATTN_LED(ctrl)) { |
192 | if (pslot->hpc_ops->set_attention_status(pslot, 1)) { | 192 | if (pciehp_set_attention_status(pslot, 1)) { |
193 | ctrl_err(ctrl, | 193 | ctrl_err(ctrl, |
194 | "Issue of Set Attention Led command failed\n"); | 194 | "Issue of Set Attention Led command failed\n"); |
195 | return; | 195 | return; |
@@ -208,24 +208,20 @@ static int board_added(struct slot *p_slot) | |||
208 | { | 208 | { |
209 | int retval = 0; | 209 | int retval = 0; |
210 | struct controller *ctrl = p_slot->ctrl; | 210 | struct controller *ctrl = p_slot->ctrl; |
211 | struct pci_bus *parent = ctrl->pci_dev->subordinate; | 211 | struct pci_bus *parent = ctrl->pcie->port->subordinate; |
212 | |||
213 | ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d, %d\n", | ||
214 | __func__, p_slot->device, ctrl->slot_device_offset, | ||
215 | p_slot->hp_slot); | ||
216 | 212 | ||
217 | if (POWER_CTRL(ctrl)) { | 213 | if (POWER_CTRL(ctrl)) { |
218 | /* Power on slot */ | 214 | /* Power on slot */ |
219 | retval = p_slot->hpc_ops->power_on_slot(p_slot); | 215 | retval = pciehp_power_on_slot(p_slot); |
220 | if (retval) | 216 | if (retval) |
221 | return retval; | 217 | return retval; |
222 | } | 218 | } |
223 | 219 | ||
224 | if (PWR_LED(ctrl)) | 220 | if (PWR_LED(ctrl)) |
225 | p_slot->hpc_ops->green_led_blink(p_slot); | 221 | pciehp_green_led_blink(p_slot); |
226 | 222 | ||
227 | /* Check link training status */ | 223 | /* Check link training status */ |
228 | retval = p_slot->hpc_ops->check_lnk_status(ctrl); | 224 | retval = pciehp_check_link_status(ctrl); |
229 | if (retval) { | 225 | if (retval) { |
230 | ctrl_err(ctrl, "Failed to check link status\n"); | 226 | ctrl_err(ctrl, "Failed to check link status\n"); |
231 | set_slot_off(ctrl, p_slot); | 227 | set_slot_off(ctrl, p_slot); |
@@ -233,21 +229,21 @@ static int board_added(struct slot *p_slot) | |||
233 | } | 229 | } |
234 | 230 | ||
235 | /* Check for a power fault */ | 231 | /* Check for a power fault */ |
236 | if (p_slot->hpc_ops->query_power_fault(p_slot)) { | 232 | if (pciehp_query_power_fault(p_slot)) { |
237 | ctrl_dbg(ctrl, "Power fault detected\n"); | 233 | ctrl_dbg(ctrl, "Power fault detected\n"); |
238 | retval = POWER_FAILURE; | 234 | retval = -EIO; |
239 | goto err_exit; | 235 | goto err_exit; |
240 | } | 236 | } |
241 | 237 | ||
242 | retval = pciehp_configure_device(p_slot); | 238 | retval = pciehp_configure_device(p_slot); |
243 | if (retval) { | 239 | if (retval) { |
244 | ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n", | 240 | ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n", |
245 | pci_domain_nr(parent), p_slot->bus, p_slot->device); | 241 | pci_domain_nr(parent), parent->number); |
246 | goto err_exit; | 242 | goto err_exit; |
247 | } | 243 | } |
248 | 244 | ||
249 | if (PWR_LED(ctrl)) | 245 | if (PWR_LED(ctrl)) |
250 | p_slot->hpc_ops->green_led_on(p_slot); | 246 | pciehp_green_led_on(p_slot); |
251 | 247 | ||
252 | return 0; | 248 | return 0; |
253 | 249 | ||
@@ -269,11 +265,9 @@ static int remove_board(struct slot *p_slot) | |||
269 | if (retval) | 265 | if (retval) |
270 | return retval; | 266 | return retval; |
271 | 267 | ||
272 | ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, p_slot->hp_slot); | ||
273 | |||
274 | if (POWER_CTRL(ctrl)) { | 268 | if (POWER_CTRL(ctrl)) { |
275 | /* power off slot */ | 269 | /* power off slot */ |
276 | retval = p_slot->hpc_ops->power_off_slot(p_slot); | 270 | retval = pciehp_power_off_slot(p_slot); |
277 | if (retval) { | 271 | if (retval) { |
278 | ctrl_err(ctrl, | 272 | ctrl_err(ctrl, |
279 | "Issue of Slot Disable command failed\n"); | 273 | "Issue of Slot Disable command failed\n"); |
@@ -287,9 +281,9 @@ static int remove_board(struct slot *p_slot) | |||
287 | msleep(1000); | 281 | msleep(1000); |
288 | } | 282 | } |
289 | 283 | ||
284 | /* turn off Green LED */ | ||
290 | if (PWR_LED(ctrl)) | 285 | if (PWR_LED(ctrl)) |
291 | /* turn off Green LED */ | 286 | pciehp_green_led_off(p_slot); |
292 | p_slot->hpc_ops->green_led_off(p_slot); | ||
293 | 287 | ||
294 | return 0; | 288 | return 0; |
295 | } | 289 | } |
@@ -317,18 +311,17 @@ static void pciehp_power_thread(struct work_struct *work) | |||
317 | case POWEROFF_STATE: | 311 | case POWEROFF_STATE: |
318 | mutex_unlock(&p_slot->lock); | 312 | mutex_unlock(&p_slot->lock); |
319 | ctrl_dbg(p_slot->ctrl, | 313 | ctrl_dbg(p_slot->ctrl, |
320 | "Disabling domain:bus:device=%04x:%02x:%02x\n", | 314 | "Disabling domain:bus:device=%04x:%02x:00\n", |
321 | pci_domain_nr(p_slot->ctrl->pci_dev->subordinate), | 315 | pci_domain_nr(p_slot->ctrl->pcie->port->subordinate), |
322 | p_slot->bus, p_slot->device); | 316 | p_slot->ctrl->pcie->port->subordinate->number); |
323 | pciehp_disable_slot(p_slot); | 317 | pciehp_disable_slot(p_slot); |
324 | mutex_lock(&p_slot->lock); | 318 | mutex_lock(&p_slot->lock); |
325 | p_slot->state = STATIC_STATE; | 319 | p_slot->state = STATIC_STATE; |
326 | break; | 320 | break; |
327 | case POWERON_STATE: | 321 | case POWERON_STATE: |
328 | mutex_unlock(&p_slot->lock); | 322 | mutex_unlock(&p_slot->lock); |
329 | if (pciehp_enable_slot(p_slot) && | 323 | if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl)) |
330 | PWR_LED(p_slot->ctrl)) | 324 | pciehp_green_led_off(p_slot); |
331 | p_slot->hpc_ops->green_led_off(p_slot); | ||
332 | mutex_lock(&p_slot->lock); | 325 | mutex_lock(&p_slot->lock); |
333 | p_slot->state = STATIC_STATE; | 326 | p_slot->state = STATIC_STATE; |
334 | break; | 327 | break; |
@@ -379,10 +372,10 @@ static int update_slot_info(struct slot *slot) | |||
379 | if (!info) | 372 | if (!info) |
380 | return -ENOMEM; | 373 | return -ENOMEM; |
381 | 374 | ||
382 | slot->hpc_ops->get_power_status(slot, &(info->power_status)); | 375 | pciehp_get_power_status(slot, &info->power_status); |
383 | slot->hpc_ops->get_attention_status(slot, &(info->attention_status)); | 376 | pciehp_get_attention_status(slot, &info->attention_status); |
384 | slot->hpc_ops->get_latch_status(slot, &(info->latch_status)); | 377 | pciehp_get_latch_status(slot, &info->latch_status); |
385 | slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status)); | 378 | pciehp_get_adapter_status(slot, &info->adapter_status); |
386 | 379 | ||
387 | result = pci_hp_change_slot_info(slot->hotplug_slot, info); | 380 | result = pci_hp_change_slot_info(slot->hotplug_slot, info); |
388 | kfree (info); | 381 | kfree (info); |
@@ -399,7 +392,7 @@ static void handle_button_press_event(struct slot *p_slot) | |||
399 | 392 | ||
400 | switch (p_slot->state) { | 393 | switch (p_slot->state) { |
401 | case STATIC_STATE: | 394 | case STATIC_STATE: |
402 | p_slot->hpc_ops->get_power_status(p_slot, &getstatus); | 395 | pciehp_get_power_status(p_slot, &getstatus); |
403 | if (getstatus) { | 396 | if (getstatus) { |
404 | p_slot->state = BLINKINGOFF_STATE; | 397 | p_slot->state = BLINKINGOFF_STATE; |
405 | ctrl_info(ctrl, | 398 | ctrl_info(ctrl, |
@@ -413,9 +406,9 @@ static void handle_button_press_event(struct slot *p_slot) | |||
413 | } | 406 | } |
414 | /* blink green LED and turn off amber */ | 407 | /* blink green LED and turn off amber */ |
415 | if (PWR_LED(ctrl)) | 408 | if (PWR_LED(ctrl)) |
416 | p_slot->hpc_ops->green_led_blink(p_slot); | 409 | pciehp_green_led_blink(p_slot); |
417 | if (ATTN_LED(ctrl)) | 410 | if (ATTN_LED(ctrl)) |
418 | p_slot->hpc_ops->set_attention_status(p_slot, 0); | 411 | pciehp_set_attention_status(p_slot, 0); |
419 | 412 | ||
420 | schedule_delayed_work(&p_slot->work, 5*HZ); | 413 | schedule_delayed_work(&p_slot->work, 5*HZ); |
421 | break; | 414 | break; |
@@ -430,13 +423,13 @@ static void handle_button_press_event(struct slot *p_slot) | |||
430 | cancel_delayed_work(&p_slot->work); | 423 | cancel_delayed_work(&p_slot->work); |
431 | if (p_slot->state == BLINKINGOFF_STATE) { | 424 | if (p_slot->state == BLINKINGOFF_STATE) { |
432 | if (PWR_LED(ctrl)) | 425 | if (PWR_LED(ctrl)) |
433 | p_slot->hpc_ops->green_led_on(p_slot); | 426 | pciehp_green_led_on(p_slot); |
434 | } else { | 427 | } else { |
435 | if (PWR_LED(ctrl)) | 428 | if (PWR_LED(ctrl)) |
436 | p_slot->hpc_ops->green_led_off(p_slot); | 429 | pciehp_green_led_off(p_slot); |
437 | } | 430 | } |
438 | if (ATTN_LED(ctrl)) | 431 | if (ATTN_LED(ctrl)) |
439 | p_slot->hpc_ops->set_attention_status(p_slot, 0); | 432 | pciehp_set_attention_status(p_slot, 0); |
440 | ctrl_info(ctrl, "PCI slot #%s - action canceled " | 433 | ctrl_info(ctrl, "PCI slot #%s - action canceled " |
441 | "due to button press\n", slot_name(p_slot)); | 434 | "due to button press\n", slot_name(p_slot)); |
442 | p_slot->state = STATIC_STATE; | 435 | p_slot->state = STATIC_STATE; |
@@ -474,7 +467,7 @@ static void handle_surprise_event(struct slot *p_slot) | |||
474 | info->p_slot = p_slot; | 467 | info->p_slot = p_slot; |
475 | INIT_WORK(&info->work, pciehp_power_thread); | 468 | INIT_WORK(&info->work, pciehp_power_thread); |
476 | 469 | ||
477 | p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); | 470 | pciehp_get_adapter_status(p_slot, &getstatus); |
478 | if (!getstatus) | 471 | if (!getstatus) |
479 | p_slot->state = POWEROFF_STATE; | 472 | p_slot->state = POWEROFF_STATE; |
480 | else | 473 | else |
@@ -498,9 +491,9 @@ static void interrupt_event_handler(struct work_struct *work) | |||
498 | if (!POWER_CTRL(ctrl)) | 491 | if (!POWER_CTRL(ctrl)) |
499 | break; | 492 | break; |
500 | if (ATTN_LED(ctrl)) | 493 | if (ATTN_LED(ctrl)) |
501 | p_slot->hpc_ops->set_attention_status(p_slot, 1); | 494 | pciehp_set_attention_status(p_slot, 1); |
502 | if (PWR_LED(ctrl)) | 495 | if (PWR_LED(ctrl)) |
503 | p_slot->hpc_ops->green_led_off(p_slot); | 496 | pciehp_green_led_off(p_slot); |
504 | break; | 497 | break; |
505 | case INT_PRESENCE_ON: | 498 | case INT_PRESENCE_ON: |
506 | case INT_PRESENCE_OFF: | 499 | case INT_PRESENCE_OFF: |
@@ -525,45 +518,38 @@ int pciehp_enable_slot(struct slot *p_slot) | |||
525 | int rc; | 518 | int rc; |
526 | struct controller *ctrl = p_slot->ctrl; | 519 | struct controller *ctrl = p_slot->ctrl; |
527 | 520 | ||
528 | /* Check to see if (latch closed, card present, power off) */ | 521 | rc = pciehp_get_adapter_status(p_slot, &getstatus); |
529 | mutex_lock(&p_slot->ctrl->crit_sect); | ||
530 | |||
531 | rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); | ||
532 | if (rc || !getstatus) { | 522 | if (rc || !getstatus) { |
533 | ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); | 523 | ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); |
534 | mutex_unlock(&p_slot->ctrl->crit_sect); | ||
535 | return -ENODEV; | 524 | return -ENODEV; |
536 | } | 525 | } |
537 | if (MRL_SENS(p_slot->ctrl)) { | 526 | if (MRL_SENS(p_slot->ctrl)) { |
538 | rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); | 527 | rc = pciehp_get_latch_status(p_slot, &getstatus); |
539 | if (rc || getstatus) { | 528 | if (rc || getstatus) { |
540 | ctrl_info(ctrl, "Latch open on slot(%s)\n", | 529 | ctrl_info(ctrl, "Latch open on slot(%s)\n", |
541 | slot_name(p_slot)); | 530 | slot_name(p_slot)); |
542 | mutex_unlock(&p_slot->ctrl->crit_sect); | ||
543 | return -ENODEV; | 531 | return -ENODEV; |
544 | } | 532 | } |
545 | } | 533 | } |
546 | 534 | ||
547 | if (POWER_CTRL(p_slot->ctrl)) { | 535 | if (POWER_CTRL(p_slot->ctrl)) { |
548 | rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); | 536 | rc = pciehp_get_power_status(p_slot, &getstatus); |
549 | if (rc || getstatus) { | 537 | if (rc || getstatus) { |
550 | ctrl_info(ctrl, "Already enabled on slot(%s)\n", | 538 | ctrl_info(ctrl, "Already enabled on slot(%s)\n", |
551 | slot_name(p_slot)); | 539 | slot_name(p_slot)); |
552 | mutex_unlock(&p_slot->ctrl->crit_sect); | ||
553 | return -EINVAL; | 540 | return -EINVAL; |
554 | } | 541 | } |
555 | } | 542 | } |
556 | 543 | ||
557 | p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); | 544 | pciehp_get_latch_status(p_slot, &getstatus); |
558 | 545 | ||
559 | rc = board_added(p_slot); | 546 | rc = board_added(p_slot); |
560 | if (rc) { | 547 | if (rc) { |
561 | p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); | 548 | pciehp_get_latch_status(p_slot, &getstatus); |
562 | } | 549 | } |
563 | 550 | ||
564 | update_slot_info(p_slot); | 551 | update_slot_info(p_slot); |
565 | 552 | ||
566 | mutex_unlock(&p_slot->ctrl->crit_sect); | ||
567 | return rc; | 553 | return rc; |
568 | } | 554 | } |
569 | 555 | ||
@@ -577,35 +563,29 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
577 | if (!p_slot->ctrl) | 563 | if (!p_slot->ctrl) |
578 | return 1; | 564 | return 1; |
579 | 565 | ||
580 | /* Check to see if (latch closed, card present, power on) */ | ||
581 | mutex_lock(&p_slot->ctrl->crit_sect); | ||
582 | |||
583 | if (!HP_SUPR_RM(p_slot->ctrl)) { | 566 | if (!HP_SUPR_RM(p_slot->ctrl)) { |
584 | ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); | 567 | ret = pciehp_get_adapter_status(p_slot, &getstatus); |
585 | if (ret || !getstatus) { | 568 | if (ret || !getstatus) { |
586 | ctrl_info(ctrl, "No adapter on slot(%s)\n", | 569 | ctrl_info(ctrl, "No adapter on slot(%s)\n", |
587 | slot_name(p_slot)); | 570 | slot_name(p_slot)); |
588 | mutex_unlock(&p_slot->ctrl->crit_sect); | ||
589 | return -ENODEV; | 571 | return -ENODEV; |
590 | } | 572 | } |
591 | } | 573 | } |
592 | 574 | ||
593 | if (MRL_SENS(p_slot->ctrl)) { | 575 | if (MRL_SENS(p_slot->ctrl)) { |
594 | ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); | 576 | ret = pciehp_get_latch_status(p_slot, &getstatus); |
595 | if (ret || getstatus) { | 577 | if (ret || getstatus) { |
596 | ctrl_info(ctrl, "Latch open on slot(%s)\n", | 578 | ctrl_info(ctrl, "Latch open on slot(%s)\n", |
597 | slot_name(p_slot)); | 579 | slot_name(p_slot)); |
598 | mutex_unlock(&p_slot->ctrl->crit_sect); | ||
599 | return -ENODEV; | 580 | return -ENODEV; |
600 | } | 581 | } |
601 | } | 582 | } |
602 | 583 | ||
603 | if (POWER_CTRL(p_slot->ctrl)) { | 584 | if (POWER_CTRL(p_slot->ctrl)) { |
604 | ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); | 585 | ret = pciehp_get_power_status(p_slot, &getstatus); |
605 | if (ret || !getstatus) { | 586 | if (ret || !getstatus) { |
606 | ctrl_info(ctrl, "Already disabled on slot(%s)\n", | 587 | ctrl_info(ctrl, "Already disabled on slot(%s)\n", |
607 | slot_name(p_slot)); | 588 | slot_name(p_slot)); |
608 | mutex_unlock(&p_slot->ctrl->crit_sect); | ||
609 | return -EINVAL; | 589 | return -EINVAL; |
610 | } | 590 | } |
611 | } | 591 | } |
@@ -613,7 +593,6 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
613 | ret = remove_board(p_slot); | 593 | ret = remove_board(p_slot); |
614 | update_slot_info(p_slot); | 594 | update_slot_info(p_slot); |
615 | 595 | ||
616 | mutex_unlock(&p_slot->ctrl->crit_sect); | ||
617 | return ret; | 596 | return ret; |
618 | } | 597 | } |
619 | 598 | ||
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 271f917b6f2c..9ef4605c1ef6 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -44,25 +44,25 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0); | |||
44 | 44 | ||
45 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) | 45 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) |
46 | { | 46 | { |
47 | struct pci_dev *dev = ctrl->pci_dev; | 47 | struct pci_dev *dev = ctrl->pcie->port; |
48 | return pci_read_config_word(dev, ctrl->cap_base + reg, value); | 48 | return pci_read_config_word(dev, ctrl->cap_base + reg, value); |
49 | } | 49 | } |
50 | 50 | ||
51 | static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) | 51 | static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) |
52 | { | 52 | { |
53 | struct pci_dev *dev = ctrl->pci_dev; | 53 | struct pci_dev *dev = ctrl->pcie->port; |
54 | return pci_read_config_dword(dev, ctrl->cap_base + reg, value); | 54 | return pci_read_config_dword(dev, ctrl->cap_base + reg, value); |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) | 57 | static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) |
58 | { | 58 | { |
59 | struct pci_dev *dev = ctrl->pci_dev; | 59 | struct pci_dev *dev = ctrl->pcie->port; |
60 | return pci_write_config_word(dev, ctrl->cap_base + reg, value); | 60 | return pci_write_config_word(dev, ctrl->cap_base + reg, value); |
61 | } | 61 | } |
62 | 62 | ||
63 | static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) | 63 | static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) |
64 | { | 64 | { |
65 | struct pci_dev *dev = ctrl->pci_dev; | 65 | struct pci_dev *dev = ctrl->pcie->port; |
66 | return pci_write_config_dword(dev, ctrl->cap_base + reg, value); | 66 | return pci_write_config_dword(dev, ctrl->cap_base + reg, value); |
67 | } | 67 | } |
68 | 68 | ||
@@ -266,7 +266,7 @@ static void pcie_wait_link_active(struct controller *ctrl) | |||
266 | ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n"); | 266 | ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n"); |
267 | } | 267 | } |
268 | 268 | ||
269 | static int hpc_check_lnk_status(struct controller *ctrl) | 269 | int pciehp_check_link_status(struct controller *ctrl) |
270 | { | 270 | { |
271 | u16 lnk_status; | 271 | u16 lnk_status; |
272 | int retval = 0; | 272 | int retval = 0; |
@@ -305,7 +305,7 @@ static int hpc_check_lnk_status(struct controller *ctrl) | |||
305 | return retval; | 305 | return retval; |
306 | } | 306 | } |
307 | 307 | ||
308 | static int hpc_get_attention_status(struct slot *slot, u8 *status) | 308 | int pciehp_get_attention_status(struct slot *slot, u8 *status) |
309 | { | 309 | { |
310 | struct controller *ctrl = slot->ctrl; | 310 | struct controller *ctrl = slot->ctrl; |
311 | u16 slot_ctrl; | 311 | u16 slot_ctrl; |
@@ -344,7 +344,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status) | |||
344 | return 0; | 344 | return 0; |
345 | } | 345 | } |
346 | 346 | ||
347 | static int hpc_get_power_status(struct slot *slot, u8 *status) | 347 | int pciehp_get_power_status(struct slot *slot, u8 *status) |
348 | { | 348 | { |
349 | struct controller *ctrl = slot->ctrl; | 349 | struct controller *ctrl = slot->ctrl; |
350 | u16 slot_ctrl; | 350 | u16 slot_ctrl; |
@@ -376,7 +376,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status) | |||
376 | return retval; | 376 | return retval; |
377 | } | 377 | } |
378 | 378 | ||
379 | static int hpc_get_latch_status(struct slot *slot, u8 *status) | 379 | int pciehp_get_latch_status(struct slot *slot, u8 *status) |
380 | { | 380 | { |
381 | struct controller *ctrl = slot->ctrl; | 381 | struct controller *ctrl = slot->ctrl; |
382 | u16 slot_status; | 382 | u16 slot_status; |
@@ -392,7 +392,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status) | |||
392 | return 0; | 392 | return 0; |
393 | } | 393 | } |
394 | 394 | ||
395 | static int hpc_get_adapter_status(struct slot *slot, u8 *status) | 395 | int pciehp_get_adapter_status(struct slot *slot, u8 *status) |
396 | { | 396 | { |
397 | struct controller *ctrl = slot->ctrl; | 397 | struct controller *ctrl = slot->ctrl; |
398 | u16 slot_status; | 398 | u16 slot_status; |
@@ -408,7 +408,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status) | |||
408 | return 0; | 408 | return 0; |
409 | } | 409 | } |
410 | 410 | ||
411 | static int hpc_query_power_fault(struct slot *slot) | 411 | int pciehp_query_power_fault(struct slot *slot) |
412 | { | 412 | { |
413 | struct controller *ctrl = slot->ctrl; | 413 | struct controller *ctrl = slot->ctrl; |
414 | u16 slot_status; | 414 | u16 slot_status; |
@@ -422,7 +422,7 @@ static int hpc_query_power_fault(struct slot *slot) | |||
422 | return !!(slot_status & PCI_EXP_SLTSTA_PFD); | 422 | return !!(slot_status & PCI_EXP_SLTSTA_PFD); |
423 | } | 423 | } |
424 | 424 | ||
425 | static int hpc_set_attention_status(struct slot *slot, u8 value) | 425 | int pciehp_set_attention_status(struct slot *slot, u8 value) |
426 | { | 426 | { |
427 | struct controller *ctrl = slot->ctrl; | 427 | struct controller *ctrl = slot->ctrl; |
428 | u16 slot_cmd; | 428 | u16 slot_cmd; |
@@ -450,7 +450,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) | |||
450 | return rc; | 450 | return rc; |
451 | } | 451 | } |
452 | 452 | ||
453 | static void hpc_set_green_led_on(struct slot *slot) | 453 | void pciehp_green_led_on(struct slot *slot) |
454 | { | 454 | { |
455 | struct controller *ctrl = slot->ctrl; | 455 | struct controller *ctrl = slot->ctrl; |
456 | u16 slot_cmd; | 456 | u16 slot_cmd; |
@@ -463,7 +463,7 @@ static void hpc_set_green_led_on(struct slot *slot) | |||
463 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 463 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
464 | } | 464 | } |
465 | 465 | ||
466 | static void hpc_set_green_led_off(struct slot *slot) | 466 | void pciehp_green_led_off(struct slot *slot) |
467 | { | 467 | { |
468 | struct controller *ctrl = slot->ctrl; | 468 | struct controller *ctrl = slot->ctrl; |
469 | u16 slot_cmd; | 469 | u16 slot_cmd; |
@@ -476,7 +476,7 @@ static void hpc_set_green_led_off(struct slot *slot) | |||
476 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 476 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
477 | } | 477 | } |
478 | 478 | ||
479 | static void hpc_set_green_led_blink(struct slot *slot) | 479 | void pciehp_green_led_blink(struct slot *slot) |
480 | { | 480 | { |
481 | struct controller *ctrl = slot->ctrl; | 481 | struct controller *ctrl = slot->ctrl; |
482 | u16 slot_cmd; | 482 | u16 slot_cmd; |
@@ -489,7 +489,7 @@ static void hpc_set_green_led_blink(struct slot *slot) | |||
489 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 489 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
490 | } | 490 | } |
491 | 491 | ||
492 | static int hpc_power_on_slot(struct slot * slot) | 492 | int pciehp_power_on_slot(struct slot * slot) |
493 | { | 493 | { |
494 | struct controller *ctrl = slot->ctrl; | 494 | struct controller *ctrl = slot->ctrl; |
495 | u16 slot_cmd; | 495 | u16 slot_cmd; |
@@ -497,8 +497,6 @@ static int hpc_power_on_slot(struct slot * slot) | |||
497 | u16 slot_status; | 497 | u16 slot_status; |
498 | int retval = 0; | 498 | int retval = 0; |
499 | 499 | ||
500 | ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); | ||
501 | |||
502 | /* Clear sticky power-fault bit from previous power failures */ | 500 | /* Clear sticky power-fault bit from previous power failures */ |
503 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); | 501 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
504 | if (retval) { | 502 | if (retval) { |
@@ -539,7 +537,7 @@ static int hpc_power_on_slot(struct slot * slot) | |||
539 | 537 | ||
540 | static inline int pcie_mask_bad_dllp(struct controller *ctrl) | 538 | static inline int pcie_mask_bad_dllp(struct controller *ctrl) |
541 | { | 539 | { |
542 | struct pci_dev *dev = ctrl->pci_dev; | 540 | struct pci_dev *dev = ctrl->pcie->port; |
543 | int pos; | 541 | int pos; |
544 | u32 reg; | 542 | u32 reg; |
545 | 543 | ||
@@ -556,7 +554,7 @@ static inline int pcie_mask_bad_dllp(struct controller *ctrl) | |||
556 | 554 | ||
557 | static inline void pcie_unmask_bad_dllp(struct controller *ctrl) | 555 | static inline void pcie_unmask_bad_dllp(struct controller *ctrl) |
558 | { | 556 | { |
559 | struct pci_dev *dev = ctrl->pci_dev; | 557 | struct pci_dev *dev = ctrl->pcie->port; |
560 | u32 reg; | 558 | u32 reg; |
561 | int pos; | 559 | int pos; |
562 | 560 | ||
@@ -570,7 +568,7 @@ static inline void pcie_unmask_bad_dllp(struct controller *ctrl) | |||
570 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg); | 568 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg); |
571 | } | 569 | } |
572 | 570 | ||
573 | static int hpc_power_off_slot(struct slot * slot) | 571 | int pciehp_power_off_slot(struct slot * slot) |
574 | { | 572 | { |
575 | struct controller *ctrl = slot->ctrl; | 573 | struct controller *ctrl = slot->ctrl; |
576 | u16 slot_cmd; | 574 | u16 slot_cmd; |
@@ -578,8 +576,6 @@ static int hpc_power_off_slot(struct slot * slot) | |||
578 | int retval = 0; | 576 | int retval = 0; |
579 | int changed; | 577 | int changed; |
580 | 578 | ||
581 | ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); | ||
582 | |||
583 | /* | 579 | /* |
584 | * Set Bad DLLP Mask bit in Correctable Error Mask | 580 | * Set Bad DLLP Mask bit in Correctable Error Mask |
585 | * Register. This is the workaround against Bad DLLP error | 581 | * Register. This is the workaround against Bad DLLP error |
@@ -614,8 +610,8 @@ static int hpc_power_off_slot(struct slot * slot) | |||
614 | static irqreturn_t pcie_isr(int irq, void *dev_id) | 610 | static irqreturn_t pcie_isr(int irq, void *dev_id) |
615 | { | 611 | { |
616 | struct controller *ctrl = (struct controller *)dev_id; | 612 | struct controller *ctrl = (struct controller *)dev_id; |
613 | struct slot *slot = ctrl->slot; | ||
617 | u16 detected, intr_loc; | 614 | u16 detected, intr_loc; |
618 | struct slot *p_slot; | ||
619 | 615 | ||
620 | /* | 616 | /* |
621 | * In order to guarantee that all interrupt events are | 617 | * In order to guarantee that all interrupt events are |
@@ -656,29 +652,27 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
656 | if (!(intr_loc & ~PCI_EXP_SLTSTA_CC)) | 652 | if (!(intr_loc & ~PCI_EXP_SLTSTA_CC)) |
657 | return IRQ_HANDLED; | 653 | return IRQ_HANDLED; |
658 | 654 | ||
659 | p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); | ||
660 | |||
661 | /* Check MRL Sensor Changed */ | 655 | /* Check MRL Sensor Changed */ |
662 | if (intr_loc & PCI_EXP_SLTSTA_MRLSC) | 656 | if (intr_loc & PCI_EXP_SLTSTA_MRLSC) |
663 | pciehp_handle_switch_change(p_slot); | 657 | pciehp_handle_switch_change(slot); |
664 | 658 | ||
665 | /* Check Attention Button Pressed */ | 659 | /* Check Attention Button Pressed */ |
666 | if (intr_loc & PCI_EXP_SLTSTA_ABP) | 660 | if (intr_loc & PCI_EXP_SLTSTA_ABP) |
667 | pciehp_handle_attention_button(p_slot); | 661 | pciehp_handle_attention_button(slot); |
668 | 662 | ||
669 | /* Check Presence Detect Changed */ | 663 | /* Check Presence Detect Changed */ |
670 | if (intr_loc & PCI_EXP_SLTSTA_PDC) | 664 | if (intr_loc & PCI_EXP_SLTSTA_PDC) |
671 | pciehp_handle_presence_change(p_slot); | 665 | pciehp_handle_presence_change(slot); |
672 | 666 | ||
673 | /* Check Power Fault Detected */ | 667 | /* Check Power Fault Detected */ |
674 | if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { | 668 | if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { |
675 | ctrl->power_fault_detected = 1; | 669 | ctrl->power_fault_detected = 1; |
676 | pciehp_handle_power_fault(p_slot); | 670 | pciehp_handle_power_fault(slot); |
677 | } | 671 | } |
678 | return IRQ_HANDLED; | 672 | return IRQ_HANDLED; |
679 | } | 673 | } |
680 | 674 | ||
681 | static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | 675 | int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value) |
682 | { | 676 | { |
683 | struct controller *ctrl = slot->ctrl; | 677 | struct controller *ctrl = slot->ctrl; |
684 | enum pcie_link_speed lnk_speed; | 678 | enum pcie_link_speed lnk_speed; |
@@ -709,7 +703,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
709 | return retval; | 703 | return retval; |
710 | } | 704 | } |
711 | 705 | ||
712 | static int hpc_get_max_lnk_width(struct slot *slot, | 706 | int pciehp_get_max_lnk_width(struct slot *slot, |
713 | enum pcie_link_width *value) | 707 | enum pcie_link_width *value) |
714 | { | 708 | { |
715 | struct controller *ctrl = slot->ctrl; | 709 | struct controller *ctrl = slot->ctrl; |
@@ -759,7 +753,7 @@ static int hpc_get_max_lnk_width(struct slot *slot, | |||
759 | return retval; | 753 | return retval; |
760 | } | 754 | } |
761 | 755 | ||
762 | static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | 756 | int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value) |
763 | { | 757 | { |
764 | struct controller *ctrl = slot->ctrl; | 758 | struct controller *ctrl = slot->ctrl; |
765 | enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; | 759 | enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; |
@@ -791,7 +785,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
791 | return retval; | 785 | return retval; |
792 | } | 786 | } |
793 | 787 | ||
794 | static int hpc_get_cur_lnk_width(struct slot *slot, | 788 | int pciehp_get_cur_lnk_width(struct slot *slot, |
795 | enum pcie_link_width *value) | 789 | enum pcie_link_width *value) |
796 | { | 790 | { |
797 | struct controller *ctrl = slot->ctrl; | 791 | struct controller *ctrl = slot->ctrl; |
@@ -842,30 +836,6 @@ static int hpc_get_cur_lnk_width(struct slot *slot, | |||
842 | return retval; | 836 | return retval; |
843 | } | 837 | } |
844 | 838 | ||
845 | static void pcie_release_ctrl(struct controller *ctrl); | ||
846 | static struct hpc_ops pciehp_hpc_ops = { | ||
847 | .power_on_slot = hpc_power_on_slot, | ||
848 | .power_off_slot = hpc_power_off_slot, | ||
849 | .set_attention_status = hpc_set_attention_status, | ||
850 | .get_power_status = hpc_get_power_status, | ||
851 | .get_attention_status = hpc_get_attention_status, | ||
852 | .get_latch_status = hpc_get_latch_status, | ||
853 | .get_adapter_status = hpc_get_adapter_status, | ||
854 | |||
855 | .get_max_bus_speed = hpc_get_max_lnk_speed, | ||
856 | .get_cur_bus_speed = hpc_get_cur_lnk_speed, | ||
857 | .get_max_lnk_width = hpc_get_max_lnk_width, | ||
858 | .get_cur_lnk_width = hpc_get_cur_lnk_width, | ||
859 | |||
860 | .query_power_fault = hpc_query_power_fault, | ||
861 | .green_led_on = hpc_set_green_led_on, | ||
862 | .green_led_off = hpc_set_green_led_off, | ||
863 | .green_led_blink = hpc_set_green_led_blink, | ||
864 | |||
865 | .release_ctlr = pcie_release_ctrl, | ||
866 | .check_lnk_status = hpc_check_lnk_status, | ||
867 | }; | ||
868 | |||
869 | int pcie_enable_notification(struct controller *ctrl) | 839 | int pcie_enable_notification(struct controller *ctrl) |
870 | { | 840 | { |
871 | u16 cmd, mask; | 841 | u16 cmd, mask; |
@@ -930,23 +900,16 @@ static int pcie_init_slot(struct controller *ctrl) | |||
930 | if (!slot) | 900 | if (!slot) |
931 | return -ENOMEM; | 901 | return -ENOMEM; |
932 | 902 | ||
933 | slot->hp_slot = 0; | ||
934 | slot->ctrl = ctrl; | 903 | slot->ctrl = ctrl; |
935 | slot->bus = ctrl->pci_dev->subordinate->number; | ||
936 | slot->device = ctrl->slot_device_offset + slot->hp_slot; | ||
937 | slot->hpc_ops = ctrl->hpc_ops; | ||
938 | slot->number = ctrl->first_slot; | ||
939 | mutex_init(&slot->lock); | 904 | mutex_init(&slot->lock); |
940 | INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); | 905 | INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); |
941 | list_add(&slot->slot_list, &ctrl->slot_list); | 906 | ctrl->slot = slot; |
942 | return 0; | 907 | return 0; |
943 | } | 908 | } |
944 | 909 | ||
945 | static void pcie_cleanup_slot(struct controller *ctrl) | 910 | static void pcie_cleanup_slot(struct controller *ctrl) |
946 | { | 911 | { |
947 | struct slot *slot; | 912 | struct slot *slot = ctrl->slot; |
948 | slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list); | ||
949 | list_del(&slot->slot_list); | ||
950 | cancel_delayed_work(&slot->work); | 913 | cancel_delayed_work(&slot->work); |
951 | flush_scheduled_work(); | 914 | flush_scheduled_work(); |
952 | flush_workqueue(pciehp_wq); | 915 | flush_workqueue(pciehp_wq); |
@@ -957,7 +920,7 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
957 | { | 920 | { |
958 | int i; | 921 | int i; |
959 | u16 reg16; | 922 | u16 reg16; |
960 | struct pci_dev *pdev = ctrl->pci_dev; | 923 | struct pci_dev *pdev = ctrl->pcie->port; |
961 | 924 | ||
962 | if (!pciehp_debug) | 925 | if (!pciehp_debug) |
963 | return; | 926 | return; |
@@ -980,7 +943,7 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
980 | (unsigned long long)pci_resource_start(pdev, i)); | 943 | (unsigned long long)pci_resource_start(pdev, i)); |
981 | } | 944 | } |
982 | ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); | 945 | ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); |
983 | ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot); | 946 | ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl)); |
984 | ctrl_info(ctrl, " Attention Button : %3s\n", | 947 | ctrl_info(ctrl, " Attention Button : %3s\n", |
985 | ATTN_BUTTN(ctrl) ? "yes" : "no"); | 948 | ATTN_BUTTN(ctrl) ? "yes" : "no"); |
986 | ctrl_info(ctrl, " Power Controller : %3s\n", | 949 | ctrl_info(ctrl, " Power Controller : %3s\n", |
@@ -1014,10 +977,7 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
1014 | dev_err(&dev->device, "%s: Out of memory\n", __func__); | 977 | dev_err(&dev->device, "%s: Out of memory\n", __func__); |
1015 | goto abort; | 978 | goto abort; |
1016 | } | 979 | } |
1017 | INIT_LIST_HEAD(&ctrl->slot_list); | ||
1018 | |||
1019 | ctrl->pcie = dev; | 980 | ctrl->pcie = dev; |
1020 | ctrl->pci_dev = pdev; | ||
1021 | ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 981 | ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
1022 | if (!ctrl->cap_base) { | 982 | if (!ctrl->cap_base) { |
1023 | ctrl_err(ctrl, "Cannot find PCI Express capability\n"); | 983 | ctrl_err(ctrl, "Cannot find PCI Express capability\n"); |
@@ -1029,11 +989,6 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
1029 | } | 989 | } |
1030 | 990 | ||
1031 | ctrl->slot_cap = slot_cap; | 991 | ctrl->slot_cap = slot_cap; |
1032 | ctrl->first_slot = slot_cap >> 19; | ||
1033 | ctrl->slot_device_offset = 0; | ||
1034 | ctrl->num_slots = 1; | ||
1035 | ctrl->hpc_ops = &pciehp_hpc_ops; | ||
1036 | mutex_init(&ctrl->crit_sect); | ||
1037 | mutex_init(&ctrl->ctrl_lock); | 992 | mutex_init(&ctrl->ctrl_lock); |
1038 | init_waitqueue_head(&ctrl->queue); | 993 | init_waitqueue_head(&ctrl->queue); |
1039 | dbg_ctrl(ctrl); | 994 | dbg_ctrl(ctrl); |
@@ -1089,7 +1044,7 @@ abort: | |||
1089 | return NULL; | 1044 | return NULL; |
1090 | } | 1045 | } |
1091 | 1046 | ||
1092 | void pcie_release_ctrl(struct controller *ctrl) | 1047 | void pciehp_release_ctrl(struct controller *ctrl) |
1093 | { | 1048 | { |
1094 | pcie_shutdown_notification(ctrl); | 1049 | pcie_shutdown_notification(ctrl); |
1095 | pcie_cleanup_slot(ctrl); | 1050 | pcie_cleanup_slot(ctrl); |
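[editor's note] The pciehp_hpc.c hunks above delete the pciehp_hpc_ops function-pointer table and export the pciehp_* helpers directly, so callers no longer dispatch through slot->hpc_ops. The sketch below is a minimal, self-contained illustration of that refactoring pattern only; every type and name in it (struct slot, slot_ops, hw_get_power_status, ...) is invented for the example and is not the kernel API.

/* Illustration: replacing a per-slot ops-table indirection with direct calls.
 * All names here are hypothetical stand-ins for the hpc_ops removal above. */
#include <stdio.h>

struct slot;

/* Old style: every operation goes through a function-pointer table. */
struct slot_ops {
        int (*get_power_status)(struct slot *slot, unsigned char *status);
};

struct slot {
        const struct slot_ops *ops;     /* old-style indirection */
        unsigned char power;
};

static int hw_get_power_status(struct slot *slot, unsigned char *status)
{
        *status = slot->power;          /* would read a hardware register */
        return 0;
}

static const struct slot_ops legacy_ops = {
        .get_power_status = hw_get_power_status,
};

/* New style: one driver, one implementation, so call the helper directly. */
static int get_power_status_direct(struct slot *slot, unsigned char *status)
{
        return hw_get_power_status(slot, status);
}

int main(void)
{
        struct slot s = { .ops = &legacy_ops, .power = 1 };
        unsigned char st;

        s.ops->get_power_status(&s, &st);       /* old: indirect call */
        printf("indirect: %u\n", (unsigned)st);

        get_power_status_direct(&s, &st);       /* new: direct call */
        printf("direct:   %u\n", (unsigned)st);
        return 0;
}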
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 02e24d63b3ee..21733108adde 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -63,27 +63,27 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev) | |||
63 | int pciehp_configure_device(struct slot *p_slot) | 63 | int pciehp_configure_device(struct slot *p_slot) |
64 | { | 64 | { |
65 | struct pci_dev *dev; | 65 | struct pci_dev *dev; |
66 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; | 66 | struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; |
67 | int num, fn; | 67 | int num, fn; |
68 | struct controller *ctrl = p_slot->ctrl; | 68 | struct controller *ctrl = p_slot->ctrl; |
69 | 69 | ||
70 | dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); | 70 | dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); |
71 | if (dev) { | 71 | if (dev) { |
72 | ctrl_err(ctrl, "Device %s already exists " | 72 | ctrl_err(ctrl, "Device %s already exists " |
73 | "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev), | 73 | "at %04x:%02x:00, cannot hot-add\n", pci_name(dev), |
74 | pci_domain_nr(parent), p_slot->bus, p_slot->device); | 74 | pci_domain_nr(parent), parent->number); |
75 | pci_dev_put(dev); | 75 | pci_dev_put(dev); |
76 | return -EINVAL; | 76 | return -EINVAL; |
77 | } | 77 | } |
78 | 78 | ||
79 | num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); | 79 | num = pci_scan_slot(parent, PCI_DEVFN(0, 0)); |
80 | if (num == 0) { | 80 | if (num == 0) { |
81 | ctrl_err(ctrl, "No new device found\n"); | 81 | ctrl_err(ctrl, "No new device found\n"); |
82 | return -ENODEV; | 82 | return -ENODEV; |
83 | } | 83 | } |
84 | 84 | ||
85 | for (fn = 0; fn < 8; fn++) { | 85 | for (fn = 0; fn < 8; fn++) { |
86 | dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn)); | 86 | dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); |
87 | if (!dev) | 87 | if (!dev) |
88 | continue; | 88 | continue; |
89 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | 89 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { |
@@ -111,19 +111,18 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
111 | int j; | 111 | int j; |
112 | u8 bctl = 0; | 112 | u8 bctl = 0; |
113 | u8 presence = 0; | 113 | u8 presence = 0; |
114 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; | 114 | struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; |
115 | u16 command; | 115 | u16 command; |
116 | struct controller *ctrl = p_slot->ctrl; | 116 | struct controller *ctrl = p_slot->ctrl; |
117 | 117 | ||
118 | ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", | 118 | ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", |
119 | __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); | 119 | __func__, pci_domain_nr(parent), parent->number); |
120 | ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); | 120 | ret = pciehp_get_adapter_status(p_slot, &presence); |
121 | if (ret) | 121 | if (ret) |
122 | presence = 0; | 122 | presence = 0; |
123 | 123 | ||
124 | for (j = 0; j < 8; j++) { | 124 | for (j = 0; j < 8; j++) { |
125 | struct pci_dev* temp = pci_get_slot(parent, | 125 | struct pci_dev* temp = pci_get_slot(parent, PCI_DEVFN(0, j)); |
126 | (p_slot->device << 3) | j); | ||
127 | if (!temp) | 126 | if (!temp) |
128 | continue; | 127 | continue; |
129 | if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | 128 | if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 10c0e62bd5a8..2ce8f9ccc66e 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -318,6 +318,8 @@ static int __init aer_service_init(void) | |||
318 | { | 318 | { |
319 | if (pcie_aer_disable) | 319 | if (pcie_aer_disable) |
320 | return -ENXIO; | 320 | return -ENXIO; |
321 | if (!pci_msi_enabled()) | ||
322 | return -ENXIO; | ||
321 | return pcie_port_service_register(&aerdriver); | 323 | return pcie_port_service_register(&aerdriver); |
322 | } | 324 | } |
323 | 325 | ||
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index f289ca9bf18d..745402e8e498 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -303,9 +303,6 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev, | |||
303 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 303 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
304 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); | 304 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); |
305 | info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; | 305 | info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; |
306 | /* 00b and 10b are defined as "Reserved". */ | ||
307 | if (info->support == PCIE_LINK_STATE_L1) | ||
308 | info->support = 0; | ||
309 | info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; | 306 | info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; |
310 | info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; | 307 | info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; |
311 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | 308 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); |
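[editor's note] The aspm.c hunk above drops the extra "reserved value" check and keeps the plain field extraction: the 2-bit ASPM-support field sits in Link Capabilities bits 11:10. A tiny sketch of that bit-field extraction follows; the mask is defined locally and the register value is made up for illustration.

/* Illustration: extracting the ASPM Support field from a LNKCAP dword. */
#include <stdio.h>
#include <stdint.h>

#define LNKCAP_ASPMS_MASK  0x00000c00u  /* ASPM Support, bits 11:10 */

int main(void)
{
        uint32_t lnkcap = 0x00477c12;   /* example register value, made up */
        uint32_t support = (lnkcap & LNKCAP_ASPMS_MASK) >> 10;

        /* 01b = L0s, 10b = L1, 11b = L0s and L1 */
        printf("ASPM support field: 0x%x\n", (unsigned)support);
        return 0;
}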
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index 047394d98ac2..3247828aa203 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile | |||
@@ -71,6 +71,7 @@ pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o | |||
71 | pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o | 71 | pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o |
72 | pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o | 72 | pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o |
73 | pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o | 73 | pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o |
74 | pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o | ||
74 | pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o | 75 | pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o |
75 | pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o | 76 | pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o |
76 | pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o | 77 | pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o |
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index c49a7269f6d1..87e22ef8eb02 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c | |||
@@ -300,25 +300,29 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) | |||
300 | return soc_common_drv_pcmcia_remove(&dev->dev); | 300 | return soc_common_drv_pcmcia_remove(&dev->dev); |
301 | } | 301 | } |
302 | 302 | ||
303 | static int pxa2xx_drv_pcmcia_suspend(struct platform_device *dev, pm_message_t state) | 303 | static int pxa2xx_drv_pcmcia_suspend(struct device *dev) |
304 | { | 304 | { |
305 | return pcmcia_socket_dev_suspend(&dev->dev, state); | 305 | return pcmcia_socket_dev_suspend(dev, PMSG_SUSPEND); |
306 | } | 306 | } |
307 | 307 | ||
308 | static int pxa2xx_drv_pcmcia_resume(struct platform_device *dev) | 308 | static int pxa2xx_drv_pcmcia_resume(struct device *dev) |
309 | { | 309 | { |
310 | pxa2xx_configure_sockets(&dev->dev); | 310 | pxa2xx_configure_sockets(dev); |
311 | return pcmcia_socket_dev_resume(&dev->dev); | 311 | return pcmcia_socket_dev_resume(dev); |
312 | } | 312 | } |
313 | 313 | ||
314 | static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { | ||
315 | .suspend = pxa2xx_drv_pcmcia_suspend, | ||
316 | .resume = pxa2xx_drv_pcmcia_resume, | ||
317 | }; | ||
318 | |||
314 | static struct platform_driver pxa2xx_pcmcia_driver = { | 319 | static struct platform_driver pxa2xx_pcmcia_driver = { |
315 | .probe = pxa2xx_drv_pcmcia_probe, | 320 | .probe = pxa2xx_drv_pcmcia_probe, |
316 | .remove = pxa2xx_drv_pcmcia_remove, | 321 | .remove = pxa2xx_drv_pcmcia_remove, |
317 | .suspend = pxa2xx_drv_pcmcia_suspend, | ||
318 | .resume = pxa2xx_drv_pcmcia_resume, | ||
319 | .driver = { | 322 | .driver = { |
320 | .name = "pxa2xx-pcmcia", | 323 | .name = "pxa2xx-pcmcia", |
321 | .owner = THIS_MODULE, | 324 | .owner = THIS_MODULE, |
325 | .pm = &pxa2xx_drv_pcmcia_pm_ops, | ||
322 | }, | 326 | }, |
323 | }; | 327 | }; |
324 | 328 | ||
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c new file mode 100644 index 000000000000..3a8993ed5621 --- /dev/null +++ b/drivers/pcmcia/pxa2xx_palmtc.c | |||
@@ -0,0 +1,230 @@ | |||
1 | /* | ||
2 | * linux/drivers/pcmcia/pxa2xx_palmtc.c | ||
3 | * | ||
4 | * Driver for Palm Tungsten|C PCMCIA | ||
5 | * | ||
6 | * Copyright (C) 2008 Alex Osborne <ato@meshy.org> | ||
7 | * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/gpio.h> | ||
18 | #include <linux/delay.h> | ||
19 | |||
20 | #include <asm/mach-types.h> | ||
21 | #include <mach/palmtc.h> | ||
22 | #include "soc_common.h" | ||
23 | |||
24 | static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | ||
25 | { | ||
26 | int ret; | ||
27 | |||
28 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER1, "PCMCIA PWR1"); | ||
29 | if (ret) | ||
30 | goto err1; | ||
31 | ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER1, 0); | ||
32 | if (ret) | ||
33 | goto err2; | ||
34 | |||
35 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER2, "PCMCIA PWR2"); | ||
36 | if (ret) | ||
37 | goto err2; | ||
38 | ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER2, 0); | ||
39 | if (ret) | ||
40 | goto err3; | ||
41 | |||
42 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER3, "PCMCIA PWR3"); | ||
43 | if (ret) | ||
44 | goto err3; | ||
45 | ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER3, 0); | ||
46 | if (ret) | ||
47 | goto err4; | ||
48 | |||
49 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_RESET, "PCMCIA RST"); | ||
50 | if (ret) | ||
51 | goto err4; | ||
52 | ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_RESET, 1); | ||
53 | if (ret) | ||
54 | goto err5; | ||
55 | |||
56 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_READY, "PCMCIA RDY"); | ||
57 | if (ret) | ||
58 | goto err5; | ||
59 | ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_READY); | ||
60 | if (ret) | ||
61 | goto err6; | ||
62 | |||
63 | ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_PWRREADY, "PCMCIA PWRRDY"); | ||
64 | if (ret) | ||
65 | goto err6; | ||
66 | ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_PWRREADY); | ||
67 | if (ret) | ||
68 | goto err7; | ||
69 | |||
70 | skt->irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY); | ||
71 | return 0; | ||
72 | |||
73 | err7: | ||
74 | gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY); | ||
75 | err6: | ||
76 | gpio_free(GPIO_NR_PALMTC_PCMCIA_READY); | ||
77 | err5: | ||
78 | gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET); | ||
79 | err4: | ||
80 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3); | ||
81 | err3: | ||
82 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2); | ||
83 | err2: | ||
84 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1); | ||
85 | err1: | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | ||
90 | { | ||
91 | gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY); | ||
92 | gpio_free(GPIO_NR_PALMTC_PCMCIA_READY); | ||
93 | gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET); | ||
94 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3); | ||
95 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2); | ||
96 | gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1); | ||
97 | } | ||
98 | |||
99 | static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | ||
100 | struct pcmcia_state *state) | ||
101 | { | ||
102 | state->detect = 1; /* always inserted */ | ||
103 | state->ready = !!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_READY); | ||
104 | state->bvd1 = 1; | ||
105 | state->bvd2 = 1; | ||
106 | state->wrprot = 0; | ||
107 | state->vs_3v = 1; | ||
108 | state->vs_Xv = 0; | ||
109 | } | ||
110 | |||
111 | static int palmtc_wifi_powerdown(void) | ||
112 | { | ||
113 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1); | ||
114 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 0); | ||
115 | mdelay(40); | ||
116 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 0); | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | static int palmtc_wifi_powerup(void) | ||
121 | { | ||
122 | int timeout = 50; | ||
123 | |||
124 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 1); | ||
125 | mdelay(50); | ||
126 | |||
127 | /* Power up the card, 1.8V first, after a while 3.3V */ | ||
128 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 1); | ||
129 | mdelay(100); | ||
130 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 1); | ||
131 | |||
132 | /* Wait till the card is ready */ | ||
133 | while (!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_PWRREADY) && | ||
134 | timeout) { | ||
135 | mdelay(1); | ||
136 | timeout--; | ||
137 | } | ||
138 | |||
139 | /* Power down the WiFi in case of error */ | ||
140 | if (!timeout) { | ||
141 | palmtc_wifi_powerdown(); | ||
142 | return 1; | ||
143 | } | ||
144 | |||
145 | /* Reset the card */ | ||
146 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1); | ||
147 | mdelay(20); | ||
148 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 0); | ||
149 | mdelay(25); | ||
150 | |||
151 | gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 0); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, | ||
157 | const socket_state_t *state) | ||
158 | { | ||
159 | int ret = 1; | ||
160 | |||
161 | if (state->Vcc == 0) | ||
162 | ret = palmtc_wifi_powerdown(); | ||
163 | else if (state->Vcc == 33) | ||
164 | ret = palmtc_wifi_powerup(); | ||
165 | |||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | static void palmtc_pcmcia_socket_init(struct soc_pcmcia_socket *skt) | ||
170 | { | ||
171 | } | ||
172 | |||
173 | static void palmtc_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) | ||
174 | { | ||
175 | } | ||
176 | |||
177 | static struct pcmcia_low_level palmtc_pcmcia_ops = { | ||
178 | .owner = THIS_MODULE, | ||
179 | |||
180 | .first = 0, | ||
181 | .nr = 1, | ||
182 | |||
183 | .hw_init = palmtc_pcmcia_hw_init, | ||
184 | .hw_shutdown = palmtc_pcmcia_hw_shutdown, | ||
185 | |||
186 | .socket_state = palmtc_pcmcia_socket_state, | ||
187 | .configure_socket = palmtc_pcmcia_configure_socket, | ||
188 | |||
189 | .socket_init = palmtc_pcmcia_socket_init, | ||
190 | .socket_suspend = palmtc_pcmcia_socket_suspend, | ||
191 | }; | ||
192 | |||
193 | static struct platform_device *palmtc_pcmcia_device; | ||
194 | |||
195 | static int __init palmtc_pcmcia_init(void) | ||
196 | { | ||
197 | int ret; | ||
198 | |||
199 | if (!machine_is_palmtc()) | ||
200 | return -ENODEV; | ||
201 | |||
202 | palmtc_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); | ||
203 | if (!palmtc_pcmcia_device) | ||
204 | return -ENOMEM; | ||
205 | |||
206 | ret = platform_device_add_data(palmtc_pcmcia_device, &palmtc_pcmcia_ops, | ||
207 | sizeof(palmtc_pcmcia_ops)); | ||
208 | |||
209 | if (!ret) | ||
210 | ret = platform_device_add(palmtc_pcmcia_device); | ||
211 | |||
212 | if (ret) | ||
213 | platform_device_put(palmtc_pcmcia_device); | ||
214 | |||
215 | return ret; | ||
216 | } | ||
217 | |||
218 | static void __exit palmtc_pcmcia_exit(void) | ||
219 | { | ||
220 | platform_device_unregister(palmtc_pcmcia_device); | ||
221 | } | ||
222 | |||
223 | module_init(palmtc_pcmcia_init); | ||
224 | module_exit(palmtc_pcmcia_exit); | ||
225 | |||
226 | MODULE_AUTHOR("Alex Osborne <ato@meshy.org>," | ||
227 | " Marek Vasut <marek.vasut@gmail.com>"); | ||
228 | MODULE_DESCRIPTION("PCMCIA support for Palm Tungsten|C"); | ||
229 | MODULE_ALIAS("platform:pxa2xx-pcmcia"); | ||
230 | MODULE_LICENSE("GPL"); | ||
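[editor's note] palmtc_pcmcia_hw_init in the new file above acquires six GPIOs in sequence and, on any failure, unwinds only the ones already acquired, in reverse order, through a ladder of goto labels. Below is a compact, compilable model of that idiom; acquire()/release() are mocks standing in for gpio_request()/gpio_free().

/* Compilable model of the goto-unwind idiom: acquire resources in order,
 * release the already-acquired ones in reverse order on failure. */
#include <stdio.h>

static int acquire(const char *name)
{
        printf("acquire %s\n", name);
        return 0;       /* return nonzero here to exercise the unwind path */
}

static void release(const char *name)
{
        printf("release %s\n", name);
}

static int hw_init(void)
{
        int ret;

        ret = acquire("PWR1");
        if (ret)
                goto err1;
        ret = acquire("PWR2");
        if (ret)
                goto err2;
        ret = acquire("RESET");
        if (ret)
                goto err3;
        ret = acquire("READY");
        if (ret)
                goto err4;

        return 0;       /* success: everything stays acquired */

err4:
        release("RESET");
err3:
        release("PWR2");
err2:
        release("PWR1");
err1:
        return ret;
}

int main(void)
{
        return hw_init();
}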
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c index bb8cc05605ac..747ca194fad4 100644 --- a/drivers/rtc/rtc-pxa.c +++ b/drivers/rtc/rtc-pxa.c | |||
@@ -438,34 +438,37 @@ static int __exit pxa_rtc_remove(struct platform_device *pdev) | |||
438 | } | 438 | } |
439 | 439 | ||
440 | #ifdef CONFIG_PM | 440 | #ifdef CONFIG_PM |
441 | static int pxa_rtc_suspend(struct platform_device *pdev, pm_message_t state) | 441 | static int pxa_rtc_suspend(struct device *dev) |
442 | { | 442 | { |
443 | struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); | 443 | struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); |
444 | 444 | ||
445 | if (device_may_wakeup(&pdev->dev)) | 445 | if (device_may_wakeup(dev)) |
446 | enable_irq_wake(pxa_rtc->irq_Alrm); | 446 | enable_irq_wake(pxa_rtc->irq_Alrm); |
447 | return 0; | 447 | return 0; |
448 | } | 448 | } |
449 | 449 | ||
450 | static int pxa_rtc_resume(struct platform_device *pdev) | 450 | static int pxa_rtc_resume(struct device *dev) |
451 | { | 451 | { |
452 | struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); | 452 | struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev); |
453 | 453 | ||
454 | if (device_may_wakeup(&pdev->dev)) | 454 | if (device_may_wakeup(dev)) |
455 | disable_irq_wake(pxa_rtc->irq_Alrm); | 455 | disable_irq_wake(pxa_rtc->irq_Alrm); |
456 | return 0; | 456 | return 0; |
457 | } | 457 | } |
458 | #else | 458 | |
459 | #define pxa_rtc_suspend NULL | 459 | static struct dev_pm_ops pxa_rtc_pm_ops = { |
460 | #define pxa_rtc_resume NULL | 460 | .suspend = pxa_rtc_suspend, |
461 | .resume = pxa_rtc_resume, | ||
462 | }; | ||
461 | #endif | 463 | #endif |
462 | 464 | ||
463 | static struct platform_driver pxa_rtc_driver = { | 465 | static struct platform_driver pxa_rtc_driver = { |
464 | .remove = __exit_p(pxa_rtc_remove), | 466 | .remove = __exit_p(pxa_rtc_remove), |
465 | .suspend = pxa_rtc_suspend, | ||
466 | .resume = pxa_rtc_resume, | ||
467 | .driver = { | 467 | .driver = { |
468 | .name = "pxa-rtc", | 468 | .name = "pxa-rtc", |
469 | #ifdef CONFIG_PM | ||
470 | .pm = &pxa_rtc_pm_ops, | ||
471 | #endif | ||
469 | }, | 472 | }, |
470 | }; | 473 | }; |
471 | 474 | ||
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 021b2928f0b9..29f98a70586e 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c | |||
@@ -393,31 +393,34 @@ static int sa1100_rtc_remove(struct platform_device *pdev) | |||
393 | } | 393 | } |
394 | 394 | ||
395 | #ifdef CONFIG_PM | 395 | #ifdef CONFIG_PM |
396 | static int sa1100_rtc_suspend(struct platform_device *pdev, pm_message_t state) | 396 | static int sa1100_rtc_suspend(struct device *dev) |
397 | { | 397 | { |
398 | if (device_may_wakeup(&pdev->dev)) | 398 | if (device_may_wakeup(dev)) |
399 | enable_irq_wake(IRQ_RTCAlrm); | 399 | enable_irq_wake(IRQ_RTCAlrm); |
400 | return 0; | 400 | return 0; |
401 | } | 401 | } |
402 | 402 | ||
403 | static int sa1100_rtc_resume(struct platform_device *pdev) | 403 | static int sa1100_rtc_resume(struct device *dev) |
404 | { | 404 | { |
405 | if (device_may_wakeup(&pdev->dev)) | 405 | if (device_may_wakeup(dev)) |
406 | disable_irq_wake(IRQ_RTCAlrm); | 406 | disable_irq_wake(IRQ_RTCAlrm); |
407 | return 0; | 407 | return 0; |
408 | } | 408 | } |
409 | #else | 409 | |
410 | #define sa1100_rtc_suspend NULL | 410 | static struct dev_pm_ops sa1100_rtc_pm_ops = { |
411 | #define sa1100_rtc_resume NULL | 411 | .suspend = sa1100_rtc_suspend, |
412 | .resume = sa1100_rtc_resume, | ||
413 | }; | ||
412 | #endif | 414 | #endif |
413 | 415 | ||
414 | static struct platform_driver sa1100_rtc_driver = { | 416 | static struct platform_driver sa1100_rtc_driver = { |
415 | .probe = sa1100_rtc_probe, | 417 | .probe = sa1100_rtc_probe, |
416 | .remove = sa1100_rtc_remove, | 418 | .remove = sa1100_rtc_remove, |
417 | .suspend = sa1100_rtc_suspend, | ||
418 | .resume = sa1100_rtc_resume, | ||
419 | .driver = { | 419 | .driver = { |
420 | .name = "sa1100-rtc", | 420 | .name = "sa1100-rtc", |
421 | #ifdef CONFIG_PM | ||
422 | .pm = &sa1100_rtc_pm_ops, | ||
423 | #endif | ||
421 | }, | 424 | }, |
422 | }; | 425 | }; |
423 | 426 | ||
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c index 8d349b23249a..300cea768d74 100644 --- a/drivers/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/serial/cpm_uart/cpm_uart_core.c | |||
@@ -649,7 +649,7 @@ static int cpm_uart_tx_pump(struct uart_port *port) | |||
649 | u8 *p; | 649 | u8 *p; |
650 | int count; | 650 | int count; |
651 | struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; | 651 | struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; |
652 | struct circ_buf *xmit = &port->info->xmit; | 652 | struct circ_buf *xmit = &port->state->xmit; |
653 | 653 | ||
654 | /* Handle xon/xoff */ | 654 | /* Handle xon/xoff */ |
655 | if (port->x_char) { | 655 | if (port->x_char) { |
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index 7be52fe288eb..31f172397af3 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c | |||
@@ -18,6 +18,7 @@ static char *serial_version = "$Revision: 1.25 $"; | |||
18 | #include <linux/tty.h> | 18 | #include <linux/tty.h> |
19 | #include <linux/tty_flip.h> | 19 | #include <linux/tty_flip.h> |
20 | #include <linux/major.h> | 20 | #include <linux/major.h> |
21 | #include <linux/smp_lock.h> | ||
21 | #include <linux/string.h> | 22 | #include <linux/string.h> |
22 | #include <linux/fcntl.h> | 23 | #include <linux/fcntl.h> |
23 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c index 6443b7ff274a..b8629d74f6a2 100644 --- a/drivers/serial/pxa.c +++ b/drivers/serial/pxa.c | |||
@@ -726,9 +726,10 @@ static struct uart_driver serial_pxa_reg = { | |||
726 | .cons = PXA_CONSOLE, | 726 | .cons = PXA_CONSOLE, |
727 | }; | 727 | }; |
728 | 728 | ||
729 | static int serial_pxa_suspend(struct platform_device *dev, pm_message_t state) | 729 | #ifdef CONFIG_PM |
730 | static int serial_pxa_suspend(struct device *dev) | ||
730 | { | 731 | { |
731 | struct uart_pxa_port *sport = platform_get_drvdata(dev); | 732 | struct uart_pxa_port *sport = dev_get_drvdata(dev); |
732 | 733 | ||
733 | if (sport) | 734 | if (sport) |
734 | uart_suspend_port(&serial_pxa_reg, &sport->port); | 735 | uart_suspend_port(&serial_pxa_reg, &sport->port); |
@@ -736,9 +737,9 @@ static int serial_pxa_suspend(struct platform_device *dev, pm_message_t state) | |||
736 | return 0; | 737 | return 0; |
737 | } | 738 | } |
738 | 739 | ||
739 | static int serial_pxa_resume(struct platform_device *dev) | 740 | static int serial_pxa_resume(struct device *dev) |
740 | { | 741 | { |
741 | struct uart_pxa_port *sport = platform_get_drvdata(dev); | 742 | struct uart_pxa_port *sport = dev_get_drvdata(dev); |
742 | 743 | ||
743 | if (sport) | 744 | if (sport) |
744 | uart_resume_port(&serial_pxa_reg, &sport->port); | 745 | uart_resume_port(&serial_pxa_reg, &sport->port); |
@@ -746,6 +747,12 @@ static int serial_pxa_resume(struct platform_device *dev) | |||
746 | return 0; | 747 | return 0; |
747 | } | 748 | } |
748 | 749 | ||
750 | static struct dev_pm_ops serial_pxa_pm_ops = { | ||
751 | .suspend = serial_pxa_suspend, | ||
752 | .resume = serial_pxa_resume, | ||
753 | }; | ||
754 | #endif | ||
755 | |||
749 | static int serial_pxa_probe(struct platform_device *dev) | 756 | static int serial_pxa_probe(struct platform_device *dev) |
750 | { | 757 | { |
751 | struct uart_pxa_port *sport; | 758 | struct uart_pxa_port *sport; |
@@ -825,11 +832,12 @@ static struct platform_driver serial_pxa_driver = { | |||
825 | .probe = serial_pxa_probe, | 832 | .probe = serial_pxa_probe, |
826 | .remove = serial_pxa_remove, | 833 | .remove = serial_pxa_remove, |
827 | 834 | ||
828 | .suspend = serial_pxa_suspend, | ||
829 | .resume = serial_pxa_resume, | ||
830 | .driver = { | 835 | .driver = { |
831 | .name = "pxa2xx-uart", | 836 | .name = "pxa2xx-uart", |
832 | .owner = THIS_MODULE, | 837 | .owner = THIS_MODULE, |
838 | #ifdef CONFIG_PM | ||
839 | .pm = &serial_pxa_pm_ops, | ||
840 | #endif | ||
833 | }, | 841 | }, |
834 | }; | 842 | }; |
835 | 843 | ||
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index c0f950a7cbec..958a3ffc8987 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c | |||
@@ -532,7 +532,7 @@ static void restore_state(struct pl022 *pl022) | |||
532 | GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ | 532 | GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ |
533 | GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ | 533 | GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ |
534 | GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ | 534 | GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ |
535 | GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \ | 535 | GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ |
536 | GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ | 536 | GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ |
537 | GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ | 537 | GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ |
538 | GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ | 538 | GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ |
@@ -1247,8 +1247,8 @@ static int verify_controller_parameters(struct pl022 *pl022, | |||
1247 | return -EINVAL; | 1247 | return -EINVAL; |
1248 | } | 1248 | } |
1249 | if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { | 1249 | if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { |
1250 | if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE) | 1250 | if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE) |
1251 | && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) { | 1251 | && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) { |
1252 | dev_err(chip_info->dev, | 1252 | dev_err(chip_info->dev, |
1253 | "Clock Phase is configured incorrectly\n"); | 1253 | "Clock Phase is configured incorrectly\n"); |
1254 | return -EINVAL; | 1254 | return -EINVAL; |
@@ -1485,7 +1485,7 @@ static int pl022_setup(struct spi_device *spi) | |||
1485 | chip_info->data_size = SSP_DATA_BITS_12; | 1485 | chip_info->data_size = SSP_DATA_BITS_12; |
1486 | chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; | 1486 | chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; |
1487 | chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; | 1487 | chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; |
1488 | chip_info->clk_phase = SSP_CLK_FALLING_EDGE; | 1488 | chip_info->clk_phase = SSP_CLK_SECOND_EDGE; |
1489 | chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; | 1489 | chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; |
1490 | chip_info->ctrl_len = SSP_BITS_8; | 1490 | chip_info->ctrl_len = SSP_BITS_8; |
1491 | chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; | 1491 | chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 31dd56f0dae9..c8c2b693ffac 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -1668,10 +1668,9 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev) | |||
1668 | } | 1668 | } |
1669 | 1669 | ||
1670 | #ifdef CONFIG_PM | 1670 | #ifdef CONFIG_PM |
1671 | 1671 | static int pxa2xx_spi_suspend(struct device *dev) | |
1672 | static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | ||
1673 | { | 1672 | { |
1674 | struct driver_data *drv_data = platform_get_drvdata(pdev); | 1673 | struct driver_data *drv_data = dev_get_drvdata(dev); |
1675 | struct ssp_device *ssp = drv_data->ssp; | 1674 | struct ssp_device *ssp = drv_data->ssp; |
1676 | int status = 0; | 1675 | int status = 0; |
1677 | 1676 | ||
@@ -1684,9 +1683,9 @@ static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | |||
1684 | return 0; | 1683 | return 0; |
1685 | } | 1684 | } |
1686 | 1685 | ||
1687 | static int pxa2xx_spi_resume(struct platform_device *pdev) | 1686 | static int pxa2xx_spi_resume(struct device *dev) |
1688 | { | 1687 | { |
1689 | struct driver_data *drv_data = platform_get_drvdata(pdev); | 1688 | struct driver_data *drv_data = dev_get_drvdata(dev); |
1690 | struct ssp_device *ssp = drv_data->ssp; | 1689 | struct ssp_device *ssp = drv_data->ssp; |
1691 | int status = 0; | 1690 | int status = 0; |
1692 | 1691 | ||
@@ -1703,26 +1702,29 @@ static int pxa2xx_spi_resume(struct platform_device *pdev) | |||
1703 | /* Start the queue running */ | 1702 | /* Start the queue running */ |
1704 | status = start_queue(drv_data); | 1703 | status = start_queue(drv_data); |
1705 | if (status != 0) { | 1704 | if (status != 0) { |
1706 | dev_err(&pdev->dev, "problem starting queue (%d)\n", status); | 1705 | dev_err(dev, "problem starting queue (%d)\n", status); |
1707 | return status; | 1706 | return status; |
1708 | } | 1707 | } |
1709 | 1708 | ||
1710 | return 0; | 1709 | return 0; |
1711 | } | 1710 | } |
1712 | #else | 1711 | |
1713 | #define pxa2xx_spi_suspend NULL | 1712 | static struct dev_pm_ops pxa2xx_spi_pm_ops = { |
1714 | #define pxa2xx_spi_resume NULL | 1713 | .suspend = pxa2xx_spi_suspend, |
1715 | #endif /* CONFIG_PM */ | 1714 | .resume = pxa2xx_spi_resume, |
1715 | }; | ||
1716 | #endif | ||
1716 | 1717 | ||
1717 | static struct platform_driver driver = { | 1718 | static struct platform_driver driver = { |
1718 | .driver = { | 1719 | .driver = { |
1719 | .name = "pxa2xx-spi", | 1720 | .name = "pxa2xx-spi", |
1720 | .owner = THIS_MODULE, | 1721 | .owner = THIS_MODULE, |
1722 | #ifdef CONFIG_PM | ||
1723 | .pm = &pxa2xx_spi_pm_ops, | ||
1724 | #endif | ||
1721 | }, | 1725 | }, |
1722 | .remove = pxa2xx_spi_remove, | 1726 | .remove = pxa2xx_spi_remove, |
1723 | .shutdown = pxa2xx_spi_shutdown, | 1727 | .shutdown = pxa2xx_spi_shutdown, |
1724 | .suspend = pxa2xx_spi_suspend, | ||
1725 | .resume = pxa2xx_spi_resume, | ||
1726 | }; | 1728 | }; |
1727 | 1729 | ||
1728 | static int __init pxa2xx_spi_init(void) | 1730 | static int __init pxa2xx_spi_init(void) |
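The pxa2xx_spi hunks convert the driver from the legacy platform_driver .suspend/.resume callbacks (which take a platform_device and a pm_message_t) to a struct dev_pm_ops wired up through driver.pm. The same transformation repeats in the ohci-pxa27x, da903x backlight and pxafb hunks further down. A stripped-down sketch of the pattern; the "foo" names are placeholders and only the structure mirrors the hunks.

	/* Sketch of the legacy -> dev_pm_ops conversion shown above. */
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/pm.h>

	struct foo_data { int dummy; };	/* stand-in for the driver's private state */

	#ifdef CONFIG_PM
	static int foo_suspend(struct device *dev)
	{
		/* was: platform_get_drvdata(pdev) in the old pm_message_t variant */
		struct foo_data *data = dev_get_drvdata(dev);

		/* ... quiesce the hardware using 'data' ... */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo_data *data = dev_get_drvdata(dev);

		/* ... restore the hardware using 'data' ... */
		return 0;
	}

	static struct dev_pm_ops foo_pm_ops = {
		.suspend	= foo_suspend,
		.resume		= foo_resume,
	};
	#endif

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
	#ifdef CONFIG_PM
			.pm	= &foo_pm_ops,	/* replaces .suspend/.resume on the platform_driver itself */
	#endif
		},
	};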
diff --git a/drivers/staging/go7007/Makefile b/drivers/staging/go7007/Makefile index d14ea84a01f6..1301caa7495d 100644 --- a/drivers/staging/go7007/Makefile +++ b/drivers/staging/go7007/Makefile | |||
@@ -32,8 +32,3 @@ endif | |||
32 | 32 | ||
33 | EXTRA_CFLAGS += -Idrivers/media/dvb/frontends | 33 | EXTRA_CFLAGS += -Idrivers/media/dvb/frontends |
34 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core | 34 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core |
35 | |||
36 | # Ubuntu 8.04 has CONFIG_SND undefined, so include lum sound/config.h too | ||
37 | ifeq ($(CONFIG_SND),) | ||
38 | EXTRA_CFLAGS += -include sound/config.h | ||
39 | endif | ||
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index ebd7237230e3..240750881d28 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig | |||
@@ -22,7 +22,6 @@ config USB_ARCH_HAS_HCD | |||
22 | default y if PCMCIA && !M32R # sl811_cs | 22 | default y if PCMCIA && !M32R # sl811_cs |
23 | default y if ARM # SL-811 | 23 | default y if ARM # SL-811 |
24 | default y if SUPERH # r8a66597-hcd | 24 | default y if SUPERH # r8a66597-hcd |
25 | default y if MICROBLAZE | ||
26 | default PCI | 25 | default PCI |
27 | 26 | ||
28 | # many non-PCI SOC chips embed OHCI | 27 | # many non-PCI SOC chips embed OHCI |
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index b5294a9344de..f1c06202fdf2 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c | |||
@@ -481,38 +481,47 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev) | |||
481 | return 0; | 481 | return 0; |
482 | } | 482 | } |
483 | 483 | ||
484 | #ifdef CONFIG_PM | 484 | #ifdef CONFIG_PM |
485 | static int ohci_hcd_pxa27x_drv_suspend(struct platform_device *pdev, pm_message_t state) | 485 | static int ohci_hcd_pxa27x_drv_suspend(struct device *dev) |
486 | { | 486 | { |
487 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 487 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
488 | struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); | 488 | struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); |
489 | 489 | ||
490 | if (time_before(jiffies, ohci->ohci.next_statechange)) | 490 | if (time_before(jiffies, ohci->ohci.next_statechange)) |
491 | msleep(5); | 491 | msleep(5); |
492 | ohci->ohci.next_statechange = jiffies; | 492 | ohci->ohci.next_statechange = jiffies; |
493 | 493 | ||
494 | pxa27x_stop_hc(ohci, &pdev->dev); | 494 | pxa27x_stop_hc(ohci, dev); |
495 | hcd->state = HC_STATE_SUSPENDED; | 495 | hcd->state = HC_STATE_SUSPENDED; |
496 | 496 | ||
497 | return 0; | 497 | return 0; |
498 | } | 498 | } |
499 | 499 | ||
500 | static int ohci_hcd_pxa27x_drv_resume(struct platform_device *pdev) | 500 | static int ohci_hcd_pxa27x_drv_resume(struct device *dev) |
501 | { | 501 | { |
502 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 502 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
503 | struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); | 503 | struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); |
504 | struct pxaohci_platform_data *inf = dev->platform_data; | ||
504 | int status; | 505 | int status; |
505 | 506 | ||
506 | if (time_before(jiffies, ohci->ohci.next_statechange)) | 507 | if (time_before(jiffies, ohci->ohci.next_statechange)) |
507 | msleep(5); | 508 | msleep(5); |
508 | ohci->ohci.next_statechange = jiffies; | 509 | ohci->ohci.next_statechange = jiffies; |
509 | 510 | ||
510 | if ((status = pxa27x_start_hc(ohci, &pdev->dev)) < 0) | 511 | if ((status = pxa27x_start_hc(ohci, dev)) < 0) |
511 | return status; | 512 | return status; |
512 | 513 | ||
514 | /* Select Power Management Mode */ | ||
515 | pxa27x_ohci_select_pmm(ohci, inf->port_mode); | ||
516 | |||
513 | ohci_finish_controller_resume(hcd); | 517 | ohci_finish_controller_resume(hcd); |
514 | return 0; | 518 | return 0; |
515 | } | 519 | } |
520 | |||
521 | static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { | ||
522 | .suspend = ohci_hcd_pxa27x_drv_suspend, | ||
523 | .resume = ohci_hcd_pxa27x_drv_resume, | ||
524 | }; | ||
516 | #endif | 525 | #endif |
517 | 526 | ||
518 | /* work with hotplug and coldplug */ | 527 | /* work with hotplug and coldplug */ |
@@ -522,13 +531,12 @@ static struct platform_driver ohci_hcd_pxa27x_driver = { | |||
522 | .probe = ohci_hcd_pxa27x_drv_probe, | 531 | .probe = ohci_hcd_pxa27x_drv_probe, |
523 | .remove = ohci_hcd_pxa27x_drv_remove, | 532 | .remove = ohci_hcd_pxa27x_drv_remove, |
524 | .shutdown = usb_hcd_platform_shutdown, | 533 | .shutdown = usb_hcd_platform_shutdown, |
525 | #ifdef CONFIG_PM | ||
526 | .suspend = ohci_hcd_pxa27x_drv_suspend, | ||
527 | .resume = ohci_hcd_pxa27x_drv_resume, | ||
528 | #endif | ||
529 | .driver = { | 534 | .driver = { |
530 | .name = "pxa27x-ohci", | 535 | .name = "pxa27x-ohci", |
531 | .owner = THIS_MODULE, | 536 | .owner = THIS_MODULE, |
537 | #ifdef CONFIG_PM | ||
538 | .pm = &ohci_hcd_pxa27x_pm_ops, | ||
539 | #endif | ||
532 | }, | 540 | }, |
533 | }; | 541 | }; |
534 | 542 | ||
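Besides the dev_pm_ops conversion, the ohci-pxa27x resume path now re-applies the port power-management mode from the board's platform data via pxa27x_ohci_select_pmm(), since that controller state is not preserved across suspend. A short sketch of how a board file would supply that value; only port_mode and pxaohci_platform_data come from the hunk, the header path and mode constant are assumptions.

	#include <mach/ohci.h>	/* assumed location of pxaohci_platform_data / PMM_* constants */

	static struct pxaohci_platform_data example_ohci_info = {
		.port_mode	= PMM_PERPORT_MODE,	/* re-applied on resume by pxa27x_ohci_select_pmm() */
	};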
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 68fa0e43b781..8c075b2416bb 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -912,6 +912,7 @@ static void sierra_release(struct usb_serial *serial) | |||
912 | } | 912 | } |
913 | } | 913 | } |
914 | 914 | ||
915 | #ifdef CONFIG_PM | ||
915 | static void stop_read_write_urbs(struct usb_serial *serial) | 916 | static void stop_read_write_urbs(struct usb_serial *serial) |
916 | { | 917 | { |
917 | int i, j; | 918 | int i, j; |
@@ -988,6 +989,10 @@ static int sierra_resume(struct usb_serial *serial) | |||
988 | 989 | ||
989 | return ec ? -EIO : 0; | 990 | return ec ? -EIO : 0; |
990 | } | 991 | } |
992 | #else | ||
993 | #define sierra_suspend NULL | ||
994 | #define sierra_resume NULL | ||
995 | #endif | ||
991 | 996 | ||
992 | static struct usb_serial_driver sierra_device = { | 997 | static struct usb_serial_driver sierra_device = { |
993 | .driver = { | 998 | .driver = { |
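The sierra hunks wrap the suspend/resume helpers in #ifdef CONFIG_PM and, in the !CONFIG_PM case, define the symbols to NULL so the usb_serial_driver initializer can reference them unconditionally. A minimal sketch of the idiom, with a placeholder "bar" name that is not in the commit.

	/* The "stub to NULL" idiom used above (generic names, not from the commit). */
	#include <linux/usb/serial.h>

	#ifdef CONFIG_PM
	static int bar_suspend(struct usb_serial *serial, pm_message_t message)
	{
		/* stop in-flight URBs, mark ports suspended, ... */
		return 0;
	}
	#else
	#define bar_suspend NULL	/* the .suspend field is simply left unset */
	#endif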
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c index 93bb4340cc64..701a1081e199 100644 --- a/drivers/video/backlight/da903x_bl.c +++ b/drivers/video/backlight/da903x_bl.c | |||
@@ -154,34 +154,38 @@ static int da903x_backlight_remove(struct platform_device *pdev) | |||
154 | } | 154 | } |
155 | 155 | ||
156 | #ifdef CONFIG_PM | 156 | #ifdef CONFIG_PM |
157 | static int da903x_backlight_suspend(struct platform_device *pdev, | 157 | static int da903x_backlight_suspend(struct device *dev) |
158 | pm_message_t state) | ||
159 | { | 158 | { |
159 | struct platform_device *pdev = to_platform_device(dev); | ||
160 | struct backlight_device *bl = platform_get_drvdata(pdev); | 160 | struct backlight_device *bl = platform_get_drvdata(pdev); |
161 | return da903x_backlight_set(bl, 0); | 161 | return da903x_backlight_set(bl, 0); |
162 | } | 162 | } |
163 | 163 | ||
164 | static int da903x_backlight_resume(struct platform_device *pdev) | 164 | static int da903x_backlight_resume(struct device *dev) |
165 | { | 165 | { |
166 | struct platform_device *pdev = to_platform_device(dev); | ||
166 | struct backlight_device *bl = platform_get_drvdata(pdev); | 167 | struct backlight_device *bl = platform_get_drvdata(pdev); |
167 | 168 | ||
168 | backlight_update_status(bl); | 169 | backlight_update_status(bl); |
169 | return 0; | 170 | return 0; |
170 | } | 171 | } |
171 | #else | 172 | |
172 | #define da903x_backlight_suspend NULL | 173 | static struct dev_pm_ops da903x_backlight_pm_ops = { |
173 | #define da903x_backlight_resume NULL | 174 | .suspend = da903x_backlight_suspend, |
175 | .resume = da903x_backlight_resume, | ||
176 | }; | ||
174 | #endif | 177 | #endif |
175 | 178 | ||
176 | static struct platform_driver da903x_backlight_driver = { | 179 | static struct platform_driver da903x_backlight_driver = { |
177 | .driver = { | 180 | .driver = { |
178 | .name = "da903x-backlight", | 181 | .name = "da903x-backlight", |
179 | .owner = THIS_MODULE, | 182 | .owner = THIS_MODULE, |
183 | #ifdef CONFIG_PM | ||
184 | .pm = &da903x_backlight_pm_ops, | ||
185 | #endif | ||
180 | }, | 186 | }, |
181 | .probe = da903x_backlight_probe, | 187 | .probe = da903x_backlight_probe, |
182 | .remove = da903x_backlight_remove, | 188 | .remove = da903x_backlight_remove, |
183 | .suspend = da903x_backlight_suspend, | ||
184 | .resume = da903x_backlight_resume, | ||
185 | }; | 189 | }; |
186 | 190 | ||
187 | static int __init da903x_backlight_init(void) | 191 | static int __init da903x_backlight_init(void) |
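The da903x backlight hunks show the one wrinkle in this batch of dev_pm_ops conversions: the driver data is attached to the platform_device, so the new hooks first recover it with to_platform_device() before calling platform_get_drvdata(). to_platform_device() is just a container_of() cast; roughly (sketch, not quoted verbatim from the kernel headers):

	/* Roughly what to_platform_device() expands to (see linux/platform_device.h);
	 * shown only to explain the pdev recovery in the hunk above. */
	#define to_platform_device(x) container_of((x), struct platform_device, dev)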
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 6506117c134b..1820c4a24434 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -1638,24 +1638,26 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data) | |||
1638 | * Power management hooks. Note that we won't be called from IRQ context, | 1638 | * Power management hooks. Note that we won't be called from IRQ context, |
1639 | * unlike the blank functions above, so we may sleep. | 1639 | * unlike the blank functions above, so we may sleep. |
1640 | */ | 1640 | */ |
1641 | static int pxafb_suspend(struct platform_device *dev, pm_message_t state) | 1641 | static int pxafb_suspend(struct device *dev) |
1642 | { | 1642 | { |
1643 | struct pxafb_info *fbi = platform_get_drvdata(dev); | 1643 | struct pxafb_info *fbi = dev_get_drvdata(dev); |
1644 | 1644 | ||
1645 | set_ctrlr_state(fbi, C_DISABLE_PM); | 1645 | set_ctrlr_state(fbi, C_DISABLE_PM); |
1646 | return 0; | 1646 | return 0; |
1647 | } | 1647 | } |
1648 | 1648 | ||
1649 | static int pxafb_resume(struct platform_device *dev) | 1649 | static int pxafb_resume(struct device *dev) |
1650 | { | 1650 | { |
1651 | struct pxafb_info *fbi = platform_get_drvdata(dev); | 1651 | struct pxafb_info *fbi = dev_get_drvdata(dev); |
1652 | 1652 | ||
1653 | set_ctrlr_state(fbi, C_ENABLE_PM); | 1653 | set_ctrlr_state(fbi, C_ENABLE_PM); |
1654 | return 0; | 1654 | return 0; |
1655 | } | 1655 | } |
1656 | #else | 1656 | |
1657 | #define pxafb_suspend NULL | 1657 | static struct dev_pm_ops pxafb_pm_ops = { |
1658 | #define pxafb_resume NULL | 1658 | .suspend = pxafb_suspend, |
1659 | .resume = pxafb_resume, | ||
1660 | }; | ||
1659 | #endif | 1661 | #endif |
1660 | 1662 | ||
1661 | static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi) | 1663 | static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi) |
@@ -2081,6 +2083,9 @@ static int __devinit pxafb_probe(struct platform_device *dev) | |||
2081 | goto failed; | 2083 | goto failed; |
2082 | } | 2084 | } |
2083 | 2085 | ||
2086 | if (cpu_is_pxa3xx() && inf->acceleration_enabled) | ||
2087 | fbi->fb.fix.accel = FB_ACCEL_PXA3XX; | ||
2088 | |||
2084 | fbi->backlight_power = inf->pxafb_backlight_power; | 2089 | fbi->backlight_power = inf->pxafb_backlight_power; |
2085 | fbi->lcd_power = inf->pxafb_lcd_power; | 2090 | fbi->lcd_power = inf->pxafb_lcd_power; |
2086 | 2091 | ||
@@ -2091,14 +2096,14 @@ static int __devinit pxafb_probe(struct platform_device *dev) | |||
2091 | goto failed_fbi; | 2096 | goto failed_fbi; |
2092 | } | 2097 | } |
2093 | 2098 | ||
2094 | r = request_mem_region(r->start, r->end - r->start + 1, dev->name); | 2099 | r = request_mem_region(r->start, resource_size(r), dev->name); |
2095 | if (r == NULL) { | 2100 | if (r == NULL) { |
2096 | dev_err(&dev->dev, "failed to request I/O memory\n"); | 2101 | dev_err(&dev->dev, "failed to request I/O memory\n"); |
2097 | ret = -EBUSY; | 2102 | ret = -EBUSY; |
2098 | goto failed_fbi; | 2103 | goto failed_fbi; |
2099 | } | 2104 | } |
2100 | 2105 | ||
2101 | fbi->mmio_base = ioremap(r->start, r->end - r->start + 1); | 2106 | fbi->mmio_base = ioremap(r->start, resource_size(r)); |
2102 | if (fbi->mmio_base == NULL) { | 2107 | if (fbi->mmio_base == NULL) { |
2103 | dev_err(&dev->dev, "failed to map I/O memory\n"); | 2108 | dev_err(&dev->dev, "failed to map I/O memory\n"); |
2104 | ret = -EBUSY; | 2109 | ret = -EBUSY; |
@@ -2197,7 +2202,7 @@ failed_free_dma: | |||
2197 | failed_free_io: | 2202 | failed_free_io: |
2198 | iounmap(fbi->mmio_base); | 2203 | iounmap(fbi->mmio_base); |
2199 | failed_free_res: | 2204 | failed_free_res: |
2200 | release_mem_region(r->start, r->end - r->start + 1); | 2205 | release_mem_region(r->start, resource_size(r)); |
2201 | failed_fbi: | 2206 | failed_fbi: |
2202 | clk_put(fbi->clk); | 2207 | clk_put(fbi->clk); |
2203 | platform_set_drvdata(dev, NULL); | 2208 | platform_set_drvdata(dev, NULL); |
@@ -2237,7 +2242,7 @@ static int __devexit pxafb_remove(struct platform_device *dev) | |||
2237 | iounmap(fbi->mmio_base); | 2242 | iounmap(fbi->mmio_base); |
2238 | 2243 | ||
2239 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); | 2244 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); |
2240 | release_mem_region(r->start, r->end - r->start + 1); | 2245 | release_mem_region(r->start, resource_size(r)); |
2241 | 2246 | ||
2242 | clk_put(fbi->clk); | 2247 | clk_put(fbi->clk); |
2243 | kfree(fbi); | 2248 | kfree(fbi); |
@@ -2248,11 +2253,12 @@ static int __devexit pxafb_remove(struct platform_device *dev) | |||
2248 | static struct platform_driver pxafb_driver = { | 2253 | static struct platform_driver pxafb_driver = { |
2249 | .probe = pxafb_probe, | 2254 | .probe = pxafb_probe, |
2250 | .remove = __devexit_p(pxafb_remove), | 2255 | .remove = __devexit_p(pxafb_remove), |
2251 | .suspend = pxafb_suspend, | ||
2252 | .resume = pxafb_resume, | ||
2253 | .driver = { | 2256 | .driver = { |
2254 | .owner = THIS_MODULE, | 2257 | .owner = THIS_MODULE, |
2255 | .name = "pxa2xx-fb", | 2258 | .name = "pxa2xx-fb", |
2259 | #ifdef CONFIG_PM | ||
2260 | .pm = &pxafb_pm_ops, | ||
2261 | #endif | ||
2256 | }, | 2262 | }, |
2257 | }; | 2263 | }; |
2258 | 2264 | ||
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c index ba3d71f5c7d0..9554ad5f9af7 100644 --- a/drivers/vlynq/vlynq.c +++ b/drivers/vlynq/vlynq.c | |||
@@ -702,7 +702,7 @@ static int vlynq_probe(struct platform_device *pdev) | |||
702 | dev->mem_start = mem_res->start; | 702 | dev->mem_start = mem_res->start; |
703 | dev->mem_end = mem_res->end; | 703 | dev->mem_end = mem_res->end; |
704 | 704 | ||
705 | len = regs_res->end - regs_res->start; | 705 | len = resource_size(regs_res); |
706 | if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) { | 706 | if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) { |
707 | printk(KERN_ERR "%s: Can't request vlynq registers\n", | 707 | printk(KERN_ERR "%s: Can't request vlynq registers\n", |
708 | dev_name(&dev->dev)); | 708 | dev_name(&dev->dev)); |
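Several hunks (pxafb and vlynq) replace open-coded "r->end - r->start + 1" — or, in the vlynq case, "r->end - r->start" without the +1 — with resource_size(). Resource ranges are inclusive, so resource_size(r) is r->end - r->start + 1, which means the vlynq change also corrects what was effectively an off-by-one in the requested register-window length. Sketch of the helper and its typical pairing; the helper's semantics follow linux/ioport.h, the surrounding lines are illustrative.

	/* resource_size() semantics (linux/ioport.h): inclusive range, hence the +1. */
	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}

	/* Typical pairing, as in the pxafb probe above (names illustrative): */
	if (!request_mem_region(r->start, resource_size(r), "example"))
		return -EBUSY;
	base = ioremap(r->start, resource_size(r));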