-rw-r--r--  Documentation/hw_random.txt           59
-rw-r--r--  Documentation/kernel-parameters.txt    4
-rw-r--r--  Makefile                               2
-rw-r--r--  drivers/ata/ahci.c                     6
-rw-r--r--  drivers/ata/libata-core.c             46
-rw-r--r--  drivers/ata/libata-scsi.c             14
-rw-r--r--  drivers/ata/pata_it821x.c              2
-rw-r--r--  drivers/ata/sata_promise.c           109
-rw-r--r--  drivers/char/hw_random/Kconfig         9
-rw-r--r--  drivers/mtd/devices/block2mtd.c        1
-rw-r--r--  include/linux/libata.h                 8
-rw-r--r--  kernel/acct.c                         23
-rw-r--r--  kernel/marker.c                       31
-rw-r--r--  kernel/printk.c                       83
-rw-r--r--  mm/bootmem.c                          25
-rw-r--r--  mm/vmscan.c                           27
-rw-r--r--  scripts/Makefile.modpost               6
-rw-r--r--  scripts/mod/file2alias.c               4
-rw-r--r--  scripts/mod/modpost.c                  5
-rw-r--r--  scripts/mod/modpost.h                  1
-rw-r--r--  security/smack/smackfs.c              35
21 files changed, 312 insertions, 188 deletions
diff --git a/Documentation/hw_random.txt b/Documentation/hw_random.txt index bb58c36b5845..690f52550c80 100644 --- a/Documentation/hw_random.txt +++ b/Documentation/hw_random.txt | |||
| @@ -1,33 +1,26 @@ | |||
| 1 | Hardware driver for Intel/AMD/VIA Random Number Generators (RNG) | ||
| 2 | Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com> | ||
| 3 | Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com> | ||
| 4 | |||
| 5 | Introduction: | 1 | Introduction: |
| 6 | 2 | ||
| 7 | The hw_random device driver is software that makes use of a | 3 | The hw_random framework is software that makes use of a |
| 8 | special hardware feature on your CPU or motherboard, | 4 | special hardware feature on your CPU or motherboard, |
| 9 | a Random Number Generator (RNG). | 5 | a Random Number Generator (RNG). The software has two parts: |
| 6 | a core providing the /dev/hw_random character device and its | ||
| 7 | sysfs support, plus a hardware-specific driver that plugs | ||
| 8 | into that core. | ||
| 10 | 9 | ||
| 11 | In order to make effective use of this device driver, you | 10 | To make the most effective use of these mechanisms, you |
| 12 | should download the support software as well. Download the | 11 | should download the support software as well. Download the |
| 13 | latest version of the "rng-tools" package from the | 12 | latest version of the "rng-tools" package from the |
| 14 | hw_random driver's official Web site: | 13 | hw_random driver's official Web site: |
| 15 | 14 | ||
| 16 | http://sourceforge.net/projects/gkernel/ | 15 | http://sourceforge.net/projects/gkernel/ |
| 17 | 16 | ||
| 18 | About the Intel RNG hardware, from the firmware hub datasheet: | 17 | Those tools use /dev/hw_random to fill the kernel entropy pool, |
| 19 | 18 | which is used internally and exported by the /dev/urandom and | |
| 20 | The Firmware Hub integrates a Random Number Generator (RNG) | 19 | /dev/random special files. |
| 21 | using thermal noise generated from inherently random quantum | ||
| 22 | mechanical properties of silicon. When not generating new random | ||
| 23 | bits the RNG circuitry will enter a low power state. Intel will | ||
| 24 | provide a binary software driver to give third party software | ||
| 25 | access to our RNG for use as a security feature. At this time, | ||
| 26 | the RNG is only to be used with a system in an OS-present state. | ||
| 27 | 20 | ||
| 28 | Theory of operation: | 21 | Theory of operation: |
| 29 | 22 | ||
| 30 | Character driver. Using the standard open() | 23 | CHARACTER DEVICE. Using the standard open() |
| 31 | and read() system calls, you can read random data from | 24 | and read() system calls, you can read random data from |
| 32 | the hardware RNG device. This data is NOT CHECKED by any | 25 | the hardware RNG device. This data is NOT CHECKED by any |
| 33 | fitness tests, and could potentially be bogus (if the | 26 | fitness tests, and could potentially be bogus (if the |
| @@ -36,9 +29,37 @@ Theory of operation: | |||
| 36 | a security-conscious person would run fitness tests on the | 29 | a security-conscious person would run fitness tests on the |
| 37 | data before assuming it is truly random. | 30 | data before assuming it is truly random. |
| 38 | 31 | ||
| 39 | /dev/hwrandom is char device major 10, minor 183. | 32 | The rng-tools package uses such tests in "rngd", and lets you |
| 33 | run them by hand with a "rngtest" utility. | ||
| 34 | |||
| 35 | /dev/hw_random is char device major 10, minor 183. | ||
| 36 | |||
| 37 | CLASS DEVICE. There is a /sys/class/misc/hw_random node with | ||
| 38 | two unique attributes, "rng_available" and "rng_current". The | ||
| 39 | "rng_available" attribute lists the hardware-specific drivers | ||
| 40 | available, while "rng_current" lists the one which is currently | ||
| 41 | connected to /dev/hw_random. If your system has more than one | ||
| 42 | RNG available, you may change the one used by writing a name from | ||
| 43 | the list in "rng_available" into "rng_current". | ||
| 44 | |||
| 45 | ========================================================================== | ||
| 46 | |||
| 47 | Hardware driver for Intel/AMD/VIA Random Number Generators (RNG) | ||
| 48 | Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com> | ||
| 49 | Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com> | ||
| 50 | |||
| 51 | |||
| 52 | About the Intel RNG hardware, from the firmware hub datasheet: | ||
| 53 | |||
| 54 | The Firmware Hub integrates a Random Number Generator (RNG) | ||
| 55 | using thermal noise generated from inherently random quantum | ||
| 56 | mechanical properties of silicon. When not generating new random | ||
| 57 | bits the RNG circuitry will enter a low power state. Intel will | ||
| 58 | provide a binary software driver to give third party software | ||
| 59 | access to our RNG for use as a security feature. At this time, | ||
| 60 | the RNG is only to be used with a system in an OS-present state. | ||
| 40 | 61 | ||
| 41 | Driver notes: | 62 | Intel RNG Driver notes: |
| 42 | 63 | ||
| 43 | * FIXME: support poll(2) | 64 | * FIXME: support poll(2) |
| 44 | 65 | ||
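The rewritten documentation above covers both halves of the interface: the /dev/hw_random character device and the /sys/class/misc/hw_random sysfs attributes. A minimal user-space sketch (not from this patch) of the character-device usage described in "Theory of operation" could look like the following; reading /sys/class/misc/hw_random/rng_current the same way shows which backend driver is currently wired to the device.

/* Minimal user-space sketch (not from this patch): read a few bytes from
 * /dev/hw_random as described in "Theory of operation" above.  Short-read
 * handling is deliberately simplistic.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[16];
        ssize_t n;
        int fd = open("/dev/hw_random", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/hw_random");
                return 1;
        }
        n = read(fd, buf, sizeof(buf));
        if (n <= 0) {
                perror("read /dev/hw_random");
                close(fd);
                return 1;
        }
        for (ssize_t i = 0; i < n; i++)
                printf("%02x", buf[i]);
        printf("\n");
        close(fd);
        return 0;
}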
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 650b0d8aa89b..508e2a2c9864 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -1130,6 +1130,10 @@ and is between 256 and 4096 characters. It is defined in the file | |||
| 1130 | memmap=nn[KMG]$ss[KMG] | 1130 | memmap=nn[KMG]$ss[KMG] |
| 1131 | [KNL,ACPI] Mark specific memory as reserved. | 1131 | [KNL,ACPI] Mark specific memory as reserved. |
| 1132 | Region of memory to be used, from ss to ss+nn. | 1132 | Region of memory to be used, from ss to ss+nn. |
| 1133 | Example: Exclude memory from 0x18690000-0x1869ffff | ||
| 1134 | memmap=64K$0x18690000 | ||
| 1135 | or | ||
| 1136 | memmap=0x10000$0x18690000 | ||
| 1133 | 1137 | ||
| 1134 | meye.*= [HW] Set MotionEye Camera parameters | 1138 | meye.*= [HW] Set MotionEye Camera parameters |
| 1135 | See Documentation/video4linux/meye.txt. | 1139 | See Documentation/video4linux/meye.txt. |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
| @@ -189,7 +189,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ |||
| 189 | # Alternatively CROSS_COMPILE can be set in the environment. | 189 | # Alternatively CROSS_COMPILE can be set in the environment. |
| 190 | # Default value for CROSS_COMPILE is not to prefix executables | 190 | # Default value for CROSS_COMPILE is not to prefix executables |
| 191 | # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile | 191 | # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile |
| 192 | 192 | export KBUILD_BUILDHOST := $(SUBARCH) | |
| 193 | ARCH ?= $(SUBARCH) | 193 | ARCH ?= $(SUBARCH) |
| 194 | CROSS_COMPILE ?= | 194 | CROSS_COMPILE ?= |
| 195 | 195 | ||
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 17ee6ed985d9..b1eb4e24c86a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
| @@ -433,6 +433,7 @@ static const struct ata_port_info ahci_port_info[] = { | |||
| 433 | /* board_ahci_sb600 */ | 433 | /* board_ahci_sb600 */ |
| 434 | { | 434 | { |
| 435 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | | 435 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | |
| 436 | AHCI_HFLAG_32BIT_ONLY | | ||
| 436 | AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP), | 437 | AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP), |
| 437 | .flags = AHCI_FLAG_COMMON, | 438 | .flags = AHCI_FLAG_COMMON, |
| 438 | .link_flags = AHCI_LFLAG_COMMON, | 439 | .link_flags = AHCI_LFLAG_COMMON, |
| @@ -1217,8 +1218,11 @@ static void ahci_dev_config(struct ata_device *dev) | |||
| 1217 | { | 1218 | { |
| 1218 | struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; | 1219 | struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; |
| 1219 | 1220 | ||
| 1220 | if (hpriv->flags & AHCI_HFLAG_SECT255) | 1221 | if (hpriv->flags & AHCI_HFLAG_SECT255) { |
| 1221 | dev->max_sectors = 255; | 1222 | dev->max_sectors = 255; |
| 1223 | ata_dev_printk(dev, KERN_INFO, | ||
| 1224 | "SB600 AHCI: limiting to 255 sectors per cmd\n"); | ||
| 1225 | } | ||
| 1222 | } | 1226 | } |
| 1223 | 1227 | ||
| 1224 | static unsigned int ahci_dev_classify(struct ata_port *ap) | 1228 | static unsigned int ahci_dev_classify(struct ata_port *ap) |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 4bbe31f98ef8..c4248b37ff64 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -1416,12 +1416,12 @@ static int ata_hpa_resize(struct ata_device *dev) | |||
| 1416 | /* read native max address */ | 1416 | /* read native max address */ |
| 1417 | rc = ata_read_native_max_address(dev, &native_sectors); | 1417 | rc = ata_read_native_max_address(dev, &native_sectors); |
| 1418 | if (rc) { | 1418 | if (rc) { |
| 1419 | /* If HPA isn't going to be unlocked, skip HPA | 1419 | /* If device aborted the command or HPA isn't going to |
| 1420 | * resizing from the next try. | 1420 | * be unlocked, skip HPA resizing. |
| 1421 | */ | 1421 | */ |
| 1422 | if (!ata_ignore_hpa) { | 1422 | if (rc == -EACCES || !ata_ignore_hpa) { |
| 1423 | ata_dev_printk(dev, KERN_WARNING, "HPA support seems " | 1423 | ata_dev_printk(dev, KERN_WARNING, "HPA support seems " |
| 1424 | "broken, will skip HPA handling\n"); | 1424 | "broken, skipping HPA handling\n"); |
| 1425 | dev->horkage |= ATA_HORKAGE_BROKEN_HPA; | 1425 | dev->horkage |= ATA_HORKAGE_BROKEN_HPA; |
| 1426 | 1426 | ||
| 1427 | /* we can continue if device aborted the command */ | 1427 | /* we can continue if device aborted the command */ |
| @@ -2092,24 +2092,34 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
| 2092 | id, sizeof(id[0]) * ATA_ID_WORDS, 0); | 2092 | id, sizeof(id[0]) * ATA_ID_WORDS, 0); |
| 2093 | if (err_mask) { | 2093 | if (err_mask) { |
| 2094 | if (err_mask & AC_ERR_NODEV_HINT) { | 2094 | if (err_mask & AC_ERR_NODEV_HINT) { |
| 2095 | DPRINTK("ata%u.%d: NODEV after polling detection\n", | 2095 | ata_dev_printk(dev, KERN_DEBUG, |
| 2096 | ap->print_id, dev->devno); | 2096 | "NODEV after polling detection\n"); |
| 2097 | return -ENOENT; | 2097 | return -ENOENT; |
| 2098 | } | 2098 | } |
| 2099 | 2099 | ||
| 2100 | /* Device or controller might have reported the wrong | 2100 | if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { |
| 2101 | * device class. Give a shot at the other IDENTIFY if | 2101 | /* Device or controller might have reported |
| 2102 | * the current one is aborted by the device. | 2102 | * the wrong device class. Give a shot at the |
| 2103 | */ | 2103 | * other IDENTIFY if the current one is |
| 2104 | if (may_fallback && | 2104 | * aborted by the device. |
| 2105 | (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { | 2105 | */ |
| 2106 | may_fallback = 0; | 2106 | if (may_fallback) { |
| 2107 | may_fallback = 0; | ||
| 2107 | 2108 | ||
| 2108 | if (class == ATA_DEV_ATA) | 2109 | if (class == ATA_DEV_ATA) |
| 2109 | class = ATA_DEV_ATAPI; | 2110 | class = ATA_DEV_ATAPI; |
| 2110 | else | 2111 | else |
| 2111 | class = ATA_DEV_ATA; | 2112 | class = ATA_DEV_ATA; |
| 2112 | goto retry; | 2113 | goto retry; |
| 2114 | } | ||
| 2115 | |||
| 2116 | /* Control reaches here iff the device aborted | ||
| 2117 | * both flavors of IDENTIFYs which happens | ||
| 2118 | * sometimes with phantom devices. | ||
| 2119 | */ | ||
| 2120 | ata_dev_printk(dev, KERN_DEBUG, | ||
| 2121 | "both IDENTIFYs aborted, assuming NODEV\n"); | ||
| 2122 | return -ENOENT; | ||
| 2113 | } | 2123 | } |
| 2114 | 2124 | ||
| 2115 | rc = -EIO; | 2125 | rc = -EIO; |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 8f0e8f2bc628..15795394b0a8 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
| @@ -527,6 +527,14 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, | |||
| 527 | return qc; | 527 | return qc; |
| 528 | } | 528 | } |
| 529 | 529 | ||
| 530 | static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) | ||
| 531 | { | ||
| 532 | struct scsi_cmnd *scmd = qc->scsicmd; | ||
| 533 | |||
| 534 | qc->extrabytes = scmd->request->extra_len; | ||
| 535 | qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; | ||
| 536 | } | ||
| 537 | |||
| 530 | /** | 538 | /** |
| 531 | * ata_dump_status - user friendly display of error info | 539 | * ata_dump_status - user friendly display of error info |
| 532 | * @id: id of the port in question | 540 | * @id: id of the port in question |
| @@ -2539,7 +2547,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
| 2539 | } | 2547 | } |
| 2540 | 2548 | ||
| 2541 | qc->tf.command = ATA_CMD_PACKET; | 2549 | qc->tf.command = ATA_CMD_PACKET; |
| 2542 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; | 2550 | ata_qc_set_pc_nbytes(qc); |
| 2543 | 2551 | ||
| 2544 | /* check whether ATAPI DMA is safe */ | 2552 | /* check whether ATAPI DMA is safe */ |
| 2545 | if (!using_pio && ata_check_atapi_dma(qc)) | 2553 | if (!using_pio && ata_check_atapi_dma(qc)) |
| @@ -2550,7 +2558,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
| 2550 | * want to set it properly, and for DMA where it is | 2558 | * want to set it properly, and for DMA where it is |
| 2551 | * effectively meaningless. | 2559 | * effectively meaningless. |
| 2552 | */ | 2560 | */ |
| 2553 | nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024); | 2561 | nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024); |
| 2554 | 2562 | ||
| 2555 | /* Most ATAPI devices which honor transfer chunk size don't | 2563 | /* Most ATAPI devices which honor transfer chunk size don't |
| 2556 | * behave according to the spec when odd chunk size which | 2564 | * behave according to the spec when odd chunk size which |
| @@ -2876,7 +2884,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
| 2876 | * TODO: find out if we need to do more here to | 2884 | * TODO: find out if we need to do more here to |
| 2877 | * cover scatter/gather case. | 2885 | * cover scatter/gather case. |
| 2878 | */ | 2886 | */ |
| 2879 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; | 2887 | ata_qc_set_pc_nbytes(qc); |
| 2880 | 2888 | ||
| 2881 | /* request result TF and be quiet about device error */ | 2889 | /* request result TF and be quiet about device error */ |
| 2882 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; | 2890 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; |
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 109ddd42c266..257951d03dbb 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c | |||
| @@ -564,7 +564,7 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc) | |||
| 564 | struct it821x_dev *itdev = ap->private_data; | 564 | struct it821x_dev *itdev = ap->private_data; |
| 565 | 565 | ||
| 566 | /* Only use dma for transfers to/from the media. */ | 566 | /* Only use dma for transfers to/from the media. */ |
| 567 | if (qc->nbytes < 2048) | 567 | if (ata_qc_raw_nbytes(qc) < 2048) |
| 568 | return -EOPNOTSUPP; | 568 | return -EOPNOTSUPP; |
| 569 | 569 | ||
| 570 | /* No ATAPI DMA in smart mode */ | 570 | /* No ATAPI DMA in smart mode */ |
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index f251a5f569d5..11c1afea2db2 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | #include "sata_promise.h" | 46 | #include "sata_promise.h" |
| 47 | 47 | ||
| 48 | #define DRV_NAME "sata_promise" | 48 | #define DRV_NAME "sata_promise" |
| 49 | #define DRV_VERSION "2.11" | 49 | #define DRV_VERSION "2.12" |
| 50 | 50 | ||
| 51 | enum { | 51 | enum { |
| 52 | PDC_MAX_PORTS = 4, | 52 | PDC_MAX_PORTS = 4, |
| @@ -145,7 +145,9 @@ static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc); | |||
| 145 | static void pdc_irq_clear(struct ata_port *ap); | 145 | static void pdc_irq_clear(struct ata_port *ap); |
| 146 | static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc); | 146 | static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc); |
| 147 | static void pdc_freeze(struct ata_port *ap); | 147 | static void pdc_freeze(struct ata_port *ap); |
| 148 | static void pdc_sata_freeze(struct ata_port *ap); | ||
| 148 | static void pdc_thaw(struct ata_port *ap); | 149 | static void pdc_thaw(struct ata_port *ap); |
| 150 | static void pdc_sata_thaw(struct ata_port *ap); | ||
| 149 | static void pdc_pata_error_handler(struct ata_port *ap); | 151 | static void pdc_pata_error_handler(struct ata_port *ap); |
| 150 | static void pdc_sata_error_handler(struct ata_port *ap); | 152 | static void pdc_sata_error_handler(struct ata_port *ap); |
| 151 | static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); | 153 | static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); |
| @@ -180,8 +182,8 @@ static const struct ata_port_operations pdc_sata_ops = { | |||
| 180 | 182 | ||
| 181 | .qc_prep = pdc_qc_prep, | 183 | .qc_prep = pdc_qc_prep, |
| 182 | .qc_issue = pdc_qc_issue_prot, | 184 | .qc_issue = pdc_qc_issue_prot, |
| 183 | .freeze = pdc_freeze, | 185 | .freeze = pdc_sata_freeze, |
| 184 | .thaw = pdc_thaw, | 186 | .thaw = pdc_sata_thaw, |
| 185 | .error_handler = pdc_sata_error_handler, | 187 | .error_handler = pdc_sata_error_handler, |
| 186 | .post_internal_cmd = pdc_post_internal_cmd, | 188 | .post_internal_cmd = pdc_post_internal_cmd, |
| 187 | .cable_detect = pdc_sata_cable_detect, | 189 | .cable_detect = pdc_sata_cable_detect, |
| @@ -205,8 +207,8 @@ static const struct ata_port_operations pdc_old_sata_ops = { | |||
| 205 | 207 | ||
| 206 | .qc_prep = pdc_qc_prep, | 208 | .qc_prep = pdc_qc_prep, |
| 207 | .qc_issue = pdc_qc_issue_prot, | 209 | .qc_issue = pdc_qc_issue_prot, |
| 208 | .freeze = pdc_freeze, | 210 | .freeze = pdc_sata_freeze, |
| 209 | .thaw = pdc_thaw, | 211 | .thaw = pdc_sata_thaw, |
| 210 | .error_handler = pdc_sata_error_handler, | 212 | .error_handler = pdc_sata_error_handler, |
| 211 | .post_internal_cmd = pdc_post_internal_cmd, | 213 | .post_internal_cmd = pdc_post_internal_cmd, |
| 212 | .cable_detect = pdc_sata_cable_detect, | 214 | .cable_detect = pdc_sata_cable_detect, |
| @@ -631,6 +633,41 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) | |||
| 631 | } | 633 | } |
| 632 | } | 634 | } |
| 633 | 635 | ||
| 636 | static int pdc_is_sataii_tx4(unsigned long flags) | ||
| 637 | { | ||
| 638 | const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS; | ||
| 639 | return (flags & mask) == mask; | ||
| 640 | } | ||
| 641 | |||
| 642 | static unsigned int pdc_port_no_to_ata_no(unsigned int port_no, | ||
| 643 | int is_sataii_tx4) | ||
| 644 | { | ||
| 645 | static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2}; | ||
| 646 | return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no; | ||
| 647 | } | ||
| 648 | |||
| 649 | static unsigned int pdc_sata_nr_ports(const struct ata_port *ap) | ||
| 650 | { | ||
| 651 | return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2; | ||
| 652 | } | ||
| 653 | |||
| 654 | static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap) | ||
| 655 | { | ||
| 656 | const struct ata_host *host = ap->host; | ||
| 657 | unsigned int nr_ports = pdc_sata_nr_ports(ap); | ||
| 658 | unsigned int i; | ||
| 659 | |||
| 660 | for(i = 0; i < nr_ports && host->ports[i] != ap; ++i) | ||
| 661 | ; | ||
| 662 | BUG_ON(i >= nr_ports); | ||
| 663 | return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags)); | ||
| 664 | } | ||
| 665 | |||
| 666 | static unsigned int pdc_sata_hotplug_offset(const struct ata_port *ap) | ||
| 667 | { | ||
| 668 | return (ap->flags & PDC_FLAG_GEN_II) ? PDC2_SATA_PLUG_CSR : PDC_SATA_PLUG_CSR; | ||
| 669 | } | ||
| 670 | |||
| 634 | static void pdc_freeze(struct ata_port *ap) | 671 | static void pdc_freeze(struct ata_port *ap) |
| 635 | { | 672 | { |
| 636 | void __iomem *mmio = ap->ioaddr.cmd_addr; | 673 | void __iomem *mmio = ap->ioaddr.cmd_addr; |
| @@ -643,6 +680,29 @@ static void pdc_freeze(struct ata_port *ap) | |||
| 643 | readl(mmio + PDC_CTLSTAT); /* flush */ | 680 | readl(mmio + PDC_CTLSTAT); /* flush */ |
| 644 | } | 681 | } |
| 645 | 682 | ||
| 683 | static void pdc_sata_freeze(struct ata_port *ap) | ||
| 684 | { | ||
| 685 | struct ata_host *host = ap->host; | ||
| 686 | void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR]; | ||
| 687 | unsigned int hotplug_offset = pdc_sata_hotplug_offset(ap); | ||
| 688 | unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap); | ||
| 689 | u32 hotplug_status; | ||
| 690 | |||
| 691 | /* Disable hotplug events on this port. | ||
| 692 | * | ||
| 693 | * Locking: | ||
| 694 | * 1) hotplug register accesses must be serialised via host->lock | ||
| 695 | * 2) ap->lock == &ap->host->lock | ||
| 696 | * 3) ->freeze() and ->thaw() are called with ap->lock held | ||
| 697 | */ | ||
| 698 | hotplug_status = readl(host_mmio + hotplug_offset); | ||
| 699 | hotplug_status |= 0x11 << (ata_no + 16); | ||
| 700 | writel(hotplug_status, host_mmio + hotplug_offset); | ||
| 701 | readl(host_mmio + hotplug_offset); /* flush */ | ||
| 702 | |||
| 703 | pdc_freeze(ap); | ||
| 704 | } | ||
| 705 | |||
| 646 | static void pdc_thaw(struct ata_port *ap) | 706 | static void pdc_thaw(struct ata_port *ap) |
| 647 | { | 707 | { |
| 648 | void __iomem *mmio = ap->ioaddr.cmd_addr; | 708 | void __iomem *mmio = ap->ioaddr.cmd_addr; |
| @@ -658,6 +718,26 @@ static void pdc_thaw(struct ata_port *ap) | |||
| 658 | readl(mmio + PDC_CTLSTAT); /* flush */ | 718 | readl(mmio + PDC_CTLSTAT); /* flush */ |
| 659 | } | 719 | } |
| 660 | 720 | ||
| 721 | static void pdc_sata_thaw(struct ata_port *ap) | ||
| 722 | { | ||
| 723 | struct ata_host *host = ap->host; | ||
| 724 | void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR]; | ||
| 725 | unsigned int hotplug_offset = pdc_sata_hotplug_offset(ap); | ||
| 726 | unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap); | ||
| 727 | u32 hotplug_status; | ||
| 728 | |||
| 729 | pdc_thaw(ap); | ||
| 730 | |||
| 731 | /* Enable hotplug events on this port. | ||
| 732 | * Locking: see pdc_sata_freeze(). | ||
| 733 | */ | ||
| 734 | hotplug_status = readl(host_mmio + hotplug_offset); | ||
| 735 | hotplug_status |= 0x11 << ata_no; | ||
| 736 | hotplug_status &= ~(0x11 << (ata_no + 16)); | ||
| 737 | writel(hotplug_status, host_mmio + hotplug_offset); | ||
| 738 | readl(host_mmio + hotplug_offset); /* flush */ | ||
| 739 | } | ||
| 740 | |||
| 661 | static void pdc_common_error_handler(struct ata_port *ap, ata_reset_fn_t hardreset) | 741 | static void pdc_common_error_handler(struct ata_port *ap, ata_reset_fn_t hardreset) |
| 662 | { | 742 | { |
| 663 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) | 743 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) |
| @@ -765,19 +845,6 @@ static void pdc_irq_clear(struct ata_port *ap) | |||
| 765 | readl(mmio + PDC_INT_SEQMASK); | 845 | readl(mmio + PDC_INT_SEQMASK); |
| 766 | } | 846 | } |
| 767 | 847 | ||
| 768 | static int pdc_is_sataii_tx4(unsigned long flags) | ||
| 769 | { | ||
| 770 | const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS; | ||
| 771 | return (flags & mask) == mask; | ||
| 772 | } | ||
| 773 | |||
| 774 | static unsigned int pdc_port_no_to_ata_no(unsigned int port_no, | ||
| 775 | int is_sataii_tx4) | ||
| 776 | { | ||
| 777 | static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2}; | ||
| 778 | return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no; | ||
| 779 | } | ||
| 780 | |||
| 781 | static irqreturn_t pdc_interrupt(int irq, void *dev_instance) | 848 | static irqreturn_t pdc_interrupt(int irq, void *dev_instance) |
| 782 | { | 849 | { |
| 783 | struct ata_host *host = dev_instance; | 850 | struct ata_host *host = dev_instance; |
| @@ -799,6 +866,8 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance) | |||
| 799 | 866 | ||
| 800 | mmio_base = host->iomap[PDC_MMIO_BAR]; | 867 | mmio_base = host->iomap[PDC_MMIO_BAR]; |
| 801 | 868 | ||
| 869 | spin_lock(&host->lock); | ||
| 870 | |||
| 802 | /* read and clear hotplug flags for all ports */ | 871 | /* read and clear hotplug flags for all ports */ |
| 803 | if (host->ports[0]->flags & PDC_FLAG_GEN_II) | 872 | if (host->ports[0]->flags & PDC_FLAG_GEN_II) |
| 804 | hotplug_offset = PDC2_SATA_PLUG_CSR; | 873 | hotplug_offset = PDC2_SATA_PLUG_CSR; |
| @@ -814,11 +883,9 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance) | |||
| 814 | 883 | ||
| 815 | if (mask == 0xffffffff && hotplug_status == 0) { | 884 | if (mask == 0xffffffff && hotplug_status == 0) { |
| 816 | VPRINTK("QUICK EXIT 2\n"); | 885 | VPRINTK("QUICK EXIT 2\n"); |
| 817 | return IRQ_NONE; | 886 | goto done_irq; |
| 818 | } | 887 | } |
| 819 | 888 | ||
| 820 | spin_lock(&host->lock); | ||
| 821 | |||
| 822 | mask &= 0xffff; /* only 16 tags possible */ | 889 | mask &= 0xffff; /* only 16 tags possible */ |
| 823 | if (mask == 0 && hotplug_status == 0) { | 890 | if (mask == 0 && hotplug_status == 0) { |
| 824 | VPRINTK("QUICK EXIT 3\n"); | 891 | VPRINTK("QUICK EXIT 3\n"); |
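For readers following the new freeze/thaw paths: the hotplug bits that pdc_sata_freeze() sets live in the upper half of the plug CSR, at an offset derived from the remapped ATA port number. The following stand-alone sketch is not driver code; it only reuses the remap table and shift expression from the patch to print which bits each SATAII TX4 port ends up masking.

/* Stand-alone sketch: mirror of pdc_port_no_to_ata_no() from the patch,
 * showing which hotplug CSR bits pdc_sata_freeze() sets for each port of
 * a SATAII TX4 (remap table { 3, 1, 0, 2 } taken from the patch above).
 */
#include <stdio.h>

static unsigned int port_no_to_ata_no(unsigned int port_no, int is_sataii_tx4)
{
        static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2 };
        return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
}

int main(void)
{
        for (unsigned int port = 0; port < 4; port++) {
                unsigned int ata_no = port_no_to_ata_no(port, 1);
                unsigned int freeze_bits = 0x11u << (ata_no + 16);

                printf("port %u -> ata_no %u, freeze sets CSR bits 0x%08x\n",
                       port, ata_no, freeze_bits);
        }
        return 0;
}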
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 6bbd4fa50f3b..8d6c2089d2a8 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
| @@ -9,7 +9,14 @@ config HW_RANDOM | |||
| 9 | Hardware Random Number Generator Core infrastructure. | 9 | Hardware Random Number Generator Core infrastructure. |
| 10 | 10 | ||
| 11 | To compile this driver as a module, choose M here: the | 11 | To compile this driver as a module, choose M here: the |
| 12 | module will be called rng-core. | 12 | module will be called rng-core. This provides a device |
| 13 | that's usually called /dev/hw_random, and which exposes one | ||
| 14 | of possibly several hardware random number generators. | ||
| 15 | |||
| 16 | These hardware random number generators do not feed directly | ||
| 17 | into the kernel's random number generator. That is usually | ||
| 18 | handled by the "rngd" daemon. Documentation/hw_random.txt | ||
| 19 | has more information. | ||
| 13 | 20 | ||
| 14 | If unsure, say Y. | 21 | If unsure, say Y. |
| 15 | 22 | ||
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index eeaaa9dce6ef..ad1880c67518 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c | |||
| @@ -408,7 +408,6 @@ static int block2mtd_setup2(const char *val) | |||
| 408 | if (token[1]) { | 408 | if (token[1]) { |
| 409 | ret = parse_num(&erase_size, token[1]); | 409 | ret = parse_num(&erase_size, token[1]); |
| 410 | if (ret) { | 410 | if (ret) { |
| 411 | kfree(name); | ||
| 412 | parse_err("illegal erase size"); | 411 | parse_err("illegal erase size"); |
| 413 | } | 412 | } |
| 414 | } | 413 | } |
diff --git a/include/linux/libata.h b/include/linux/libata.h index a05f60013642..269cdba09578 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -463,6 +463,7 @@ struct ata_queued_cmd { | |||
| 463 | unsigned int sect_size; | 463 | unsigned int sect_size; |
| 464 | 464 | ||
| 465 | unsigned int nbytes; | 465 | unsigned int nbytes; |
| 466 | unsigned int extrabytes; | ||
| 466 | unsigned int curbytes; | 467 | unsigned int curbytes; |
| 467 | 468 | ||
| 468 | struct scatterlist *cursg; | 469 | struct scatterlist *cursg; |
| @@ -1336,6 +1337,11 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap, | |||
| 1336 | return NULL; | 1337 | return NULL; |
| 1337 | } | 1338 | } |
| 1338 | 1339 | ||
| 1340 | static inline unsigned int ata_qc_raw_nbytes(struct ata_queued_cmd *qc) | ||
| 1341 | { | ||
| 1342 | return qc->nbytes - min(qc->extrabytes, qc->nbytes); | ||
| 1343 | } | ||
| 1344 | |||
| 1339 | static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf) | 1345 | static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf) |
| 1340 | { | 1346 | { |
| 1341 | memset(tf, 0, sizeof(*tf)); | 1347 | memset(tf, 0, sizeof(*tf)); |
| @@ -1354,7 +1360,7 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc) | |||
| 1354 | qc->flags = 0; | 1360 | qc->flags = 0; |
| 1355 | qc->cursg = NULL; | 1361 | qc->cursg = NULL; |
| 1356 | qc->cursg_ofs = 0; | 1362 | qc->cursg_ofs = 0; |
| 1357 | qc->nbytes = qc->curbytes = 0; | 1363 | qc->nbytes = qc->extrabytes = qc->curbytes = 0; |
| 1358 | qc->n_elem = 0; | 1364 | qc->n_elem = 0; |
| 1359 | qc->err_mask = 0; | 1365 | qc->err_mask = 0; |
| 1360 | qc->sect_size = ATA_SECT_SIZE; | 1366 | qc->sect_size = ATA_SECT_SIZE; |
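Taken together with the libata-scsi.c and pata_it821x.c hunks above, the new extrabytes field lets qc->nbytes keep including the block layer's extra_len padding while ata_qc_raw_nbytes() recovers the actual payload size. Below is a stand-alone model of that arithmetic in plain user-space C with a stub struct; it is an illustration, not libata code.

/* Stand-alone model of the nbytes/extrabytes split introduced above:
 * set_pc_nbytes() mimics ata_qc_set_pc_nbytes(), raw_nbytes() mimics
 * ata_qc_raw_nbytes().  The qc struct here is a stub for illustration.
 */
#include <assert.h>

struct qc_stub {
        unsigned int nbytes;
        unsigned int extrabytes;
};

static void set_pc_nbytes(struct qc_stub *qc, unsigned int scsi_bufflen,
                          unsigned int extra_len)
{
        qc->extrabytes = extra_len;
        qc->nbytes = scsi_bufflen + extra_len;
}

static unsigned int raw_nbytes(const struct qc_stub *qc)
{
        /* same guard as the patch: never underflow if extrabytes > nbytes */
        unsigned int extra = qc->extrabytes < qc->nbytes ? qc->extrabytes
                                                         : qc->nbytes;
        return qc->nbytes - extra;
}

int main(void)
{
        struct qc_stub qc;

        set_pc_nbytes(&qc, 2048, 4);    /* 2 KiB payload + 4 padding bytes */
        assert(qc.nbytes == 2052);      /* transfer length seen by the HSM */
        assert(raw_nbytes(&qc) == 2048);/* payload size seen by the driver */
        return 0;
}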
diff --git a/kernel/acct.c b/kernel/acct.c index 521dfa53cb99..91e1cfd734d2 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
| @@ -58,6 +58,7 @@ | |||
| 58 | #include <asm/uaccess.h> | 58 | #include <asm/uaccess.h> |
| 59 | #include <asm/div64.h> | 59 | #include <asm/div64.h> |
| 60 | #include <linux/blkdev.h> /* sector_div */ | 60 | #include <linux/blkdev.h> /* sector_div */ |
| 61 | #include <linux/pid_namespace.h> | ||
| 61 | 62 | ||
| 62 | /* | 63 | /* |
| 63 | * These constants control the amount of freespace that suspend and | 64 | * These constants control the amount of freespace that suspend and |
| @@ -74,7 +75,7 @@ int acct_parm[3] = {4, 2, 30}; | |||
| 74 | /* | 75 | /* |
| 75 | * External references and all of the globals. | 76 | * External references and all of the globals. |
| 76 | */ | 77 | */ |
| 77 | static void do_acct_process(struct file *); | 78 | static void do_acct_process(struct pid_namespace *ns, struct file *); |
| 78 | 79 | ||
| 79 | /* | 80 | /* |
| 80 | * This structure is used so that all the data protected by lock | 81 | * This structure is used so that all the data protected by lock |
| @@ -86,6 +87,7 @@ struct acct_glbs { | |||
| 86 | volatile int active; | 87 | volatile int active; |
| 87 | volatile int needcheck; | 88 | volatile int needcheck; |
| 88 | struct file *file; | 89 | struct file *file; |
| 90 | struct pid_namespace *ns; | ||
| 89 | struct timer_list timer; | 91 | struct timer_list timer; |
| 90 | }; | 92 | }; |
| 91 | 93 | ||
| @@ -175,9 +177,11 @@ out: | |||
| 175 | static void acct_file_reopen(struct file *file) | 177 | static void acct_file_reopen(struct file *file) |
| 176 | { | 178 | { |
| 177 | struct file *old_acct = NULL; | 179 | struct file *old_acct = NULL; |
| 180 | struct pid_namespace *old_ns = NULL; | ||
| 178 | 181 | ||
| 179 | if (acct_globals.file) { | 182 | if (acct_globals.file) { |
| 180 | old_acct = acct_globals.file; | 183 | old_acct = acct_globals.file; |
| 184 | old_ns = acct_globals.ns; | ||
| 181 | del_timer(&acct_globals.timer); | 185 | del_timer(&acct_globals.timer); |
| 182 | acct_globals.active = 0; | 186 | acct_globals.active = 0; |
| 183 | acct_globals.needcheck = 0; | 187 | acct_globals.needcheck = 0; |
| @@ -185,6 +189,7 @@ static void acct_file_reopen(struct file *file) | |||
| 185 | } | 189 | } |
| 186 | if (file) { | 190 | if (file) { |
| 187 | acct_globals.file = file; | 191 | acct_globals.file = file; |
| 192 | acct_globals.ns = get_pid_ns(task_active_pid_ns(current)); | ||
| 188 | acct_globals.needcheck = 0; | 193 | acct_globals.needcheck = 0; |
| 189 | acct_globals.active = 1; | 194 | acct_globals.active = 1; |
| 190 | /* It's been deleted if it was used before so this is safe */ | 195 | /* It's been deleted if it was used before so this is safe */ |
| @@ -196,8 +201,9 @@ static void acct_file_reopen(struct file *file) | |||
| 196 | if (old_acct) { | 201 | if (old_acct) { |
| 197 | mnt_unpin(old_acct->f_path.mnt); | 202 | mnt_unpin(old_acct->f_path.mnt); |
| 198 | spin_unlock(&acct_globals.lock); | 203 | spin_unlock(&acct_globals.lock); |
| 199 | do_acct_process(old_acct); | 204 | do_acct_process(old_ns, old_acct); |
| 200 | filp_close(old_acct, NULL); | 205 | filp_close(old_acct, NULL); |
| 206 | put_pid_ns(old_ns); | ||
| 201 | spin_lock(&acct_globals.lock); | 207 | spin_lock(&acct_globals.lock); |
| 202 | } | 208 | } |
| 203 | } | 209 | } |
| @@ -419,7 +425,7 @@ static u32 encode_float(u64 value) | |||
| 419 | /* | 425 | /* |
| 420 | * do_acct_process does all actual work. Caller holds the reference to file. | 426 | * do_acct_process does all actual work. Caller holds the reference to file. |
| 421 | */ | 427 | */ |
| 422 | static void do_acct_process(struct file *file) | 428 | static void do_acct_process(struct pid_namespace *ns, struct file *file) |
| 423 | { | 429 | { |
| 424 | struct pacct_struct *pacct = ¤t->signal->pacct; | 430 | struct pacct_struct *pacct = ¤t->signal->pacct; |
| 425 | acct_t ac; | 431 | acct_t ac; |
| @@ -481,8 +487,10 @@ static void do_acct_process(struct file *file) | |||
| 481 | ac.ac_gid16 = current->gid; | 487 | ac.ac_gid16 = current->gid; |
| 482 | #endif | 488 | #endif |
| 483 | #if ACCT_VERSION==3 | 489 | #if ACCT_VERSION==3 |
| 484 | ac.ac_pid = current->tgid; | 490 | ac.ac_pid = task_tgid_nr_ns(current, ns); |
| 485 | ac.ac_ppid = current->real_parent->tgid; | 491 | rcu_read_lock(); |
| 492 | ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns); | ||
| 493 | rcu_read_unlock(); | ||
| 486 | #endif | 494 | #endif |
| 487 | 495 | ||
| 488 | spin_lock_irq(¤t->sighand->siglock); | 496 | spin_lock_irq(¤t->sighand->siglock); |
| @@ -578,6 +586,7 @@ void acct_collect(long exitcode, int group_dead) | |||
| 578 | void acct_process(void) | 586 | void acct_process(void) |
| 579 | { | 587 | { |
| 580 | struct file *file = NULL; | 588 | struct file *file = NULL; |
| 589 | struct pid_namespace *ns; | ||
| 581 | 590 | ||
| 582 | /* | 591 | /* |
| 583 | * accelerate the common fastpath: | 592 | * accelerate the common fastpath: |
| @@ -592,8 +601,10 @@ void acct_process(void) | |||
| 592 | return; | 601 | return; |
| 593 | } | 602 | } |
| 594 | get_file(file); | 603 | get_file(file); |
| 604 | ns = get_pid_ns(acct_globals.ns); | ||
| 595 | spin_unlock(&acct_globals.lock); | 605 | spin_unlock(&acct_globals.lock); |
| 596 | 606 | ||
| 597 | do_acct_process(file); | 607 | do_acct_process(ns, file); |
| 598 | fput(file); | 608 | fput(file); |
| 609 | put_pid_ns(ns); | ||
| 599 | } | 610 | } |
diff --git a/kernel/marker.c b/kernel/marker.c index 48a4ea5afffd..041c33e3e95c 100644 --- a/kernel/marker.c +++ b/kernel/marker.c | |||
| @@ -104,18 +104,18 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, | |||
| 104 | char ptype; | 104 | char ptype; |
| 105 | 105 | ||
| 106 | /* | 106 | /* |
| 107 | * disabling preemption to make sure the teardown of the callbacks can | 107 | * preempt_disable does two things : disabling preemption to make sure |
| 108 | * be done correctly when they are in modules and they insure RCU read | 108 | * the teardown of the callbacks can be done correctly when they are in |
| 109 | * coherency. | 109 | * modules and they insure RCU read coherency. |
| 110 | */ | 110 | */ |
| 111 | preempt_disable(); | 111 | preempt_disable(); |
| 112 | ptype = ACCESS_ONCE(mdata->ptype); | 112 | ptype = mdata->ptype; |
| 113 | if (likely(!ptype)) { | 113 | if (likely(!ptype)) { |
| 114 | marker_probe_func *func; | 114 | marker_probe_func *func; |
| 115 | /* Must read the ptype before ptr. They are not data dependant, | 115 | /* Must read the ptype before ptr. They are not data dependant, |
| 116 | * so we put an explicit smp_rmb() here. */ | 116 | * so we put an explicit smp_rmb() here. */ |
| 117 | smp_rmb(); | 117 | smp_rmb(); |
| 118 | func = ACCESS_ONCE(mdata->single.func); | 118 | func = mdata->single.func; |
| 119 | /* Must read the ptr before private data. They are not data | 119 | /* Must read the ptr before private data. They are not data |
| 120 | * dependant, so we put an explicit smp_rmb() here. */ | 120 | * dependant, so we put an explicit smp_rmb() here. */ |
| 121 | smp_rmb(); | 121 | smp_rmb(); |
| @@ -133,7 +133,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, | |||
| 133 | * in the fast path, so put the explicit barrier here. | 133 | * in the fast path, so put the explicit barrier here. |
| 134 | */ | 134 | */ |
| 135 | smp_read_barrier_depends(); | 135 | smp_read_barrier_depends(); |
| 136 | multi = ACCESS_ONCE(mdata->multi); | 136 | multi = mdata->multi; |
| 137 | for (i = 0; multi[i].func; i++) { | 137 | for (i = 0; multi[i].func; i++) { |
| 138 | va_start(args, fmt); | 138 | va_start(args, fmt); |
| 139 | multi[i].func(multi[i].probe_private, call_private, fmt, | 139 | multi[i].func(multi[i].probe_private, call_private, fmt, |
| @@ -161,13 +161,13 @@ void marker_probe_cb_noarg(const struct marker *mdata, | |||
| 161 | char ptype; | 161 | char ptype; |
| 162 | 162 | ||
| 163 | preempt_disable(); | 163 | preempt_disable(); |
| 164 | ptype = ACCESS_ONCE(mdata->ptype); | 164 | ptype = mdata->ptype; |
| 165 | if (likely(!ptype)) { | 165 | if (likely(!ptype)) { |
| 166 | marker_probe_func *func; | 166 | marker_probe_func *func; |
| 167 | /* Must read the ptype before ptr. They are not data dependant, | 167 | /* Must read the ptype before ptr. They are not data dependant, |
| 168 | * so we put an explicit smp_rmb() here. */ | 168 | * so we put an explicit smp_rmb() here. */ |
| 169 | smp_rmb(); | 169 | smp_rmb(); |
| 170 | func = ACCESS_ONCE(mdata->single.func); | 170 | func = mdata->single.func; |
| 171 | /* Must read the ptr before private data. They are not data | 171 | /* Must read the ptr before private data. They are not data |
| 172 | * dependant, so we put an explicit smp_rmb() here. */ | 172 | * dependant, so we put an explicit smp_rmb() here. */ |
| 173 | smp_rmb(); | 173 | smp_rmb(); |
| @@ -183,7 +183,7 @@ void marker_probe_cb_noarg(const struct marker *mdata, | |||
| 183 | * in the fast path, so put the explicit barrier here. | 183 | * in the fast path, so put the explicit barrier here. |
| 184 | */ | 184 | */ |
| 185 | smp_read_barrier_depends(); | 185 | smp_read_barrier_depends(); |
| 186 | multi = ACCESS_ONCE(mdata->multi); | 186 | multi = mdata->multi; |
| 187 | for (i = 0; multi[i].func; i++) | 187 | for (i = 0; multi[i].func; i++) |
| 188 | multi[i].func(multi[i].probe_private, call_private, fmt, | 188 | multi[i].func(multi[i].probe_private, call_private, fmt, |
| 189 | &args); | 189 | &args); |
| @@ -551,9 +551,9 @@ static int set_marker(struct marker_entry **entry, struct marker *elem, | |||
| 551 | 551 | ||
| 552 | /* | 552 | /* |
| 553 | * Disable a marker and its probe callback. | 553 | * Disable a marker and its probe callback. |
| 554 | * Note: only after a synchronize_sched() issued after setting elem->call to the | 554 | * Note: only waiting an RCU period after setting elem->call to the empty |
| 555 | * empty function insures that the original callback is not used anymore. This | 555 | * function insures that the original callback is not used anymore. This insured |
| 556 | * insured by preemption disabling around the call site. | 556 | * by preempt_disable around the call site. |
| 557 | */ | 557 | */ |
| 558 | static void disable_marker(struct marker *elem) | 558 | static void disable_marker(struct marker *elem) |
| 559 | { | 559 | { |
| @@ -565,8 +565,8 @@ static void disable_marker(struct marker *elem) | |||
| 565 | elem->ptype = 0; /* single probe */ | 565 | elem->ptype = 0; /* single probe */ |
| 566 | /* | 566 | /* |
| 567 | * Leave the private data and id there, because removal is racy and | 567 | * Leave the private data and id there, because removal is racy and |
| 568 | * should be done only after a synchronize_sched(). These are never used | 568 | * should be done only after an RCU period. These are never used until |
| 569 | * until the next initialization anyway. | 569 | * the next initialization anyway. |
| 570 | */ | 570 | */ |
| 571 | } | 571 | } |
| 572 | 572 | ||
| @@ -601,9 +601,6 @@ void marker_update_probe_range(struct marker *begin, | |||
| 601 | 601 | ||
| 602 | /* | 602 | /* |
| 603 | * Update probes, removing the faulty probes. | 603 | * Update probes, removing the faulty probes. |
| 604 | * Issues a synchronize_sched() when no reference to the module passed | ||
| 605 | * as parameter is found in the probes so the probe module can be | ||
| 606 | * safely unloaded from now on. | ||
| 607 | * | 604 | * |
| 608 | * Internal callback only changed before the first probe is connected to it. | 605 | * Internal callback only changed before the first probe is connected to it. |
| 609 | * Single probe private data can only be changed on 0 -> 1 and 2 -> 1 | 606 | * Single probe private data can only be changed on 0 -> 1 and 2 -> 1 |
diff --git a/kernel/printk.c b/kernel/printk.c index 9adc2a473e6e..c46a20a19a15 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
| @@ -616,6 +616,40 @@ asmlinkage int printk(const char *fmt, ...) | |||
| 616 | /* cpu currently holding logbuf_lock */ | 616 | /* cpu currently holding logbuf_lock */ |
| 617 | static volatile unsigned int printk_cpu = UINT_MAX; | 617 | static volatile unsigned int printk_cpu = UINT_MAX; |
| 618 | 618 | ||
| 619 | /* | ||
| 620 | * Can we actually use the console at this time on this cpu? | ||
| 621 | * | ||
| 622 | * Console drivers may assume that per-cpu resources have | ||
| 623 | * been allocated. So unless they're explicitly marked as | ||
| 624 | * being able to cope (CON_ANYTIME) don't call them until | ||
| 625 | * this CPU is officially up. | ||
| 626 | */ | ||
| 627 | static inline int can_use_console(unsigned int cpu) | ||
| 628 | { | ||
| 629 | return cpu_online(cpu) || have_callable_console(); | ||
| 630 | } | ||
| 631 | |||
| 632 | /* | ||
| 633 | * Try to get console ownership to actually show the kernel | ||
| 634 | * messages from a 'printk'. Return true (and with the | ||
| 635 | * console_semaphore held, and 'console_locked' set) if it | ||
| 636 | * is successful, false otherwise. | ||
| 637 | * | ||
| 638 | * This gets called with the 'logbuf_lock' spinlock held and | ||
| 639 | * interrupts disabled. It should return with 'lockbuf_lock' | ||
| 640 | * released but interrupts still disabled. | ||
| 641 | */ | ||
| 642 | static int acquire_console_semaphore_for_printk(unsigned int cpu) | ||
| 643 | { | ||
| 644 | int retval = 0; | ||
| 645 | |||
| 646 | if (can_use_console(cpu)) | ||
| 647 | retval = !try_acquire_console_sem(); | ||
| 648 | printk_cpu = UINT_MAX; | ||
| 649 | spin_unlock(&logbuf_lock); | ||
| 650 | return retval; | ||
| 651 | } | ||
| 652 | |||
| 619 | const char printk_recursion_bug_msg [] = | 653 | const char printk_recursion_bug_msg [] = |
| 620 | KERN_CRIT "BUG: recent printk recursion!\n"; | 654 | KERN_CRIT "BUG: recent printk recursion!\n"; |
| 621 | static int printk_recursion_bug; | 655 | static int printk_recursion_bug; |
| @@ -725,43 +759,22 @@ asmlinkage int vprintk(const char *fmt, va_list args) | |||
| 725 | log_level_unknown = 1; | 759 | log_level_unknown = 1; |
| 726 | } | 760 | } |
| 727 | 761 | ||
| 728 | if (!down_trylock(&console_sem)) { | 762 | /* |
| 729 | /* | 763 | * Try to acquire and then immediately release the |
| 730 | * We own the drivers. We can drop the spinlock and | 764 | * console semaphore. The release will do all the |
| 731 | * let release_console_sem() print the text, maybe ... | 765 | * actual magic (print out buffers, wake up klogd, |
| 732 | */ | 766 | * etc). |
| 733 | console_locked = 1; | 767 | * |
| 734 | printk_cpu = UINT_MAX; | 768 | * The acquire_console_semaphore_for_printk() function |
| 735 | spin_unlock(&logbuf_lock); | 769 | * will release 'logbuf_lock' regardless of whether it |
| 770 | * actually gets the semaphore or not. | ||
| 771 | */ | ||
| 772 | if (acquire_console_semaphore_for_printk(this_cpu)) | ||
| 773 | release_console_sem(); | ||
| 736 | 774 | ||
| 737 | /* | 775 | lockdep_on(); |
| 738 | * Console drivers may assume that per-cpu resources have | ||
| 739 | * been allocated. So unless they're explicitly marked as | ||
| 740 | * being able to cope (CON_ANYTIME) don't call them until | ||
| 741 | * this CPU is officially up. | ||
| 742 | */ | ||
| 743 | if (cpu_online(smp_processor_id()) || have_callable_console()) { | ||
| 744 | console_may_schedule = 0; | ||
| 745 | release_console_sem(); | ||
| 746 | } else { | ||
| 747 | /* Release by hand to avoid flushing the buffer. */ | ||
| 748 | console_locked = 0; | ||
| 749 | up(&console_sem); | ||
| 750 | } | ||
| 751 | lockdep_on(); | ||
| 752 | raw_local_irq_restore(flags); | ||
| 753 | } else { | ||
| 754 | /* | ||
| 755 | * Someone else owns the drivers. We drop the spinlock, which | ||
| 756 | * allows the semaphore holder to proceed and to call the | ||
| 757 | * console drivers with the output which we just produced. | ||
| 758 | */ | ||
| 759 | printk_cpu = UINT_MAX; | ||
| 760 | spin_unlock(&logbuf_lock); | ||
| 761 | lockdep_on(); | ||
| 762 | out_restore_irqs: | 776 | out_restore_irqs: |
| 763 | raw_local_irq_restore(flags); | 777 | raw_local_irq_restore(flags); |
| 764 | } | ||
| 765 | 778 | ||
| 766 | preempt_enable(); | 779 | preempt_enable(); |
| 767 | return printed_len; | 780 | return printed_len; |
diff --git a/mm/bootmem.c b/mm/bootmem.c index f6ff4337b424..2ccea700968f 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
| @@ -125,6 +125,7 @@ static int __init reserve_bootmem_core(bootmem_data_t *bdata, | |||
| 125 | BUG_ON(!size); | 125 | BUG_ON(!size); |
| 126 | BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn); | 126 | BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn); |
| 127 | BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn); | 127 | BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn); |
| 128 | BUG_ON(addr < bdata->node_boot_start); | ||
| 128 | 129 | ||
| 129 | sidx = PFN_DOWN(addr - bdata->node_boot_start); | 130 | sidx = PFN_DOWN(addr - bdata->node_boot_start); |
| 130 | eidx = PFN_UP(addr + size - bdata->node_boot_start); | 131 | eidx = PFN_UP(addr + size - bdata->node_boot_start); |
| @@ -156,21 +157,31 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, | |||
| 156 | unsigned long sidx, eidx; | 157 | unsigned long sidx, eidx; |
| 157 | unsigned long i; | 158 | unsigned long i; |
| 158 | 159 | ||
| 160 | BUG_ON(!size); | ||
| 161 | |||
| 162 | /* out range */ | ||
| 163 | if (addr + size < bdata->node_boot_start || | ||
| 164 | PFN_DOWN(addr) > bdata->node_low_pfn) | ||
| 165 | return; | ||
| 159 | /* | 166 | /* |
| 160 | * round down end of usable mem, partially free pages are | 167 | * round down end of usable mem, partially free pages are |
| 161 | * considered reserved. | 168 | * considered reserved. |
| 162 | */ | 169 | */ |
| 163 | BUG_ON(!size); | ||
| 164 | BUG_ON(PFN_DOWN(addr + size) > bdata->node_low_pfn); | ||
| 165 | 170 | ||
| 166 | if (addr < bdata->last_success) | 171 | if (addr >= bdata->node_boot_start && addr < bdata->last_success) |
| 167 | bdata->last_success = addr; | 172 | bdata->last_success = addr; |
| 168 | 173 | ||
| 169 | /* | 174 | /* |
| 170 | * Round up the beginning of the address. | 175 | * Round up to index to the range. |
| 171 | */ | 176 | */ |
| 172 | sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start); | 177 | if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start)) |
| 178 | sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start); | ||
| 179 | else | ||
| 180 | sidx = 0; | ||
| 181 | |||
| 173 | eidx = PFN_DOWN(addr + size - bdata->node_boot_start); | 182 | eidx = PFN_DOWN(addr + size - bdata->node_boot_start); |
| 183 | if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start)) | ||
| 184 | eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start); | ||
| 174 | 185 | ||
| 175 | for (i = sidx; i < eidx; i++) { | 186 | for (i = sidx; i < eidx; i++) { |
| 176 | if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map))) | 187 | if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map))) |
| @@ -421,7 +432,9 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size, | |||
| 421 | 432 | ||
| 422 | void __init free_bootmem(unsigned long addr, unsigned long size) | 433 | void __init free_bootmem(unsigned long addr, unsigned long size) |
| 423 | { | 434 | { |
| 424 | free_bootmem_core(NODE_DATA(0)->bdata, addr, size); | 435 | bootmem_data_t *bdata; |
| 436 | list_for_each_entry(bdata, &bdata_list, list) | ||
| 437 | free_bootmem_core(bdata, addr, size); | ||
| 425 | } | 438 | } |
| 426 | 439 | ||
| 427 | unsigned long __init free_all_bootmem(void) | 440 | unsigned long __init free_all_bootmem(void) |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 45711585684e..4046434046e6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -70,13 +70,6 @@ struct scan_control { | |||
| 70 | 70 | ||
| 71 | int order; | 71 | int order; |
| 72 | 72 | ||
| 73 | /* | ||
| 74 | * Pages that have (or should have) IO pending. If we run into | ||
| 75 | * a lot of these, we're better off waiting a little for IO to | ||
| 76 | * finish rather than scanning more pages in the VM. | ||
| 77 | */ | ||
| 78 | int nr_io_pages; | ||
| 79 | |||
| 80 | /* Which cgroup do we reclaim from */ | 73 | /* Which cgroup do we reclaim from */ |
| 81 | struct mem_cgroup *mem_cgroup; | 74 | struct mem_cgroup *mem_cgroup; |
| 82 | 75 | ||
| @@ -512,10 +505,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 512 | */ | 505 | */ |
| 513 | if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs) | 506 | if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs) |
| 514 | wait_on_page_writeback(page); | 507 | wait_on_page_writeback(page); |
| 515 | else { | 508 | else |
| 516 | sc->nr_io_pages++; | ||
| 517 | goto keep_locked; | 509 | goto keep_locked; |
| 518 | } | ||
| 519 | } | 510 | } |
| 520 | 511 | ||
| 521 | referenced = page_referenced(page, 1, sc->mem_cgroup); | 512 | referenced = page_referenced(page, 1, sc->mem_cgroup); |
| @@ -554,10 +545,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 554 | if (PageDirty(page)) { | 545 | if (PageDirty(page)) { |
| 555 | if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced) | 546 | if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced) |
| 556 | goto keep_locked; | 547 | goto keep_locked; |
| 557 | if (!may_enter_fs) { | 548 | if (!may_enter_fs) |
| 558 | sc->nr_io_pages++; | ||
| 559 | goto keep_locked; | 549 | goto keep_locked; |
| 560 | } | ||
| 561 | if (!sc->may_writepage) | 550 | if (!sc->may_writepage) |
| 562 | goto keep_locked; | 551 | goto keep_locked; |
| 563 | 552 | ||
| @@ -568,10 +557,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 568 | case PAGE_ACTIVATE: | 557 | case PAGE_ACTIVATE: |
| 569 | goto activate_locked; | 558 | goto activate_locked; |
| 570 | case PAGE_SUCCESS: | 559 | case PAGE_SUCCESS: |
| 571 | if (PageWriteback(page) || PageDirty(page)) { | 560 | if (PageWriteback(page) || PageDirty(page)) |
| 572 | sc->nr_io_pages++; | ||
| 573 | goto keep; | 561 | goto keep; |
| 574 | } | ||
| 575 | /* | 562 | /* |
| 576 | * A synchronous write - probably a ramdisk. Go | 563 | * A synchronous write - probably a ramdisk. Go |
| 577 | * ahead and try to reclaim the page. | 564 | * ahead and try to reclaim the page. |
| @@ -1344,7 +1331,6 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask, | |||
| 1344 | 1331 | ||
| 1345 | for (priority = DEF_PRIORITY; priority >= 0; priority--) { | 1332 | for (priority = DEF_PRIORITY; priority >= 0; priority--) { |
| 1346 | sc->nr_scanned = 0; | 1333 | sc->nr_scanned = 0; |
| 1347 | sc->nr_io_pages = 0; | ||
| 1348 | if (!priority) | 1334 | if (!priority) |
| 1349 | disable_swap_token(); | 1335 | disable_swap_token(); |
| 1350 | nr_reclaimed += shrink_zones(priority, zones, sc); | 1336 | nr_reclaimed += shrink_zones(priority, zones, sc); |
| @@ -1379,8 +1365,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask, | |||
| 1379 | } | 1365 | } |
| 1380 | 1366 | ||
| 1381 | /* Take a nap, wait for some writeback to complete */ | 1367 | /* Take a nap, wait for some writeback to complete */ |
| 1382 | if (sc->nr_scanned && priority < DEF_PRIORITY - 2 && | 1368 | if (sc->nr_scanned && priority < DEF_PRIORITY - 2) |
| 1383 | sc->nr_io_pages > sc->swap_cluster_max) | ||
| 1384 | congestion_wait(WRITE, HZ/10); | 1369 | congestion_wait(WRITE, HZ/10); |
| 1385 | } | 1370 | } |
| 1386 | /* top priority shrink_caches still had more to do? don't OOM, then */ | 1371 | /* top priority shrink_caches still had more to do? don't OOM, then */ |
| @@ -1514,7 +1499,6 @@ loop_again: | |||
| 1514 | if (!priority) | 1499 | if (!priority) |
| 1515 | disable_swap_token(); | 1500 | disable_swap_token(); |
| 1516 | 1501 | ||
| 1517 | sc.nr_io_pages = 0; | ||
| 1518 | all_zones_ok = 1; | 1502 | all_zones_ok = 1; |
| 1519 | 1503 | ||
| 1520 | /* | 1504 | /* |
| @@ -1607,8 +1591,7 @@ loop_again: | |||
| 1607 | * OK, kswapd is getting into trouble. Take a nap, then take | 1591 | * OK, kswapd is getting into trouble. Take a nap, then take |
| 1608 | * another pass across the zones. | 1592 | * another pass across the zones. |
| 1609 | */ | 1593 | */ |
| 1610 | if (total_scanned && priority < DEF_PRIORITY - 2 && | 1594 | if (total_scanned && priority < DEF_PRIORITY - 2) |
| 1611 | sc.nr_io_pages > sc.swap_cluster_max) | ||
| 1612 | congestion_wait(WRITE, HZ/10); | 1595 | congestion_wait(WRITE, HZ/10); |
| 1613 | 1596 | ||
| 1614 | /* | 1597 | /* |
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index cfc004e04417..2d20640854b7 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost | |||
| @@ -58,6 +58,9 @@ modules := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o))) | |||
| 58 | # Stop after building .o files if NOFINAL is set. Makes compile tests quicker | 58 | # Stop after building .o files if NOFINAL is set. Makes compile tests quicker |
| 59 | _modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules)) | 59 | _modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules)) |
| 60 | 60 | ||
| 61 | ifneq ($(KBUILD_BUILDHOST),$(ARCH)) | ||
| 62 | cross_build := 1 | ||
| 63 | endif | ||
| 61 | 64 | ||
| 62 | # Step 2), invoke modpost | 65 | # Step 2), invoke modpost |
| 63 | # Includes step 3,4 | 66 | # Includes step 3,4 |
| @@ -70,7 +73,8 @@ modpost = scripts/mod/modpost \ | |||
| 70 | $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \ | 73 | $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \ |
| 71 | $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \ | 74 | $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \ |
| 72 | $(if $(CONFIG_MARKERS),-M $(markersfile)) \ | 75 | $(if $(CONFIG_MARKERS),-M $(markersfile)) \ |
| 73 | $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) | 76 | $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \ |
| 77 | $(if $(cross_build),-c) | ||
| 74 | 78 | ||
| 75 | quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules | 79 | quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules |
| 76 | cmd_modpost = $(modpost) -s | 80 | cmd_modpost = $(modpost) -s |
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 9ddf944cce29..348d8687b7c9 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
| @@ -51,11 +51,13 @@ do { \ | |||
| 51 | sprintf(str + strlen(str), "*"); \ | 51 | sprintf(str + strlen(str), "*"); \ |
| 52 | } while(0) | 52 | } while(0) |
| 53 | 53 | ||
| 54 | unsigned int cross_build = 0; | ||
| 54 | /** | 55 | /** |
| 55 | * Check that sizeof(device_id type) are consistent with size of section | 56 | * Check that sizeof(device_id type) are consistent with size of section |
| 56 | * in .o file. If in-consistent then userspace and kernel does not agree | 57 | * in .o file. If in-consistent then userspace and kernel does not agree |
| 57 | * on actual size which is a bug. | 58 | * on actual size which is a bug. |
| 58 | * Also verify that the final entry in the table is all zeros. | 59 | * Also verify that the final entry in the table is all zeros. |
| 60 | * Ignore both checks if build host differ from target host and size differs. | ||
| 59 | **/ | 61 | **/ |
| 60 | static void device_id_check(const char *modname, const char *device_id, | 62 | static void device_id_check(const char *modname, const char *device_id, |
| 61 | unsigned long size, unsigned long id_size, | 63 | unsigned long size, unsigned long id_size, |
| @@ -64,6 +66,8 @@ static void device_id_check(const char *modname, const char *device_id, | |||
| 64 | int i; | 66 | int i; |
| 65 | 67 | ||
| 66 | if (size % id_size || size < id_size) { | 68 | if (size % id_size || size < id_size) { |
| 69 | if (cross_build != 0) | ||
| 70 | return; | ||
| 67 | fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo " | 71 | fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo " |
| 68 | "of the size of section __mod_%s_device_table=%lu.\n" | 72 | "of the size of section __mod_%s_device_table=%lu.\n" |
| 69 | "Fix definition of struct %s_device_id " | 73 | "Fix definition of struct %s_device_id " |
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 695b5d657cf5..110cf243fa4e 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
| @@ -2026,7 +2026,7 @@ int main(int argc, char **argv) | |||
| 2026 | int opt; | 2026 | int opt; |
| 2027 | int err; | 2027 | int err; |
| 2028 | 2028 | ||
| 2029 | while ((opt = getopt(argc, argv, "i:I:msSo:awM:K:")) != -1) { | 2029 | while ((opt = getopt(argc, argv, "i:I:cmsSo:awM:K:")) != -1) { |
| 2030 | switch (opt) { | 2030 | switch (opt) { |
| 2031 | case 'i': | 2031 | case 'i': |
| 2032 | kernel_read = optarg; | 2032 | kernel_read = optarg; |
| @@ -2035,6 +2035,9 @@ int main(int argc, char **argv) | |||
| 2035 | module_read = optarg; | 2035 | module_read = optarg; |
| 2036 | external_module = 1; | 2036 | external_module = 1; |
| 2037 | break; | 2037 | break; |
| 2038 | case 'c': | ||
| 2039 | cross_build = 1; | ||
| 2040 | break; | ||
| 2038 | case 'm': | 2041 | case 'm': |
| 2039 | modversions = 1; | 2042 | modversions = 1; |
| 2040 | break; | 2043 | break; |
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h index 565c5872407e..09f58e33d227 100644 --- a/scripts/mod/modpost.h +++ b/scripts/mod/modpost.h | |||
| @@ -135,6 +135,7 @@ struct elf_info { | |||
| 135 | }; | 135 | }; |
| 136 | 136 | ||
| 137 | /* file2alias.c */ | 137 | /* file2alias.c */ |
| 138 | extern unsigned int cross_build; | ||
| 138 | void handle_moddevtable(struct module *mod, struct elf_info *info, | 139 | void handle_moddevtable(struct module *mod, struct elf_info *info, |
| 139 | Elf_Sym *sym, const char *symname); | 140 | Elf_Sym *sym, const char *symname); |
| 140 | void add_moddevtable(struct buffer *buf, struct module *mod); | 141 | void add_moddevtable(struct buffer *buf, struct module *mod); |
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index afe7c9b0732a..cfae8afcc262 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c | |||
| @@ -74,11 +74,6 @@ struct smk_list_entry *smack_list; | |||
| 74 | #define SEQ_READ_FINISHED 1 | 74 | #define SEQ_READ_FINISHED 1 |
| 75 | 75 | ||
| 76 | /* | 76 | /* |
| 77 | * Disable concurrent writing open() operations | ||
| 78 | */ | ||
| 79 | static struct semaphore smack_write_sem; | ||
| 80 | |||
| 81 | /* | ||
| 82 | * Values for parsing cipso rules | 77 | * Values for parsing cipso rules |
| 83 | * SMK_DIGITLEN: Length of a digit field in a rule. | 78 | * SMK_DIGITLEN: Length of a digit field in a rule. |
| 84 | * SMK_CIPSOMIN: Minimum possible cipso rule length. | 79 | * SMK_CIPSOMIN: Minimum possible cipso rule length. |
| @@ -168,32 +163,7 @@ static struct seq_operations load_seq_ops = { | |||
| 168 | */ | 163 | */ |
| 169 | static int smk_open_load(struct inode *inode, struct file *file) | 164 | static int smk_open_load(struct inode *inode, struct file *file) |
| 170 | { | 165 | { |
| 171 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) | 166 | return seq_open(file, &load_seq_ops); |
| 172 | return seq_open(file, &load_seq_ops); | ||
| 173 | |||
| 174 | if (down_interruptible(&smack_write_sem)) | ||
| 175 | return -ERESTARTSYS; | ||
| 176 | |||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 180 | /** | ||
| 181 | * smk_release_load - release() for /smack/load | ||
| 182 | * @inode: inode structure representing file | ||
| 183 | * @file: "load" file pointer | ||
| 184 | * | ||
| 185 | * For a reading session, use the seq_file release | ||
| 186 | * implementation. | ||
| 187 | * Otherwise, we are at the end of a writing session so | ||
| 188 | * clean everything up. | ||
| 189 | */ | ||
| 190 | static int smk_release_load(struct inode *inode, struct file *file) | ||
| 191 | { | ||
| 192 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) | ||
| 193 | return seq_release(inode, file); | ||
| 194 | |||
| 195 | up(&smack_write_sem); | ||
| 196 | return 0; | ||
| 197 | } | 167 | } |
| 198 | 168 | ||
| 199 | /** | 169 | /** |
| @@ -341,7 +311,7 @@ static const struct file_operations smk_load_ops = { | |||
| 341 | .read = seq_read, | 311 | .read = seq_read, |
| 342 | .llseek = seq_lseek, | 312 | .llseek = seq_lseek, |
| 343 | .write = smk_write_load, | 313 | .write = smk_write_load, |
| 344 | .release = smk_release_load, | 314 | .release = seq_release, |
| 345 | }; | 315 | }; |
| 346 | 316 | ||
| 347 | /** | 317 | /** |
| @@ -1011,7 +981,6 @@ static int __init init_smk_fs(void) | |||
| 1011 | } | 981 | } |
| 1012 | } | 982 | } |
| 1013 | 983 | ||
| 1014 | sema_init(&smack_write_sem, 1); | ||
| 1015 | smk_cipso_doi(); | 984 | smk_cipso_doi(); |
| 1016 | smk_unlbl_ambient(NULL); | 985 | smk_unlbl_ambient(NULL); |
| 1017 | 986 | ||
