aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/ia64/kernel/setup.c2
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/x86/boot/Makefile6
-rw-r--r--drivers/firewire/fw-cdev.c52
-rw-r--r--drivers/firewire/fw-device.h5
-rw-r--r--drivers/firewire/fw-ohci.c41
-rw-r--r--drivers/firewire/fw-ohci.h2
-rw-r--r--drivers/firewire/fw-sbp2.c647
-rw-r--r--drivers/firewire/fw-topology.c10
-rw-r--r--drivers/firewire/fw-topology.h6
-rw-r--r--drivers/firewire/fw-transaction.c12
-rw-r--r--drivers/ieee1394/csr1212.c57
-rw-r--r--drivers/ieee1394/csr1212.h6
-rw-r--r--drivers/ieee1394/eth1394.c16
-rw-r--r--drivers/ieee1394/ieee1394_core.c2
-rw-r--r--drivers/ieee1394/nodemgr.c22
-rw-r--r--drivers/ieee1394/pcilynx.c29
-rw-r--r--drivers/ieee1394/sbp2.c15
-rw-r--r--drivers/video/cirrusfb.c5
-rw-r--r--fs/open.c4
-rw-r--r--include/asm-powerpc/dma-mapping.h143
-rw-r--r--include/linux/firewire-cdev.h15
-rw-r--r--include/linux/security.h18
-rw-r--r--security/dummy.c6
-rw-r--r--security/selinux/avc.c5
-rw-r--r--security/selinux/hooks.c53
-rw-r--r--security/selinux/include/avc.h2
-rw-r--r--security/selinux/include/objsec.h2
-rw-r--r--security/selinux/include/security.h2
-rw-r--r--security/selinux/selinuxfs.c26
-rw-r--r--security/selinux/ss/avtab.c91
-rw-r--r--security/selinux/ss/avtab.h16
-rw-r--r--security/selinux/ss/conditional.c4
-rw-r--r--security/selinux/ss/ebitmap.c282
-rw-r--r--security/selinux/ss/ebitmap.h89
-rw-r--r--security/selinux/ss/mls.c156
-rw-r--r--security/selinux/ss/policydb.c11
-rw-r--r--security/selinux/ss/policydb.h8
-rw-r--r--security/selinux/ss/services.c91
39 files changed, 1295 insertions, 667 deletions
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 777c8d8bd5e..c5cfcfa4c87 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -869,6 +869,7 @@ cpu_init (void)
869 void *cpu_data; 869 void *cpu_data;
870 870
871 cpu_data = per_cpu_init(); 871 cpu_data = per_cpu_init();
872#ifdef CONFIG_SMP
872 /* 873 /*
873 * insert boot cpu into sibling and core mapes 874 * insert boot cpu into sibling and core mapes
874 * (must be done after per_cpu area is setup) 875 * (must be done after per_cpu area is setup)
@@ -877,6 +878,7 @@ cpu_init (void)
877 cpu_set(0, per_cpu(cpu_sibling_map, 0)); 878 cpu_set(0, per_cpu(cpu_sibling_map, 0));
878 cpu_set(0, cpu_core_map[0]); 879 cpu_set(0, cpu_core_map[0]);
879 } 880 }
881#endif
880 882
881 /* 883 /*
882 * We set ar.k3 so that assembly code in MCA handler can compute 884 * We set ar.k3 so that assembly code in MCA handler can compute
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 29ed495444f..702d884a338 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -243,7 +243,8 @@ int __meminit vmemmap_populate(struct page *start_page,
243 "physical %p.\n", start, p, __pa(p)); 243 "physical %p.\n", start, p, __pa(p));
244 244
245 mapped = htab_bolt_mapping(start, start + page_size, 245 mapped = htab_bolt_mapping(start, start + page_size,
246 __pa(p), mode_rw, mmu_linear_psize); 246 __pa(p), mode_rw, mmu_linear_psize,
247 mmu_kernel_ssize);
247 BUG_ON(mapped < 0); 248 BUG_ON(mapped < 0);
248 } 249 }
249 250
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index d6ed8e5e1cc..e8756e5f6b2 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -64,10 +64,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
64KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ 64KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
65 65
66$(obj)/zImage: IMAGE_OFFSET := 0x1000 66$(obj)/zImage: IMAGE_OFFSET := 0x1000
67$(obj)/zImage: EXTRA_AFLAGS := $(SVGA_MODE) $(RAMDISK) 67$(obj)/zImage: asflags-y := $(SVGA_MODE) $(RAMDISK)
68$(obj)/bzImage: IMAGE_OFFSET := 0x100000 68$(obj)/bzImage: IMAGE_OFFSET := 0x100000
69$(obj)/bzImage: EXTRA_CFLAGS := -D__BIG_KERNEL__ 69$(obj)/bzImage: ccflags-y := -D__BIG_KERNEL__
70$(obj)/bzImage: EXTRA_AFLAGS := $(SVGA_MODE) $(RAMDISK) -D__BIG_KERNEL__ 70$(obj)/bzImage: asflags-y := $(SVGA_MODE) $(RAMDISK) -D__BIG_KERNEL__
71$(obj)/bzImage: BUILDFLAGS := -b 71$(obj)/bzImage: BUILDFLAGS := -b
72 72
73quiet_cmd_image = BUILD $@ 73quiet_cmd_image = BUILD $@
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 06471302200..60f1a8924a9 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -25,11 +25,14 @@
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/poll.h> 27#include <linux/poll.h>
28#include <linux/preempt.h>
29#include <linux/time.h>
28#include <linux/delay.h> 30#include <linux/delay.h>
29#include <linux/mm.h> 31#include <linux/mm.h>
30#include <linux/idr.h> 32#include <linux/idr.h>
31#include <linux/compat.h> 33#include <linux/compat.h>
32#include <linux/firewire-cdev.h> 34#include <linux/firewire-cdev.h>
35#include <asm/system.h>
33#include <asm/uaccess.h> 36#include <asm/uaccess.h>
34#include "fw-transaction.h" 37#include "fw-transaction.h"
35#include "fw-topology.h" 38#include "fw-topology.h"
@@ -140,11 +143,10 @@ static void queue_event(struct client *client, struct event *event,
140 event->v[1].size = size1; 143 event->v[1].size = size1;
141 144
142 spin_lock_irqsave(&client->lock, flags); 145 spin_lock_irqsave(&client->lock, flags);
143
144 list_add_tail(&event->link, &client->event_list); 146 list_add_tail(&event->link, &client->event_list);
145 wake_up_interruptible(&client->wait);
146
147 spin_unlock_irqrestore(&client->lock, flags); 147 spin_unlock_irqrestore(&client->lock, flags);
148
149 wake_up_interruptible(&client->wait);
148} 150}
149 151
150static int 152static int
@@ -621,20 +623,19 @@ iso_callback(struct fw_iso_context *context, u32 cycle,
621 size_t header_length, void *header, void *data) 623 size_t header_length, void *header, void *data)
622{ 624{
623 struct client *client = data; 625 struct client *client = data;
624 struct iso_interrupt *interrupt; 626 struct iso_interrupt *irq;
625 627
626 interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC); 628 irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
627 if (interrupt == NULL) 629 if (irq == NULL)
628 return; 630 return;
629 631
630 interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; 632 irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
631 interrupt->interrupt.closure = client->iso_closure; 633 irq->interrupt.closure = client->iso_closure;
632 interrupt->interrupt.cycle = cycle; 634 irq->interrupt.cycle = cycle;
633 interrupt->interrupt.header_length = header_length; 635 irq->interrupt.header_length = header_length;
634 memcpy(interrupt->interrupt.header, header, header_length); 636 memcpy(irq->interrupt.header, header, header_length);
635 queue_event(client, &interrupt->event, 637 queue_event(client, &irq->event, &irq->interrupt,
636 &interrupt->interrupt, 638 sizeof(irq->interrupt) + header_length, NULL, 0);
637 sizeof(interrupt->interrupt) + header_length, NULL, 0);
638} 639}
639 640
640static int ioctl_create_iso_context(struct client *client, void *buffer) 641static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -812,6 +813,28 @@ static int ioctl_stop_iso(struct client *client, void *buffer)
812 return fw_iso_context_stop(client->iso_context); 813 return fw_iso_context_stop(client->iso_context);
813} 814}
814 815
816static int ioctl_get_cycle_timer(struct client *client, void *buffer)
817{
818 struct fw_cdev_get_cycle_timer *request = buffer;
819 struct fw_card *card = client->device->card;
820 unsigned long long bus_time;
821 struct timeval tv;
822 unsigned long flags;
823
824 preempt_disable();
825 local_irq_save(flags);
826
827 bus_time = card->driver->get_bus_time(card);
828 do_gettimeofday(&tv);
829
830 local_irq_restore(flags);
831 preempt_enable();
832
833 request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
834 request->cycle_timer = bus_time & 0xffffffff;
835 return 0;
836}
837
815static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { 838static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
816 ioctl_get_info, 839 ioctl_get_info,
817 ioctl_send_request, 840 ioctl_send_request,
@@ -825,6 +848,7 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
825 ioctl_queue_iso, 848 ioctl_queue_iso,
826 ioctl_start_iso, 849 ioctl_start_iso,
827 ioctl_stop_iso, 850 ioctl_stop_iso,
851 ioctl_get_cycle_timer,
828}; 852};
829 853
830static int 854static int
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index d13e6a69707..894d4a92a18 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -102,11 +102,6 @@ fw_unit(struct device *dev)
102#define CSR_INSTANCE 0x18 102#define CSR_INSTANCE 0x18
103#define CSR_DIRECTORY_ID 0x20 103#define CSR_DIRECTORY_ID 0x20
104 104
105#define SBP2_COMMAND_SET_SPECIFIER 0x38
106#define SBP2_COMMAND_SET 0x39
107#define SBP2_COMMAND_SET_REVISION 0x3b
108#define SBP2_FIRMWARE_REVISION 0x3c
109
110struct fw_csr_iterator { 105struct fw_csr_iterator {
111 u32 *p; 106 u32 *p;
112 u32 *end; 107 u32 *end;
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index e14c1ca7813..2f307c4df33 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -18,21 +18,23 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/kernel.h> 21#include <linux/compiler.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/pci.h>
26#include <linux/delay.h> 22#include <linux/delay.h>
27#include <linux/poll.h>
28#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
24#include <linux/gfp.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
29#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/spinlock.h>
30 32
31#include <asm/uaccess.h> 33#include <asm/page.h>
32#include <asm/semaphore.h> 34#include <asm/system.h>
33 35
34#include "fw-transaction.h"
35#include "fw-ohci.h" 36#include "fw-ohci.h"
37#include "fw-transaction.h"
36 38
37#define DESCRIPTOR_OUTPUT_MORE 0 39#define DESCRIPTOR_OUTPUT_MORE 0
38#define DESCRIPTOR_OUTPUT_LAST (1 << 12) 40#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
@@ -678,6 +680,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
678 680
679 /* FIXME: Document how the locking works. */ 681 /* FIXME: Document how the locking works. */
680 if (ohci->generation != packet->generation) { 682 if (ohci->generation != packet->generation) {
683 if (packet->payload_length > 0)
684 dma_unmap_single(ohci->card.device, payload_bus,
685 packet->payload_length, DMA_TO_DEVICE);
681 packet->ack = RCODE_GENERATION; 686 packet->ack = RCODE_GENERATION;
682 return -1; 687 return -1;
683 } 688 }
@@ -912,10 +917,15 @@ static void bus_reset_tasklet(unsigned long data)
912 917
913 reg = reg_read(ohci, OHCI1394_NodeID); 918 reg = reg_read(ohci, OHCI1394_NodeID);
914 if (!(reg & OHCI1394_NodeID_idValid)) { 919 if (!(reg & OHCI1394_NodeID_idValid)) {
915 fw_error("node ID not valid, new bus reset in progress\n"); 920 fw_notify("node ID not valid, new bus reset in progress\n");
916 return; 921 return;
917 } 922 }
918 ohci->node_id = reg & 0xffff; 923 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
924 fw_notify("malconfigured bus\n");
925 return;
926 }
927 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
928 OHCI1394_NodeID_nodeNumber);
919 929
920 /* 930 /*
921 * The count in the SelfIDCount register is the number of 931 * The count in the SelfIDCount register is the number of
@@ -926,12 +936,14 @@ static void bus_reset_tasklet(unsigned long data)
926 936
927 self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; 937 self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
928 generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; 938 generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
939 rmb();
929 940
930 for (i = 1, j = 0; j < self_id_count; i += 2, j++) { 941 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
931 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) 942 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
932 fw_error("inconsistent self IDs\n"); 943 fw_error("inconsistent self IDs\n");
933 ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]); 944 ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
934 } 945 }
946 rmb();
935 947
936 /* 948 /*
937 * Check the consistency of the self IDs we just read. The 949 * Check the consistency of the self IDs we just read. The
@@ -1046,6 +1058,9 @@ static irqreturn_t irq_handler(int irq, void *data)
1046 iso_event &= ~(1 << i); 1058 iso_event &= ~(1 << i);
1047 } 1059 }
1048 1060
1061 if (unlikely(event & OHCI1394_postedWriteErr))
1062 fw_error("PCI posted write error\n");
1063
1049 if (event & OHCI1394_cycle64Seconds) { 1064 if (event & OHCI1394_cycle64Seconds) {
1050 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1065 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1051 if ((cycle_time & 0x80000000) == 0) 1066 if ((cycle_time & 0x80000000) == 0)
@@ -1119,8 +1134,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1119 OHCI1394_RQPkt | OHCI1394_RSPkt | 1134 OHCI1394_RQPkt | OHCI1394_RSPkt |
1120 OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1135 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1121 OHCI1394_isochRx | OHCI1394_isochTx | 1136 OHCI1394_isochRx | OHCI1394_isochTx |
1122 OHCI1394_masterIntEnable | 1137 OHCI1394_postedWriteErr | OHCI1394_cycle64Seconds |
1123 OHCI1394_cycle64Seconds); 1138 OHCI1394_masterIntEnable);
1124 1139
1125 /* Activate link_on bit and contender bit in our self ID packets.*/ 1140 /* Activate link_on bit and contender bit in our self ID packets.*/
1126 if (ohci_update_phy_reg(card, 4, 0, 1141 if (ohci_update_phy_reg(card, 4, 0,
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
index fa15706397d..dec4f04e6b2 100644
--- a/drivers/firewire/fw-ohci.h
+++ b/drivers/firewire/fw-ohci.h
@@ -59,6 +59,8 @@
59#define OHCI1394_LinkControl_cycleSource (1 << 22) 59#define OHCI1394_LinkControl_cycleSource (1 << 22)
60#define OHCI1394_NodeID 0x0E8 60#define OHCI1394_NodeID 0x0E8
61#define OHCI1394_NodeID_idValid 0x80000000 61#define OHCI1394_NodeID_idValid 0x80000000
62#define OHCI1394_NodeID_nodeNumber 0x0000003f
63#define OHCI1394_NodeID_busNumber 0x0000ffc0
62#define OHCI1394_PhyControl 0x0EC 64#define OHCI1394_PhyControl 0x0EC
63#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000) 65#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000)
64#define OHCI1394_PhyControl_ReadDone 0x80000000 66#define OHCI1394_PhyControl_ReadDone 0x80000000
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 238730f75db..5596df65c8e 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -37,11 +37,12 @@
37#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
38#include <linux/blkdev.h> 38#include <linux/blkdev.h>
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/stringify.h>
40#include <linux/timer.h> 41#include <linux/timer.h>
42#include <linux/workqueue.h>
41 43
42#include <scsi/scsi.h> 44#include <scsi/scsi.h>
43#include <scsi/scsi_cmnd.h> 45#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_dbg.h>
45#include <scsi/scsi_device.h> 46#include <scsi/scsi_device.h>
46#include <scsi/scsi_host.h> 47#include <scsi/scsi_host.h>
47 48
@@ -61,36 +62,94 @@ module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
61MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " 62MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
62 "(default = Y, use N for concurrent initiators)"); 63 "(default = Y, use N for concurrent initiators)");
63 64
65/*
66 * Flags for firmware oddities
67 *
68 * - 128kB max transfer
69 * Limit transfer size. Necessary for some old bridges.
70 *
71 * - 36 byte inquiry
72 * When scsi_mod probes the device, let the inquiry command look like that
73 * from MS Windows.
74 *
75 * - skip mode page 8
76 * Suppress sending of mode_sense for mode page 8 if the device pretends to
77 * support the SCSI Primary Block commands instead of Reduced Block Commands.
78 *
79 * - fix capacity
80 * Tell sd_mod to correct the last sector number reported by read_capacity.
81 * Avoids access beyond actual disk limits on devices with an off-by-one bug.
82 * Don't use this with devices which don't have this bug.
83 *
84 * - override internal blacklist
85 * Instead of adding to the built-in blacklist, use only the workarounds
86 * specified in the module load parameter.
87 * Useful if a blacklist entry interfered with a non-broken device.
88 */
89#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
90#define SBP2_WORKAROUND_INQUIRY_36 0x2
91#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
92#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
93#define SBP2_WORKAROUND_OVERRIDE 0x100
94
95static int sbp2_param_workarounds;
96module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
97MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
98 ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
99 ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
100 ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
101 ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
102 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
103 ", or a combination)");
104
64/* I don't know why the SCSI stack doesn't define something like this... */ 105/* I don't know why the SCSI stack doesn't define something like this... */
65typedef void (*scsi_done_fn_t)(struct scsi_cmnd *); 106typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
66 107
67static const char sbp2_driver_name[] = "sbp2"; 108static const char sbp2_driver_name[] = "sbp2";
68 109
69struct sbp2_device { 110/*
70 struct kref kref; 111 * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
71 struct fw_unit *unit; 112 * and one struct scsi_device per sbp2_logical_unit.
113 */
114struct sbp2_logical_unit {
115 struct sbp2_target *tgt;
116 struct list_head link;
117 struct scsi_device *sdev;
72 struct fw_address_handler address_handler; 118 struct fw_address_handler address_handler;
73 struct list_head orb_list; 119 struct list_head orb_list;
74 u64 management_agent_address; 120
75 u64 command_block_agent_address; 121 u64 command_block_agent_address;
76 u32 workarounds; 122 u16 lun;
77 int login_id; 123 int login_id;
78 124
79 /* 125 /*
80 * We cache these addresses and only update them once we've 126 * The generation is updated once we've logged in or reconnected
81 * logged in or reconnected to the sbp2 device. That way, any 127 * to the logical unit. Thus, I/O to the device will automatically
82 * IO to the device will automatically fail and get retried if 128 * fail and get retried if it happens in a window where the device
83 * it happens in a window where the device is not ready to 129 * is not ready, e.g. after a bus reset but before we reconnect.
84 * handle it (e.g. after a bus reset but before we reconnect).
85 */ 130 */
86 int node_id;
87 int address_high;
88 int generation; 131 int generation;
89
90 int retries; 132 int retries;
91 struct delayed_work work; 133 struct delayed_work work;
92}; 134};
93 135
136/*
137 * We create one struct sbp2_target per IEEE 1212 Unit Directory
138 * and one struct Scsi_Host per sbp2_target.
139 */
140struct sbp2_target {
141 struct kref kref;
142 struct fw_unit *unit;
143
144 u64 management_agent_address;
145 int directory_id;
146 int node_id;
147 int address_high;
148
149 unsigned workarounds;
150 struct list_head lu_list;
151};
152
94#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 153#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
95#define SBP2_MAX_SECTORS 255 /* Max sectors supported */ 154#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
96#define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */ 155#define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */
@@ -101,17 +160,9 @@ struct sbp2_device {
101#define SBP2_DIRECTION_FROM_MEDIA 0x1 160#define SBP2_DIRECTION_FROM_MEDIA 0x1
102 161
103/* Unit directory keys */ 162/* Unit directory keys */
104#define SBP2_COMMAND_SET_SPECIFIER 0x38 163#define SBP2_CSR_FIRMWARE_REVISION 0x3c
105#define SBP2_COMMAND_SET 0x39 164#define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14
106#define SBP2_COMMAND_SET_REVISION 0x3b 165#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4
107#define SBP2_FIRMWARE_REVISION 0x3c
108
109/* Flags for detected oddities and brokeness */
110#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
111#define SBP2_WORKAROUND_INQUIRY_36 0x2
112#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
113#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
114#define SBP2_WORKAROUND_OVERRIDE 0x100
115 166
116/* Management orb opcodes */ 167/* Management orb opcodes */
117#define SBP2_LOGIN_REQUEST 0x0 168#define SBP2_LOGIN_REQUEST 0x0
@@ -219,7 +270,7 @@ struct sbp2_command_orb {
219 } request; 270 } request;
220 struct scsi_cmnd *cmd; 271 struct scsi_cmnd *cmd;
221 scsi_done_fn_t done; 272 scsi_done_fn_t done;
222 struct fw_unit *unit; 273 struct sbp2_logical_unit *lu;
223 274
224 struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8))); 275 struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
225 dma_addr_t page_table_bus; 276 dma_addr_t page_table_bus;
@@ -295,7 +346,7 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
295 unsigned long long offset, 346 unsigned long long offset,
296 void *payload, size_t length, void *callback_data) 347 void *payload, size_t length, void *callback_data)
297{ 348{
298 struct sbp2_device *sd = callback_data; 349 struct sbp2_logical_unit *lu = callback_data;
299 struct sbp2_orb *orb; 350 struct sbp2_orb *orb;
300 struct sbp2_status status; 351 struct sbp2_status status;
301 size_t header_size; 352 size_t header_size;
@@ -319,7 +370,7 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
319 370
320 /* Lookup the orb corresponding to this status write. */ 371 /* Lookup the orb corresponding to this status write. */
321 spin_lock_irqsave(&card->lock, flags); 372 spin_lock_irqsave(&card->lock, flags);
322 list_for_each_entry(orb, &sd->orb_list, link) { 373 list_for_each_entry(orb, &lu->orb_list, link) {
323 if (STATUS_GET_ORB_HIGH(status) == 0 && 374 if (STATUS_GET_ORB_HIGH(status) == 0 &&
324 STATUS_GET_ORB_LOW(status) == orb->request_bus) { 375 STATUS_GET_ORB_LOW(status) == orb->request_bus) {
325 orb->rcode = RCODE_COMPLETE; 376 orb->rcode = RCODE_COMPLETE;
@@ -329,7 +380,7 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
329 } 380 }
330 spin_unlock_irqrestore(&card->lock, flags); 381 spin_unlock_irqrestore(&card->lock, flags);
331 382
332 if (&orb->link != &sd->orb_list) 383 if (&orb->link != &lu->orb_list)
333 orb->callback(orb, &status); 384 orb->callback(orb, &status);
334 else 385 else
335 fw_error("status write for unknown orb\n"); 386 fw_error("status write for unknown orb\n");
@@ -361,20 +412,20 @@ complete_transaction(struct fw_card *card, int rcode,
361 orb->rcode = rcode; 412 orb->rcode = rcode;
362 if (orb->rcode != RCODE_COMPLETE) { 413 if (orb->rcode != RCODE_COMPLETE) {
363 list_del(&orb->link); 414 list_del(&orb->link);
415 spin_unlock_irqrestore(&card->lock, flags);
364 orb->callback(orb, NULL); 416 orb->callback(orb, NULL);
417 } else {
418 spin_unlock_irqrestore(&card->lock, flags);
365 } 419 }
366 420
367 spin_unlock_irqrestore(&card->lock, flags);
368
369 kref_put(&orb->kref, free_orb); 421 kref_put(&orb->kref, free_orb);
370} 422}
371 423
372static void 424static void
373sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit, 425sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
374 int node_id, int generation, u64 offset) 426 int node_id, int generation, u64 offset)
375{ 427{
376 struct fw_device *device = fw_device(unit->device.parent); 428 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
377 struct sbp2_device *sd = unit->device.driver_data;
378 unsigned long flags; 429 unsigned long flags;
379 430
380 orb->pointer.high = 0; 431 orb->pointer.high = 0;
@@ -382,7 +433,7 @@ sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
382 fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer)); 433 fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
383 434
384 spin_lock_irqsave(&device->card->lock, flags); 435 spin_lock_irqsave(&device->card->lock, flags);
385 list_add_tail(&orb->link, &sd->orb_list); 436 list_add_tail(&orb->link, &lu->orb_list);
386 spin_unlock_irqrestore(&device->card->lock, flags); 437 spin_unlock_irqrestore(&device->card->lock, flags);
387 438
388 /* Take a ref for the orb list and for the transaction callback. */ 439 /* Take a ref for the orb list and for the transaction callback. */
@@ -395,10 +446,9 @@ sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
395 complete_transaction, orb); 446 complete_transaction, orb);
396} 447}
397 448
398static int sbp2_cancel_orbs(struct fw_unit *unit) 449static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
399{ 450{
400 struct fw_device *device = fw_device(unit->device.parent); 451 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
401 struct sbp2_device *sd = unit->device.driver_data;
402 struct sbp2_orb *orb, *next; 452 struct sbp2_orb *orb, *next;
403 struct list_head list; 453 struct list_head list;
404 unsigned long flags; 454 unsigned long flags;
@@ -406,7 +456,7 @@ static int sbp2_cancel_orbs(struct fw_unit *unit)
406 456
407 INIT_LIST_HEAD(&list); 457 INIT_LIST_HEAD(&list);
408 spin_lock_irqsave(&device->card->lock, flags); 458 spin_lock_irqsave(&device->card->lock, flags);
409 list_splice_init(&sd->orb_list, &list); 459 list_splice_init(&lu->orb_list, &list);
410 spin_unlock_irqrestore(&device->card->lock, flags); 460 spin_unlock_irqrestore(&device->card->lock, flags);
411 461
412 list_for_each_entry_safe(orb, next, &list, link) { 462 list_for_each_entry_safe(orb, next, &list, link) {
@@ -433,11 +483,11 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
433} 483}
434 484
435static int 485static int
436sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation, 486sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
437 int function, int lun, void *response) 487 int generation, int function, int lun_or_login_id,
488 void *response)
438{ 489{
439 struct fw_device *device = fw_device(unit->device.parent); 490 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
440 struct sbp2_device *sd = unit->device.driver_data;
441 struct sbp2_management_orb *orb; 491 struct sbp2_management_orb *orb;
442 int retval = -ENOMEM; 492 int retval = -ENOMEM;
443 493
@@ -458,12 +508,12 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
458 orb->request.misc = 508 orb->request.misc =
459 MANAGEMENT_ORB_NOTIFY | 509 MANAGEMENT_ORB_NOTIFY |
460 MANAGEMENT_ORB_FUNCTION(function) | 510 MANAGEMENT_ORB_FUNCTION(function) |
461 MANAGEMENT_ORB_LUN(lun); 511 MANAGEMENT_ORB_LUN(lun_or_login_id);
462 orb->request.length = 512 orb->request.length =
463 MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)); 513 MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
464 514
465 orb->request.status_fifo.high = sd->address_handler.offset >> 32; 515 orb->request.status_fifo.high = lu->address_handler.offset >> 32;
466 orb->request.status_fifo.low = sd->address_handler.offset; 516 orb->request.status_fifo.low = lu->address_handler.offset;
467 517
468 if (function == SBP2_LOGIN_REQUEST) { 518 if (function == SBP2_LOGIN_REQUEST) {
469 orb->request.misc |= 519 orb->request.misc |=
@@ -482,14 +532,14 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
482 if (dma_mapping_error(orb->base.request_bus)) 532 if (dma_mapping_error(orb->base.request_bus))
483 goto fail_mapping_request; 533 goto fail_mapping_request;
484 534
485 sbp2_send_orb(&orb->base, unit, 535 sbp2_send_orb(&orb->base, lu, node_id, generation,
486 node_id, generation, sd->management_agent_address); 536 lu->tgt->management_agent_address);
487 537
488 wait_for_completion_timeout(&orb->done, 538 wait_for_completion_timeout(&orb->done,
489 msecs_to_jiffies(SBP2_ORB_TIMEOUT)); 539 msecs_to_jiffies(SBP2_ORB_TIMEOUT));
490 540
491 retval = -EIO; 541 retval = -EIO;
492 if (sbp2_cancel_orbs(unit) == 0) { 542 if (sbp2_cancel_orbs(lu) == 0) {
493 fw_error("orb reply timed out, rcode=0x%02x\n", 543 fw_error("orb reply timed out, rcode=0x%02x\n",
494 orb->base.rcode); 544 orb->base.rcode);
495 goto out; 545 goto out;
@@ -534,10 +584,9 @@ complete_agent_reset_write(struct fw_card *card, int rcode,
534 kfree(t); 584 kfree(t);
535} 585}
536 586
537static int sbp2_agent_reset(struct fw_unit *unit) 587static int sbp2_agent_reset(struct sbp2_logical_unit *lu)
538{ 588{
539 struct fw_device *device = fw_device(unit->device.parent); 589 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
540 struct sbp2_device *sd = unit->device.driver_data;
541 struct fw_transaction *t; 590 struct fw_transaction *t;
542 static u32 zero; 591 static u32 zero;
543 592
@@ -546,181 +595,272 @@ static int sbp2_agent_reset(struct fw_unit *unit)
546 return -ENOMEM; 595 return -ENOMEM;
547 596
548 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, 597 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
549 sd->node_id, sd->generation, device->max_speed, 598 lu->tgt->node_id, lu->generation, device->max_speed,
550 sd->command_block_agent_address + SBP2_AGENT_RESET, 599 lu->command_block_agent_address + SBP2_AGENT_RESET,
551 &zero, sizeof(zero), complete_agent_reset_write, t); 600 &zero, sizeof(zero), complete_agent_reset_write, t);
552 601
553 return 0; 602 return 0;
554} 603}
555 604
556static void sbp2_reconnect(struct work_struct *work); 605static void sbp2_release_target(struct kref *kref)
557static struct scsi_host_template scsi_driver_template;
558
559static void release_sbp2_device(struct kref *kref)
560{ 606{
561 struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref); 607 struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
562 struct Scsi_Host *host = 608 struct sbp2_logical_unit *lu, *next;
563 container_of((void *)sd, struct Scsi_Host, hostdata[0]); 609 struct Scsi_Host *shost =
564 610 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
565 scsi_remove_host(host); 611
566 sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation, 612 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
567 SBP2_LOGOUT_REQUEST, sd->login_id, NULL); 613 if (lu->sdev)
568 fw_core_remove_address_handler(&sd->address_handler); 614 scsi_remove_device(lu->sdev);
569 fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id); 615
570 put_device(&sd->unit->device); 616 sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
571 scsi_host_put(host); 617 SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
618 fw_core_remove_address_handler(&lu->address_handler);
619 list_del(&lu->link);
620 kfree(lu);
621 }
622 scsi_remove_host(shost);
623 fw_notify("released %s\n", tgt->unit->device.bus_id);
624
625 put_device(&tgt->unit->device);
626 scsi_host_put(shost);
572} 627}
573 628
629static struct workqueue_struct *sbp2_wq;
630
631static void sbp2_reconnect(struct work_struct *work);
632
574static void sbp2_login(struct work_struct *work) 633static void sbp2_login(struct work_struct *work)
575{ 634{
576 struct sbp2_device *sd = 635 struct sbp2_logical_unit *lu =
577 container_of(work, struct sbp2_device, work.work); 636 container_of(work, struct sbp2_logical_unit, work.work);
578 struct Scsi_Host *host = 637 struct Scsi_Host *shost =
579 container_of((void *)sd, struct Scsi_Host, hostdata[0]); 638 container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]);
580 struct fw_unit *unit = sd->unit; 639 struct scsi_device *sdev;
640 struct scsi_lun eight_bytes_lun;
641 struct fw_unit *unit = lu->tgt->unit;
581 struct fw_device *device = fw_device(unit->device.parent); 642 struct fw_device *device = fw_device(unit->device.parent);
582 struct sbp2_login_response response; 643 struct sbp2_login_response response;
583 int generation, node_id, local_node_id, lun, retval; 644 int generation, node_id, local_node_id;
584
585 /* FIXME: Make this work for multi-lun devices. */
586 lun = 0;
587 645
588 generation = device->card->generation; 646 generation = device->card->generation;
589 node_id = device->node->node_id; 647 node_id = device->node->node_id;
590 local_node_id = device->card->local_node->node_id; 648 local_node_id = device->card->local_node->node_id;
591 649
592 if (sbp2_send_management_orb(unit, node_id, generation, 650 if (sbp2_send_management_orb(lu, node_id, generation,
593 SBP2_LOGIN_REQUEST, lun, &response) < 0) { 651 SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
594 if (sd->retries++ < 5) { 652 if (lu->retries++ < 5) {
595 schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5)); 653 queue_delayed_work(sbp2_wq, &lu->work,
654 DIV_ROUND_UP(HZ, 5));
596 } else { 655 } else {
597 fw_error("failed to login to %s\n", 656 fw_error("failed to login to %s LUN %04x\n",
598 unit->device.bus_id); 657 unit->device.bus_id, lu->lun);
599 kref_put(&sd->kref, release_sbp2_device); 658 kref_put(&lu->tgt->kref, sbp2_release_target);
600 } 659 }
601 return; 660 return;
602 } 661 }
603 662
604 sd->generation = generation; 663 lu->generation = generation;
605 sd->node_id = node_id; 664 lu->tgt->node_id = node_id;
606 sd->address_high = local_node_id << 16; 665 lu->tgt->address_high = local_node_id << 16;
607 666
608 /* Get command block agent offset and login id. */ 667 /* Get command block agent offset and login id. */
609 sd->command_block_agent_address = 668 lu->command_block_agent_address =
610 ((u64) (response.command_block_agent.high & 0xffff) << 32) | 669 ((u64) (response.command_block_agent.high & 0xffff) << 32) |
611 response.command_block_agent.low; 670 response.command_block_agent.low;
612 sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); 671 lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
613 672
614 fw_notify("logged in to sbp2 unit %s (%d retries)\n", 673 fw_notify("logged in to %s LUN %04x (%d retries)\n",
615 unit->device.bus_id, sd->retries); 674 unit->device.bus_id, lu->lun, lu->retries);
616 fw_notify(" - management_agent_address: 0x%012llx\n",
617 (unsigned long long) sd->management_agent_address);
618 fw_notify(" - command_block_agent_address: 0x%012llx\n",
619 (unsigned long long) sd->command_block_agent_address);
620 fw_notify(" - status write address: 0x%012llx\n",
621 (unsigned long long) sd->address_handler.offset);
622 675
623#if 0 676#if 0
624 /* FIXME: The linux1394 sbp2 does this last step. */ 677 /* FIXME: The linux1394 sbp2 does this last step. */
625 sbp2_set_busy_timeout(scsi_id); 678 sbp2_set_busy_timeout(scsi_id);
626#endif 679#endif
627 680
628 PREPARE_DELAYED_WORK(&sd->work, sbp2_reconnect); 681 PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
629 sbp2_agent_reset(unit); 682 sbp2_agent_reset(lu);
683
684 memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
685 eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff;
686 eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff;
630 687
631 /* FIXME: Loop over luns here. */ 688 sdev = __scsi_add_device(shost, 0, 0,
632 lun = 0; 689 scsilun_to_int(&eight_bytes_lun), lu);
633 retval = scsi_add_device(host, 0, 0, lun); 690 if (IS_ERR(sdev)) {
634 if (retval < 0) { 691 sbp2_send_management_orb(lu, node_id, generation,
635 sbp2_send_management_orb(unit, sd->node_id, sd->generation, 692 SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
636 SBP2_LOGOUT_REQUEST, sd->login_id,
637 NULL);
638 /* 693 /*
639 * Set this back to sbp2_login so we fall back and 694 * Set this back to sbp2_login so we fall back and
640 * retry login on bus reset. 695 * retry login on bus reset.
641 */ 696 */
642 PREPARE_DELAYED_WORK(&sd->work, sbp2_login); 697 PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
698 } else {
699 lu->sdev = sdev;
700 scsi_device_put(sdev);
643 } 701 }
644 kref_put(&sd->kref, release_sbp2_device); 702 kref_put(&lu->tgt->kref, sbp2_release_target);
645} 703}
646 704
647static int sbp2_probe(struct device *dev) 705static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
648{ 706{
649 struct fw_unit *unit = fw_unit(dev); 707 struct sbp2_logical_unit *lu;
650 struct fw_device *device = fw_device(unit->device.parent);
651 struct sbp2_device *sd;
652 struct fw_csr_iterator ci;
653 struct Scsi_Host *host;
654 int i, key, value, err;
655 u32 model, firmware_revision;
656 708
657 err = -ENOMEM; 709 lu = kmalloc(sizeof(*lu), GFP_KERNEL);
658 host = scsi_host_alloc(&scsi_driver_template, sizeof(*sd)); 710 if (!lu)
659 if (host == NULL) 711 return -ENOMEM;
660 goto fail;
661 712
662 sd = (struct sbp2_device *) host->hostdata; 713 lu->address_handler.length = 0x100;
663 unit->device.driver_data = sd; 714 lu->address_handler.address_callback = sbp2_status_write;
664 sd->unit = unit; 715 lu->address_handler.callback_data = lu;
665 INIT_LIST_HEAD(&sd->orb_list);
666 kref_init(&sd->kref);
667 716
668 sd->address_handler.length = 0x100; 717 if (fw_core_add_address_handler(&lu->address_handler,
669 sd->address_handler.address_callback = sbp2_status_write; 718 &fw_high_memory_region) < 0) {
670 sd->address_handler.callback_data = sd; 719 kfree(lu);
720 return -ENOMEM;
721 }
671 722
672 err = fw_core_add_address_handler(&sd->address_handler, 723 lu->tgt = tgt;
673 &fw_high_memory_region); 724 lu->sdev = NULL;
674 if (err < 0) 725 lu->lun = lun_entry & 0xffff;
675 goto fail_host; 726 lu->retries = 0;
727 INIT_LIST_HEAD(&lu->orb_list);
728 INIT_DELAYED_WORK(&lu->work, sbp2_login);
676 729
677 err = fw_device_enable_phys_dma(device); 730 list_add_tail(&lu->link, &tgt->lu_list);
678 if (err < 0) 731 return 0;
679 goto fail_address_handler; 732}
680 733
681 err = scsi_add_host(host, &unit->device); 734static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
682 if (err < 0) 735{
683 goto fail_address_handler; 736 struct fw_csr_iterator ci;
737 int key, value;
684 738
685 /* 739 fw_csr_iterator_init(&ci, directory);
686 * Scan unit directory to get management agent address, 740 while (fw_csr_iterator_next(&ci, &key, &value))
687 * firmware revison and model. Initialize firmware_revision 741 if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
688 * and model to values that wont match anything in our table. 742 sbp2_add_logical_unit(tgt, value) < 0)
689 */ 743 return -ENOMEM;
690 firmware_revision = 0xff000000; 744 return 0;
691 model = 0xff000000; 745}
692 fw_csr_iterator_init(&ci, unit->directory); 746
747static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
748 u32 *model, u32 *firmware_revision)
749{
750 struct fw_csr_iterator ci;
751 int key, value;
752
753 fw_csr_iterator_init(&ci, directory);
693 while (fw_csr_iterator_next(&ci, &key, &value)) { 754 while (fw_csr_iterator_next(&ci, &key, &value)) {
694 switch (key) { 755 switch (key) {
756
695 case CSR_DEPENDENT_INFO | CSR_OFFSET: 757 case CSR_DEPENDENT_INFO | CSR_OFFSET:
696 sd->management_agent_address = 758 tgt->management_agent_address =
697 0xfffff0000000ULL + 4 * value; 759 CSR_REGISTER_BASE + 4 * value;
698 break; 760 break;
699 case SBP2_FIRMWARE_REVISION: 761
700 firmware_revision = value; 762 case CSR_DIRECTORY_ID:
763 tgt->directory_id = value;
701 break; 764 break;
765
702 case CSR_MODEL: 766 case CSR_MODEL:
703 model = value; 767 *model = value;
768 break;
769
770 case SBP2_CSR_FIRMWARE_REVISION:
771 *firmware_revision = value;
772 break;
773
774 case SBP2_CSR_LOGICAL_UNIT_NUMBER:
775 if (sbp2_add_logical_unit(tgt, value) < 0)
776 return -ENOMEM;
777 break;
778
779 case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
780 if (sbp2_scan_logical_unit_dir(tgt, ci.p + value) < 0)
781 return -ENOMEM;
704 break; 782 break;
705 } 783 }
706 } 784 }
785 return 0;
786}
787
788static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
789 u32 firmware_revision)
790{
791 int i;
792 unsigned w = sbp2_param_workarounds;
793
794 if (w)
795 fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
796 "if you need the workarounds parameter for %s\n",
797 tgt->unit->device.bus_id);
798
799 if (w & SBP2_WORKAROUND_OVERRIDE)
800 goto out;
707 801
708 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { 802 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
803
709 if (sbp2_workarounds_table[i].firmware_revision != 804 if (sbp2_workarounds_table[i].firmware_revision !=
710 (firmware_revision & 0xffffff00)) 805 (firmware_revision & 0xffffff00))
711 continue; 806 continue;
807
712 if (sbp2_workarounds_table[i].model != model && 808 if (sbp2_workarounds_table[i].model != model &&
713 sbp2_workarounds_table[i].model != ~0) 809 sbp2_workarounds_table[i].model != ~0)
714 continue; 810 continue;
715 sd->workarounds |= sbp2_workarounds_table[i].workarounds; 811
812 w |= sbp2_workarounds_table[i].workarounds;
716 break; 813 break;
717 } 814 }
718 815 out:
719 if (sd->workarounds) 816 if (w)
720 fw_notify("Workarounds for node %s: 0x%x " 817 fw_notify("Workarounds for %s: 0x%x "
721 "(firmware_revision 0x%06x, model_id 0x%06x)\n", 818 "(firmware_revision 0x%06x, model_id 0x%06x)\n",
722 unit->device.bus_id, 819 tgt->unit->device.bus_id,
723 sd->workarounds, firmware_revision, model); 820 w, firmware_revision, model);
821 tgt->workarounds = w;
822}
823
824static struct scsi_host_template scsi_driver_template;
825
826static int sbp2_probe(struct device *dev)
827{
828 struct fw_unit *unit = fw_unit(dev);
829 struct fw_device *device = fw_device(unit->device.parent);
830 struct sbp2_target *tgt;
831 struct sbp2_logical_unit *lu;
832 struct Scsi_Host *shost;
833 u32 model, firmware_revision;
834
835 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
836 if (shost == NULL)
837 return -ENOMEM;
838
839 tgt = (struct sbp2_target *)shost->hostdata;
840 unit->device.driver_data = tgt;
841 tgt->unit = unit;
842 kref_init(&tgt->kref);
843 INIT_LIST_HEAD(&tgt->lu_list);
844
845 if (fw_device_enable_phys_dma(device) < 0)
846 goto fail_shost_put;
847
848 if (scsi_add_host(shost, &unit->device) < 0)
849 goto fail_shost_put;
850
851 /* Initialize to values that won't match anything in our table. */
852 firmware_revision = 0xff000000;
853 model = 0xff000000;
854
855 /* implicit directory ID */
856 tgt->directory_id = ((unit->directory - device->config_rom) * 4
857 + CSR_CONFIG_ROM) & 0xffffff;
858
859 if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
860 &firmware_revision) < 0)
861 goto fail_tgt_put;
862
863 sbp2_init_workarounds(tgt, model, firmware_revision);
724 864
725 get_device(&unit->device); 865 get_device(&unit->device);
726 866
@@ -729,35 +869,34 @@ static int sbp2_probe(struct device *dev)
729 * reschedule retries. Always get the ref before scheduling 869 * reschedule retries. Always get the ref before scheduling
730 * work. 870 * work.
731 */ 871 */
732 INIT_DELAYED_WORK(&sd->work, sbp2_login); 872 list_for_each_entry(lu, &tgt->lu_list, link)
733 if (schedule_delayed_work(&sd->work, 0)) 873 if (queue_delayed_work(sbp2_wq, &lu->work, 0))
734 kref_get(&sd->kref); 874 kref_get(&tgt->kref);
735
736 return 0; 875 return 0;
737 876
738 fail_address_handler: 877 fail_tgt_put:
739 fw_core_remove_address_handler(&sd->address_handler); 878 kref_put(&tgt->kref, sbp2_release_target);
740 fail_host: 879 return -ENOMEM;
741 scsi_host_put(host); 880
742 fail: 881 fail_shost_put:
743 return err; 882 scsi_host_put(shost);
883 return -ENOMEM;
744} 884}
745 885
746static int sbp2_remove(struct device *dev) 886static int sbp2_remove(struct device *dev)
747{ 887{
748 struct fw_unit *unit = fw_unit(dev); 888 struct fw_unit *unit = fw_unit(dev);
749 struct sbp2_device *sd = unit->device.driver_data; 889 struct sbp2_target *tgt = unit->device.driver_data;
750
751 kref_put(&sd->kref, release_sbp2_device);
752 890
891 kref_put(&tgt->kref, sbp2_release_target);
753 return 0; 892 return 0;
754} 893}
755 894
756static void sbp2_reconnect(struct work_struct *work) 895static void sbp2_reconnect(struct work_struct *work)
757{ 896{
758 struct sbp2_device *sd = 897 struct sbp2_logical_unit *lu =
759 container_of(work, struct sbp2_device, work.work); 898 container_of(work, struct sbp2_logical_unit, work.work);
760 struct fw_unit *unit = sd->unit; 899 struct fw_unit *unit = lu->tgt->unit;
761 struct fw_device *device = fw_device(unit->device.parent); 900 struct fw_device *device = fw_device(unit->device.parent);
762 int generation, node_id, local_node_id; 901 int generation, node_id, local_node_id;
763 902
@@ -765,40 +904,49 @@ static void sbp2_reconnect(struct work_struct *work)
765 node_id = device->node->node_id; 904 node_id = device->node->node_id;
766 local_node_id = device->card->local_node->node_id; 905 local_node_id = device->card->local_node->node_id;
767 906
768 if (sbp2_send_management_orb(unit, node_id, generation, 907 if (sbp2_send_management_orb(lu, node_id, generation,
769 SBP2_RECONNECT_REQUEST, 908 SBP2_RECONNECT_REQUEST,
770 sd->login_id, NULL) < 0) { 909 lu->login_id, NULL) < 0) {
771 if (sd->retries++ >= 5) { 910 if (lu->retries++ >= 5) {
772 fw_error("failed to reconnect to %s\n", 911 fw_error("failed to reconnect to %s\n",
773 unit->device.bus_id); 912 unit->device.bus_id);
774 /* Fall back and try to log in again. */ 913 /* Fall back and try to log in again. */
775 sd->retries = 0; 914 lu->retries = 0;
776 PREPARE_DELAYED_WORK(&sd->work, sbp2_login); 915 PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
777 } 916 }
778 schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5)); 917 queue_delayed_work(sbp2_wq, &lu->work, DIV_ROUND_UP(HZ, 5));
779 return; 918 return;
780 } 919 }
781 920
782 sd->generation = generation; 921 lu->generation = generation;
783 sd->node_id = node_id; 922 lu->tgt->node_id = node_id;
784 sd->address_high = local_node_id << 16; 923 lu->tgt->address_high = local_node_id << 16;
785 924
786 fw_notify("reconnected to unit %s (%d retries)\n", 925 fw_notify("reconnected to %s LUN %04x (%d retries)\n",
787 unit->device.bus_id, sd->retries); 926 unit->device.bus_id, lu->lun, lu->retries);
788 sbp2_agent_reset(unit); 927
789 sbp2_cancel_orbs(unit); 928 sbp2_agent_reset(lu);
790 kref_put(&sd->kref, release_sbp2_device); 929 sbp2_cancel_orbs(lu);
930
931 kref_put(&lu->tgt->kref, sbp2_release_target);
791} 932}
792 933
793static void sbp2_update(struct fw_unit *unit) 934static void sbp2_update(struct fw_unit *unit)
794{ 935{
795 struct fw_device *device = fw_device(unit->device.parent); 936 struct sbp2_target *tgt = unit->device.driver_data;
796 struct sbp2_device *sd = unit->device.driver_data; 937 struct sbp2_logical_unit *lu;
797 938
798 sd->retries = 0; 939 fw_device_enable_phys_dma(fw_device(unit->device.parent));
799 fw_device_enable_phys_dma(device); 940
800 if (schedule_delayed_work(&sd->work, 0)) 941 /*
801 kref_get(&sd->kref); 942 * Fw-core serializes sbp2_update() against sbp2_remove().
943 * Iteration over tgt->lu_list is therefore safe here.
944 */
945 list_for_each_entry(lu, &tgt->lu_list, link) {
946 lu->retries = 0;
947 if (queue_delayed_work(sbp2_wq, &lu->work, 0))
948 kref_get(&tgt->kref);
949 }
802} 950}
803 951
804#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e 952#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
@@ -868,13 +1016,12 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
868{ 1016{
869 struct sbp2_command_orb *orb = 1017 struct sbp2_command_orb *orb =
870 container_of(base_orb, struct sbp2_command_orb, base); 1018 container_of(base_orb, struct sbp2_command_orb, base);
871 struct fw_unit *unit = orb->unit; 1019 struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
872 struct fw_device *device = fw_device(unit->device.parent);
873 int result; 1020 int result;
874 1021
875 if (status != NULL) { 1022 if (status != NULL) {
876 if (STATUS_GET_DEAD(*status)) 1023 if (STATUS_GET_DEAD(*status))
877 sbp2_agent_reset(unit); 1024 sbp2_agent_reset(orb->lu);
878 1025
879 switch (STATUS_GET_RESPONSE(*status)) { 1026 switch (STATUS_GET_RESPONSE(*status)) {
880 case SBP2_STATUS_REQUEST_COMPLETE: 1027 case SBP2_STATUS_REQUEST_COMPLETE:
@@ -918,12 +1065,10 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
918 orb->done(orb->cmd); 1065 orb->done(orb->cmd);
919} 1066}
920 1067
921static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb) 1068static int
1069sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
1070 struct sbp2_logical_unit *lu)
922{ 1071{
923 struct sbp2_device *sd =
924 (struct sbp2_device *)orb->cmd->device->host->hostdata;
925 struct fw_unit *unit = sd->unit;
926 struct fw_device *device = fw_device(unit->device.parent);
927 struct scatterlist *sg; 1072 struct scatterlist *sg;
928 int sg_len, l, i, j, count; 1073 int sg_len, l, i, j, count;
929 dma_addr_t sg_addr; 1074 dma_addr_t sg_addr;
@@ -942,10 +1087,9 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
942 * tables. 1087 * tables.
943 */ 1088 */
944 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) { 1089 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
945 orb->request.data_descriptor.high = sd->address_high; 1090 orb->request.data_descriptor.high = lu->tgt->address_high;
946 orb->request.data_descriptor.low = sg_dma_address(sg); 1091 orb->request.data_descriptor.low = sg_dma_address(sg);
947 orb->request.misc |= 1092 orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
948 COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
949 return 0; 1093 return 0;
950 } 1094 }
951 1095
@@ -989,7 +1133,7 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
989 * initiator (i.e. us), but data_descriptor can refer to data 1133 * initiator (i.e. us), but data_descriptor can refer to data
990 * on other nodes so we need to put our ID in descriptor.high. 1134 * on other nodes so we need to put our ID in descriptor.high.
991 */ 1135 */
992 orb->request.data_descriptor.high = sd->address_high; 1136 orb->request.data_descriptor.high = lu->tgt->address_high;
993 orb->request.data_descriptor.low = orb->page_table_bus; 1137 orb->request.data_descriptor.low = orb->page_table_bus;
994 orb->request.misc |= 1138 orb->request.misc |=
995 COMMAND_ORB_PAGE_TABLE_PRESENT | 1139 COMMAND_ORB_PAGE_TABLE_PRESENT |
@@ -1008,12 +1152,11 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
1008 1152
1009static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) 1153static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1010{ 1154{
1011 struct sbp2_device *sd = 1155 struct sbp2_logical_unit *lu = cmd->device->hostdata;
1012 (struct sbp2_device *)cmd->device->host->hostdata; 1156 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
1013 struct fw_unit *unit = sd->unit;
1014 struct fw_device *device = fw_device(unit->device.parent);
1015 struct sbp2_command_orb *orb; 1157 struct sbp2_command_orb *orb;
1016 unsigned max_payload; 1158 unsigned max_payload;
1159 int retval = SCSI_MLQUEUE_HOST_BUSY;
1017 1160
1018 /* 1161 /*
1019 * Bidirectional commands are not yet implemented, and unknown 1162 * Bidirectional commands are not yet implemented, and unknown
@@ -1029,14 +1172,14 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1029 orb = kzalloc(sizeof(*orb), GFP_ATOMIC); 1172 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
1030 if (orb == NULL) { 1173 if (orb == NULL) {
1031 fw_notify("failed to alloc orb\n"); 1174 fw_notify("failed to alloc orb\n");
1032 goto fail_alloc; 1175 return SCSI_MLQUEUE_HOST_BUSY;
1033 } 1176 }
1034 1177
1035 /* Initialize rcode to something not RCODE_COMPLETE. */ 1178 /* Initialize rcode to something not RCODE_COMPLETE. */
1036 orb->base.rcode = -1; 1179 orb->base.rcode = -1;
1037 kref_init(&orb->base.kref); 1180 kref_init(&orb->base.kref);
1038 1181
1039 orb->unit = unit; 1182 orb->lu = lu;
1040 orb->done = done; 1183 orb->done = done;
1041 orb->cmd = cmd; 1184 orb->cmd = cmd;
1042 1185
@@ -1062,8 +1205,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1062 orb->request.misc |= 1205 orb->request.misc |=
1063 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA); 1206 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
1064 1207
1065 if (scsi_sg_count(cmd) && sbp2_command_orb_map_scatterlist(orb) < 0) 1208 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
1066 goto fail_mapping; 1209 goto out;
1067 1210
1068 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request)); 1211 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
1069 1212
@@ -1076,49 +1219,47 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1076 dma_map_single(device->card->device, &orb->request, 1219 dma_map_single(device->card->device, &orb->request,
1077 sizeof(orb->request), DMA_TO_DEVICE); 1220 sizeof(orb->request), DMA_TO_DEVICE);
1078 if (dma_mapping_error(orb->base.request_bus)) 1221 if (dma_mapping_error(orb->base.request_bus))
1079 goto fail_mapping; 1222 goto out;
1080
1081 sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
1082 sd->command_block_agent_address + SBP2_ORB_POINTER);
1083
1084 kref_put(&orb->base.kref, free_orb);
1085 return 0;
1086 1223
1087 fail_mapping: 1224 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
1225 lu->command_block_agent_address + SBP2_ORB_POINTER);
1226 retval = 0;
1227 out:
1088 kref_put(&orb->base.kref, free_orb); 1228 kref_put(&orb->base.kref, free_orb);
1089 fail_alloc: 1229 return retval;
1090 return SCSI_MLQUEUE_HOST_BUSY;
1091} 1230}
1092 1231
1093static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) 1232static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
1094{ 1233{
1095 struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata; 1234 struct sbp2_logical_unit *lu = sdev->hostdata;
1096 1235
1097 sdev->allow_restart = 1; 1236 sdev->allow_restart = 1;
1098 1237
1099 if (sd->workarounds & SBP2_WORKAROUND_INQUIRY_36) 1238 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1100 sdev->inquiry_len = 36; 1239 sdev->inquiry_len = 36;
1240
1101 return 0; 1241 return 0;
1102} 1242}
1103 1243
1104static int sbp2_scsi_slave_configure(struct scsi_device *sdev) 1244static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
1105{ 1245{
1106 struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata; 1246 struct sbp2_logical_unit *lu = sdev->hostdata;
1107 struct fw_unit *unit = sd->unit;
1108 1247
1109 sdev->use_10_for_rw = 1; 1248 sdev->use_10_for_rw = 1;
1110 1249
1111 if (sdev->type == TYPE_ROM) 1250 if (sdev->type == TYPE_ROM)
1112 sdev->use_10_for_ms = 1; 1251 sdev->use_10_for_ms = 1;
1252
1113 if (sdev->type == TYPE_DISK && 1253 if (sdev->type == TYPE_DISK &&
1114 sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) 1254 lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
1115 sdev->skip_ms_page_8 = 1; 1255 sdev->skip_ms_page_8 = 1;
1116 if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) { 1256
1117 fw_notify("setting fix_capacity for %s\n", unit->device.bus_id); 1257 if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
1118 sdev->fix_capacity = 1; 1258 sdev->fix_capacity = 1;
1119 } 1259
1120 if (sd->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) 1260 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
1121 blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512); 1261 blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
1262
1122 return 0; 1263 return 0;
1123} 1264}
1124 1265
@@ -1128,13 +1269,11 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
1128 */ 1269 */
1129static int sbp2_scsi_abort(struct scsi_cmnd *cmd) 1270static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1130{ 1271{
1131 struct sbp2_device *sd = 1272 struct sbp2_logical_unit *lu = cmd->device->hostdata;
1132 (struct sbp2_device *)cmd->device->host->hostdata;
1133 struct fw_unit *unit = sd->unit;
1134 1273
1135 fw_notify("sbp2_scsi_abort\n"); 1274 fw_notify("sbp2_scsi_abort\n");
1136 sbp2_agent_reset(unit); 1275 sbp2_agent_reset(lu);
1137 sbp2_cancel_orbs(unit); 1276 sbp2_cancel_orbs(lu);
1138 1277
1139 return SUCCESS; 1278 return SUCCESS;
1140} 1279}
@@ -1151,37 +1290,18 @@ sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
1151 char *buf) 1290 char *buf)
1152{ 1291{
1153 struct scsi_device *sdev = to_scsi_device(dev); 1292 struct scsi_device *sdev = to_scsi_device(dev);
1154 struct sbp2_device *sd; 1293 struct sbp2_logical_unit *lu;
1155 struct fw_unit *unit;
1156 struct fw_device *device; 1294 struct fw_device *device;
1157 u32 directory_id;
1158 struct fw_csr_iterator ci;
1159 int key, value, lun;
1160 1295
1161 if (!sdev) 1296 if (!sdev)
1162 return 0; 1297 return 0;
1163 sd = (struct sbp2_device *)sdev->host->hostdata;
1164 unit = sd->unit;
1165 device = fw_device(unit->device.parent);
1166
1167 /* implicit directory ID */
1168 directory_id = ((unit->directory - device->config_rom) * 4
1169 + CSR_CONFIG_ROM) & 0xffffff;
1170
1171 /* explicit directory ID, overrides implicit ID if present */
1172 fw_csr_iterator_init(&ci, unit->directory);
1173 while (fw_csr_iterator_next(&ci, &key, &value))
1174 if (key == CSR_DIRECTORY_ID) {
1175 directory_id = value;
1176 break;
1177 }
1178 1298
1179 /* FIXME: Make this work for multi-lun devices. */ 1299 lu = sdev->hostdata;
1180 lun = 0; 1300 device = fw_device(lu->tgt->unit->device.parent);
1181 1301
1182 return sprintf(buf, "%08x%08x:%06x:%04x\n", 1302 return sprintf(buf, "%08x%08x:%06x:%04x\n",
1183 device->config_rom[3], device->config_rom[4], 1303 device->config_rom[3], device->config_rom[4],
1184 directory_id, lun); 1304 lu->tgt->directory_id, lu->lun);
1185} 1305}
1186 1306
1187static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL); 1307static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
@@ -1219,12 +1339,17 @@ MODULE_ALIAS("sbp2");
1219 1339
1220static int __init sbp2_init(void) 1340static int __init sbp2_init(void)
1221{ 1341{
1342 sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
1343 if (!sbp2_wq)
1344 return -ENOMEM;
1345
1222 return driver_register(&sbp2_driver.driver); 1346 return driver_register(&sbp2_driver.driver);
1223} 1347}
1224 1348
1225static void __exit sbp2_cleanup(void) 1349static void __exit sbp2_cleanup(void)
1226{ 1350{
1227 driver_unregister(&sbp2_driver.driver); 1351 driver_unregister(&sbp2_driver.driver);
1352 destroy_workqueue(sbp2_wq);
1228} 1353}
1229 1354
1230module_init(sbp2_init); 1355module_init(sbp2_init);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 39e5cd12aa5..0fc9b000e99 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -152,6 +152,10 @@ static void update_hop_count(struct fw_node *node)
152 node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2); 152 node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
153} 153}
154 154
155static inline struct fw_node *fw_node(struct list_head *l)
156{
157 return list_entry(l, struct fw_node, link);
158}
155 159
156/** 160/**
157 * build_tree - Build the tree representation of the topology 161 * build_tree - Build the tree representation of the topology
@@ -162,7 +166,7 @@ static void update_hop_count(struct fw_node *node)
162 * This function builds the tree representation of the topology given 166 * This function builds the tree representation of the topology given
163 * by the self IDs from the latest bus reset. During the construction 167 * by the self IDs from the latest bus reset. During the construction
164 * of the tree, the function checks that the self IDs are valid and 168 * of the tree, the function checks that the self IDs are valid and
165 * internally consistent. On succcess this funtions returns the 169 * internally consistent. On succcess this function returns the
166 * fw_node corresponding to the local card otherwise NULL. 170 * fw_node corresponding to the local card otherwise NULL.
167 */ 171 */
168static struct fw_node *build_tree(struct fw_card *card, 172static struct fw_node *build_tree(struct fw_card *card,
@@ -211,6 +215,10 @@ static struct fw_node *build_tree(struct fw_card *card,
211 */ 215 */
212 for (i = 0, h = &stack; i < child_port_count; i++) 216 for (i = 0, h = &stack; i < child_port_count; i++)
213 h = h->prev; 217 h = h->prev;
218 /*
219 * When the stack is empty, this yields an invalid value,
220 * but that pointer will never be dereferenced.
221 */
214 child = fw_node(h); 222 child = fw_node(h);
215 223
216 node = fw_node_create(q, port_count, card->color); 224 node = fw_node_create(q, port_count, card->color);
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index 1b56b4ac7fb..cedc1ec906e 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -51,12 +51,6 @@ struct fw_node {
51}; 51};
52 52
53static inline struct fw_node * 53static inline struct fw_node *
54fw_node(struct list_head *l)
55{
56 return list_entry(l, struct fw_node, link);
57}
58
59static inline struct fw_node *
60fw_node_get(struct fw_node *node) 54fw_node_get(struct fw_node *node)
61{ 55{
62 atomic_inc(&node->ref_count); 56 atomic_inc(&node->ref_count);
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 3e1cb12e43c..9959b799dbe 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -410,7 +410,12 @@ EXPORT_SYMBOL(fw_unit_space_region);
410 * controller. When a request is received that falls within the 410 * controller. When a request is received that falls within the
411 * specified address range, the specified callback is invoked. The 411 * specified address range, the specified callback is invoked. The
412 * parameters passed to the callback give the details of the 412 * parameters passed to the callback give the details of the
413 * particular request 413 * particular request.
414 *
415 * Return value: 0 on success, non-zero otherwise.
416 * The start offset of the handler's address region is determined by
417 * fw_core_add_address_handler() and is returned in handler->offset.
418 * The offset is quadlet-aligned.
414 */ 419 */
415int 420int
416fw_core_add_address_handler(struct fw_address_handler *handler, 421fw_core_add_address_handler(struct fw_address_handler *handler,
@@ -422,14 +427,15 @@ fw_core_add_address_handler(struct fw_address_handler *handler,
422 427
423 spin_lock_irqsave(&address_handler_lock, flags); 428 spin_lock_irqsave(&address_handler_lock, flags);
424 429
425 handler->offset = region->start; 430 handler->offset = roundup(region->start, 4);
426 while (handler->offset + handler->length <= region->end) { 431 while (handler->offset + handler->length <= region->end) {
427 other = 432 other =
428 lookup_overlapping_address_handler(&address_handler_list, 433 lookup_overlapping_address_handler(&address_handler_list,
429 handler->offset, 434 handler->offset,
430 handler->length); 435 handler->length);
431 if (other != NULL) { 436 if (other != NULL) {
432 handler->offset += other->length; 437 handler->offset =
438 roundup(other->offset + other->length, 4);
433 } else { 439 } else {
434 list_add_tail(&handler->link, &address_handler_list); 440 list_add_tail(&handler->link, &address_handler_list);
435 ret = 0; 441 ret = 0;
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index d08166bda1c..e8122def164 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -218,12 +218,10 @@ static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
218 if (!kv) 218 if (!kv)
219 return NULL; 219 return NULL;
220 220
221 atomic_set(&kv->refcnt, 1);
221 kv->key.type = type; 222 kv->key.type = type;
222 kv->key.id = key; 223 kv->key.id = key;
223
224 kv->associate = NULL; 224 kv->associate = NULL;
225 kv->refcnt = 1;
226
227 kv->next = NULL; 225 kv->next = NULL;
228 kv->prev = NULL; 226 kv->prev = NULL;
229 kv->offset = 0; 227 kv->offset = 0;
@@ -326,12 +324,13 @@ void csr1212_associate_keyval(struct csr1212_keyval *kv,
326 if (kv->associate) 324 if (kv->associate)
327 csr1212_release_keyval(kv->associate); 325 csr1212_release_keyval(kv->associate);
328 326
329 associate->refcnt++; 327 csr1212_keep_keyval(associate);
330 kv->associate = associate; 328 kv->associate = associate;
331} 329}
332 330
333int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir, 331static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
334 struct csr1212_keyval *kv) 332 struct csr1212_keyval *kv,
333 bool keep_keyval)
335{ 334{
336 struct csr1212_dentry *dentry; 335 struct csr1212_dentry *dentry;
337 336
@@ -341,10 +340,10 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
341 if (!dentry) 340 if (!dentry)
342 return -ENOMEM; 341 return -ENOMEM;
343 342
343 if (keep_keyval)
344 csr1212_keep_keyval(kv);
344 dentry->kv = kv; 345 dentry->kv = kv;
345 346
346 kv->refcnt++;
347
348 dentry->next = NULL; 347 dentry->next = NULL;
349 dentry->prev = dir->value.directory.dentries_tail; 348 dentry->prev = dir->value.directory.dentries_tail;
350 349
@@ -358,6 +357,12 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
358 return CSR1212_SUCCESS; 357 return CSR1212_SUCCESS;
359} 358}
360 359
360int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
361 struct csr1212_keyval *kv)
362{
363 return __csr1212_attach_keyval_to_directory(dir, kv, true);
364}
365
361#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \ 366#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
362 (&((kv)->value.leaf.data[1])) 367 (&((kv)->value.leaf.data[1]))
363 368
@@ -483,15 +488,18 @@ void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
483 488
484/* This function is used to free the memory taken by a keyval. If the given 489/* This function is used to free the memory taken by a keyval. If the given
485 * keyval is a directory type, then any keyvals contained in that directory 490 * keyval is a directory type, then any keyvals contained in that directory
486 * will be destroyed as well if their respective refcnts are 0. By means of 491 * will be destroyed as well if no one holds a reference on them. By means of
487 * list manipulation, this routine will descend a directory structure in a 492 * list manipulation, this routine will descend a directory structure in a
488 * non-recursive manner. */ 493 * non-recursive manner. */
489static void csr1212_destroy_keyval(struct csr1212_keyval *kv) 494void csr1212_release_keyval(struct csr1212_keyval *kv)
490{ 495{
491 struct csr1212_keyval *k, *a; 496 struct csr1212_keyval *k, *a;
492 struct csr1212_dentry dentry; 497 struct csr1212_dentry dentry;
493 struct csr1212_dentry *head, *tail; 498 struct csr1212_dentry *head, *tail;
494 499
500 if (!atomic_dec_and_test(&kv->refcnt))
501 return;
502
495 dentry.kv = kv; 503 dentry.kv = kv;
496 dentry.next = NULL; 504 dentry.next = NULL;
497 dentry.prev = NULL; 505 dentry.prev = NULL;
@@ -503,9 +511,8 @@ static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
503 k = head->kv; 511 k = head->kv;
504 512
505 while (k) { 513 while (k) {
506 k->refcnt--; 514 /* must not dec_and_test kv->refcnt again */
507 515 if (k != kv && !atomic_dec_and_test(&k->refcnt))
508 if (k->refcnt > 0)
509 break; 516 break;
510 517
511 a = k->associate; 518 a = k->associate;
@@ -536,14 +543,6 @@ static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
536 } 543 }
537} 544}
538 545
539void csr1212_release_keyval(struct csr1212_keyval *kv)
540{
541 if (kv->refcnt > 1)
542 kv->refcnt--;
543 else
544 csr1212_destroy_keyval(kv);
545}
546
547void csr1212_destroy_csr(struct csr1212_csr *csr) 546void csr1212_destroy_csr(struct csr1212_csr *csr)
548{ 547{
549 struct csr1212_csr_rom_cache *c, *oc; 548 struct csr1212_csr_rom_cache *c, *oc;
@@ -1126,6 +1125,7 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1126 int ret = CSR1212_SUCCESS; 1125 int ret = CSR1212_SUCCESS;
1127 struct csr1212_keyval *k = NULL; 1126 struct csr1212_keyval *k = NULL;
1128 u32 offset; 1127 u32 offset;
1128 bool keep_keyval = true;
1129 1129
1130 switch (CSR1212_KV_KEY_TYPE(ki)) { 1130 switch (CSR1212_KV_KEY_TYPE(ki)) {
1131 case CSR1212_KV_TYPE_IMMEDIATE: 1131 case CSR1212_KV_TYPE_IMMEDIATE:
@@ -1135,8 +1135,8 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1135 ret = -ENOMEM; 1135 ret = -ENOMEM;
1136 goto out; 1136 goto out;
1137 } 1137 }
1138 1138 /* Don't keep local reference when parsing. */
1139 k->refcnt = 0; /* Don't keep local reference when parsing. */ 1139 keep_keyval = false;
1140 break; 1140 break;
1141 1141
1142 case CSR1212_KV_TYPE_CSR_OFFSET: 1142 case CSR1212_KV_TYPE_CSR_OFFSET:
@@ -1146,7 +1146,8 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1146 ret = -ENOMEM; 1146 ret = -ENOMEM;
1147 goto out; 1147 goto out;
1148 } 1148 }
1149 k->refcnt = 0; /* Don't keep local reference when parsing. */ 1149 /* Don't keep local reference when parsing. */
1150 keep_keyval = false;
1150 break; 1151 break;
1151 1152
1152 default: 1153 default:
@@ -1174,8 +1175,10 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1174 ret = -ENOMEM; 1175 ret = -ENOMEM;
1175 goto out; 1176 goto out;
1176 } 1177 }
1177 k->refcnt = 0; /* Don't keep local reference when parsing. */ 1178 /* Don't keep local reference when parsing. */
1178 k->valid = 0; /* Contents not read yet so it's not valid. */ 1179 keep_keyval = false;
1180 /* Contents not read yet so it's not valid. */
1181 k->valid = 0;
1179 k->offset = offset; 1182 k->offset = offset;
1180 1183
1181 k->prev = dir; 1184 k->prev = dir;
@@ -1183,7 +1186,7 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1183 dir->next->prev = k; 1186 dir->next->prev = k;
1184 dir->next = k; 1187 dir->next = k;
1185 } 1188 }
1186 ret = csr1212_attach_keyval_to_directory(dir, k); 1189 ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval);
1187out: 1190out:
1188 if (ret != CSR1212_SUCCESS && k != NULL) 1191 if (ret != CSR1212_SUCCESS && k != NULL)
1189 free_keyval(k); 1192 free_keyval(k);
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index df909ce6630..043039fc63e 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -32,6 +32,7 @@
32 32
33#include <linux/types.h> 33#include <linux/types.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <asm/atomic.h>
35 36
36#define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL) 37#define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL)
37#define CSR1212_FREE(ptr) kfree(ptr) 38#define CSR1212_FREE(ptr) kfree(ptr)
@@ -149,7 +150,7 @@ struct csr1212_keyval {
149 struct csr1212_directory directory; 150 struct csr1212_directory directory;
150 } value; 151 } value;
151 struct csr1212_keyval *associate; 152 struct csr1212_keyval *associate;
152 int refcnt; 153 atomic_t refcnt;
153 154
154 /* used in generating and/or parsing CSR image */ 155 /* used in generating and/or parsing CSR image */
155 struct csr1212_keyval *next, *prev; /* flat list of CSR elements */ 156 struct csr1212_keyval *next, *prev; /* flat list of CSR elements */
@@ -350,7 +351,8 @@ csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
350 * need for code to retain a keyval that has been parsed. */ 351 * need for code to retain a keyval that has been parsed. */
351static inline void csr1212_keep_keyval(struct csr1212_keyval *kv) 352static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
352{ 353{
353 kv->refcnt++; 354 atomic_inc(&kv->refcnt);
355 smp_mb__after_atomic_inc();
354} 356}
355 357
356 358
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index dc9dce22f6a..b166b3575fa 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1153,8 +1153,6 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1153 pdg->sz++; 1153 pdg->sz++;
1154 lh = find_partial_datagram(pdgl, dgl); 1154 lh = find_partial_datagram(pdgl, dgl);
1155 } else { 1155 } else {
1156 struct partial_datagram *pd;
1157
1158 pd = list_entry(lh, struct partial_datagram, list); 1156 pd = list_entry(lh, struct partial_datagram, list);
1159 1157
1160 if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) { 1158 if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
@@ -1222,23 +1220,19 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1222 priv->stats.rx_errors++; 1220 priv->stats.rx_errors++;
1223 priv->stats.rx_dropped++; 1221 priv->stats.rx_dropped++;
1224 dev_kfree_skb_any(skb); 1222 dev_kfree_skb_any(skb);
1225 goto bad_proto; 1223 } else if (netif_rx(skb) == NET_RX_DROP) {
1226 }
1227
1228 if (netif_rx(skb) == NET_RX_DROP) {
1229 priv->stats.rx_errors++; 1224 priv->stats.rx_errors++;
1230 priv->stats.rx_dropped++; 1225 priv->stats.rx_dropped++;
1231 goto bad_proto; 1226 } else {
1227 priv->stats.rx_packets++;
1228 priv->stats.rx_bytes += skb->len;
1232 } 1229 }
1233 1230
1234 /* Statistics */ 1231 spin_unlock_irqrestore(&priv->lock, flags);
1235 priv->stats.rx_packets++;
1236 priv->stats.rx_bytes += skb->len;
1237 1232
1238bad_proto: 1233bad_proto:
1239 if (netif_queue_stopped(dev)) 1234 if (netif_queue_stopped(dev))
1240 netif_wake_queue(dev); 1235 netif_wake_queue(dev);
1241 spin_unlock_irqrestore(&priv->lock, flags);
1242 1236
1243 dev->last_rx = jiffies; 1237 dev->last_rx = jiffies;
1244 1238
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 98fd985a32f..36c747b277d 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -488,7 +488,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
488 highlevel_host_reset(host); 488 highlevel_host_reset(host);
489} 489}
490 490
491static spinlock_t pending_packets_lock = SPIN_LOCK_UNLOCKED; 491static DEFINE_SPINLOCK(pending_packets_lock);
492 492
493/** 493/**
494 * hpsb_packet_sent - notify core of sending a packet 494 * hpsb_packet_sent - notify core of sending a packet
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 1939fee616e..90dc75be341 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1014,13 +1014,13 @@ static struct unit_directory *nodemgr_process_unit_directory
1014 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) { 1014 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
1015 switch (last_key_id) { 1015 switch (last_key_id) {
1016 case CSR1212_KV_ID_VENDOR: 1016 case CSR1212_KV_ID_VENDOR:
1017 ud->vendor_name_kv = kv;
1018 csr1212_keep_keyval(kv); 1017 csr1212_keep_keyval(kv);
1018 ud->vendor_name_kv = kv;
1019 break; 1019 break;
1020 1020
1021 case CSR1212_KV_ID_MODEL: 1021 case CSR1212_KV_ID_MODEL:
1022 ud->model_name_kv = kv;
1023 csr1212_keep_keyval(kv); 1022 csr1212_keep_keyval(kv);
1023 ud->model_name_kv = kv;
1024 break; 1024 break;
1025 1025
1026 } 1026 }
@@ -1112,7 +1112,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
1112{ 1112{
1113 unsigned int ud_id = 0; 1113 unsigned int ud_id = 0;
1114 struct csr1212_dentry *dentry; 1114 struct csr1212_dentry *dentry;
1115 struct csr1212_keyval *kv; 1115 struct csr1212_keyval *kv, *vendor_name_kv = NULL;
1116 u8 last_key_id = 0; 1116 u8 last_key_id = 0;
1117 1117
1118 ne->needs_probe = 0; 1118 ne->needs_probe = 0;
@@ -1139,8 +1139,8 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
1139 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 && 1139 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
1140 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 && 1140 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
1141 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) { 1141 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
1142 ne->vendor_name_kv = kv;
1143 csr1212_keep_keyval(kv); 1142 csr1212_keep_keyval(kv);
1143 vendor_name_kv = kv;
1144 } 1144 }
1145 } 1145 }
1146 break; 1146 break;
@@ -1149,10 +1149,13 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
1149 } 1149 }
1150 1150
1151 if (ne->vendor_name_kv) { 1151 if (ne->vendor_name_kv) {
1152 int error = device_create_file(&ne->device, 1152 kv = ne->vendor_name_kv;
1153 &dev_attr_ne_vendor_name_kv); 1153 ne->vendor_name_kv = vendor_name_kv;
1154 1154 csr1212_release_keyval(kv);
1155 if (error && error != -EEXIST) 1155 } else if (vendor_name_kv) {
1156 ne->vendor_name_kv = vendor_name_kv;
1157 if (device_create_file(&ne->device,
1158 &dev_attr_ne_vendor_name_kv) != 0)
1156 HPSB_ERR("Failed to add sysfs attribute"); 1159 HPSB_ERR("Failed to add sysfs attribute");
1157 } 1160 }
1158} 1161}
@@ -1712,7 +1715,8 @@ static int nodemgr_host_thread(void *__hi)
1712 * to make sure things settle down. */ 1715 * to make sure things settle down. */
1713 g = get_hpsb_generation(host); 1716 g = get_hpsb_generation(host);
1714 for (i = 0; i < 4 ; i++) { 1717 for (i = 0; i < 4 ; i++) {
1715 if (msleep_interruptible(63) || kthread_should_stop()) 1718 msleep_interruptible(63);
1719 if (kthread_should_stop())
1716 goto exit; 1720 goto exit;
1717 1721
1718 /* Now get the generation in which the node ID's we collect 1722 /* Now get the generation in which the node ID's we collect
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index d1a5bcdb5e0..8af01ab30cc 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -121,16 +121,6 @@ static int bit_getsda(void *data)
121 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010; 121 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
122} 122}
123 123
124static int bit_reg(struct i2c_client *client)
125{
126 return 0;
127}
128
129static int bit_unreg(struct i2c_client *client)
130{
131 return 0;
132}
133
134static struct i2c_algo_bit_data bit_data = { 124static struct i2c_algo_bit_data bit_data = {
135 .setsda = bit_setsda, 125 .setsda = bit_setsda,
136 .setscl = bit_setscl, 126 .setscl = bit_setscl,
@@ -140,14 +130,6 @@ static struct i2c_algo_bit_data bit_data = {
140 .timeout = 100, 130 .timeout = 100,
141}; 131};
142 132
143static struct i2c_adapter bit_ops = {
144 .id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
145 .client_register = bit_reg,
146 .client_unregister = bit_unreg,
147 .name = "PCILynx I2C",
148};
149
150
151 133
152/* 134/*
153 * PCL handling functions. 135 * PCL handling functions.
@@ -765,7 +747,6 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
765 } else { 747 } else {
766 struct ti_pcl pcl; 748 struct ti_pcl pcl;
767 u32 ack; 749 u32 ack;
768 struct hpsb_packet *packet;
769 750
770 PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL"); 751 PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");
771 752
@@ -1436,9 +1417,11 @@ static int __devinit add_card(struct pci_dev *dev,
1436 struct i2c_algo_bit_data i2c_adapter_data; 1417 struct i2c_algo_bit_data i2c_adapter_data;
1437 1418
1438 error = -ENOMEM; 1419 error = -ENOMEM;
1439 i2c_ad = kmemdup(&bit_ops, sizeof(*i2c_ad), GFP_KERNEL); 1420 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
1440 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); 1421 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1441 1422
1423 i2c_ad->id = I2C_HW_B_PCILYNX;
1424 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
1442 i2c_adapter_data = bit_data; 1425 i2c_adapter_data = bit_data;
1443 i2c_ad->algo_data = &i2c_adapter_data; 1426 i2c_ad->algo_data = &i2c_adapter_data;
1444 i2c_adapter_data.data = lynx; 1427 i2c_adapter_data.data = lynx;
@@ -1465,13 +1448,11 @@ static int __devinit add_card(struct pci_dev *dev,
1465 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block } 1448 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
1466 }; 1449 };
1467 1450
1468 /* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we 1451 /* we use i2c_transfer because we have no i2c_client
1469 do it more efficiently in one transaction rather then using several reads */ 1452 at hand */
1470 if (i2c_transfer(i2c_ad, msg, 2) < 0) { 1453 if (i2c_transfer(i2c_ad, msg, 2) < 0) {
1471 PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c"); 1454 PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
1472 } else { 1455 } else {
1473 int i;
1474
1475 PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom"); 1456 PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
1476 /* FIXME: probably we shoud rewrite the max_rec, max_ROM(1394a), 1457 /* FIXME: probably we shoud rewrite the max_rec, max_ROM(1394a),
1477 * generation(1394a) and link_spd(1394a) field and recalculate 1458 * generation(1394a) and link_spd(1394a) field and recalculate
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index a81ba8fca0d..1b353b964b3 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -242,6 +242,8 @@ static int sbp2_max_speed_and_size(struct sbp2_lu *);
242 242
243static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC }; 243static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
244 244
245static DEFINE_RWLOCK(sbp2_hi_logical_units_lock);
246
245static struct hpsb_highlevel sbp2_highlevel = { 247static struct hpsb_highlevel sbp2_highlevel = {
246 .name = SBP2_DEVICE_NAME, 248 .name = SBP2_DEVICE_NAME,
247 .host_reset = sbp2_host_reset, 249 .host_reset = sbp2_host_reset,
@@ -732,6 +734,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
732 struct sbp2_fwhost_info *hi; 734 struct sbp2_fwhost_info *hi;
733 struct Scsi_Host *shost = NULL; 735 struct Scsi_Host *shost = NULL;
734 struct sbp2_lu *lu = NULL; 736 struct sbp2_lu *lu = NULL;
737 unsigned long flags;
735 738
736 lu = kzalloc(sizeof(*lu), GFP_KERNEL); 739 lu = kzalloc(sizeof(*lu), GFP_KERNEL);
737 if (!lu) { 740 if (!lu) {
@@ -784,7 +787,9 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
784 787
785 lu->hi = hi; 788 lu->hi = hi;
786 789
790 write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
787 list_add_tail(&lu->lu_list, &hi->logical_units); 791 list_add_tail(&lu->lu_list, &hi->logical_units);
792 write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
788 793
789 /* Register the status FIFO address range. We could use the same FIFO 794 /* Register the status FIFO address range. We could use the same FIFO
790 * for targets at different nodes. However we need different FIFOs per 795 * for targets at different nodes. However we need different FIFOs per
@@ -828,16 +833,20 @@ static void sbp2_host_reset(struct hpsb_host *host)
828{ 833{
829 struct sbp2_fwhost_info *hi; 834 struct sbp2_fwhost_info *hi;
830 struct sbp2_lu *lu; 835 struct sbp2_lu *lu;
836 unsigned long flags;
831 837
832 hi = hpsb_get_hostinfo(&sbp2_highlevel, host); 838 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
833 if (!hi) 839 if (!hi)
834 return; 840 return;
841
842 read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
835 list_for_each_entry(lu, &hi->logical_units, lu_list) 843 list_for_each_entry(lu, &hi->logical_units, lu_list)
836 if (likely(atomic_read(&lu->state) != 844 if (likely(atomic_read(&lu->state) !=
837 SBP2LU_STATE_IN_SHUTDOWN)) { 845 SBP2LU_STATE_IN_SHUTDOWN)) {
838 atomic_set(&lu->state, SBP2LU_STATE_IN_RESET); 846 atomic_set(&lu->state, SBP2LU_STATE_IN_RESET);
839 scsi_block_requests(lu->shost); 847 scsi_block_requests(lu->shost);
840 } 848 }
849 read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
841} 850}
842 851
843static int sbp2_start_device(struct sbp2_lu *lu) 852static int sbp2_start_device(struct sbp2_lu *lu)
@@ -919,6 +928,7 @@ alloc_fail:
919static void sbp2_remove_device(struct sbp2_lu *lu) 928static void sbp2_remove_device(struct sbp2_lu *lu)
920{ 929{
921 struct sbp2_fwhost_info *hi; 930 struct sbp2_fwhost_info *hi;
931 unsigned long flags;
922 932
923 if (!lu) 933 if (!lu)
924 return; 934 return;
@@ -933,7 +943,9 @@ static void sbp2_remove_device(struct sbp2_lu *lu)
933 flush_scheduled_work(); 943 flush_scheduled_work();
934 sbp2util_remove_command_orb_pool(lu, hi->host); 944 sbp2util_remove_command_orb_pool(lu, hi->host);
935 945
946 write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
936 list_del(&lu->lu_list); 947 list_del(&lu->lu_list);
948 write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
937 949
938 if (lu->login_response) 950 if (lu->login_response)
939 dma_free_coherent(hi->host->device.parent, 951 dma_free_coherent(hi->host->device.parent,
@@ -1707,6 +1719,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1707 } 1719 }
1708 1720
1709 /* Find the unit which wrote the status. */ 1721 /* Find the unit which wrote the status. */
1722 read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
1710 list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) { 1723 list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) {
1711 if (lu_tmp->ne->nodeid == nodeid && 1724 if (lu_tmp->ne->nodeid == nodeid &&
1712 lu_tmp->status_fifo_addr == addr) { 1725 lu_tmp->status_fifo_addr == addr) {
@@ -1714,6 +1727,8 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1714 break; 1727 break;
1715 } 1728 }
1716 } 1729 }
1730 read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
1731
1717 if (unlikely(!lu)) { 1732 if (unlikely(!lu)) {
1718 SBP2_ERR("lu is NULL - device is gone?"); 1733 SBP2_ERR("lu is NULL - device is gone?");
1719 return RCODE_ADDRESS_ERROR; 1734 return RCODE_ADDRESS_ERROR;
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index ce22bf5de35..f99cb77e7b4 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -2225,8 +2225,9 @@ static void cirrusfb_pci_unmap(struct fb_info *info)
2225#endif /* CONFIG_PCI */ 2225#endif /* CONFIG_PCI */
2226 2226
2227#ifdef CONFIG_ZORRO 2227#ifdef CONFIG_ZORRO
2228static void __devexit cirrusfb_zorro_unmap(struct cirrusfb_info *cinfo) 2228static void __devexit cirrusfb_zorro_unmap(struct fb_info *info)
2229{ 2229{
2230 struct cirrusfb_info *cinfo = info->par;
2230 zorro_release_device(cinfo->zdev); 2231 zorro_release_device(cinfo->zdev);
2231 2232
2232 if (cinfo->btype == BT_PICASSO4) { 2233 if (cinfo->btype == BT_PICASSO4) {
@@ -2573,7 +2574,7 @@ static int cirrusfb_zorro_register(struct zorro_dev *z,
2573 printk(KERN_INFO "Cirrus Logic chipset on Zorro bus\n"); 2574 printk(KERN_INFO "Cirrus Logic chipset on Zorro bus\n");
2574 zorro_set_drvdata(z, info); 2575 zorro_set_drvdata(z, info);
2575 2576
2576 ret = cirrusfb_register(cinfo); 2577 ret = cirrusfb_register(info);
2577 if (ret) { 2578 if (ret) {
2578 if (btype == BT_PICASSO4) { 2579 if (btype == BT_PICASSO4) {
2579 iounmap(info->screen_base); 2580 iounmap(info->screen_base);
diff --git a/fs/open.c b/fs/open.c
index 1d9e5e98bf4..044bfa891c9 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -757,6 +757,10 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
757 f->f_op = fops_get(inode->i_fop); 757 f->f_op = fops_get(inode->i_fop);
758 file_move(f, &inode->i_sb->s_files); 758 file_move(f, &inode->i_sb->s_files);
759 759
760 error = security_dentry_open(f);
761 if (error)
762 goto cleanup_all;
763
760 if (!open && f->f_op) 764 if (!open && f->f_op)
761 open = f->f_op->open; 765 open = f->f_op->open;
762 if (open) { 766 if (open) {
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 2af321f36ab..65be95dd03a 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -6,6 +6,149 @@
6 */ 6 */
7#ifndef _ASM_DMA_MAPPING_H 7#ifndef _ASM_DMA_MAPPING_H
8#define _ASM_DMA_MAPPING_H 8#define _ASM_DMA_MAPPING_H
9#ifdef __KERNEL__
10
11#include <linux/types.h>
12#include <linux/cache.h>
13/* need struct page definitions */
14#include <linux/mm.h>
15#include <linux/scatterlist.h>
16#include <asm/io.h>
17
18#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
19
20#ifdef CONFIG_NOT_COHERENT_CACHE
21/*
22 * DMA-consistent mapping functions for PowerPCs that don't support
23 * cache snooping. These allocate/free a region of uncached mapped
24 * memory space for use with DMA devices. Alternatively, you could
25 * allocate the space "normally" and use the cache management functions
26 * to ensure it is consistent.
27 */
28extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
29extern void __dma_free_coherent(size_t size, void *vaddr);
30extern void __dma_sync(void *vaddr, size_t size, int direction);
31extern void __dma_sync_page(struct page *page, unsigned long offset,
32 size_t size, int direction);
33
34#else /* ! CONFIG_NOT_COHERENT_CACHE */
35/*
36 * Cache coherent cores.
37 */
38
39#define __dma_alloc_coherent(gfp, size, handle) NULL
40#define __dma_free_coherent(size, addr) ((void)0)
41#define __dma_sync(addr, size, rw) ((void)0)
42#define __dma_sync_page(pg, off, sz, rw) ((void)0)
43
44#endif /* ! CONFIG_NOT_COHERENT_CACHE */
45
46#ifdef CONFIG_PPC64
47/*
48 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
49 */
50struct dma_mapping_ops {
51 void * (*alloc_coherent)(struct device *dev, size_t size,
52 dma_addr_t *dma_handle, gfp_t flag);
53 void (*free_coherent)(struct device *dev, size_t size,
54 void *vaddr, dma_addr_t dma_handle);
55 dma_addr_t (*map_single)(struct device *dev, void *ptr,
56 size_t size, enum dma_data_direction direction);
57 void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
58 size_t size, enum dma_data_direction direction);
59 int (*map_sg)(struct device *dev, struct scatterlist *sg,
60 int nents, enum dma_data_direction direction);
61 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
62 int nents, enum dma_data_direction direction);
63 int (*dma_supported)(struct device *dev, u64 mask);
64 int (*set_dma_mask)(struct device *dev, u64 dma_mask);
65};
66
67static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
68{
69 /* We don't handle the NULL dev case for ISA for now. We could
70 * do it via an out of line call but it is not needed for now. The
71 * only ISA DMA device we support is the floppy and we have a hack
72 * in the floppy driver directly to get a device for us.
73 */
74 if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
75 return NULL;
76 return dev->archdata.dma_ops;
77}
78
79static inline int dma_supported(struct device *dev, u64 mask)
80{
81 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
82
83 if (unlikely(dma_ops == NULL))
84 return 0;
85 if (dma_ops->dma_supported == NULL)
86 return 1;
87 return dma_ops->dma_supported(dev, mask);
88}
89
90static inline int dma_set_mask(struct device *dev, u64 dma_mask)
91{
92 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
93
94 if (unlikely(dma_ops == NULL))
95 return -EIO;
96 if (dma_ops->set_dma_mask != NULL)
97 return dma_ops->set_dma_mask(dev, dma_mask);
98 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
99 return -EIO;
100 *dev->dma_mask = dma_mask;
101 return 0;
102}
103
104static inline void *dma_alloc_coherent(struct device *dev, size_t size,
105 dma_addr_t *dma_handle, gfp_t flag)
106{
107 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
108
109 BUG_ON(!dma_ops);
110 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
111}
112
113static inline void dma_free_coherent(struct device *dev, size_t size,
114 void *cpu_addr, dma_addr_t dma_handle)
115{
116 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
117
118 BUG_ON(!dma_ops);
119 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
120}
121
122static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
123 size_t size,
124 enum dma_data_direction direction)
125{
126 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
127
128 BUG_ON(!dma_ops);
129 return dma_ops->map_single(dev, cpu_addr, size, direction);
130}
131
132static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
133 size_t size,
134 enum dma_data_direction direction)
135{
136 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
137
138 BUG_ON(!dma_ops);
139 dma_ops->unmap_single(dev, dma_addr, size, direction);
140}
141
142static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
143 unsigned long offset, size_t size,
144 enum dma_data_direction direction)
145{
146 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
147
148 BUG_ON(!dma_ops);
149 return dma_ops->map_single(dev, page_address(page) + offset, size,
150 direction);
151}
9 152
10static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, 153static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
11 size_t size, 154 size_t size,
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 1a45d6f41b0..0f0e271f97f 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -178,6 +178,7 @@ union fw_cdev_event {
178#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) 178#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
179#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) 179#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
180#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) 180#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
181#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
181 182
182/* FW_CDEV_VERSION History 183/* FW_CDEV_VERSION History
183 * 184 *
@@ -459,4 +460,18 @@ struct fw_cdev_stop_iso {
459 __u32 handle; 460 __u32 handle;
460}; 461};
461 462
463/**
464 * struct fw_cdev_get_cycle_timer - read cycle timer register
465 * @local_time: system time, in microseconds since the Epoch
466 * @cycle_timer: isochronous cycle timer, as per OHCI 1.1 clause 5.13
467 *
468 * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
 469 * and also the system clock. This makes it possible to express the receive
 470 * time of an isochronous packet as a system time with microsecond accuracy.
471 */
472struct fw_cdev_get_cycle_timer {
473 __u64 local_time;
474 __u32 cycle_timer;
475};
476
462#endif /* _LINUX_FIREWIRE_CDEV_H */ 477#endif /* _LINUX_FIREWIRE_CDEV_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 1a15526e9f6..928d4793c6f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -504,6 +504,13 @@ struct request_sock;
504 * @file contains the file structure being received. 504 * @file contains the file structure being received.
505 * Return 0 if permission is granted. 505 * Return 0 if permission is granted.
506 * 506 *
507 * Security hook for dentry
508 *
509 * @dentry_open
510 * Save open-time permission checking state for later use upon
511 * file_permission, and recheck access if anything has changed
512 * since inode_permission.
513 *
507 * Security hooks for task operations. 514 * Security hooks for task operations.
508 * 515 *
509 * @task_create: 516 * @task_create:
@@ -1256,6 +1263,7 @@ struct security_operations {
1256 int (*file_send_sigiotask) (struct task_struct * tsk, 1263 int (*file_send_sigiotask) (struct task_struct * tsk,
1257 struct fown_struct * fown, int sig); 1264 struct fown_struct * fown, int sig);
1258 int (*file_receive) (struct file * file); 1265 int (*file_receive) (struct file * file);
1266 int (*dentry_open) (struct file *file);
1259 1267
1260 int (*task_create) (unsigned long clone_flags); 1268 int (*task_create) (unsigned long clone_flags);
1261 int (*task_alloc_security) (struct task_struct * p); 1269 int (*task_alloc_security) (struct task_struct * p);
@@ -1864,6 +1872,11 @@ static inline int security_file_receive (struct file *file)
1864 return security_ops->file_receive (file); 1872 return security_ops->file_receive (file);
1865} 1873}
1866 1874
1875static inline int security_dentry_open (struct file *file)
1876{
1877 return security_ops->dentry_open (file);
1878}
1879
1867static inline int security_task_create (unsigned long clone_flags) 1880static inline int security_task_create (unsigned long clone_flags)
1868{ 1881{
1869 return security_ops->task_create (clone_flags); 1882 return security_ops->task_create (clone_flags);
@@ -2546,6 +2559,11 @@ static inline int security_file_receive (struct file *file)
2546 return 0; 2559 return 0;
2547} 2560}
2548 2561
2562static inline int security_dentry_open (struct file *file)
2563{
2564 return 0;
2565}
2566
2549static inline int security_task_create (unsigned long clone_flags) 2567static inline int security_task_create (unsigned long clone_flags)
2550{ 2568{
2551 return 0; 2569 return 0;
diff --git a/security/dummy.c b/security/dummy.c
index 853ec229279..64b647a0d9a 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -463,6 +463,11 @@ static int dummy_file_receive (struct file *file)
463 return 0; 463 return 0;
464} 464}
465 465
466static int dummy_dentry_open (struct file *file)
467{
468 return 0;
469}
470
466static int dummy_task_create (unsigned long clone_flags) 471static int dummy_task_create (unsigned long clone_flags)
467{ 472{
468 return 0; 473 return 0;
@@ -1033,6 +1038,7 @@ void security_fixup_ops (struct security_operations *ops)
1033 set_to_dummy_if_null(ops, file_set_fowner); 1038 set_to_dummy_if_null(ops, file_set_fowner);
1034 set_to_dummy_if_null(ops, file_send_sigiotask); 1039 set_to_dummy_if_null(ops, file_send_sigiotask);
1035 set_to_dummy_if_null(ops, file_receive); 1040 set_to_dummy_if_null(ops, file_receive);
1041 set_to_dummy_if_null(ops, dentry_open);
1036 set_to_dummy_if_null(ops, task_create); 1042 set_to_dummy_if_null(ops, task_create);
1037 set_to_dummy_if_null(ops, task_alloc_security); 1043 set_to_dummy_if_null(ops, task_alloc_security);
1038 set_to_dummy_if_null(ops, task_free_security); 1044 set_to_dummy_if_null(ops, task_free_security);
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 0e69adf63bd..81b3dff3cbf 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -916,3 +916,8 @@ int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
916 avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata); 916 avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
917 return rc; 917 return rc;
918} 918}
919
920u32 avc_policy_seqno(void)
921{
922 return avc_cache.latest_notif;
923}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index cf76150e623..97b7e273809 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -14,6 +14,8 @@
14 * <dgoeddel@trustedcs.com> 14 * <dgoeddel@trustedcs.com>
15 * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. 15 * Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
16 * Paul Moore, <paul.moore@hp.com> 16 * Paul Moore, <paul.moore@hp.com>
17 * Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
18 * Yuichi Nakamura <ynakam@hitachisoft.jp>
17 * 19 *
18 * This program is free software; you can redistribute it and/or modify 20 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2, 21 * it under the terms of the GNU General Public License version 2,
@@ -2464,7 +2466,7 @@ static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t
2464 2466
2465/* file security operations */ 2467/* file security operations */
2466 2468
2467static int selinux_file_permission(struct file *file, int mask) 2469static int selinux_revalidate_file_permission(struct file *file, int mask)
2468{ 2470{
2469 int rc; 2471 int rc;
2470 struct inode *inode = file->f_path.dentry->d_inode; 2472 struct inode *inode = file->f_path.dentry->d_inode;
@@ -2486,6 +2488,25 @@ static int selinux_file_permission(struct file *file, int mask)
2486 return selinux_netlbl_inode_permission(inode, mask); 2488 return selinux_netlbl_inode_permission(inode, mask);
2487} 2489}
2488 2490
2491static int selinux_file_permission(struct file *file, int mask)
2492{
2493 struct inode *inode = file->f_path.dentry->d_inode;
2494 struct task_security_struct *tsec = current->security;
2495 struct file_security_struct *fsec = file->f_security;
2496 struct inode_security_struct *isec = inode->i_security;
2497
2498 if (!mask) {
2499 /* No permission to check. Existence test. */
2500 return 0;
2501 }
2502
2503 if (tsec->sid == fsec->sid && fsec->isid == isec->sid
2504 && fsec->pseqno == avc_policy_seqno())
2505 return selinux_netlbl_inode_permission(inode, mask);
2506
2507 return selinux_revalidate_file_permission(file, mask);
2508}
2509
2489static int selinux_file_alloc_security(struct file *file) 2510static int selinux_file_alloc_security(struct file *file)
2490{ 2511{
2491 return file_alloc_security(file); 2512 return file_alloc_security(file);
@@ -2725,6 +2746,34 @@ static int selinux_file_receive(struct file *file)
2725 return file_has_perm(current, file, file_to_av(file)); 2746 return file_has_perm(current, file, file_to_av(file));
2726} 2747}
2727 2748
2749static int selinux_dentry_open(struct file *file)
2750{
2751 struct file_security_struct *fsec;
2752 struct inode *inode;
2753 struct inode_security_struct *isec;
2754 inode = file->f_path.dentry->d_inode;
2755 fsec = file->f_security;
2756 isec = inode->i_security;
2757 /*
2758 * Save inode label and policy sequence number
2759 * at open-time so that selinux_file_permission
2760 * can determine whether revalidation is necessary.
2761 * Task label is already saved in the file security
2762 * struct as its SID.
2763 */
2764 fsec->isid = isec->sid;
2765 fsec->pseqno = avc_policy_seqno();
2766 /*
2767 * Since the inode label or policy seqno may have changed
2768 * between the selinux_inode_permission check and the saving
2769 * of state above, recheck that access is still permitted.
2770 * Otherwise, access might never be revalidated against the
2771 * new inode label or new policy.
2772 * This check is not redundant - do not remove.
2773 */
2774 return inode_has_perm(current, inode, file_to_av(file), NULL);
2775}
2776
2728/* task security operations */ 2777/* task security operations */
2729 2778
2730static int selinux_task_create(unsigned long clone_flags) 2779static int selinux_task_create(unsigned long clone_flags)
@@ -4794,6 +4843,8 @@ static struct security_operations selinux_ops = {
4794 .file_send_sigiotask = selinux_file_send_sigiotask, 4843 .file_send_sigiotask = selinux_file_send_sigiotask,
4795 .file_receive = selinux_file_receive, 4844 .file_receive = selinux_file_receive,
4796 4845
4846 .dentry_open = selinux_dentry_open,
4847
4797 .task_create = selinux_task_create, 4848 .task_create = selinux_task_create,
4798 .task_alloc_security = selinux_task_alloc_security, 4849 .task_alloc_security = selinux_task_alloc_security,
4799 .task_free_security = selinux_task_free_security, 4850 .task_free_security = selinux_task_free_security,
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index e145f6e13b0..553607a19e9 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -112,6 +112,8 @@ int avc_has_perm(u32 ssid, u32 tsid,
112 u16 tclass, u32 requested, 112 u16 tclass, u32 requested,
113 struct avc_audit_data *auditdata); 113 struct avc_audit_data *auditdata);
114 114
115u32 avc_policy_seqno(void);
116
115#define AVC_CALLBACK_GRANT 1 117#define AVC_CALLBACK_GRANT 1
116#define AVC_CALLBACK_TRY_REVOKE 2 118#define AVC_CALLBACK_TRY_REVOKE 2
117#define AVC_CALLBACK_REVOKE 4 119#define AVC_CALLBACK_REVOKE 4
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 91b88f0ba20..642a9fd319a 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -53,6 +53,8 @@ struct file_security_struct {
53 struct file *file; /* back pointer to file object */ 53 struct file *file; /* back pointer to file object */
54 u32 sid; /* SID of open file description */ 54 u32 sid; /* SID of open file description */
55 u32 fown_sid; /* SID of file owner (for SIGIO) */ 55 u32 fown_sid; /* SID of file owner (for SIGIO) */
56 u32 isid; /* SID of inode at the time of file open */
57 u32 pseqno; /* Policy seqno at the time of file open */
56}; 58};
57 59
58struct superblock_security_struct { 60struct superblock_security_struct {
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 83bdd4d2a29..39337afffec 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -90,6 +90,8 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);
90 90
91int security_get_classes(char ***classes, int *nclasses); 91int security_get_classes(char ***classes, int *nclasses);
92int security_get_permissions(char *class, char ***perms, int *nperms); 92int security_get_permissions(char *class, char ***perms, int *nperms);
93int security_get_reject_unknown(void);
94int security_get_allow_unknown(void);
93 95
94#define SECURITY_FS_USE_XATTR 1 /* use xattr */ 96#define SECURITY_FS_USE_XATTR 1 /* use xattr */
95#define SECURITY_FS_USE_TRANS 2 /* use transition SIDs, e.g. devpts/tmpfs */ 97#define SECURITY_FS_USE_TRANS 2 /* use transition SIDs, e.g. devpts/tmpfs */
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index c9e92daedee..f5f3e6da5da 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -103,6 +103,8 @@ enum sel_inos {
103 SEL_MEMBER, /* compute polyinstantiation membership decision */ 103 SEL_MEMBER, /* compute polyinstantiation membership decision */
104 SEL_CHECKREQPROT, /* check requested protection, not kernel-applied one */ 104 SEL_CHECKREQPROT, /* check requested protection, not kernel-applied one */
105 SEL_COMPAT_NET, /* whether to use old compat network packet controls */ 105 SEL_COMPAT_NET, /* whether to use old compat network packet controls */
106 SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
107 SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
106 SEL_INO_NEXT, /* The next inode number to use */ 108 SEL_INO_NEXT, /* The next inode number to use */
107}; 109};
108 110
@@ -177,6 +179,23 @@ static const struct file_operations sel_enforce_ops = {
177 .write = sel_write_enforce, 179 .write = sel_write_enforce,
178}; 180};
179 181
182static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
183 size_t count, loff_t *ppos)
184{
185 char tmpbuf[TMPBUFLEN];
186 ssize_t length;
187 ino_t ino = filp->f_path.dentry->d_inode->i_ino;
188 int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
189 security_get_reject_unknown() : !security_get_allow_unknown();
190
191 length = scnprintf(tmpbuf, TMPBUFLEN, "%d", handle_unknown);
192 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
193}
194
195static const struct file_operations sel_handle_unknown_ops = {
196 .read = sel_read_handle_unknown,
197};
198
180#ifdef CONFIG_SECURITY_SELINUX_DISABLE 199#ifdef CONFIG_SECURITY_SELINUX_DISABLE
181static ssize_t sel_write_disable(struct file * file, const char __user * buf, 200static ssize_t sel_write_disable(struct file * file, const char __user * buf,
182 size_t count, loff_t *ppos) 201 size_t count, loff_t *ppos)
@@ -309,6 +328,11 @@ static ssize_t sel_write_load(struct file * file, const char __user * buf,
309 length = count; 328 length = count;
310 329
311out1: 330out1:
331
332 printk(KERN_INFO "SELinux: policy loaded with handle_unknown=%s\n",
333 (security_get_reject_unknown() ? "reject" :
334 (security_get_allow_unknown() ? "allow" : "deny")));
335
312 audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD, 336 audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
313 "policy loaded auid=%u", 337 "policy loaded auid=%u",
314 audit_get_loginuid(current->audit_context)); 338 audit_get_loginuid(current->audit_context));
@@ -1575,6 +1599,8 @@ static int sel_fill_super(struct super_block * sb, void * data, int silent)
1575 [SEL_MEMBER] = {"member", &transaction_ops, S_IRUGO|S_IWUGO}, 1599 [SEL_MEMBER] = {"member", &transaction_ops, S_IRUGO|S_IWUGO},
1576 [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR}, 1600 [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
1577 [SEL_COMPAT_NET] = {"compat_net", &sel_compat_net_ops, S_IRUGO|S_IWUSR}, 1601 [SEL_COMPAT_NET] = {"compat_net", &sel_compat_net_ops, S_IRUGO|S_IWUSR},
1602 [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
1603 [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
1578 /* last one */ {""} 1604 /* last one */ {""}
1579 }; 1605 };
1580 ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files); 1606 ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 85705eb289e..7551af1f789 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -12,24 +12,25 @@
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation, version 2. 14 * the Free Software Foundation, version 2.
15 *
16 * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
17 * Tuned number of hash slots for avtab to reduce memory usage
15 */ 18 */
16 19
17#include <linux/kernel.h> 20#include <linux/kernel.h>
18#include <linux/slab.h> 21#include <linux/slab.h>
19#include <linux/vmalloc.h>
20#include <linux/errno.h> 22#include <linux/errno.h>
21
22#include "avtab.h" 23#include "avtab.h"
23#include "policydb.h" 24#include "policydb.h"
24 25
25#define AVTAB_HASH(keyp) \
26((keyp->target_class + \
27 (keyp->target_type << 2) + \
28 (keyp->source_type << 9)) & \
29 AVTAB_HASH_MASK)
30
31static struct kmem_cache *avtab_node_cachep; 26static struct kmem_cache *avtab_node_cachep;
32 27
28static inline int avtab_hash(struct avtab_key *keyp, u16 mask)
29{
30 return ((keyp->target_class + (keyp->target_type << 2) +
31 (keyp->source_type << 9)) & mask);
32}
33
33static struct avtab_node* 34static struct avtab_node*
34avtab_insert_node(struct avtab *h, int hvalue, 35avtab_insert_node(struct avtab *h, int hvalue,
35 struct avtab_node * prev, struct avtab_node * cur, 36 struct avtab_node * prev, struct avtab_node * cur,
@@ -59,10 +60,10 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
59 struct avtab_node *prev, *cur, *newnode; 60 struct avtab_node *prev, *cur, *newnode;
60 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); 61 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
61 62
62 if (!h) 63 if (!h || !h->htable)
63 return -EINVAL; 64 return -EINVAL;
64 65
65 hvalue = AVTAB_HASH(key); 66 hvalue = avtab_hash(key, h->mask);
66 for (prev = NULL, cur = h->htable[hvalue]; 67 for (prev = NULL, cur = h->htable[hvalue];
67 cur; 68 cur;
68 prev = cur, cur = cur->next) { 69 prev = cur, cur = cur->next) {
@@ -100,9 +101,9 @@ avtab_insert_nonunique(struct avtab * h, struct avtab_key * key, struct avtab_da
100 struct avtab_node *prev, *cur, *newnode; 101 struct avtab_node *prev, *cur, *newnode;
101 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); 102 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
102 103
103 if (!h) 104 if (!h || !h->htable)
104 return NULL; 105 return NULL;
105 hvalue = AVTAB_HASH(key); 106 hvalue = avtab_hash(key, h->mask);
106 for (prev = NULL, cur = h->htable[hvalue]; 107 for (prev = NULL, cur = h->htable[hvalue];
107 cur; 108 cur;
108 prev = cur, cur = cur->next) { 109 prev = cur, cur = cur->next) {
@@ -132,10 +133,10 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
132 struct avtab_node *cur; 133 struct avtab_node *cur;
133 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); 134 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
134 135
135 if (!h) 136 if (!h || !h->htable)
136 return NULL; 137 return NULL;
137 138
138 hvalue = AVTAB_HASH(key); 139 hvalue = avtab_hash(key, h->mask);
139 for (cur = h->htable[hvalue]; cur; cur = cur->next) { 140 for (cur = h->htable[hvalue]; cur; cur = cur->next) {
140 if (key->source_type == cur->key.source_type && 141 if (key->source_type == cur->key.source_type &&
141 key->target_type == cur->key.target_type && 142 key->target_type == cur->key.target_type &&
@@ -167,10 +168,10 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
167 struct avtab_node *cur; 168 struct avtab_node *cur;
168 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); 169 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
169 170
170 if (!h) 171 if (!h || !h->htable)
171 return NULL; 172 return NULL;
172 173
173 hvalue = AVTAB_HASH(key); 174 hvalue = avtab_hash(key, h->mask);
174 for (cur = h->htable[hvalue]; cur; cur = cur->next) { 175 for (cur = h->htable[hvalue]; cur; cur = cur->next) {
175 if (key->source_type == cur->key.source_type && 176 if (key->source_type == cur->key.source_type &&
176 key->target_type == cur->key.target_type && 177 key->target_type == cur->key.target_type &&
@@ -228,7 +229,7 @@ void avtab_destroy(struct avtab *h)
228 if (!h || !h->htable) 229 if (!h || !h->htable)
229 return; 230 return;
230 231
231 for (i = 0; i < AVTAB_SIZE; i++) { 232 for (i = 0; i < h->nslot; i++) {
232 cur = h->htable[i]; 233 cur = h->htable[i];
233 while (cur != NULL) { 234 while (cur != NULL) {
234 temp = cur; 235 temp = cur;
@@ -237,32 +238,63 @@ void avtab_destroy(struct avtab *h)
237 } 238 }
238 h->htable[i] = NULL; 239 h->htable[i] = NULL;
239 } 240 }
240 vfree(h->htable); 241 kfree(h->htable);
241 h->htable = NULL; 242 h->htable = NULL;
243 h->nslot = 0;
244 h->mask = 0;
242} 245}
243 246
244
245int avtab_init(struct avtab *h) 247int avtab_init(struct avtab *h)
246{ 248{
247 int i; 249 h->htable = NULL;
250 h->nel = 0;
251 return 0;
252}
253
254int avtab_alloc(struct avtab *h, u32 nrules)
255{
256 u16 mask = 0;
257 u32 shift = 0;
258 u32 work = nrules;
259 u32 nslot = 0;
260
261 if (nrules == 0)
262 goto avtab_alloc_out;
248 263
249 h->htable = vmalloc(sizeof(*(h->htable)) * AVTAB_SIZE); 264 while (work) {
265 work = work >> 1;
266 shift++;
267 }
268 if (shift > 2)
269 shift = shift - 2;
270 nslot = 1 << shift;
271 if (nslot > MAX_AVTAB_SIZE)
272 nslot = MAX_AVTAB_SIZE;
273 mask = nslot - 1;
274
275 h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
250 if (!h->htable) 276 if (!h->htable)
251 return -ENOMEM; 277 return -ENOMEM;
252 for (i = 0; i < AVTAB_SIZE; i++) 278
253 h->htable[i] = NULL; 279 avtab_alloc_out:
254 h->nel = 0; 280 h->nel = 0;
281 h->nslot = nslot;
282 h->mask = mask;
283 printk(KERN_DEBUG "SELinux:%d avtab hash slots allocated."
284 "Num of rules:%d\n", h->nslot, nrules);
255 return 0; 285 return 0;
256} 286}
257 287
258void avtab_hash_eval(struct avtab *h, char *tag) 288void avtab_hash_eval(struct avtab *h, char *tag)
259{ 289{
260 int i, chain_len, slots_used, max_chain_len; 290 int i, chain_len, slots_used, max_chain_len;
291 unsigned long long chain2_len_sum;
261 struct avtab_node *cur; 292 struct avtab_node *cur;
262 293
263 slots_used = 0; 294 slots_used = 0;
264 max_chain_len = 0; 295 max_chain_len = 0;
265 for (i = 0; i < AVTAB_SIZE; i++) { 296 chain2_len_sum = 0;
297 for (i = 0; i < h->nslot; i++) {
266 cur = h->htable[i]; 298 cur = h->htable[i];
267 if (cur) { 299 if (cur) {
268 slots_used++; 300 slots_used++;
@@ -274,12 +306,14 @@ void avtab_hash_eval(struct avtab *h, char *tag)
274 306
275 if (chain_len > max_chain_len) 307 if (chain_len > max_chain_len)
276 max_chain_len = chain_len; 308 max_chain_len = chain_len;
309 chain2_len_sum += chain_len * chain_len;
277 } 310 }
278 } 311 }
279 312
280 printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest " 313 printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest "
281 "chain length %d\n", tag, h->nel, slots_used, AVTAB_SIZE, 314 "chain length %d sum of chain length^2 %Lu\n",
282 max_chain_len); 315 tag, h->nel, slots_used, h->nslot, max_chain_len,
316 chain2_len_sum);
283} 317}
284 318
285static uint16_t spec_order[] = { 319static uint16_t spec_order[] = {
@@ -419,6 +453,11 @@ int avtab_read(struct avtab *a, void *fp, u32 vers)
419 rc = -EINVAL; 453 rc = -EINVAL;
420 goto bad; 454 goto bad;
421 } 455 }
456
457 rc = avtab_alloc(a, nel);
458 if (rc)
459 goto bad;
460
422 for (i = 0; i < nel; i++) { 461 for (i = 0; i < nel; i++) {
423 rc = avtab_read_item(fp,vers, a, avtab_insertf, NULL); 462 rc = avtab_read_item(fp,vers, a, avtab_insertf, NULL);
424 if (rc) { 463 if (rc) {
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 0a90d939af9..d8edf8ca56d 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -16,6 +16,9 @@
16 * This program is free software; you can redistribute it and/or modify 16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by 17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation, version 2. 18 * the Free Software Foundation, version 2.
19 *
20 * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
21 * Tuned number of hash slots for avtab to reduce memory usage
19 */ 22 */
20#ifndef _SS_AVTAB_H_ 23#ifndef _SS_AVTAB_H_
21#define _SS_AVTAB_H_ 24#define _SS_AVTAB_H_
@@ -50,9 +53,13 @@ struct avtab_node {
50struct avtab { 53struct avtab {
51 struct avtab_node **htable; 54 struct avtab_node **htable;
52 u32 nel; /* number of elements */ 55 u32 nel; /* number of elements */
56 u32 nslot; /* number of hash slots */
57 u16 mask; /* mask to compute hash func */
58
53}; 59};
54 60
55int avtab_init(struct avtab *); 61int avtab_init(struct avtab *);
62int avtab_alloc(struct avtab *, u32);
56struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k); 63struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
57void avtab_destroy(struct avtab *h); 64void avtab_destroy(struct avtab *h);
58void avtab_hash_eval(struct avtab *h, char *tag); 65void avtab_hash_eval(struct avtab *h, char *tag);
@@ -74,11 +81,10 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified
74void avtab_cache_init(void); 81void avtab_cache_init(void);
75void avtab_cache_destroy(void); 82void avtab_cache_destroy(void);
76 83
77#define AVTAB_HASH_BITS 15 84#define MAX_AVTAB_HASH_BITS 13
78#define AVTAB_HASH_BUCKETS (1 << AVTAB_HASH_BITS) 85#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
79#define AVTAB_HASH_MASK (AVTAB_HASH_BUCKETS-1) 86#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
80 87#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
81#define AVTAB_SIZE AVTAB_HASH_BUCKETS
82 88
83#endif /* _SS_AVTAB_H_ */ 89#endif /* _SS_AVTAB_H_ */
84 90
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index d2737edba54..45b93a827c8 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -456,6 +456,10 @@ int cond_read_list(struct policydb *p, void *fp)
456 456
457 len = le32_to_cpu(buf[0]); 457 len = le32_to_cpu(buf[0]);
458 458
459 rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
460 if (rc)
461 goto err;
462
459 for (i = 0; i < len; i++) { 463 for (i = 0; i < len; i++) {
460 node = kzalloc(sizeof(struct cond_node), GFP_KERNEL); 464 node = kzalloc(sizeof(struct cond_node), GFP_KERNEL);
461 if (!node) 465 if (!node)
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index ce492a6b38e..c1a6b22d48d 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -10,6 +10,10 @@
10 * 10 *
11 * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 11 * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
12 */ 12 */
13/*
14 * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
15 * Applied standard bit operations to improve bitmap scanning.
16 */
13 17
14#include <linux/kernel.h> 18#include <linux/kernel.h>
15#include <linux/slab.h> 19#include <linux/slab.h>
@@ -29,7 +33,7 @@ int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
29 n2 = e2->node; 33 n2 = e2->node;
30 while (n1 && n2 && 34 while (n1 && n2 &&
31 (n1->startbit == n2->startbit) && 35 (n1->startbit == n2->startbit) &&
32 (n1->map == n2->map)) { 36 !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
33 n1 = n1->next; 37 n1 = n1->next;
34 n2 = n2->next; 38 n2 = n2->next;
35 } 39 }
@@ -54,7 +58,7 @@ int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
54 return -ENOMEM; 58 return -ENOMEM;
55 } 59 }
56 new->startbit = n->startbit; 60 new->startbit = n->startbit;
57 new->map = n->map; 61 memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
58 new->next = NULL; 62 new->next = NULL;
59 if (prev) 63 if (prev)
60 prev->next = new; 64 prev->next = new;
@@ -84,13 +88,15 @@ int ebitmap_netlbl_export(struct ebitmap *ebmap,
84{ 88{
85 struct ebitmap_node *e_iter = ebmap->node; 89 struct ebitmap_node *e_iter = ebmap->node;
86 struct netlbl_lsm_secattr_catmap *c_iter; 90 struct netlbl_lsm_secattr_catmap *c_iter;
87 u32 cmap_idx; 91 u32 cmap_idx, cmap_sft;
92 int i;
88 93
89 /* This function is a much simpler because SELinux's MAPTYPE happens 94 /* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
90 * to be the same as NetLabel's NETLBL_CATMAP_MAPTYPE, if MAPTYPE is 95 * however, it is not always compatible with an array of unsigned long
91 * changed from a u64 this function will most likely need to be changed 96 * in ebitmap_node.
92 * as well. It's not ideal but I think the tradeoff in terms of 97 * In addition, you should pay attention the following implementation
93 * neatness and speed is worth it. */ 98 * assumes unsigned long has a width equal with or less than 64-bit.
99 */
94 100
95 if (e_iter == NULL) { 101 if (e_iter == NULL) {
96 *catmap = NULL; 102 *catmap = NULL;
@@ -104,19 +110,27 @@ int ebitmap_netlbl_export(struct ebitmap *ebmap,
104 c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1); 110 c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1);
105 111
106 while (e_iter != NULL) { 112 while (e_iter != NULL) {
107 if (e_iter->startbit >= 113 for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
108 (c_iter->startbit + NETLBL_CATMAP_SIZE)) { 114 unsigned int delta, e_startbit, c_endbit;
109 c_iter->next = netlbl_secattr_catmap_alloc(GFP_ATOMIC); 115
110 if (c_iter->next == NULL) 116 e_startbit = e_iter->startbit + i * EBITMAP_UNIT_SIZE;
111 goto netlbl_export_failure; 117 c_endbit = c_iter->startbit + NETLBL_CATMAP_SIZE;
112 c_iter = c_iter->next; 118 if (e_startbit >= c_endbit) {
113 c_iter->startbit = e_iter->startbit & 119 c_iter->next
114 ~(NETLBL_CATMAP_SIZE - 1); 120 = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
121 if (c_iter->next == NULL)
122 goto netlbl_export_failure;
123 c_iter = c_iter->next;
124 c_iter->startbit
125 = e_startbit & ~(NETLBL_CATMAP_SIZE - 1);
126 }
127 delta = e_startbit - c_iter->startbit;
128 cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
129 cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
130 c_iter->bitmap[cmap_idx]
131 |= e_iter->maps[cmap_idx] << cmap_sft;
132 e_iter = e_iter->next;
115 } 133 }
116 cmap_idx = (e_iter->startbit - c_iter->startbit) /
117 NETLBL_CATMAP_MAPSIZE;
118 c_iter->bitmap[cmap_idx] = e_iter->map;
119 e_iter = e_iter->next;
120 } 134 }
121 135
122 return 0; 136 return 0;
@@ -128,7 +142,7 @@ netlbl_export_failure:
128 142
129/** 143/**
130 * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap 144 * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
131 * @ebmap: the ebitmap to export 145 * @ebmap: the ebitmap to import
132 * @catmap: the NetLabel category bitmap 146 * @catmap: the NetLabel category bitmap
133 * 147 *
134 * Description: 148 * Description:
@@ -142,36 +156,50 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
142 struct ebitmap_node *e_iter = NULL; 156 struct ebitmap_node *e_iter = NULL;
143 struct ebitmap_node *emap_prev = NULL; 157 struct ebitmap_node *emap_prev = NULL;
144 struct netlbl_lsm_secattr_catmap *c_iter = catmap; 158 struct netlbl_lsm_secattr_catmap *c_iter = catmap;
145 u32 c_idx; 159 u32 c_idx, c_pos, e_idx, e_sft;
146 160
147 /* This function is a much simpler because SELinux's MAPTYPE happens 161 /* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
148 * to be the same as NetLabel's NETLBL_CATMAP_MAPTYPE, if MAPTYPE is 162 * however, it is not always compatible with an array of unsigned long
149 * changed from a u64 this function will most likely need to be changed 163 * in ebitmap_node.
150 * as well. It's not ideal but I think the tradeoff in terms of 164 * In addition, you should pay attention the following implementation
151 * neatness and speed is worth it. */ 165 * assumes unsigned long has a width equal with or less than 64-bit.
166 */
152 167
153 do { 168 do {
154 for (c_idx = 0; c_idx < NETLBL_CATMAP_MAPCNT; c_idx++) { 169 for (c_idx = 0; c_idx < NETLBL_CATMAP_MAPCNT; c_idx++) {
155 if (c_iter->bitmap[c_idx] == 0) 170 unsigned int delta;
171 u64 map = c_iter->bitmap[c_idx];
172
173 if (!map)
156 continue; 174 continue;
157 175
158 e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC); 176 c_pos = c_iter->startbit
159 if (e_iter == NULL) 177 + c_idx * NETLBL_CATMAP_MAPSIZE;
160 goto netlbl_import_failure; 178 if (!e_iter
161 if (emap_prev == NULL) 179 || c_pos >= e_iter->startbit + EBITMAP_SIZE) {
162 ebmap->node = e_iter; 180 e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
163 else 181 if (!e_iter)
164 emap_prev->next = e_iter; 182 goto netlbl_import_failure;
165 emap_prev = e_iter; 183 e_iter->startbit
166 184 = c_pos - (c_pos % EBITMAP_SIZE);
167 e_iter->startbit = c_iter->startbit + 185 if (emap_prev == NULL)
168 NETLBL_CATMAP_MAPSIZE * c_idx; 186 ebmap->node = e_iter;
169 e_iter->map = c_iter->bitmap[c_idx]; 187 else
188 emap_prev->next = e_iter;
189 emap_prev = e_iter;
190 }
191 delta = c_pos - e_iter->startbit;
192 e_idx = delta / EBITMAP_UNIT_SIZE;
193 e_sft = delta % EBITMAP_UNIT_SIZE;
194 while (map) {
195 e_iter->maps[e_idx++] |= map & (-1UL);
196 map = EBITMAP_SHIFT_UNIT_SIZE(map);
197 }
170 } 198 }
171 c_iter = c_iter->next; 199 c_iter = c_iter->next;
172 } while (c_iter != NULL); 200 } while (c_iter != NULL);
173 if (e_iter != NULL) 201 if (e_iter != NULL)
174 ebmap->highbit = e_iter->startbit + MAPSIZE; 202 ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
175 else 203 else
176 ebitmap_destroy(ebmap); 204 ebitmap_destroy(ebmap);
177 205
@@ -186,6 +214,7 @@ netlbl_import_failure:
186int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2) 214int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
187{ 215{
188 struct ebitmap_node *n1, *n2; 216 struct ebitmap_node *n1, *n2;
217 int i;
189 218
190 if (e1->highbit < e2->highbit) 219 if (e1->highbit < e2->highbit)
191 return 0; 220 return 0;
@@ -197,8 +226,10 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
197 n1 = n1->next; 226 n1 = n1->next;
198 continue; 227 continue;
199 } 228 }
200 if ((n1->map & n2->map) != n2->map) 229 for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
201 return 0; 230 if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
231 return 0;
232 }
202 233
203 n1 = n1->next; 234 n1 = n1->next;
204 n2 = n2->next; 235 n2 = n2->next;
@@ -219,12 +250,8 @@ int ebitmap_get_bit(struct ebitmap *e, unsigned long bit)
219 250
220 n = e->node; 251 n = e->node;
221 while (n && (n->startbit <= bit)) { 252 while (n && (n->startbit <= bit)) {
222 if ((n->startbit + MAPSIZE) > bit) { 253 if ((n->startbit + EBITMAP_SIZE) > bit)
223 if (n->map & (MAPBIT << (bit - n->startbit))) 254 return ebitmap_node_get_bit(n, bit);
224 return 1;
225 else
226 return 0;
227 }
228 n = n->next; 255 n = n->next;
229 } 256 }
230 257
@@ -238,31 +265,35 @@ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
238 prev = NULL; 265 prev = NULL;
239 n = e->node; 266 n = e->node;
240 while (n && n->startbit <= bit) { 267 while (n && n->startbit <= bit) {
241 if ((n->startbit + MAPSIZE) > bit) { 268 if ((n->startbit + EBITMAP_SIZE) > bit) {
242 if (value) { 269 if (value) {
243 n->map |= (MAPBIT << (bit - n->startbit)); 270 ebitmap_node_set_bit(n, bit);
244 } else { 271 } else {
245 n->map &= ~(MAPBIT << (bit - n->startbit)); 272 unsigned int s;
246 if (!n->map) { 273
247 /* drop this node from the bitmap */ 274 ebitmap_node_clr_bit(n, bit);
248 275
249 if (!n->next) { 276 s = find_first_bit(n->maps, EBITMAP_SIZE);
250 /* 277 if (s < EBITMAP_SIZE)
251 * this was the highest map 278 return 0;
252 * within the bitmap 279
253 */ 280 /* drop this node from the bitmap */
254 if (prev) 281 if (!n->next) {
255 e->highbit = prev->startbit + MAPSIZE; 282 /*
256 else 283 * this was the highest map
257 e->highbit = 0; 284 * within the bitmap
258 } 285 */
259 if (prev) 286 if (prev)
260 prev->next = n->next; 287 e->highbit = prev->startbit
288 + EBITMAP_SIZE;
261 else 289 else
262 e->node = n->next; 290 e->highbit = 0;
263
264 kfree(n);
265 } 291 }
292 if (prev)
293 prev->next = n->next;
294 else
295 e->node = n->next;
296 kfree(n);
266 } 297 }
267 return 0; 298 return 0;
268 } 299 }
@@ -277,12 +308,12 @@ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
277 if (!new) 308 if (!new)
278 return -ENOMEM; 309 return -ENOMEM;
279 310
280 new->startbit = bit & ~(MAPSIZE - 1); 311 new->startbit = bit - (bit % EBITMAP_SIZE);
281 new->map = (MAPBIT << (bit - new->startbit)); 312 ebitmap_node_set_bit(new, bit);
282 313
283 if (!n) 314 if (!n)
284 /* this node will be the highest map within the bitmap */ 315 /* this node will be the highest map within the bitmap */
285 e->highbit = new->startbit + MAPSIZE; 316 e->highbit = new->startbit + EBITMAP_SIZE;
286 317
287 if (prev) { 318 if (prev) {
288 new->next = prev->next; 319 new->next = prev->next;
@@ -316,11 +347,11 @@ void ebitmap_destroy(struct ebitmap *e)
316 347
317int ebitmap_read(struct ebitmap *e, void *fp) 348int ebitmap_read(struct ebitmap *e, void *fp)
318{ 349{
319 int rc; 350 struct ebitmap_node *n = NULL;
320 struct ebitmap_node *n, *l; 351 u32 mapunit, count, startbit, index;
352 u64 map;
321 __le32 buf[3]; 353 __le32 buf[3];
322 u32 mapsize, count, i; 354 int rc, i;
323 __le64 map;
324 355
325 ebitmap_init(e); 356 ebitmap_init(e);
326 357
@@ -328,85 +359,88 @@ int ebitmap_read(struct ebitmap *e, void *fp)
328 if (rc < 0) 359 if (rc < 0)
329 goto out; 360 goto out;
330 361
331 mapsize = le32_to_cpu(buf[0]); 362 mapunit = le32_to_cpu(buf[0]);
332 e->highbit = le32_to_cpu(buf[1]); 363 e->highbit = le32_to_cpu(buf[1]);
333 count = le32_to_cpu(buf[2]); 364 count = le32_to_cpu(buf[2]);
334 365
335 if (mapsize != MAPSIZE) { 366 if (mapunit != sizeof(u64) * 8) {
336 printk(KERN_ERR "security: ebitmap: map size %u does not " 367 printk(KERN_ERR "security: ebitmap: map size %u does not "
337 "match my size %Zd (high bit was %d)\n", mapsize, 368 "match my size %Zd (high bit was %d)\n",
338 MAPSIZE, e->highbit); 369 mapunit, sizeof(u64) * 8, e->highbit);
339 goto bad; 370 goto bad;
340 } 371 }
372
373 /* round up e->highbit */
374 e->highbit += EBITMAP_SIZE - 1;
375 e->highbit -= (e->highbit % EBITMAP_SIZE);
376
341 if (!e->highbit) { 377 if (!e->highbit) {
342 e->node = NULL; 378 e->node = NULL;
343 goto ok; 379 goto ok;
344 } 380 }
345 if (e->highbit & (MAPSIZE - 1)) { 381
346 printk(KERN_ERR "security: ebitmap: high bit (%d) is not a "
347 "multiple of the map size (%Zd)\n", e->highbit, MAPSIZE);
348 goto bad;
349 }
350 l = NULL;
351 for (i = 0; i < count; i++) { 382 for (i = 0; i < count; i++) {
352 rc = next_entry(buf, fp, sizeof(u32)); 383 rc = next_entry(&startbit, fp, sizeof(u32));
353 if (rc < 0) { 384 if (rc < 0) {
354 printk(KERN_ERR "security: ebitmap: truncated map\n"); 385 printk(KERN_ERR "security: ebitmap: truncated map\n");
355 goto bad; 386 goto bad;
356 } 387 }
357 n = kzalloc(sizeof(*n), GFP_KERNEL); 388 startbit = le32_to_cpu(startbit);
358 if (!n) {
359 printk(KERN_ERR "security: ebitmap: out of memory\n");
360 rc = -ENOMEM;
361 goto bad;
362 }
363
364 n->startbit = le32_to_cpu(buf[0]);
365 389
366 if (n->startbit & (MAPSIZE - 1)) { 390 if (startbit & (mapunit - 1)) {
367 printk(KERN_ERR "security: ebitmap start bit (%d) is " 391 printk(KERN_ERR "security: ebitmap start bit (%d) is "
368 "not a multiple of the map size (%Zd)\n", 392 "not a multiple of the map unit size (%u)\n",
369 n->startbit, MAPSIZE); 393 startbit, mapunit);
370 goto bad_free; 394 goto bad;
371 } 395 }
372 if (n->startbit > (e->highbit - MAPSIZE)) { 396 if (startbit > e->highbit - mapunit) {
373 printk(KERN_ERR "security: ebitmap start bit (%d) is " 397 printk(KERN_ERR "security: ebitmap start bit (%d) is "
374 "beyond the end of the bitmap (%Zd)\n", 398 "beyond the end of the bitmap (%u)\n",
375 n->startbit, (e->highbit - MAPSIZE)); 399 startbit, (e->highbit - mapunit));
376 goto bad_free; 400 goto bad;
401 }
402
403 if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
404 struct ebitmap_node *tmp;
405 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
406 if (!tmp) {
407 printk(KERN_ERR
408 "security: ebitmap: out of memory\n");
409 rc = -ENOMEM;
410 goto bad;
411 }
412 /* round down */
413 tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
414 if (n) {
415 n->next = tmp;
416 } else {
417 e->node = tmp;
418 }
419 n = tmp;
420 } else if (startbit <= n->startbit) {
421 printk(KERN_ERR "security: ebitmap: start bit %d"
422 " comes after start bit %d\n",
423 startbit, n->startbit);
424 goto bad;
377 } 425 }
426
378 rc = next_entry(&map, fp, sizeof(u64)); 427 rc = next_entry(&map, fp, sizeof(u64));
379 if (rc < 0) { 428 if (rc < 0) {
380 printk(KERN_ERR "security: ebitmap: truncated map\n"); 429 printk(KERN_ERR "security: ebitmap: truncated map\n");
381 goto bad_free; 430 goto bad;
382 } 431 }
383 n->map = le64_to_cpu(map); 432 map = le64_to_cpu(map);
384 433
385 if (!n->map) { 434 index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
386 printk(KERN_ERR "security: ebitmap: null map in " 435 while (map) {
387 "ebitmap (startbit %d)\n", n->startbit); 436 n->maps[index++] = map & (-1UL);
388 goto bad_free; 437 map = EBITMAP_SHIFT_UNIT_SIZE(map);
389 } 438 }
390 if (l) {
391 if (n->startbit <= l->startbit) {
392 printk(KERN_ERR "security: ebitmap: start "
393 "bit %d comes after start bit %d\n",
394 n->startbit, l->startbit);
395 goto bad_free;
396 }
397 l->next = n;
398 } else
399 e->node = n;
400
401 l = n;
402 } 439 }
403
404ok: 440ok:
405 rc = 0; 441 rc = 0;
406out: 442out:
407 return rc; 443 return rc;
408bad_free:
409 kfree(n);
410bad: 444bad:
411 if (!rc) 445 if (!rc)
412 rc = -EINVAL; 446 rc = -EINVAL;
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 1270e34b61c..f283b4367f5 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -16,14 +16,18 @@
16 16
17#include <net/netlabel.h> 17#include <net/netlabel.h>
18 18
19#define MAPTYPE u64 /* portion of bitmap in each node */ 19#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \
20#define MAPSIZE (sizeof(MAPTYPE) * 8) /* number of bits in node bitmap */ 20 / sizeof(unsigned long))
21#define MAPBIT 1ULL /* a bit in the node bitmap */ 21#define EBITMAP_UNIT_SIZE BITS_PER_LONG
22#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
23#define EBITMAP_BIT 1ULL
24#define EBITMAP_SHIFT_UNIT_SIZE(x) \
25 (((x) >> EBITMAP_UNIT_SIZE / 2) >> EBITMAP_UNIT_SIZE / 2)
22 26
23struct ebitmap_node { 27struct ebitmap_node {
24 u32 startbit; /* starting position in the total bitmap */
25 MAPTYPE map; /* this node's portion of the bitmap */
26 struct ebitmap_node *next; 28 struct ebitmap_node *next;
29 unsigned long maps[EBITMAP_UNIT_NUMS];
30 u32 startbit;
27}; 31};
28 32
29struct ebitmap { 33struct ebitmap {
@@ -34,11 +38,17 @@ struct ebitmap {
34#define ebitmap_length(e) ((e)->highbit) 38#define ebitmap_length(e) ((e)->highbit)
35#define ebitmap_startbit(e) ((e)->node ? (e)->node->startbit : 0) 39#define ebitmap_startbit(e) ((e)->node ? (e)->node->startbit : 0)
36 40
37static inline unsigned int ebitmap_start(struct ebitmap *e, 41static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
38 struct ebitmap_node **n) 42 struct ebitmap_node **n)
39{ 43{
40 *n = e->node; 44 unsigned int ofs;
41 return ebitmap_startbit(e); 45
46 for (*n = e->node; *n; *n = (*n)->next) {
47 ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
48 if (ofs < EBITMAP_SIZE)
49 return (*n)->startbit + ofs;
50 }
51 return ebitmap_length(e);
42} 52}
43 53
44static inline void ebitmap_init(struct ebitmap *e) 54static inline void ebitmap_init(struct ebitmap *e)
@@ -46,28 +56,65 @@ static inline void ebitmap_init(struct ebitmap *e)
46 memset(e, 0, sizeof(*e)); 56 memset(e, 0, sizeof(*e));
47} 57}
48 58
49static inline unsigned int ebitmap_next(struct ebitmap_node **n, 59static inline unsigned int ebitmap_next_positive(struct ebitmap *e,
50 unsigned int bit) 60 struct ebitmap_node **n,
61 unsigned int bit)
51{ 62{
52 if ((bit == ((*n)->startbit + MAPSIZE - 1)) && 63 unsigned int ofs;
53 (*n)->next) { 64
54 *n = (*n)->next; 65 ofs = find_next_bit((*n)->maps, EBITMAP_SIZE, bit - (*n)->startbit + 1);
55 return (*n)->startbit; 66 if (ofs < EBITMAP_SIZE)
56 } 67 return ofs + (*n)->startbit;
57 68
58 return (bit+1); 69 for (*n = (*n)->next; *n; *n = (*n)->next) {
70 ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
71 if (ofs < EBITMAP_SIZE)
72 return ofs + (*n)->startbit;
73 }
74 return ebitmap_length(e);
59} 75}
60 76
61static inline int ebitmap_node_get_bit(struct ebitmap_node * n, 77#define EBITMAP_NODE_INDEX(node, bit) \
78 (((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
79#define EBITMAP_NODE_OFFSET(node, bit) \
80 (((bit) - (node)->startbit) % EBITMAP_UNIT_SIZE)
81
82static inline int ebitmap_node_get_bit(struct ebitmap_node *n,
62 unsigned int bit) 83 unsigned int bit)
63{ 84{
64 if (n->map & (MAPBIT << (bit - n->startbit))) 85 unsigned int index = EBITMAP_NODE_INDEX(n, bit);
86 unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
87
88 BUG_ON(index >= EBITMAP_UNIT_NUMS);
89 if ((n->maps[index] & (EBITMAP_BIT << ofs)))
65 return 1; 90 return 1;
66 return 0; 91 return 0;
67} 92}
68 93
69#define ebitmap_for_each_bit(e, n, bit) \ 94static inline void ebitmap_node_set_bit(struct ebitmap_node *n,
70 for (bit = ebitmap_start(e, &n); bit < ebitmap_length(e); bit = ebitmap_next(&n, bit)) \ 95 unsigned int bit)
96{
97 unsigned int index = EBITMAP_NODE_INDEX(n, bit);
98 unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
99
100 BUG_ON(index >= EBITMAP_UNIT_NUMS);
101 n->maps[index] |= (EBITMAP_BIT << ofs);
102}
103
104static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
105 unsigned int bit)
106{
107 unsigned int index = EBITMAP_NODE_INDEX(n, bit);
108 unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
109
110 BUG_ON(index >= EBITMAP_UNIT_NUMS);
111 n->maps[index] &= ~(EBITMAP_BIT << ofs);
112}
113
114#define ebitmap_for_each_positive_bit(e, n, bit) \
115 for (bit = ebitmap_start_positive(e, &n); \
116 bit < ebitmap_length(e); \
117 bit = ebitmap_next_positive(e, &n, bit)) \
71 118
72int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2); 119int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
73int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src); 120int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 4a8bab2f3c7..9a11deaaa9e 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -34,7 +34,9 @@
34 */ 34 */
35int mls_compute_context_len(struct context * context) 35int mls_compute_context_len(struct context * context)
36{ 36{
37 int i, l, len, range; 37 int i, l, len, head, prev;
38 char *nm;
39 struct ebitmap *e;
38 struct ebitmap_node *node; 40 struct ebitmap_node *node;
39 41
40 if (!selinux_mls_enabled) 42 if (!selinux_mls_enabled)
@@ -42,31 +44,33 @@ int mls_compute_context_len(struct context * context)
42 44
43 len = 1; /* for the beginning ":" */ 45 len = 1; /* for the beginning ":" */
44 for (l = 0; l < 2; l++) { 46 for (l = 0; l < 2; l++) {
45 range = 0; 47 int index_sens = context->range.level[l].sens;
46 len += strlen(policydb.p_sens_val_to_name[context->range.level[l].sens - 1]); 48 len += strlen(policydb.p_sens_val_to_name[index_sens - 1]);
47
48 ebitmap_for_each_bit(&context->range.level[l].cat, node, i) {
49 if (ebitmap_node_get_bit(node, i)) {
50 if (range) {
51 range++;
52 continue;
53 }
54 49
55 len += strlen(policydb.p_cat_val_to_name[i]) + 1; 50 /* categories */
56 range++; 51 head = -2;
57 } else { 52 prev = -2;
58 if (range > 1) 53 e = &context->range.level[l].cat;
59 len += strlen(policydb.p_cat_val_to_name[i - 1]) + 1; 54 ebitmap_for_each_positive_bit(e, node, i) {
60 range = 0; 55 if (i - prev > 1) {
56 /* one or more negative bits are skipped */
57 if (head != prev) {
58 nm = policydb.p_cat_val_to_name[prev];
59 len += strlen(nm) + 1;
60 }
61 nm = policydb.p_cat_val_to_name[i];
62 len += strlen(nm) + 1;
63 head = i;
61 } 64 }
65 prev = i;
66 }
67 if (prev != head) {
68 nm = policydb.p_cat_val_to_name[prev];
69 len += strlen(nm) + 1;
62 } 70 }
63 /* Handle case where last category is the end of range */
64 if (range > 1)
65 len += strlen(policydb.p_cat_val_to_name[i - 1]) + 1;
66
67 if (l == 0) { 71 if (l == 0) {
68 if (mls_level_eq(&context->range.level[0], 72 if (mls_level_eq(&context->range.level[0],
69 &context->range.level[1])) 73 &context->range.level[1]))
70 break; 74 break;
71 else 75 else
72 len++; 76 len++;
@@ -84,8 +88,9 @@ int mls_compute_context_len(struct context * context)
84void mls_sid_to_context(struct context *context, 88void mls_sid_to_context(struct context *context,
85 char **scontext) 89 char **scontext)
86{ 90{
87 char *scontextp; 91 char *scontextp, *nm;
88 int i, l, range, wrote_sep; 92 int i, l, head, prev;
93 struct ebitmap *e;
89 struct ebitmap_node *node; 94 struct ebitmap_node *node;
90 95
91 if (!selinux_mls_enabled) 96 if (!selinux_mls_enabled)
@@ -97,61 +102,54 @@ void mls_sid_to_context(struct context *context,
97 scontextp++; 102 scontextp++;
98 103
99 for (l = 0; l < 2; l++) { 104 for (l = 0; l < 2; l++) {
100 range = 0;
101 wrote_sep = 0;
102 strcpy(scontextp, 105 strcpy(scontextp,
103 policydb.p_sens_val_to_name[context->range.level[l].sens - 1]); 106 policydb.p_sens_val_to_name[context->range.level[l].sens - 1]);
104 scontextp += strlen(policydb.p_sens_val_to_name[context->range.level[l].sens - 1]); 107 scontextp += strlen(scontextp);
105 108
106 /* categories */ 109 /* categories */
107 ebitmap_for_each_bit(&context->range.level[l].cat, node, i) { 110 head = -2;
108 if (ebitmap_node_get_bit(node, i)) { 111 prev = -2;
109 if (range) { 112 e = &context->range.level[l].cat;
110 range++; 113 ebitmap_for_each_positive_bit(e, node, i) {
111 continue; 114 if (i - prev > 1) {
112 } 115 /* one or more negative bits are skipped */
113 116 if (prev != head) {
114 if (!wrote_sep) { 117 if (prev - head > 1)
115 *scontextp++ = ':';
116 wrote_sep = 1;
117 } else
118 *scontextp++ = ',';
119 strcpy(scontextp, policydb.p_cat_val_to_name[i]);
120 scontextp += strlen(policydb.p_cat_val_to_name[i]);
121 range++;
122 } else {
123 if (range > 1) {
124 if (range > 2)
125 *scontextp++ = '.'; 118 *scontextp++ = '.';
126 else 119 else
127 *scontextp++ = ','; 120 *scontextp++ = ',';
128 121 nm = policydb.p_cat_val_to_name[prev];
129 strcpy(scontextp, policydb.p_cat_val_to_name[i - 1]); 122 strcpy(scontextp, nm);
130 scontextp += strlen(policydb.p_cat_val_to_name[i - 1]); 123 scontextp += strlen(nm);
131 } 124 }
132 range = 0; 125 if (prev < 0)
126 *scontextp++ = ':';
127 else
128 *scontextp++ = ',';
129 nm = policydb.p_cat_val_to_name[i];
130 strcpy(scontextp, nm);
131 scontextp += strlen(nm);
132 head = i;
133 } 133 }
134 prev = i;
134 } 135 }
135 136
136 /* Handle case where last category is the end of range */ 137 if (prev != head) {
137 if (range > 1) { 138 if (prev - head > 1)
138 if (range > 2)
139 *scontextp++ = '.'; 139 *scontextp++ = '.';
140 else 140 else
141 *scontextp++ = ','; 141 *scontextp++ = ',';
142 142 nm = policydb.p_cat_val_to_name[prev];
143 strcpy(scontextp, policydb.p_cat_val_to_name[i - 1]); 143 strcpy(scontextp, nm);
144 scontextp += strlen(policydb.p_cat_val_to_name[i - 1]); 144 scontextp += strlen(nm);
145 } 145 }
146 146
147 if (l == 0) { 147 if (l == 0) {
148 if (mls_level_eq(&context->range.level[0], 148 if (mls_level_eq(&context->range.level[0],
149 &context->range.level[1])) 149 &context->range.level[1]))
150 break; 150 break;
151 else { 151 else
152 *scontextp = '-'; 152 *scontextp++ = '-';
153 scontextp++;
154 }
155 } 153 }
156 } 154 }
157 155
@@ -190,17 +188,15 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
190 if (!levdatum) 188 if (!levdatum)
191 return 0; 189 return 0;
192 190
193 ebitmap_for_each_bit(&c->range.level[l].cat, node, i) { 191 ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) {
194 if (ebitmap_node_get_bit(node, i)) { 192 if (i > p->p_cats.nprim)
195 if (i > p->p_cats.nprim) 193 return 0;
196 return 0; 194 if (!ebitmap_get_bit(&levdatum->level->cat, i))
197 if (!ebitmap_get_bit(&levdatum->level->cat, i)) 195 /*
198 /* 196 * Category may not be associated with
199 * Category may not be associated with 197 * sensitivity in low level.
200 * sensitivity in low level. 198 */
201 */ 199 return 0;
202 return 0;
203 }
204 } 200 }
205 } 201 }
206 202
@@ -485,18 +481,16 @@ int mls_convert_context(struct policydb *oldp,
485 c->range.level[l].sens = levdatum->level->sens; 481 c->range.level[l].sens = levdatum->level->sens;
486 482
487 ebitmap_init(&bitmap); 483 ebitmap_init(&bitmap);
488 ebitmap_for_each_bit(&c->range.level[l].cat, node, i) { 484 ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) {
489 if (ebitmap_node_get_bit(node, i)) { 485 int rc;
490 int rc; 486
491 487 catdatum = hashtab_search(newp->p_cats.table,
492 catdatum = hashtab_search(newp->p_cats.table, 488 oldp->p_cat_val_to_name[i]);
493 oldp->p_cat_val_to_name[i]); 489 if (!catdatum)
494 if (!catdatum) 490 return -EINVAL;
495 return -EINVAL; 491 rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
496 rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1); 492 if (rc)
497 if (rc) 493 return rc;
498 return rc;
499 }
500 } 494 }
501 ebitmap_destroy(&c->range.level[l].cat); 495 ebitmap_destroy(&c->range.level[l].cat);
502 c->range.level[l].cat = bitmap; 496 c->range.level[l].cat = bitmap;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f05f97a2bc3..539828b229b 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -177,18 +177,15 @@ static int policydb_init(struct policydb *p)
177 177
178 rc = roles_init(p); 178 rc = roles_init(p);
179 if (rc) 179 if (rc)
180 goto out_free_avtab; 180 goto out_free_symtab;
181 181
182 rc = cond_policydb_init(p); 182 rc = cond_policydb_init(p);
183 if (rc) 183 if (rc)
184 goto out_free_avtab; 184 goto out_free_symtab;
185 185
186out: 186out:
187 return rc; 187 return rc;
188 188
189out_free_avtab:
190 avtab_destroy(&p->te_avtab);
191
192out_free_symtab: 189out_free_symtab:
193 for (i = 0; i < SYM_NUM; i++) 190 for (i = 0; i < SYM_NUM; i++)
194 hashtab_destroy(p->symtab[i].table); 191 hashtab_destroy(p->symtab[i].table);
@@ -677,6 +674,8 @@ void policydb_destroy(struct policydb *p)
677 } 674 }
678 kfree(p->type_attr_map); 675 kfree(p->type_attr_map);
679 676
677 kfree(p->undefined_perms);
678
680 return; 679 return;
681} 680}
682 681
@@ -1530,6 +1529,8 @@ int policydb_read(struct policydb *p, void *fp)
1530 goto bad; 1529 goto bad;
1531 } 1530 }
1532 } 1531 }
1532 p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
1533 p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
1533 1534
1534 info = policydb_lookup_compat(p->policyvers); 1535 info = policydb_lookup_compat(p->policyvers);
1535 if (!info) { 1536 if (!info) {
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 8319d5ff594..844d310f4f1 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -242,6 +242,10 @@ struct policydb {
242 struct ebitmap *type_attr_map; 242 struct ebitmap *type_attr_map;
243 243
244 unsigned int policyvers; 244 unsigned int policyvers;
245
246 unsigned int reject_unknown : 1;
247 unsigned int allow_unknown : 1;
248 u32 *undefined_perms;
245}; 249};
246 250
247extern void policydb_destroy(struct policydb *p); 251extern void policydb_destroy(struct policydb *p);
@@ -253,6 +257,10 @@ extern int policydb_read(struct policydb *p, void *fp);
253 257
254#define POLICYDB_CONFIG_MLS 1 258#define POLICYDB_CONFIG_MLS 1
255 259
260/* the config flags related to unknown classes/perms are bits 2 and 3 */
261#define REJECT_UNKNOWN 0x00000002
262#define ALLOW_UNKNOWN 0x00000004
263
256#define OBJECT_R "object_r" 264#define OBJECT_R "object_r"
257#define OBJECT_R_VAL 1 265#define OBJECT_R_VAL 1
258 266
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 6100fc02305..d572dc908f3 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -292,6 +292,7 @@ static int context_struct_compute_av(struct context *scontext,
292 struct class_datum *tclass_datum; 292 struct class_datum *tclass_datum;
293 struct ebitmap *sattr, *tattr; 293 struct ebitmap *sattr, *tattr;
294 struct ebitmap_node *snode, *tnode; 294 struct ebitmap_node *snode, *tnode;
295 const struct selinux_class_perm *kdefs = &selinux_class_perm;
295 unsigned int i, j; 296 unsigned int i, j;
296 297
297 /* 298 /*
@@ -305,13 +306,6 @@ static int context_struct_compute_av(struct context *scontext,
305 tclass <= SECCLASS_NETLINK_DNRT_SOCKET) 306 tclass <= SECCLASS_NETLINK_DNRT_SOCKET)
306 tclass = SECCLASS_NETLINK_SOCKET; 307 tclass = SECCLASS_NETLINK_SOCKET;
307 308
308 if (!tclass || tclass > policydb.p_classes.nprim) {
309 printk(KERN_ERR "security_compute_av: unrecognized class %d\n",
310 tclass);
311 return -EINVAL;
312 }
313 tclass_datum = policydb.class_val_to_struct[tclass - 1];
314
315 /* 309 /*
316 * Initialize the access vectors to the default values. 310 * Initialize the access vectors to the default values.
317 */ 311 */
@@ -322,6 +316,36 @@ static int context_struct_compute_av(struct context *scontext,
322 avd->seqno = latest_granting; 316 avd->seqno = latest_granting;
323 317
324 /* 318 /*
319 * Check for all the invalid cases.
320 * - tclass 0
321 * - tclass > policy and > kernel
322 * - tclass > policy but is a userspace class
323 * - tclass > policy but we do not allow unknowns
324 */
325 if (unlikely(!tclass))
326 goto inval_class;
327 if (unlikely(tclass > policydb.p_classes.nprim))
328 if (tclass > kdefs->cts_len ||
329 !kdefs->class_to_string[tclass - 1] ||
330 !policydb.allow_unknown)
331 goto inval_class;
332
333 /*
334 * Kernel class and we allow unknown so pad the allow decision
335 * the pad will be all 1 for unknown classes.
336 */
337 if (tclass <= kdefs->cts_len && policydb.allow_unknown)
338 avd->allowed = policydb.undefined_perms[tclass - 1];
339
340 /*
341 * Not in policy. Since decision is completed (all 1 or all 0) return.
342 */
343 if (unlikely(tclass > policydb.p_classes.nprim))
344 return 0;
345
346 tclass_datum = policydb.class_val_to_struct[tclass - 1];
347
348 /*
325 * If a specific type enforcement rule was defined for 349 * If a specific type enforcement rule was defined for
326 * this permission check, then use it. 350 * this permission check, then use it.
327 */ 351 */
@@ -329,12 +353,8 @@ static int context_struct_compute_av(struct context *scontext,
329 avkey.specified = AVTAB_AV; 353 avkey.specified = AVTAB_AV;
330 sattr = &policydb.type_attr_map[scontext->type - 1]; 354 sattr = &policydb.type_attr_map[scontext->type - 1];
331 tattr = &policydb.type_attr_map[tcontext->type - 1]; 355 tattr = &policydb.type_attr_map[tcontext->type - 1];
332 ebitmap_for_each_bit(sattr, snode, i) { 356 ebitmap_for_each_positive_bit(sattr, snode, i) {
333 if (!ebitmap_node_get_bit(snode, i)) 357 ebitmap_for_each_positive_bit(tattr, tnode, j) {
334 continue;
335 ebitmap_for_each_bit(tattr, tnode, j) {
336 if (!ebitmap_node_get_bit(tnode, j))
337 continue;
338 avkey.source_type = i + 1; 358 avkey.source_type = i + 1;
339 avkey.target_type = j + 1; 359 avkey.target_type = j + 1;
340 for (node = avtab_search_node(&policydb.te_avtab, &avkey); 360 for (node = avtab_search_node(&policydb.te_avtab, &avkey);
@@ -387,6 +407,10 @@ static int context_struct_compute_av(struct context *scontext,
387 } 407 }
388 408
389 return 0; 409 return 0;
410
411inval_class:
412 printk(KERN_ERR "%s: unrecognized class %d\n", __FUNCTION__, tclass);
413 return -EINVAL;
390} 414}
391 415
392static int security_validtrans_handle_fail(struct context *ocontext, 416static int security_validtrans_handle_fail(struct context *ocontext,
@@ -1054,6 +1078,13 @@ static int validate_classes(struct policydb *p)
1054 const char *def_class, *def_perm, *pol_class; 1078 const char *def_class, *def_perm, *pol_class;
1055 struct symtab *perms; 1079 struct symtab *perms;
1056 1080
1081 if (p->allow_unknown) {
1082 u32 num_classes = kdefs->cts_len;
1083 p->undefined_perms = kcalloc(num_classes, sizeof(u32), GFP_KERNEL);
1084 if (!p->undefined_perms)
1085 return -ENOMEM;
1086 }
1087
1057 for (i = 1; i < kdefs->cts_len; i++) { 1088 for (i = 1; i < kdefs->cts_len; i++) {
1058 def_class = kdefs->class_to_string[i]; 1089 def_class = kdefs->class_to_string[i];
1059 if (!def_class) 1090 if (!def_class)
@@ -1062,6 +1093,10 @@ static int validate_classes(struct policydb *p)
1062 printk(KERN_INFO 1093 printk(KERN_INFO
1063 "security: class %s not defined in policy\n", 1094 "security: class %s not defined in policy\n",
1064 def_class); 1095 def_class);
1096 if (p->reject_unknown)
1097 return -EINVAL;
1098 if (p->allow_unknown)
1099 p->undefined_perms[i-1] = ~0U;
1065 continue; 1100 continue;
1066 } 1101 }
1067 pol_class = p->p_class_val_to_name[i-1]; 1102 pol_class = p->p_class_val_to_name[i-1];
@@ -1087,12 +1122,16 @@ static int validate_classes(struct policydb *p)
1087 printk(KERN_INFO 1122 printk(KERN_INFO
1088 "security: permission %s in class %s not defined in policy\n", 1123 "security: permission %s in class %s not defined in policy\n",
1089 def_perm, pol_class); 1124 def_perm, pol_class);
1125 if (p->reject_unknown)
1126 return -EINVAL;
1127 if (p->allow_unknown)
1128 p->undefined_perms[class_val-1] |= perm_val;
1090 continue; 1129 continue;
1091 } 1130 }
1092 perdatum = hashtab_search(perms->table, def_perm); 1131 perdatum = hashtab_search(perms->table, def_perm);
1093 if (perdatum == NULL) { 1132 if (perdatum == NULL) {
1094 printk(KERN_ERR 1133 printk(KERN_ERR
1095 "security: permission %s in class %s not found in policy\n", 1134 "security: permission %s in class %s not found in policy, bad policy\n",
1096 def_perm, pol_class); 1135 def_perm, pol_class);
1097 return -EINVAL; 1136 return -EINVAL;
1098 } 1137 }
@@ -1130,12 +1169,16 @@ static int validate_classes(struct policydb *p)
1130 printk(KERN_INFO 1169 printk(KERN_INFO
1131 "security: permission %s in class %s not defined in policy\n", 1170 "security: permission %s in class %s not defined in policy\n",
1132 def_perm, pol_class); 1171 def_perm, pol_class);
1172 if (p->reject_unknown)
1173 return -EINVAL;
1174 if (p->allow_unknown)
1175 p->undefined_perms[class_val-1] |= (1 << j);
1133 continue; 1176 continue;
1134 } 1177 }
1135 perdatum = hashtab_search(perms->table, def_perm); 1178 perdatum = hashtab_search(perms->table, def_perm);
1136 if (perdatum == NULL) { 1179 if (perdatum == NULL) {
1137 printk(KERN_ERR 1180 printk(KERN_ERR
1138 "security: permission %s in class %s not found in policy\n", 1181 "security: permission %s in class %s not found in policy, bad policy\n",
1139 def_perm, pol_class); 1182 def_perm, pol_class);
1140 return -EINVAL; 1183 return -EINVAL;
1141 } 1184 }
@@ -1621,14 +1664,10 @@ int security_get_user_sids(u32 fromsid,
1621 goto out_unlock; 1664 goto out_unlock;
1622 } 1665 }
1623 1666
1624 ebitmap_for_each_bit(&user->roles, rnode, i) { 1667 ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
1625 if (!ebitmap_node_get_bit(rnode, i))
1626 continue;
1627 role = policydb.role_val_to_struct[i]; 1668 role = policydb.role_val_to_struct[i];
1628 usercon.role = i+1; 1669 usercon.role = i+1;
1629 ebitmap_for_each_bit(&role->types, tnode, j) { 1670 ebitmap_for_each_positive_bit(&role->types, tnode, j) {
1630 if (!ebitmap_node_get_bit(tnode, j))
1631 continue;
1632 usercon.type = j+1; 1671 usercon.type = j+1;
1633 1672
1634 if (mls_setup_user_range(fromcon, user, &usercon)) 1673 if (mls_setup_user_range(fromcon, user, &usercon))
@@ -2102,6 +2141,16 @@ err:
2102 return rc; 2141 return rc;
2103} 2142}
2104 2143
2144int security_get_reject_unknown(void)
2145{
2146 return policydb.reject_unknown;
2147}
2148
2149int security_get_allow_unknown(void)
2150{
2151 return policydb.allow_unknown;
2152}
2153
2105struct selinux_audit_rule { 2154struct selinux_audit_rule {
2106 u32 au_seqno; 2155 u32 au_seqno;
2107 struct context au_ctxt; 2156 struct context au_ctxt;