diff options
502 files changed, 56610 insertions, 2446 deletions
@@ -598,6 +598,11 @@ S: Tamsui town, Taipei county, | |||
598 | S: Taiwan 251 | 598 | S: Taiwan 251 |
599 | S: Republic of China | 599 | S: Republic of China |
600 | 600 | ||
601 | N: Reinette Chatre | ||
602 | E: reinette.chatre@intel.com | ||
603 | D: WiMedia Link Protocol implementation | ||
604 | D: UWB stack bits and pieces | ||
605 | |||
601 | N: Michael Elizabeth Chastain | 606 | N: Michael Elizabeth Chastain |
602 | E: mec@shout.net | 607 | E: mec@shout.net |
603 | D: Configure, Menuconfig, xconfig | 608 | D: Configure, Menuconfig, xconfig |
@@ -2695,6 +2700,12 @@ S: Demonstratsii 8-382 | |||
2695 | S: Tula 300000 | 2700 | S: Tula 300000 |
2696 | S: Russia | 2701 | S: Russia |
2697 | 2702 | ||
2703 | N: Inaky Perez-Gonzalez | ||
2704 | E: inaky.perez-gonzalez@intel.com | ||
2705 | D: UWB stack, HWA-RC driver and HWA-HC drivers | ||
2706 | D: Wireless USB additions to the USB stack | ||
2707 | D: WiMedia Link Protocol bits and pieces | ||
2708 | |||
2698 | N: Gordon Peters | 2709 | N: Gordon Peters |
2699 | E: GordPeters@smarttech.com | 2710 | E: GordPeters@smarttech.com |
2700 | D: Isochronous receive for IEEE 1394 driver (OHCI module). | 2711 | D: Isochronous receive for IEEE 1394 driver (OHCI module). |
diff --git a/Documentation/ABI/testing/sysfs-bus-umc b/Documentation/ABI/testing/sysfs-bus-umc new file mode 100644 index 000000000000..948fec412446 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-umc | |||
@@ -0,0 +1,28 @@ | |||
1 | What: /sys/bus/umc/ | ||
2 | Date: July 2008 | ||
3 | KernelVersion: 2.6.27 | ||
4 | Contact: David Vrabel <david.vrabel@csr.com> | ||
5 | Description: | ||
6 | The Wireless Host Controller Interface (WHCI) | ||
7 | specification describes a PCI-based device with | ||
8 | multiple capabilities; the UWB Multi-interface | ||
9 | Controller (UMC). | ||
10 | |||
11 | The umc bus presents each of the individual | ||
12 | capabilities as a device. | ||
13 | |||
14 | What: /sys/bus/umc/devices/.../capability_id | ||
15 | Date: July 2008 | ||
16 | KernelVersion: 2.6.27 | ||
17 | Contact: David Vrabel <david.vrabel@csr.com> | ||
18 | Description: | ||
19 | The ID of this capability, with 0 being the radio | ||
20 | controller capability. | ||
21 | |||
22 | What: /sys/bus/umc/devices/.../version | ||
23 | Date: July 2008 | ||
24 | KernelVersion: 2.6.27 | ||
25 | Contact: David Vrabel <david.vrabel@csr.com> | ||
26 | Description: | ||
27 | The specification version this capability's hardware | ||
28 | interface complies with. | ||
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb index df6c8a0159f1..7772928ee48f 100644 --- a/Documentation/ABI/testing/sysfs-bus-usb +++ b/Documentation/ABI/testing/sysfs-bus-usb | |||
@@ -101,3 +101,46 @@ Description: | |||
101 | Users: | 101 | Users: |
102 | USB PM tool | 102 | USB PM tool |
103 | git://git.moblin.org/users/sarah/usb-pm-tool/ | 103 | git://git.moblin.org/users/sarah/usb-pm-tool/ |
104 | |||
105 | What: /sys/bus/usb/device/.../authorized | ||
106 | Date: July 2008 | ||
107 | KernelVersion: 2.6.26 | ||
108 | Contact: David Vrabel <david.vrabel@csr.com> | ||
109 | Description: | ||
110 | Authorized devices are available for use by device | ||
111 | drivers, non-authorized ones are not. By default, wired | ||
112 | USB devices are authorized. | ||
113 | |||
114 | Certified Wireless USB devices are not authorized | ||
115 | initially and should be (by writing 1) after the | ||
116 | device has been authenticated. | ||
117 | |||
118 | What: /sys/bus/usb/device/.../wusb_cdid | ||
119 | Date: July 2008 | ||
120 | KernelVersion: 2.6.27 | ||
121 | Contact: David Vrabel <david.vrabel@csr.com> | ||
122 | Description: | ||
123 | For Certified Wireless USB devices only. | ||
124 | |||
125 | A device's CDID, as 16 space-separated hex octets. | ||
126 | |||
127 | What: /sys/bus/usb/device/.../wusb_ck | ||
128 | Date: July 2008 | ||
129 | KernelVersion: 2.6.27 | ||
130 | Contact: David Vrabel <david.vrabel@csr.com> | ||
131 | Description: | ||
132 | For Certified Wireless USB devices only. | ||
133 | |||
134 | Write the device's connection key (CK) to start the | ||
135 | authentication of the device. The CK is 16 | ||
136 | space-separated hex octets. | ||
137 | |||
138 | What: /sys/bus/usb/device/.../wusb_disconnect | ||
139 | Date: July 2008 | ||
140 | KernelVersion: 2.6.27 | ||
141 | Contact: David Vrabel <david.vrabel@csr.com> | ||
142 | Description: | ||
143 | For Certified Wireless USB devices only. | ||
144 | |||
145 | Write a 1 to force the device to disconnect | ||
146 | (equivalent to unplugging a wired USB device). | ||
diff --git a/Documentation/ABI/testing/sysfs-class-usb_host b/Documentation/ABI/testing/sysfs-class-usb_host new file mode 100644 index 000000000000..46b66ad1f1b4 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-usb_host | |||
@@ -0,0 +1,25 @@ | |||
1 | What: /sys/class/usb_host/usb_hostN/wusb_chid | ||
2 | Date: July 2008 | ||
3 | KernelVersion: 2.6.27 | ||
4 | Contact: David Vrabel <david.vrabel@csr.com> | ||
5 | Description: | ||
6 | Write the CHID (16 space-separated hex octets) for this host controller. | ||
7 | This starts the host controller, allowing it to accept connection from | ||
8 | WUSB devices. | ||
9 | |||
10 | Set an all zero CHID to stop the host controller. | ||
11 | |||
12 | What: /sys/class/usb_host/usb_hostN/wusb_trust_timeout | ||
13 | Date: July 2008 | ||
14 | KernelVersion: 2.6.27 | ||
15 | Contact: David Vrabel <david.vrabel@csr.com> | ||
16 | Description: | ||
17 | Devices that haven't sent a WUSB packet to the host | ||
18 | within 'wusb_trust_timeout' ms are considered to have | ||
19 | disconnected and are removed. The default value of | ||
20 | 4000 ms is the value required by the WUSB | ||
21 | specification. | ||
22 | |||
23 | Since this relates to security (specifically, the | ||
24 | lifetime of PTKs and GTKs) it should not be changed | ||
25 | from the default. | ||
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc b/Documentation/ABI/testing/sysfs-class-uwb_rc new file mode 100644 index 000000000000..a0d18dbeb7a9 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-uwb_rc | |||
@@ -0,0 +1,144 @@ | |||
1 | What: /sys/class/uwb_rc | ||
2 | Date: July 2008 | ||
3 | KernelVersion: 2.6.27 | ||
4 | Contact: linux-usb@vger.kernel.org | ||
5 | Description: | ||
6 | Interfaces for WiMedia Ultra Wideband Common Radio | ||
7 | Platform (UWB) radio controllers. | ||
8 | |||
9 | Familiarity with the ECMA-368 'High Rate Ultra | ||
10 | Wideband MAC and PHY Specification' is assumed. | ||
11 | |||
12 | What: /sys/class/uwb_rc/beacon_timeout_ms | ||
13 | Date: July 2008 | ||
14 | KernelVersion: 2.6.27 | ||
15 | Description: | ||
16 | If no beacons are received from a device for at least | ||
17 | this time, the device will be considered to have gone | ||
18 | and it will be removed. The default is 3 superframes | ||
19 | (~197 ms) as required by the specification. | ||
20 | |||
21 | What: /sys/class/uwb_rc/uwbN/ | ||
22 | Date: July 2008 | ||
23 | KernelVersion: 2.6.27 | ||
24 | Contact: linux-usb@vger.kernel.org | ||
25 | Description: | ||
26 | An individual UWB radio controller. | ||
27 | |||
28 | What: /sys/class/uwb_rc/uwbN/beacon | ||
29 | Date: July 2008 | ||
30 | KernelVersion: 2.6.27 | ||
31 | Contact: linux-usb@vger.kernel.org | ||
32 | Description: | ||
33 | Write: | ||
34 | |||
35 | <channel> [<bpst offset>] | ||
36 | |||
37 | to start beaconing on a specific channel, or stop | ||
38 | beaconing if <channel> is -1. Valid channels depend | ||
39 | on the radio controller's supported band groups. | ||
40 | |||
41 | <bpst offset> may be used to try and join a specific | ||
42 | beacon group if more than one was found during a scan. | ||
43 | |||
44 | What: /sys/class/uwb_rc/uwbN/scan | ||
45 | Date: July 2008 | ||
46 | KernelVersion: 2.6.27 | ||
47 | Contact: linux-usb@vger.kernel.org | ||
48 | Description: | ||
49 | Write: | ||
50 | |||
51 | <channel> <type> [<bpst offset>] | ||
52 | |||
53 | to start (or stop) scanning on a channel. <type> is one of: | ||
54 | 0 - scan | ||
55 | 1 - scan outside BP | ||
56 | 2 - scan while inactive | ||
57 | 3 - scanning disabled | ||
58 | 4 - scan (with start time of <bpst offset>) | ||
59 | |||
60 | What: /sys/class/uwb_rc/uwbN/mac_address | ||
61 | Date: July 2008 | ||
62 | KernelVersion: 2.6.27 | ||
63 | Contact: linux-usb@vger.kernel.org | ||
64 | Description: | ||
65 | The EUI-48, in colon-separated hex octets, for this | ||
66 | radio controller. A write will change the radio | ||
67 | controller's EUI-48 but only do so while the device is | ||
68 | not beaconing or scanning. | ||
69 | |||
70 | What: /sys/class/uwb_rc/uwbN/wusbhc | ||
71 | Date: July 2008 | ||
72 | KernelVersion: 2.6.27 | ||
73 | Contact: linux-usb@vger.kernel.org | ||
74 | Description: | ||
75 | A symlink to the device (if any) of the WUSB Host | ||
76 | Controller PAL using this radio controller. | ||
77 | |||
78 | What: /sys/class/uwb_rc/uwbN/<EUI-48>/ | ||
79 | Date: July 2008 | ||
80 | KernelVersion: 2.6.27 | ||
81 | Contact: linux-usb@vger.kernel.org | ||
82 | Description: | ||
83 | A neighbour UWB device that has either been detected | ||
84 | as part of a scan or is a member of the radio | ||
85 | controller's beacon group. | ||
86 | |||
87 | What: /sys/class/uwb_rc/uwbN/<EUI-48>/BPST | ||
88 | Date: July 2008 | ||
89 | KernelVersion: 2.6.27 | ||
90 | Contact: linux-usb@vger.kernel.org | ||
91 | Description: | ||
92 | The time (using the radio controller's internal 1 ms | ||
93 | interval superframe timer) at which the last beacon | ||
94 | from this device was received. | ||
95 | |||
96 | What: /sys/class/uwb_rc/uwbN/<EUI-48>/DevAddr | ||
97 | Date: July 2008 | ||
98 | KernelVersion: 2.6.27 | ||
99 | Contact: linux-usb@vger.kernel.org | ||
100 | Description: | ||
101 | The current DevAddr of this device in colon separated | ||
102 | hex octets. | ||
103 | |||
104 | What: /sys/class/uwb_rc/uwbN/<EUI-48>/EUI_48 | ||
105 | Date: July 2008 | ||
106 | KernelVersion: 2.6.27 | ||
107 | Contact: linux-usb@vger.kernel.org | ||
108 | Description: | ||
109 | |||
110 | The EUI-48 of this device in colon separated hex | ||
111 | octets. | ||
112 | |||
113 | What: /sys/class/uwb_rc/uwbN/<EUI-48>/BPST | ||
114 | Date: July 2008 | ||
115 | KernelVersion: 2.6.27 | ||
116 | Contact: linux-usb@vger.kernel.org | ||
117 | Description: | ||
118 | |||
119 | What: /sys/class/uwb_rc/uwbN/<EUI-48>/IEs | ||
120 | Date: July 2008 | ||
121 | KernelVersion: 2.6.27 | ||
122 | Contact: linux-usb@vger.kernel.org | ||
123 | Description: | ||
124 | The latest IEs included in this device's beacon, in | ||
125 | space separated hex octets with one IE per line. | ||
126 | |||
127 | What: /sys/class/uwb_rc/uwbN/<EUI-48>/LQE | ||
128 | Date: July 2008 | ||
129 | KernelVersion: 2.6.27 | ||
130 | Contact: linux-usb@vger.kernel.org | ||
131 | Description: | ||
132 | Link Quality Estimate - the Signal to Noise Ratio | ||
133 | (SNR) of all packets received from this device in dB. | ||
134 | This gives an estimate on a suitable PHY rate. Refer | ||
135 | to [ECMA-368] section 13.3 for more details. | ||
136 | |||
137 | What: /sys/class/uwb_rc/uwbN/<EUI-48>/RSSI | ||
138 | Date: July 2008 | ||
139 | KernelVersion: 2.6.27 | ||
140 | Contact: linux-usb@vger.kernel.org | ||
141 | Description: | ||
142 | Received Signal Strength Indication - the strength of | ||
143 | the received signal in dB. LQE is a more useful | ||
144 | measure of the radio link quality. | ||
diff --git a/Documentation/ABI/testing/sysfs-wusb_cbaf b/Documentation/ABI/testing/sysfs-wusb_cbaf new file mode 100644 index 000000000000..a99c5f86a37a --- /dev/null +++ b/Documentation/ABI/testing/sysfs-wusb_cbaf | |||
@@ -0,0 +1,100 @@ | |||
1 | What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_* | ||
2 | Date: August 2008 | ||
3 | KernelVersion: 2.6.27 | ||
4 | Contact: David Vrabel <david.vrabel@csr.com> | ||
5 | Description: | ||
6 | Various files for managing Cable Based Association of | ||
7 | (wireless) USB devices. | ||
8 | |||
9 | The sequence of operations should be: | ||
10 | |||
11 | 1. Device is plugged in. | ||
12 | |||
13 | 2. The connection manager (CM) sees a device with CBA capability. | ||
14 | (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE). | ||
15 | |||
16 | 3. The CM writes the host name, supported band groups, | ||
17 | and the CHID (host ID) into the wusb_host_name, | ||
18 | wusb_host_band_groups and wusb_chid files. These | ||
19 | get sent to the device and the CDID (if any) for | ||
20 | this host is requested. | ||
21 | |||
22 | 4. The CM can verify that the device's supported band | ||
23 | groups (wusb_device_band_groups) are compatible | ||
24 | with the host. | ||
25 | |||
26 | 5. The CM reads the wusb_cdid file. | ||
27 | |||
28 | 6. The CM looks it up in its database. | ||
29 | |||
30 | - If it has a matching CHID,CDID entry, the device | ||
31 | has been authorized before and nothing further | ||
32 | needs to be done. | ||
33 | |||
34 | - If the CDID is zero (or the CM doesn't find a | ||
35 | matching CDID in its database), the device is | ||
36 | assumed to be not known. The CM may associate | ||
37 | the host with device by: writing a randomly | ||
38 | generated CDID to wusb_cdid and then a random CK | ||
39 | to wusb_ck (this uploads the new CC to the | ||
40 | device). | ||
41 | |||
42 | The CM may choose to prompt the user before | ||
43 | associating with a new device. | ||
44 | |||
45 | 7. Device is unplugged. | ||
46 | |||
47 | References: | ||
48 | [WUSB-AM] Association Models Supplement to the | ||
49 | Certified Wireless Universal Serial Bus | ||
50 | Specification, version 1.0. | ||
51 | |||
52 | What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_chid | ||
53 | Date: August 2008 | ||
54 | KernelVersion: 2.6.27 | ||
55 | Contact: David Vrabel <david.vrabel@csr.com> | ||
56 | Description: | ||
57 | The CHID of the host formatted as 16 space-separated | ||
58 | hex octets. | ||
59 | |||
60 | A write fetches the device's supported band groups and | ||
61 | the CDID for any existing association with this host. | ||
62 | |||
63 | What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_name | ||
64 | Date: August 2008 | ||
65 | KernelVersion: 2.6.27 | ||
66 | Contact: David Vrabel <david.vrabel@csr.com> | ||
67 | Description: | ||
68 | A friendly name for the host as a UTF-8 encoded string. | ||
69 | |||
70 | What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_band_groups | ||
71 | Date: August 2008 | ||
72 | KernelVersion: 2.6.27 | ||
73 | Contact: David Vrabel <david.vrabel@csr.com> | ||
74 | Description: | ||
75 | The band groups supported by the host, in the format | ||
76 | defined in [WUSB-AM]. | ||
77 | |||
78 | What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_device_band_groups | ||
79 | Date: August 2008 | ||
80 | KernelVersion: 2.6.27 | ||
81 | Contact: David Vrabel <david.vrabel@csr.com> | ||
82 | Description: | ||
83 | The band groups supported by the device, in the format | ||
84 | defined in [WUSB-AM]. | ||
85 | |||
86 | What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_cdid | ||
87 | Date: August 2008 | ||
88 | KernelVersion: 2.6.27 | ||
89 | Contact: David Vrabel <david.vrabel@csr.com> | ||
90 | Description: | ||
91 | The device's CDID formatted as 16 space-separated hex | ||
92 | octets. | ||
93 | |||
94 | What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_ck | ||
95 | Date: August 2008 | ||
96 | KernelVersion: 2.6.27 | ||
97 | Contact: David Vrabel <david.vrabel@csr.com> | ||
98 | Description: | ||
99 | Write 16 space-separated random, hex octets to | ||
100 | associate with the device. | ||
diff --git a/Documentation/ia64/xen.txt b/Documentation/ia64/xen.txt new file mode 100644 index 000000000000..c61a99f7c8bb --- /dev/null +++ b/Documentation/ia64/xen.txt | |||
@@ -0,0 +1,183 @@ | |||
1 | Recipe for getting/building/running Xen/ia64 with pv_ops | ||
2 | -------------------------------------------------------- | ||
3 | |||
4 | This recipe describes how to get xen-ia64 source and build it, | ||
5 | and run domU with pv_ops. | ||
6 | |||
7 | ============ | ||
8 | Requirements | ||
9 | ============ | ||
10 | |||
11 | - python | ||
12 | - mercurial | ||
13 | It (aka "hg") is open-source source code | ||
14 | management software. See below. | ||
15 | http://www.selenic.com/mercurial/wiki/ | ||
16 | - git | ||
17 | - bridge-utils | ||
18 | |||
19 | ================================= | ||
20 | Getting and Building Xen and Dom0 | ||
21 | ================================= | ||
22 | |||
23 | My environment is; | ||
24 | Machine : Tiger4 | ||
25 | Domain0 OS : RHEL5 | ||
26 | DomainU OS : RHEL5 | ||
27 | |||
28 | 1. Download source | ||
29 | # hg clone http://xenbits.xensource.com/ext/ia64/xen-unstable.hg | ||
30 | # cd xen-unstable.hg | ||
31 | # hg clone http://xenbits.xensource.com/ext/ia64/linux-2.6.18-xen.hg | ||
32 | |||
33 | 2. # make world | ||
34 | |||
35 | 3. # make install-tools | ||
36 | |||
37 | 4. copy kernels and xen | ||
38 | # cp xen/xen.gz /boot/efi/efi/redhat/ | ||
39 | # cp build-linux-2.6.18-xen_ia64/vmlinux.gz \ | ||
40 | /boot/efi/efi/redhat/vmlinuz-2.6.18.8-xen | ||
41 | |||
42 | 5. make initrd for Dom0/DomU | ||
43 | # make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install \ | ||
44 | O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64 | ||
45 | # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img \ | ||
46 | 2.6.18.8-xen --builtin mptspi --builtin mptbase \ | ||
47 | --builtin mptscsih --builtin uhci-hcd --builtin ohci-hcd \ | ||
48 | --builtin ehci-hcd | ||
49 | |||
50 | ================================ | ||
51 | Making a disk image for guest OS | ||
52 | ================================ | ||
53 | |||
54 | 1. make file | ||
55 | # dd if=/dev/zero of=/root/rhel5.img bs=1M seek=4096 count=0 | ||
56 | # mke2fs -F -j /root/rhel5.img | ||
57 | # mount -o loop /root/rhel5.img /mnt | ||
58 | # cp -ax /{dev,var,etc,usr,bin,sbin,lib} /mnt | ||
59 | # mkdir /mnt/{root,proc,sys,home,tmp} | ||
60 | |||
61 | Note: You may miss some device files. If so, please create them | ||
62 | with mknod. Or you can use tar instead of cp. | ||
63 | |||
64 | 2. modify DomU's fstab | ||
65 | # vi /mnt/etc/fstab | ||
66 | /dev/xvda1 / ext3 defaults 1 1 | ||
67 | none /dev/pts devpts gid=5,mode=620 0 0 | ||
68 | none /dev/shm tmpfs defaults 0 0 | ||
69 | none /proc proc defaults 0 0 | ||
70 | none /sys sysfs defaults 0 0 | ||
71 | |||
72 | 3. modify inittab | ||
73 | set runlevel to 3 to avoid X trying to start | ||
74 | # vi /mnt/etc/inittab | ||
75 | id:3:initdefault: | ||
76 | Start a getty on the hvc0 console | ||
77 | X0:2345:respawn:/sbin/mingetty hvc0 | ||
78 | tty1-6 mingetty can be commented out | ||
79 | |||
80 | 4. add hvc0 into /etc/securetty | ||
81 | # vi /mnt/etc/securetty (add hvc0) | ||
82 | |||
83 | 5. umount | ||
84 | # umount /mnt | ||
85 | |||
86 | FYI, virt-manager can also make a disk image for guest OS. | ||
87 | It's GUI tools and easy to make it. | ||
88 | |||
89 | ================== | ||
90 | Boot Xen & Domain0 | ||
91 | ================== | ||
92 | |||
93 | 1. replace elilo | ||
94 | elilo of RHEL5 can boot Xen and Dom0. | ||
95 | If you use old elilo (e.g RHEL4), please download from the below | ||
96 | http://elilo.sourceforge.net/cgi-bin/blosxom | ||
97 | and copy into /boot/efi/efi/redhat/ | ||
98 | # cp elilo-3.6-ia64.efi /boot/efi/efi/redhat/elilo.efi | ||
99 | |||
100 | 2. modify elilo.conf (like the below) | ||
101 | # vi /boot/efi/efi/redhat/elilo.conf | ||
102 | prompt | ||
103 | timeout=20 | ||
104 | default=xen | ||
105 | relocatable | ||
106 | |||
107 | image=vmlinuz-2.6.18.8-xen | ||
108 | label=xen | ||
109 | vmm=xen.gz | ||
110 | initrd=initrd-2.6.18.8-xen.img | ||
111 | read-only | ||
112 | append=" -- rhgb root=/dev/sda2" | ||
113 | |||
114 | The append options before "--" are for xen hypervisor, | ||
115 | the options after "--" are for dom0. | ||
116 | |||
117 | FYI, your machine may need console options like | ||
118 | "com1=19200,8n1 console=vga,com1". For example, | ||
119 | append="com1=19200,8n1 console=vga,com1 -- rhgb console=tty0 \ | ||
120 | console=ttyS0 root=/dev/sda2" | ||
121 | |||
122 | ===================================== | ||
123 | Getting and Building domU with pv_ops | ||
124 | ===================================== | ||
125 | |||
126 | 1. get pv_ops tree | ||
127 | # git clone http://people.valinux.co.jp/~yamahata/xen-ia64/linux-2.6-xen-ia64.git/ | ||
128 | |||
129 | 2. git branch (if necessary) | ||
130 | # cd linux-2.6-xen-ia64/ | ||
131 | # git checkout -b your_branch origin/xen-ia64-domu-minimal-2008may19 | ||
132 | (Note: The current branch is xen-ia64-domu-minimal-2008may19. | ||
133 | But you would find the new branch. You can see with | ||
134 | "git branch -r" to get the branch lists. | ||
135 | http://people.valinux.co.jp/~yamahata/xen-ia64/for_eagl/linux-2.6-ia64-pv-ops.git/ | ||
136 | is also available. The tree is based on | ||
137 | git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6 test) | ||
138 | |||
139 | |||
140 | 3. copy .config for pv_ops of domU | ||
141 | # cp arch/ia64/configs/xen_domu_wip_defconfig .config | ||
142 | |||
143 | 4. make kernel with pv_ops | ||
144 | # make oldconfig | ||
145 | # make | ||
146 | |||
147 | 5. install the kernel and initrd | ||
148 | # cp vmlinux.gz /boot/efi/efi/redhat/vmlinuz-2.6-pv_ops-xenU | ||
149 | # make modules_install | ||
150 | # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6-pv_ops-xenU.img \ | ||
151 | 2.6.26-rc3xen-ia64-08941-g1b12161 --builtin mptspi \ | ||
152 | --builtin mptbase --builtin mptscsih --builtin uhci-hcd \ | ||
153 | --builtin ohci-hcd --builtin ehci-hcd | ||
154 | |||
155 | ======================== | ||
156 | Boot DomainU with pv_ops | ||
157 | ======================== | ||
158 | |||
159 | 1. make config of DomU | ||
160 | # vi /etc/xen/rhel5 | ||
161 | kernel = "/boot/efi/efi/redhat/vmlinuz-2.6-pv_ops-xenU" | ||
162 | ramdisk = "/boot/efi/efi/redhat/initrd-2.6-pv_ops-xenU.img" | ||
163 | vcpus = 1 | ||
164 | memory = 512 | ||
165 | name = "rhel5" | ||
166 | disk = [ 'file:/root/rhel5.img,xvda1,w' ] | ||
167 | root = "/dev/xvda1 ro" | ||
168 | extra= "rhgb console=hvc0" | ||
169 | |||
170 | 2. After boot xen and dom0, start xend | ||
171 | # /etc/init.d/xend start | ||
172 | ( In the debugging case, # XEND_DEBUG=1 xend trace_start ) | ||
173 | |||
174 | 3. start domU | ||
175 | # xm create -c rhel5 | ||
176 | |||
177 | ========= | ||
178 | Reference | ||
179 | ========= | ||
180 | - Wiki of Xen/IA64 upstream merge | ||
181 | http://wiki.xensource.com/xenwiki/XenIA64/UpstreamMerge | ||
182 | |||
183 | Written by Akio Takebe <takebe_akio@jp.fujitsu.com> on 28 May 2008 | ||
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt index 0705040531a5..3f4bc840da8b 100644 --- a/Documentation/kdump/kdump.txt +++ b/Documentation/kdump/kdump.txt | |||
@@ -109,7 +109,8 @@ There are two possible methods of using Kdump. | |||
109 | 2) Or use the system kernel binary itself as dump-capture kernel and there is | 109 | 2) Or use the system kernel binary itself as dump-capture kernel and there is |
110 | no need to build a separate dump-capture kernel. This is possible | 110 | no need to build a separate dump-capture kernel. This is possible |
111 | only with the architectures which support a relocatable kernel. As | 111 | only with the architectures which support a relocatable kernel. As |
112 | of today, i386, x86_64 and ia64 architectures support relocatable kernel. | 112 | of today, i386, x86_64, ppc64 and ia64 architectures support relocatable |
113 | kernel. | ||
113 | 114 | ||
114 | Building a relocatable kernel is advantageous from the point of view that | 115 | Building a relocatable kernel is advantageous from the point of view that |
115 | one does not have to build a second kernel for capturing the dump. But | 116 | one does not have to build a second kernel for capturing the dump. But |
@@ -207,8 +208,15 @@ Dump-capture kernel config options (Arch Dependent, i386 and x86_64) | |||
207 | Dump-capture kernel config options (Arch Dependent, ppc64) | 208 | Dump-capture kernel config options (Arch Dependent, ppc64) |
208 | ---------------------------------------------------------- | 209 | ---------------------------------------------------------- |
209 | 210 | ||
210 | * Make and install the kernel and its modules. DO NOT add this kernel | 211 | 1) Enable "Build a kdump crash kernel" support under "Kernel" options: |
211 | to the boot loader configuration files. | 212 | |
213 | CONFIG_CRASH_DUMP=y | ||
214 | |||
215 | 2) Enable "Build a relocatable kernel" support | ||
216 | |||
217 | CONFIG_RELOCATABLE=y | ||
218 | |||
219 | Make and install the kernel and its modules. | ||
212 | 220 | ||
213 | Dump-capture kernel config options (Arch Dependent, ia64) | 221 | Dump-capture kernel config options (Arch Dependent, ia64) |
214 | ---------------------------------------------------------- | 222 | ---------------------------------------------------------- |
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt index de4063cb4fdc..02ea9a971b8e 100644 --- a/Documentation/powerpc/booting-without-of.txt +++ b/Documentation/powerpc/booting-without-of.txt | |||
@@ -1917,6 +1917,8 @@ platforms are moved over to use the flattened-device-tree model. | |||
1917 | inverse clock polarity (CPOL) mode | 1917 | inverse clock polarity (CPOL) mode |
1918 | - spi-cpha - (optional) Empty property indicating device requires | 1918 | - spi-cpha - (optional) Empty property indicating device requires |
1919 | shifted clock phase (CPHA) mode | 1919 | shifted clock phase (CPHA) mode |
1920 | - spi-cs-high - (optional) Empty property indicating device requires | ||
1921 | chip select active high | ||
1920 | 1922 | ||
1921 | SPI example for an MPC5200 SPI bus: | 1923 | SPI example for an MPC5200 SPI bus: |
1922 | spi@f00 { | 1924 | spi@f00 { |
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt index 74ae6f1cd2d6..81a917ef96e9 100644 --- a/Documentation/powerpc/dts-bindings/fsl/board.txt +++ b/Documentation/powerpc/dts-bindings/fsl/board.txt | |||
@@ -2,13 +2,13 @@ | |||
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | 4 | ||
5 | - device_type : Should be "board-control" | 5 | - compatible : Should be "fsl,<board>-bcsr" |
6 | - reg : Offset and length of the register set for the device | 6 | - reg : Offset and length of the register set for the device |
7 | 7 | ||
8 | Example: | 8 | Example: |
9 | 9 | ||
10 | bcsr@f8000000 { | 10 | bcsr@f8000000 { |
11 | device_type = "board-control"; | 11 | compatible = "fsl,mpc8360mds-bcsr"; |
12 | reg = <f8000000 8000>; | 12 | reg = <f8000000 8000>; |
13 | }; | 13 | }; |
14 | 14 | ||
diff --git a/Documentation/usb/WUSB-Design-overview.txt b/Documentation/usb/WUSB-Design-overview.txt new file mode 100644 index 000000000000..4c3d62c7843a --- /dev/null +++ b/Documentation/usb/WUSB-Design-overview.txt | |||
@@ -0,0 +1,448 @@ | |||
1 | |||
2 | Linux UWB + Wireless USB + WiNET | ||
3 | |||
4 | (C) 2005-2006 Intel Corporation | ||
5 | Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
6 | |||
7 | This program is free software; you can redistribute it and/or | ||
8 | modify it under the terms of the GNU General Public License version | ||
9 | 2 as published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, | ||
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | GNU General Public License for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License | ||
17 | along with this program; if not, write to the Free Software | ||
18 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | 02110-1301, USA. | ||
20 | |||
21 | |||
22 | Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for | ||
23 | updated content. | ||
24 | |||
25 | * Design-overview.txt-1.8 | ||
26 | |||
27 | This code implements an Ultra Wide Band stack for Linux, as well as | ||
28 | drivers for the USB based UWB radio controllers defined in the | ||
29 | Wireless USB 1.0 specification (including Wireless USB host controller | ||
30 | and an Intel WiNET controller). | ||
31 | |||
32 | 1. Introduction | ||
33 | 1. HWA: Host Wire adapters, your Wireless USB dongle | ||
34 | |||
35 | 2. DWA: Device Wired Adaptor, a Wireless USB hub for wired | ||
36 | devices | ||
37 | 3. WHCI: Wireless Host Controller Interface, the PCI WUSB host | ||
38 | adapter | ||
39 | 2. The UWB stack | ||
40 | 1. Devices and hosts: the basic structure | ||
41 | |||
42 | 2. Host Controller life cycle | ||
43 | |||
44 | 3. On the air: beacons and enumerating the radio neighborhood | ||
45 | |||
46 | 4. Device lists | ||
47 | 5. Bandwidth allocation | ||
48 | |||
49 | 3. Wireless USB Host Controller drivers | ||
50 | |||
51 | 4. Glossary | ||
52 | |||
53 | |||
54 | Introduction | ||
55 | |||
56 | UWB is a wide-band communication protocol that is to serve also as the | ||
57 | low-level protocol for others (much like TCP sits on IP). Currently | ||
58 | these others are Wireless USB and TCP/IP, but seems Bluetooth and | ||
59 | Firewire/1394 are coming along. | ||
60 | |||
61 | UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of | ||
62 | ~-41dB (or 0.074 uW/MHz--geography specific data is still being | ||
63 | negotiated w/ regulators, so watch for changes). That band is divided in | ||
64 | a bunch of ~1.5 GHz wide channels (or band groups) composed of three | ||
65 | subbands/subchannels (528 MHz each). Each channel is independent of each | ||
66 | other, so you could consider them different "busses". Initially this | ||
67 | driver considers them all a single one. | ||
68 | |||
69 | Radio time is divided in 65536 us long /superframes/, each one divided | ||
70 | in 256 256us long /MASs/ (Media Allocation Slots), which are the basic | ||
71 | time/media allocation units for transferring data. At the beginning of | ||
72 | each superframe there is a Beacon Period (BP), where every device | ||
73 | transmit its beacon on a single MAS. The length of the BP depends on how | ||
74 | many devices are present and the length of their beacons. | ||
75 | |||
76 | Devices have a MAC (fixed, 48 bit address) and a device (changeable, 16 | ||
77 | bit address) and send periodic beacons to advertise themselves and pass | ||
78 | info on what they are and do. They advertise their capabilities and a | ||
79 | bunch of other stuff. | ||
80 | |||
81 | The different logical parts of this driver are: | ||
82 | |||
83 | * | ||
84 | |||
85 | *UWB*: the Ultra-Wide-Band stack -- manages the radio and | ||
86 | associated spectrum to allow for devices sharing it. Allows to | ||
87 | control bandwidth assignment, beaconing, scanning, etc | ||
88 | |||
89 | * | ||
90 | |||
91 | *WUSB*: the layer that sits on top of UWB to provide Wireless USB. | ||
92 | The Wireless USB spec defines means to control a UWB radio and to | ||
93 | do the actual WUSB. | ||
94 | |||
95 | |||
96 | HWA: Host Wire adapters, your Wireless USB dongle | ||
97 | |||
98 | WUSB also defines a device called a Host Wire Adaptor (HWA), which in | ||
99 | mere terms is a USB dongle that enables your PC to have UWB and Wireless | ||
100 | USB. The Wireless USB Host Controller in a HWA looks to the host like a | ||
101 | [Wireless] USB controller connected via USB (!) | ||
102 | |||
103 | The HWA itself is broken in two or three main interfaces: | ||
104 | |||
105 | * | ||
106 | |||
107 | *RC*: Radio control -- this implements an interface to the | ||
108 | Ultra-Wide-Band radio controller. The driver for this implements a | ||
109 | USB-based UWB Radio Controller to the UWB stack. | ||
110 | |||
111 | * | ||
112 | |||
113 | *HC*: the wireless USB host controller. It looks like a USB host | ||
114 | whose root port is the radio and the WUSB devices connect to it. | ||
115 | To the system it looks like a separate USB host. The driver (will) | ||
116 | implement a USB host controller (similar to UHCI, OHCI or EHCI) | ||
117 | for which the root hub is the radio...To reiterate: it is a USB | ||
118 | controller that is connected via USB instead of PCI. | ||
119 | |||
120 | * | ||
121 | |||
122 | *WINET*: some HW provide a WiNET interface (IP over UWB). This | ||
123 | package provides a driver for it (it looks like a network | ||
124 | interface, winetX). The driver detects when there is a link up for | ||
125 | their type and kick into gear. | ||
126 | |||
127 | |||
128 | DWA: Device Wired Adaptor, a Wireless USB hub for wired devices | ||
129 | |||
130 | These are the complement to HWAs. They are a USB host for connecting | ||
131 | wired devices, but it is connected to your PC via Wireless | ||
132 | USB. To the system it looks like yet another USB host. To the untrained | ||
133 | eye, it looks like a hub that connects upstream wirelessly. | ||
134 | |||
135 | We still offer no support for this; however, it should share a lot of | ||
136 | code with the HWA-RC driver; there is a bunch of factorization work that | ||
137 | has been done to support that in upcoming releases. | ||
138 | |||
139 | |||
140 | WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter | ||
141 | |||
142 | This is your usual PCI device that implements WHCI. Similar in concept | ||
143 | to EHCI, it allows your wireless USB devices (including DWAs) to connect | ||
144 | to your host via a PCI interface. As in the case of the HWA, it has a | ||
145 | Radio Control interface and the WUSB Host Controller interface per se. | ||
146 | |||
147 | There is still no driver support for this, but will be in upcoming | ||
148 | releases. | ||
149 | |||
150 | |||
151 | The UWB stack | ||
152 | |||
153 | The main mission of the UWB stack is to keep a tally of which devices | ||
154 | are in radio proximity to allow drivers to connect to them. As well, it | ||
155 | provides an API for controlling the local radio controllers (RCs from | ||
156 | now on), such as to start/stop beaconing, scan, allocate bandwidth, etc. | ||
157 | |||
158 | |||
159 | Devices and hosts: the basic structure | ||
160 | |||
161 | The main building block here is the UWB device (struct uwb_dev). For | ||
162 | each device that pops up in radio presence (ie: the UWB host receives a | ||
163 | beacon from it) you get a struct uwb_dev that will show up in | ||
164 | /sys/class/uwb and in /sys/bus/uwb/devices. | ||
165 | |||
166 | For each RC that is detected, a new struct uwb_rc is created. In turn, a | ||
167 | RC is also a device, so they also show in /sys/class/uwb and | ||
168 | /sys/bus/uwb/devices, but at the same time, only radio controllers show | ||
169 | up in /sys/class/uwb_rc. | ||
170 | |||
171 | * | ||
172 | |||
173 | [*] The reason for RCs being also devices is that not only we can | ||
174 | see them while enumerating the system device tree, but also on the | ||
175 | radio (their beacons and stuff), so the handling has to be | ||
176 | likewise to that of a device. | ||
177 | |||
178 | Each RC driver is implemented by a separate driver that plugs into the | ||
179 | interface that the UWB stack provides through a struct uwb_rc_ops. The | ||
180 | spec creators have been nice enough to make the message format the same | ||
181 | for HWA and WHCI RCs, so the driver is really a very thin transport that | ||
182 | moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/] | ||
183 | and sends the replies and notifications back to the API | ||
184 | [/uwb_rc_neh_grok()/]. Notifications are handed to the UWB daemon, that | ||
185 | is chartered, among other things, to keep the tab of how the UWB radio | ||
186 | neighborhood looks, creating and destroying devices as they show up or | ||
187 | disappear. | ||
188 | |||
189 | Command execution is very simple: a command block is sent and an event | ||
190 | block or reply is expected back. For sending/receiving command/events, a | ||
191 | handle called /neh/ (Notification/Event Handle) is opened with | ||
192 | /uwb_rc_neh_open()/. | ||
193 | |||
194 | The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for | ||
195 | the USB connected HWA. Eventually, drivers/whci-rc.c will do the same | ||
196 | for the PCI connected WHCI controller. | ||
197 | |||
198 | |||
199 | Host Controller life cycle | ||
200 | |||
201 | So let's say we connect a dongle to the system: it is detected and | ||
202 | firmware uploaded if needed [for Intel's i1480 | ||
203 | /drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated. | ||
204 | Now we have a real HWA device connected and | ||
205 | /drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up, that will set up the | ||
206 | Wire-Adaptor environment and then suck it into the UWB stack's vision of | ||
207 | the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/]. | ||
208 | |||
209 | * | ||
210 | |||
211 | [*] The stack should put a new RC to scan for devices | ||
212 | [/uwb_rc_scan()/] so it finds what's available around and tries to | ||
213 | connect to them, but this is policy stuff and should be driven | ||
214 | from user space. As of now, the operator is expected to do it | ||
215 | manually; see the release notes for documentation on the procedure. | ||
216 | |||
217 | When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/ | ||
218 | takes care of tearing everything down safely (or not...). | ||
219 | |||
220 | |||
221 | On the air: beacons and enumerating the radio neighborhood | ||
222 | |||
223 | So assuming we have devices and we have agreed for a channel to connect | ||
224 | on (let's say 9), we put the new RC to beacon: | ||
225 | |||
226 | * | ||
227 | |||
228 | $ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon | ||
229 | |||
230 | Now it is visible. If there were other devices in the same radio channel | ||
231 | and beacon group (that's what the zero is for), the dongle's radio | ||
232 | control interface will send beacon notifications on its | ||
233 | notification/event endpoint (NEEP). The beacon notifications are part of | ||
234 | the event stream that is funneled into the API with | ||
235 | /drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to the UWBD, the UWB | ||
236 | daemon through a notification list. | ||
237 | |||
238 | UWBD wakes up and scans the event list; finds a beacon and adds it to | ||
239 | the BEACON CACHE (/uwb_beca/). If he receives a number of beacons from | ||
240 | the same device, he considers it to be 'onair' and creates a new device | ||
241 | [/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons | ||
242 | are received in some time, the device is considered gone and wiped out | ||
243 | [uwbd calls periodically /uwb/beacon.c:uwb_beca_purge()/ that will purge | ||
244 | the beacon cache of dead devices]. | ||
245 | |||
246 | |||
247 | Device lists | ||
248 | |||
249 | All UWB devices are kept in the list of the struct bus_type uwb_bus. | ||
250 | |||
251 | |||
252 | Bandwidth allocation | ||
253 | |||
254 | The UWB stack maintains a local copy of DRP availability through | ||
255 | processing of incoming *DRP Availability Change* notifications. This | ||
256 | local copy is currently used to present the current bandwidth | ||
257 | availability to the user through the sysfs file | ||
258 | /sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth | ||
259 | availability information will be used by the bandwidth reservation | ||
260 | routines. | ||
261 | |||
262 | The bandwidth reservation routines are in progress and are thus not | ||
263 | present in the current release. When completed they will enable a user | ||
264 | to initiate DRP reservation requests through interaction with sysfs. DRP | ||
265 | reservation requests from remote UWB devices will also be handled. The | ||
266 | bandwidth management done by the UWB stack will include callbacks to the | ||
267 | higher layers that will enable the higher layers to use the reservations upon | ||
268 | completion. [Note: The bandwidth reservation work is in progress and | ||
269 | subject to change.] | ||
270 | |||
271 | |||
272 | Wireless USB Host Controller drivers | ||
273 | |||
274 | *WARNING* This section needs a lot of work! | ||
275 | |||
276 | As explained above, there are three different types of HCs in the WUSB | ||
277 | world: HWA-HC, DWA-HC and WHCI-HC. | ||
278 | |||
279 | HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB | ||
280 | connected controllers), and their transfer management system is almost | ||
281 | identical. So is their notification delivery system. | ||
282 | |||
283 | HWA-HC and WHCI-HC share that they are both WUSB host controllers, so | ||
284 | they have to deal with WUSB device life cycle and maintenance, wireless | ||
285 | root-hub | ||
286 | |||
287 | HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has | ||
288 | three endpoints (Notifications, Data Transfer In and Data Transfer | ||
289 | Out--known as NEP, DTI and DTO in the code). | ||
290 | |||
291 | We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster | ||
292 | ID and tell the HC to use all that. Then we start it. This means the HC | ||
293 | starts sending MMCs. | ||
294 | |||
295 | * | ||
296 | |||
297 | The MMCs are blocks of data defined somewhere in the WUSB1.0 spec | ||
298 | that define a stream in the UWB channel time allocated for sending | ||
299 | WUSB IEs (host to device commands/notifications) and Device | ||
300 | Notifications (device initiated to host). Each host defines a | ||
301 | unique Wireless USB cluster through MMCs. Devices can connect to a | ||
302 | single cluster at the time. The IEs are Information Elements, and | ||
303 | among them are the bandwidth allocations that tell each device | ||
304 | when they can transmit or receive. | ||
305 | |||
306 | Now it all depends on external stimuli. | ||
307 | |||
308 | *New device connection* | ||
309 | |||
310 | A new device pops up, it scans the radio looking for MMCs that give out | ||
311 | the existence of Wireless USB channels. Once one (or more) are found, | ||
312 | selects which one to connect to. Sends a /DN_Connect/ (device | ||
313 | notification connect) during the DNTS (Device Notification Time | ||
314 | Slot--announced in the MMCs). | ||
315 | |||
316 | HC picks the /DN_Connect/ out (nep module sends to notif.c for delivery | ||
317 | into /devconnect/). This process starts the authentication process for | ||
318 | the device. First we allocate a /fake port/ and assign an | ||
319 | unauthenticated address (128 to 255--what we really do is | ||
320 | 0x80 | fake_port_idx). We fiddle with the fake port status and /khubd/ | ||
321 | sees a new connection, so he moves on to enable the fake port with a reset. | ||
322 | |||
323 | So now we are in the reset path -- we know we have a non-yet enumerated | ||
324 | device with an unauthorized address; we ask user space to authenticate | ||
325 | (FIXME: not yet done, similar to bluetooth pairing), then we do the key | ||
326 | exchange (FIXME: not yet done) and issue a /set address 0/ to bring the | ||
327 | device to the default state. Device is authenticated. | ||
328 | |||
329 | From here, the USB stack takes control through the usb_hcd ops. khubd | ||
330 | has seen the port status changes, as we have been toggling them. It will | ||
331 | start enumerating and doing transfers through usb_hcd->urb_enqueue() to | ||
332 | read descriptors and move our data. | ||
333 | |||
334 | *Device life cycle and keep alives* | ||
335 | |||
336 | Every time there is a successful transfer to/from a device, we update a | ||
337 | per-device activity timestamp. If not, every now and then we check and | ||
338 | if the activity timestamp gets old, we ping the device by sending it a | ||
339 | Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this | ||
340 | arrives to us as a notification through | ||
341 | devconnect.c:wusb_handle_dn_alive()). If a device times out, we | ||
342 | disconnect it from the system (cleaning up internal information and | ||
343 | toggling the bits in the fake hub port, which kicks khubd into removing | ||
344 | the rest of the stuff). | ||
345 | |||
346 | This is done through devconnect:__wusb_check_devs(), which will scan the | ||
347 | device list looking for whom needs refreshing. | ||
348 | |||
349 | If the device wants to disconnect, it will either die (ugly) or send a | ||
350 | /DN_Disconnect/ that will prompt a disconnection from the system. | ||
351 | |||
352 | *Sending and receiving data* | ||
353 | |||
354 | Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is | ||
355 | /aimed/ at an endpoint in a WUSB device. This is the same for HWAs and | ||
356 | DWAs. | ||
357 | |||
358 | Each HC has a number of rpipes and buffers that can be assigned to them; | ||
359 | when doing a data transfer (xfer), first the rpipe has to be aimed and | ||
360 | prepared (buffers assigned), then we can start queueing requests for | ||
361 | data in or out. | ||
362 | |||
363 | Data buffers have to be segmented out before sending--so we send first a | ||
364 | header (segment request) and then if there is any data, a data buffer | ||
365 | immediately after to the DTI interface (yep, even the request). If our | ||
366 | buffer is bigger than the max segment size, then we just do multiple | ||
367 | requests. | ||
368 | |||
369 | [This sucks, because doing USB scatter gather in Linux is resource | ||
370 | intensive, if any...not that the current approach is not. It just has to | ||
371 | be cleaned up a lot :)]. | ||
372 | |||
373 | If reading, we don't send data buffers, just the segment headers saying | ||
374 | we want to read segments. | ||
375 | |||
376 | When the xfer is executed, we receive a notification that says data is | ||
377 | ready in the DTI endpoint (handled through | ||
378 | xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a | ||
379 | descriptor that gives us the status of the transfer, its identification | ||
380 | (given when we issued it) and the segment number. If it was a data read, | ||
381 | we issue another URB to read into the destination buffer the chunk of | ||
382 | data coming out of the remote endpoint. Done, wait for the next guy. The | ||
383 | callbacks for the URBs issued from here are the ones that will declare | ||
384 | the xfer complete at some point and call its callback. | ||
385 | |||
386 | Seems simple, but the implementation is not trivial. | ||
387 | |||
388 | * | ||
389 | |||
390 | *WARNING* Old!! | ||
391 | |||
392 | The main xfer descriptor, wa_xfer (equivalent to a URB) contains an | ||
393 | array of segments, tallys on segments and buffers and callback | ||
394 | information. Buried in there is a lot of URBs for executing the segments | ||
395 | and buffer transfers. | ||
396 | |||
397 | For OUT xfers, there is an array of segments, one URB for each, another | ||
398 | one of buffer URB. When submitting, we submit URBs for segment request | ||
399 | 1, buffer 1, segment 2, buffer 2...etc. Then we wait on the DTI for xfer | ||
400 | result data; when all the segments are complete, we call the callback to | ||
401 | finalize the transfer. | ||
402 | |||
403 | For IN xfers, we only issue URBs for the segments we want to read and | ||
404 | then wait for the xfer result data. | ||
405 | |||
406 | *URB mapping into xfers* | ||
407 | |||
408 | This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an | ||
409 | rpipe to the endpoint where we have to transmit, create a transfer | ||
410 | context (wa_xfer) and submit it. When the xfer is done, our callback is | ||
411 | called and we assign the status bits and release the xfer resources. | ||
412 | |||
413 | In dequeue() we are basically cancelling/aborting the transfer. We issue | ||
414 | an xfer abort request to the HC, cancel all the URBs we had submitted | ||
415 | and not yet done and when all that is done, the xfer callback will be | ||
416 | called--this will call the URB callback. | ||
417 | |||
418 | |||
419 | Glossary | ||
420 | |||
421 | *DWA* -- Device Wire Adapter | ||
422 | |||
423 | USB host, wired for downstream devices, upstream connects wirelessly | ||
424 | with Wireless USB. | ||
425 | |||
426 | *EVENT* -- Response to a command on the NEEP | ||
427 | |||
428 | *HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB | ||
429 | |||
430 | *NEH* -- Notification/Event Handle | ||
431 | |||
432 | Handle/file descriptor for receiving notifications or events. The WA | ||
433 | code requires you to get one of this to listen for notifications or | ||
434 | events on the NEEP. | ||
435 | |||
436 | *NEEP* -- Notification/Event EndPoint | ||
437 | |||
438 | Stuff related to the management of the first endpoint of a HWA USB | ||
439 | dongle that is used to deliver a stream of events and notifications to | ||
440 | the host. | ||
441 | |||
442 | *NOTIFICATION* -- Message coming in the NEEP as response to something. | ||
443 | |||
444 | *RC* -- Radio Control | ||
445 | |||
446 | Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by | ||
447 | InakyPerezGonzalez) | ||
448 | |||
diff --git a/Documentation/usb/wusb-cbaf b/Documentation/usb/wusb-cbaf new file mode 100644 index 000000000000..2e78b70f3adc --- /dev/null +++ b/Documentation/usb/wusb-cbaf | |||
@@ -0,0 +1,139 @@ | |||
1 | #! /bin/bash | ||
2 | # | ||
3 | |||
4 | set -e | ||
5 | |||
6 | progname=$(basename $0) | ||
7 | function help | ||
8 | { | ||
9 | cat <<EOF | ||
10 | Usage: $progname COMMAND DEVICEs [ARGS] | ||
11 | |||
12 | Command for manipulating the pairing/authentication credentials of a | ||
13 | Wireless USB device that supports wired-mode Cable-Based-Association. | ||
14 | |||
15 | Works in conjunction with the wusb-cba.ko driver from http://linuxuwb.org. | ||
16 | |||
17 | |||
18 | DEVICE | ||
19 | |||
20 | sysfs path to the device to authenticate; for example, both of | ||
21 | these are the same: | ||
22 | |||
23 | /sys/devices/pci0000:00/0000:00:1d.7/usb1/1-4/1-4.4/1-4.4:1.1 | ||
24 | /sys/bus/usb/drivers/wusb-cbaf/1-4.4:1.1 | ||
25 | |||
26 | COMMAND/ARGS are | ||
27 | |||
28 | start | ||
29 | |||
30 | Start a WUSB host controller (by setting up a CHID) | ||
31 | |||
32 | set-chid DEVICE HOST-CHID HOST-BANDGROUP HOST-NAME | ||
33 | |||
34 | Sets host information in the device; after this you can call the | ||
35 | get-cdid to see how does this device report itself to us. | ||
36 | |||
37 | get-cdid DEVICE | ||
38 | |||
39 | Get the device ID associated with the HOST-CHID we sent with | ||
40 | 'set-chid'. We might not know about it. | ||
41 | |||
42 | set-cc DEVICE | ||
43 | |||
44 | If we allow the device to connect, set a random new CDID and CK | ||
45 | (connection key). Device saves them for the next time it wants to | ||
46 | connect wireless. We save them for that next time also so we can | ||
47 | authenticate the device (when we see the CDID he uses to id | ||
48 | itself) and the CK to crypto talk to it. | ||
49 | |||
50 | CHID is always 16 hex bytes in 'XX YY ZZ...' form | ||
51 | BANDGROUP is almost always 0001 | ||
52 | |||
53 | Examples: | ||
54 | |||
55 | You can default most arguments to '' to get a sane value: | ||
56 | |||
57 | $ $progname set-chid '' '' '' "My host name" | ||
58 | |||
59 | A full sequence: | ||
60 | |||
61 | $ $progname set-chid '' '' '' "My host name" | ||
62 | $ $progname get-cdid '' | ||
63 | $ $progname set-cc '' | ||
64 | |||
65 | EOF | ||
66 | } | ||
67 | |||
68 | |||
69 | # Defaults | ||
70 | # FIXME: CHID should come from a database :), band group from the host | ||
71 | host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" | ||
72 | host_band_group="0001" | ||
73 | host_name=$(hostname) | ||
74 | |||
75 | devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)" | ||
76 | hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)" | ||
77 | |||
78 | result=0 | ||
79 | case $1 in | ||
80 | start) | ||
81 | for dev in ${2:-$hdevs} | ||
82 | do | ||
83 | uwb_rc=$(readlink -f $dev/uwb_rc) | ||
84 | if cat $uwb_rc/beacon | grep -q -- "-1" | ||
85 | then | ||
86 | echo 13 0 > $uwb_rc/beacon | ||
87 | echo I: started beaconing on ch 13 on $(basename $uwb_rc) >&2 | ||
88 | fi | ||
89 | echo $host_CHID > $dev/wusb_chid | ||
90 | echo I: started host $(basename $dev) >&2 | ||
91 | done | ||
92 | ;; | ||
93 | stop) | ||
94 | for dev in ${2:-$hdevs} | ||
95 | do | ||
96 | echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid | ||
97 | echo I: stopped host $(basename $dev) >&2 | ||
98 | uwb_rc=$(readlink -f $dev/uwb_rc) | ||
99 | echo -1 | cat > $uwb_rc/beacon | ||
100 | echo I: stopped beaconing on $(basename $uwb_rc) >&2 | ||
101 | done | ||
102 | ;; | ||
103 | set-chid) | ||
104 | shift | ||
105 | for dev in ${2:-$devs}; do | ||
106 | echo "${4:-$host_name}" > $dev/wusb_host_name | ||
107 | echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups | ||
108 | echo ${2:-$host_CHID} > $dev/wusb_chid | ||
109 | done | ||
110 | ;; | ||
111 | get-cdid) | ||
112 | for dev in ${2:-$devs} | ||
113 | do | ||
114 | cat $dev/wusb_cdid | ||
115 | done | ||
116 | ;; | ||
117 | set-cc) | ||
118 | for dev in ${2:-$devs}; do | ||
119 | shift | ||
120 | CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)" | ||
121 | CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)" | ||
122 | echo "$CDID" > $dev/wusb_cdid | ||
123 | echo "$CK" > $dev/wusb_ck | ||
124 | |||
125 | echo I: CC set >&2 | ||
126 | echo "CHID: $(cat $dev/wusb_chid)" | ||
127 | echo "CDID:$CDID" | ||
128 | echo "CK: $CK" | ||
129 | done | ||
130 | ;; | ||
131 | help|h|--help|-h) | ||
132 | help | ||
133 | ;; | ||
134 | *) | ||
135 | echo "E: Unknown usage" 1>&2 | ||
136 | help 1>&2 | ||
137 | result=1 | ||
138 | esac | ||
139 | exit $result | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 5c3f79c26384..cecf1592609e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1053,6 +1053,12 @@ L: cbe-oss-dev@ozlabs.org | |||
1053 | W: http://www.ibm.com/developerworks/power/cell/ | 1053 | W: http://www.ibm.com/developerworks/power/cell/ |
1054 | S: Supported | 1054 | S: Supported |
1055 | 1055 | ||
1056 | CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: | ||
1057 | P: David Vrabel | ||
1058 | M: david.vrabel@csr.com | ||
1059 | L: linux-usb@vger.kernel.org | ||
1060 | S: Supported | ||
1061 | |||
1056 | CFAG12864B LCD DRIVER | 1062 | CFAG12864B LCD DRIVER |
1057 | P: Miguel Ojeda Sandonis | 1063 | P: Miguel Ojeda Sandonis |
1058 | M: miguel.ojeda.sandonis@gmail.com | 1064 | M: miguel.ojeda.sandonis@gmail.com |
@@ -2176,6 +2182,13 @@ M: maciej.sosnowski@intel.com | |||
2176 | L: linux-kernel@vger.kernel.org | 2182 | L: linux-kernel@vger.kernel.org |
2177 | S: Supported | 2183 | S: Supported |
2178 | 2184 | ||
2185 | INTEL IOMMU (VT-d) | ||
2186 | P: David Woodhouse | ||
2187 | M: dwmw2@infradead.org | ||
2188 | L: iommu@lists.linux-foundation.org | ||
2189 | T: git://git.infradead.org/iommu-2.6.git | ||
2190 | S: Supported | ||
2191 | |||
2179 | INTEL IOP-ADMA DMA DRIVER | 2192 | INTEL IOP-ADMA DMA DRIVER |
2180 | P: Dan Williams | 2193 | P: Dan Williams |
2181 | M: dan.j.williams@intel.com | 2194 | M: dan.j.williams@intel.com |
@@ -2928,9 +2941,9 @@ S: Maintained | |||
2928 | 2941 | ||
2929 | NETEFFECT IWARP RNIC DRIVER (IW_NES) | 2942 | NETEFFECT IWARP RNIC DRIVER (IW_NES) |
2930 | P: Faisal Latif | 2943 | P: Faisal Latif |
2931 | M: flatif@neteffect.com | 2944 | M: faisal.latif@intel.com |
2932 | P: Chien Tung | 2945 | P: Chien Tung |
2933 | M: ctung@neteffect.com | 2946 | M: chien.tin.tung@intel.com |
2934 | L: general@lists.openfabrics.org | 2947 | L: general@lists.openfabrics.org |
2935 | W: http://www.neteffect.com | 2948 | W: http://www.neteffect.com |
2936 | S: Supported | 2949 | S: Supported |
@@ -4191,6 +4204,12 @@ L: sparclinux@vger.kernel.org | |||
4191 | T: git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git | 4204 | T: git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git |
4192 | S: Maintained | 4205 | S: Maintained |
4193 | 4206 | ||
4207 | ULTRA-WIDEBAND (UWB) SUBSYSTEM: | ||
4208 | P: David Vrabel | ||
4209 | M: david.vrabel@csr.com | ||
4210 | L: linux-usb@vger.kernel.org | ||
4211 | S: Supported | ||
4212 | |||
4194 | UNIFORM CDROM DRIVER | 4213 | UNIFORM CDROM DRIVER |
4195 | P: Jens Axboe | 4214 | P: Jens Axboe |
4196 | M: axboe@kernel.dk | 4215 | M: axboe@kernel.dk |
@@ -4616,6 +4635,11 @@ M: zaga@fly.cc.fer.hr | |||
4616 | L: linux-scsi@vger.kernel.org | 4635 | L: linux-scsi@vger.kernel.org |
4617 | S: Maintained | 4636 | S: Maintained |
4618 | 4637 | ||
4638 | WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM | ||
4639 | P: David Vrabel | ||
4640 | M: david.vrabel@csr.com | ||
4641 | S: Maintained | ||
4642 | |||
4619 | WISTRON LAPTOP BUTTON DRIVER | 4643 | WISTRON LAPTOP BUTTON DRIVER |
4620 | P: Miloslav Trmac | 4644 | P: Miloslav Trmac |
4621 | M: mitr@volny.cz | 4645 | M: mitr@volny.cz |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index df39d20f7425..f504c801792f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -356,7 +356,7 @@ config ARCH_IXP4XX | |||
356 | select GENERIC_GPIO | 356 | select GENERIC_GPIO |
357 | select GENERIC_TIME | 357 | select GENERIC_TIME |
358 | select GENERIC_CLOCKEVENTS | 358 | select GENERIC_CLOCKEVENTS |
359 | select ZONE_DMA if PCI | 359 | select DMABOUNCE if PCI |
360 | help | 360 | help |
361 | Support for Intel's IXP4XX (XScale) family of processors. | 361 | Support for Intel's IXP4XX (XScale) family of processors. |
362 | 362 | ||
@@ -1256,6 +1256,8 @@ source "drivers/hid/Kconfig" | |||
1256 | 1256 | ||
1257 | source "drivers/usb/Kconfig" | 1257 | source "drivers/usb/Kconfig" |
1258 | 1258 | ||
1259 | source "drivers/uwb/Kconfig" | ||
1260 | |||
1259 | source "drivers/mmc/Kconfig" | 1261 | source "drivers/mmc/Kconfig" |
1260 | 1262 | ||
1261 | source "drivers/memstick/Kconfig" | 1263 | source "drivers/memstick/Kconfig" |
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index 2e32acca02fb..86b5e6982660 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig | |||
@@ -13,10 +13,10 @@ config ICST307 | |||
13 | config SA1111 | 13 | config SA1111 |
14 | bool | 14 | bool |
15 | select DMABOUNCE if !ARCH_PXA | 15 | select DMABOUNCE if !ARCH_PXA |
16 | select ZONE_DMA if !ARCH_PXA | ||
17 | 16 | ||
18 | config DMABOUNCE | 17 | config DMABOUNCE |
19 | bool | 18 | bool |
19 | select ZONE_DMA | ||
20 | 20 | ||
21 | config TIMER_ACORN | 21 | config TIMER_ACORN |
22 | bool | 22 | bool |
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index fb86f248aab8..47ccec95f3e8 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c | |||
@@ -581,6 +581,7 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, | |||
581 | goto out; | 581 | goto out; |
582 | } | 582 | } |
583 | 583 | ||
584 | #ifdef CONFIG_DMABOUNCE | ||
584 | /* | 585 | /* |
585 | * If the parent device has a DMA mask associated with it, | 586 | * If the parent device has a DMA mask associated with it, |
586 | * propagate it down to the children. | 587 | * propagate it down to the children. |
@@ -598,6 +599,7 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, | |||
598 | } | 599 | } |
599 | } | 600 | } |
600 | } | 601 | } |
602 | #endif | ||
601 | 603 | ||
602 | out: | 604 | out: |
603 | return ret; | 605 | return ret; |
@@ -937,7 +939,7 @@ static int sa1111_resume(struct platform_device *dev) | |||
937 | #define sa1111_resume NULL | 939 | #define sa1111_resume NULL |
938 | #endif | 940 | #endif |
939 | 941 | ||
940 | static int sa1111_probe(struct platform_device *pdev) | 942 | static int __devinit sa1111_probe(struct platform_device *pdev) |
941 | { | 943 | { |
942 | struct resource *mem; | 944 | struct resource *mem; |
943 | int irq; | 945 | int irq; |
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig index 8b7a431a8bfc..9033d147f052 100644 --- a/arch/arm/configs/trizeps4_defconfig +++ b/arch/arm/configs/trizeps4_defconfig | |||
@@ -147,6 +147,7 @@ CONFIG_ARCH_PXA=y | |||
147 | # CONFIG_MACH_MAINSTONE is not set | 147 | # CONFIG_MACH_MAINSTONE is not set |
148 | # CONFIG_ARCH_PXA_IDP is not set | 148 | # CONFIG_ARCH_PXA_IDP is not set |
149 | # CONFIG_PXA_SHARPSL is not set | 149 | # CONFIG_PXA_SHARPSL is not set |
150 | CONFIG_TRIZEPS_PXA=y | ||
150 | CONFIG_MACH_TRIZEPS4=y | 151 | CONFIG_MACH_TRIZEPS4=y |
151 | CONFIG_MACH_TRIZEPS4_CONXS=y | 152 | CONFIG_MACH_TRIZEPS4_CONXS=y |
152 | # CONFIG_MACH_TRIZEPS4_ANY is not set | 153 | # CONFIG_MACH_TRIZEPS4_ANY is not set |
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h index 71c2fa70c8e8..98ec30c97bbe 100644 --- a/arch/arm/mach-clps711x/include/mach/memory.h +++ b/arch/arm/mach-clps711x/include/mach/memory.h | |||
@@ -89,6 +89,8 @@ | |||
89 | * node 3: 0xd8000000 - 0xdfffffff | 89 | * node 3: 0xd8000000 - 0xdfffffff |
90 | */ | 90 | */ |
91 | #define NODE_MEM_SIZE_BITS 24 | 91 | #define NODE_MEM_SIZE_BITS 24 |
92 | #define SECTION_SIZE_BITS 24 | ||
93 | #define MAX_PHYSMEM_BITS 32 | ||
92 | 94 | ||
93 | #endif | 95 | #endif |
94 | 96 | ||
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig index db8b5fe06c0d..2c5a02b8520e 100644 --- a/arch/arm/mach-ixp4xx/Kconfig +++ b/arch/arm/mach-ixp4xx/Kconfig | |||
@@ -167,11 +167,6 @@ config MACH_GTWX5715 | |||
167 | 167 | ||
168 | comment "IXP4xx Options" | 168 | comment "IXP4xx Options" |
169 | 169 | ||
170 | config DMABOUNCE | ||
171 | bool | ||
172 | default y | ||
173 | depends on PCI | ||
174 | |||
175 | config IXP4XX_INDIRECT_PCI | 170 | config IXP4XX_INDIRECT_PCI |
176 | bool "Use indirect PCI memory access" | 171 | bool "Use indirect PCI memory access" |
177 | depends on PCI | 172 | depends on PCI |
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 85cad05d8c5b..0bb1fbd84ccb 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mv643xx_eth.h> | 16 | #include <linux/mv643xx_eth.h> |
17 | #include <linux/ata_platform.h> | 17 | #include <linux/ata_platform.h> |
18 | #include <linux/spi/orion_spi.h> | 18 | #include <linux/spi/orion_spi.h> |
19 | #include <net/dsa.h> | ||
19 | #include <asm/page.h> | 20 | #include <asm/page.h> |
20 | #include <asm/timex.h> | 21 | #include <asm/timex.h> |
21 | #include <asm/mach/map.h> | 22 | #include <asm/mach/map.h> |
@@ -152,6 +153,40 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data) | |||
152 | 153 | ||
153 | 154 | ||
154 | /***************************************************************************** | 155 | /***************************************************************************** |
156 | * Ethernet switch | ||
157 | ****************************************************************************/ | ||
158 | static struct resource kirkwood_switch_resources[] = { | ||
159 | { | ||
160 | .start = 0, | ||
161 | .end = 0, | ||
162 | .flags = IORESOURCE_IRQ, | ||
163 | }, | ||
164 | }; | ||
165 | |||
166 | static struct platform_device kirkwood_switch_device = { | ||
167 | .name = "dsa", | ||
168 | .id = 0, | ||
169 | .num_resources = 0, | ||
170 | .resource = kirkwood_switch_resources, | ||
171 | }; | ||
172 | |||
173 | void __init kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq) | ||
174 | { | ||
175 | if (irq != NO_IRQ) { | ||
176 | kirkwood_switch_resources[0].start = irq; | ||
177 | kirkwood_switch_resources[0].end = irq; | ||
178 | kirkwood_switch_device.num_resources = 1; | ||
179 | } | ||
180 | |||
181 | d->mii_bus = &kirkwood_ge00_shared.dev; | ||
182 | d->netdev = &kirkwood_ge00.dev; | ||
183 | kirkwood_switch_device.dev.platform_data = d; | ||
184 | |||
185 | platform_device_register(&kirkwood_switch_device); | ||
186 | } | ||
187 | |||
188 | |||
189 | /***************************************************************************** | ||
155 | * SoC RTC | 190 | * SoC RTC |
156 | ****************************************************************************/ | 191 | ****************************************************************************/ |
157 | static struct resource kirkwood_rtc_resource = { | 192 | static struct resource kirkwood_rtc_resource = { |
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h index 8fa0f6a27635..5774632a67e3 100644 --- a/arch/arm/mach-kirkwood/common.h +++ b/arch/arm/mach-kirkwood/common.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #ifndef __ARCH_KIRKWOOD_COMMON_H | 11 | #ifndef __ARCH_KIRKWOOD_COMMON_H |
12 | #define __ARCH_KIRKWOOD_COMMON_H | 12 | #define __ARCH_KIRKWOOD_COMMON_H |
13 | 13 | ||
14 | struct dsa_platform_data; | ||
14 | struct mv643xx_eth_platform_data; | 15 | struct mv643xx_eth_platform_data; |
15 | struct mv_sata_platform_data; | 16 | struct mv_sata_platform_data; |
16 | 17 | ||
@@ -29,6 +30,7 @@ void kirkwood_pcie_id(u32 *dev, u32 *rev); | |||
29 | 30 | ||
30 | void kirkwood_ehci_init(void); | 31 | void kirkwood_ehci_init(void); |
31 | void kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data); | 32 | void kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data); |
33 | void kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq); | ||
32 | void kirkwood_pcie_init(void); | 34 | void kirkwood_pcie_init(void); |
33 | void kirkwood_rtc_init(void); | 35 | void kirkwood_rtc_init(void); |
34 | void kirkwood_sata_init(struct mv_sata_platform_data *sata_data); | 36 | void kirkwood_sata_init(struct mv_sata_platform_data *sata_data); |
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index f785093e433f..175054abd630 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/ata_platform.h> | 19 | #include <linux/ata_platform.h> |
20 | #include <linux/mv643xx_eth.h> | 20 | #include <linux/mv643xx_eth.h> |
21 | #include <linux/ethtool.h> | 21 | #include <linux/ethtool.h> |
22 | #include <net/dsa.h> | ||
22 | #include <asm/mach-types.h> | 23 | #include <asm/mach-types.h> |
23 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
24 | #include <asm/mach/pci.h> | 25 | #include <asm/mach/pci.h> |
@@ -74,6 +75,15 @@ static struct mv643xx_eth_platform_data rd88f6281_ge00_data = { | |||
74 | .duplex = DUPLEX_FULL, | 75 | .duplex = DUPLEX_FULL, |
75 | }; | 76 | }; |
76 | 77 | ||
78 | static struct dsa_platform_data rd88f6281_switch_data = { | ||
79 | .port_names[0] = "lan1", | ||
80 | .port_names[1] = "lan2", | ||
81 | .port_names[2] = "lan3", | ||
82 | .port_names[3] = "lan4", | ||
83 | .port_names[4] = "wan", | ||
84 | .port_names[5] = "cpu", | ||
85 | }; | ||
86 | |||
77 | static struct mv_sata_platform_data rd88f6281_sata_data = { | 87 | static struct mv_sata_platform_data rd88f6281_sata_data = { |
78 | .n_ports = 2, | 88 | .n_ports = 2, |
79 | }; | 89 | }; |
@@ -87,6 +97,7 @@ static void __init rd88f6281_init(void) | |||
87 | 97 | ||
88 | kirkwood_ehci_init(); | 98 | kirkwood_ehci_init(); |
89 | kirkwood_ge00_init(&rd88f6281_ge00_data); | 99 | kirkwood_ge00_init(&rd88f6281_ge00_data); |
100 | kirkwood_ge00_switch_init(&rd88f6281_switch_data, NO_IRQ); | ||
90 | kirkwood_rtc_init(); | 101 | kirkwood_rtc_init(); |
91 | kirkwood_sata_init(&rd88f6281_sata_data); | 102 | kirkwood_sata_init(&rd88f6281_sata_data); |
92 | kirkwood_uart0_init(); | 103 | kirkwood_uart0_init(); |
diff --git a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c index 49f434c39eb7..2e285bbb7bbd 100644 --- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c +++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/ata_platform.h> | 14 | #include <linux/ata_platform.h> |
15 | #include <linux/mv643xx_eth.h> | 15 | #include <linux/mv643xx_eth.h> |
16 | #include <linux/ethtool.h> | ||
16 | #include <mach/mv78xx0.h> | 17 | #include <mach/mv78xx0.h> |
17 | #include <asm/mach-types.h> | 18 | #include <asm/mach-types.h> |
18 | #include <asm/mach/arch.h> | 19 | #include <asm/mach/arch.h> |
@@ -28,10 +29,14 @@ static struct mv643xx_eth_platform_data db78x00_ge01_data = { | |||
28 | 29 | ||
29 | static struct mv643xx_eth_platform_data db78x00_ge10_data = { | 30 | static struct mv643xx_eth_platform_data db78x00_ge10_data = { |
30 | .phy_addr = MV643XX_ETH_PHY_NONE, | 31 | .phy_addr = MV643XX_ETH_PHY_NONE, |
32 | .speed = SPEED_1000, | ||
33 | .duplex = DUPLEX_FULL, | ||
31 | }; | 34 | }; |
32 | 35 | ||
33 | static struct mv643xx_eth_platform_data db78x00_ge11_data = { | 36 | static struct mv643xx_eth_platform_data db78x00_ge11_data = { |
34 | .phy_addr = MV643XX_ETH_PHY_NONE, | 37 | .phy_addr = MV643XX_ETH_PHY_NONE, |
38 | .speed = SPEED_1000, | ||
39 | .duplex = DUPLEX_FULL, | ||
35 | }; | 40 | }; |
36 | 41 | ||
37 | static struct mv_sata_platform_data db78x00_sata_data = { | 42 | static struct mv_sata_platform_data db78x00_sata_data = { |
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 9625ef5975d0..437065c25c9c 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mv643xx_i2c.h> | 19 | #include <linux/mv643xx_i2c.h> |
20 | #include <linux/ata_platform.h> | 20 | #include <linux/ata_platform.h> |
21 | #include <linux/spi/orion_spi.h> | 21 | #include <linux/spi/orion_spi.h> |
22 | #include <net/dsa.h> | ||
22 | #include <asm/page.h> | 23 | #include <asm/page.h> |
23 | #include <asm/setup.h> | 24 | #include <asm/setup.h> |
24 | #include <asm/timex.h> | 25 | #include <asm/timex.h> |
@@ -198,6 +199,40 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data) | |||
198 | 199 | ||
199 | 200 | ||
200 | /***************************************************************************** | 201 | /***************************************************************************** |
202 | * Ethernet switch | ||
203 | ****************************************************************************/ | ||
204 | static struct resource orion5x_switch_resources[] = { | ||
205 | { | ||
206 | .start = 0, | ||
207 | .end = 0, | ||
208 | .flags = IORESOURCE_IRQ, | ||
209 | }, | ||
210 | }; | ||
211 | |||
212 | static struct platform_device orion5x_switch_device = { | ||
213 | .name = "dsa", | ||
214 | .id = 0, | ||
215 | .num_resources = 0, | ||
216 | .resource = orion5x_switch_resources, | ||
217 | }; | ||
218 | |||
219 | void __init orion5x_eth_switch_init(struct dsa_platform_data *d, int irq) | ||
220 | { | ||
221 | if (irq != NO_IRQ) { | ||
222 | orion5x_switch_resources[0].start = irq; | ||
223 | orion5x_switch_resources[0].end = irq; | ||
224 | orion5x_switch_device.num_resources = 1; | ||
225 | } | ||
226 | |||
227 | d->mii_bus = &orion5x_eth_shared.dev; | ||
228 | d->netdev = &orion5x_eth.dev; | ||
229 | orion5x_switch_device.dev.platform_data = d; | ||
230 | |||
231 | platform_device_register(&orion5x_switch_device); | ||
232 | } | ||
233 | |||
234 | |||
235 | /***************************************************************************** | ||
201 | * I2C | 236 | * I2C |
202 | ****************************************************************************/ | 237 | ****************************************************************************/ |
203 | static struct mv64xxx_i2c_pdata orion5x_i2c_pdata = { | 238 | static struct mv64xxx_i2c_pdata orion5x_i2c_pdata = { |
@@ -275,7 +310,8 @@ void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data) | |||
275 | * SPI | 310 | * SPI |
276 | ****************************************************************************/ | 311 | ****************************************************************************/ |
277 | static struct orion_spi_info orion5x_spi_plat_data = { | 312 | static struct orion_spi_info orion5x_spi_plat_data = { |
278 | .tclk = 0, | 313 | .tclk = 0, |
314 | .enable_clock_fix = 1, | ||
279 | }; | 315 | }; |
280 | 316 | ||
281 | static struct resource orion5x_spi_resources[] = { | 317 | static struct resource orion5x_spi_resources[] = { |
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h index 1f8b2da676a5..a000c7c6ee96 100644 --- a/arch/arm/mach-orion5x/common.h +++ b/arch/arm/mach-orion5x/common.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __ARCH_ORION5X_COMMON_H | 1 | #ifndef __ARCH_ORION5X_COMMON_H |
2 | #define __ARCH_ORION5X_COMMON_H | 2 | #define __ARCH_ORION5X_COMMON_H |
3 | 3 | ||
4 | struct dsa_platform_data; | ||
4 | struct mv643xx_eth_platform_data; | 5 | struct mv643xx_eth_platform_data; |
5 | struct mv_sata_platform_data; | 6 | struct mv_sata_platform_data; |
6 | 7 | ||
@@ -29,6 +30,7 @@ void orion5x_setup_pcie_wa_win(u32 base, u32 size); | |||
29 | void orion5x_ehci0_init(void); | 30 | void orion5x_ehci0_init(void); |
30 | void orion5x_ehci1_init(void); | 31 | void orion5x_ehci1_init(void); |
31 | void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data); | 32 | void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data); |
33 | void orion5x_eth_switch_init(struct dsa_platform_data *d, int irq); | ||
32 | void orion5x_i2c_init(void); | 34 | void orion5x_i2c_init(void); |
33 | void orion5x_sata_init(struct mv_sata_platform_data *sata_data); | 35 | void orion5x_sata_init(struct mv_sata_platform_data *sata_data); |
34 | void orion5x_spi_init(void); | 36 | void orion5x_spi_init(void); |
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c index 500cdadaf09c..15f53235ee30 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mtd/physmap.h> | 16 | #include <linux/mtd/physmap.h> |
17 | #include <linux/mv643xx_eth.h> | 17 | #include <linux/mv643xx_eth.h> |
18 | #include <linux/ethtool.h> | 18 | #include <linux/ethtool.h> |
19 | #include <net/dsa.h> | ||
19 | #include <asm/mach-types.h> | 20 | #include <asm/mach-types.h> |
20 | #include <asm/gpio.h> | 21 | #include <asm/gpio.h> |
21 | #include <asm/leds.h> | 22 | #include <asm/leds.h> |
@@ -93,6 +94,15 @@ static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = { | |||
93 | .duplex = DUPLEX_FULL, | 94 | .duplex = DUPLEX_FULL, |
94 | }; | 95 | }; |
95 | 96 | ||
97 | static struct dsa_platform_data rd88f5181l_fxo_switch_data = { | ||
98 | .port_names[0] = "lan2", | ||
99 | .port_names[1] = "lan1", | ||
100 | .port_names[2] = "wan", | ||
101 | .port_names[3] = "cpu", | ||
102 | .port_names[5] = "lan4", | ||
103 | .port_names[7] = "lan3", | ||
104 | }; | ||
105 | |||
96 | static void __init rd88f5181l_fxo_init(void) | 106 | static void __init rd88f5181l_fxo_init(void) |
97 | { | 107 | { |
98 | /* | 108 | /* |
@@ -107,6 +117,7 @@ static void __init rd88f5181l_fxo_init(void) | |||
107 | */ | 117 | */ |
108 | orion5x_ehci0_init(); | 118 | orion5x_ehci0_init(); |
109 | orion5x_eth_init(&rd88f5181l_fxo_eth_data); | 119 | orion5x_eth_init(&rd88f5181l_fxo_eth_data); |
120 | orion5x_eth_switch_init(&rd88f5181l_fxo_switch_data, NO_IRQ); | ||
110 | orion5x_uart0_init(); | 121 | orion5x_uart0_init(); |
111 | 122 | ||
112 | orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE, | 123 | orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE, |
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c index ebde81416499..8ad3934399d4 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/mv643xx_eth.h> | 17 | #include <linux/mv643xx_eth.h> |
18 | #include <linux/ethtool.h> | 18 | #include <linux/ethtool.h> |
19 | #include <linux/i2c.h> | 19 | #include <linux/i2c.h> |
20 | #include <net/dsa.h> | ||
20 | #include <asm/mach-types.h> | 21 | #include <asm/mach-types.h> |
21 | #include <asm/gpio.h> | 22 | #include <asm/gpio.h> |
22 | #include <asm/leds.h> | 23 | #include <asm/leds.h> |
@@ -94,6 +95,15 @@ static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = { | |||
94 | .duplex = DUPLEX_FULL, | 95 | .duplex = DUPLEX_FULL, |
95 | }; | 96 | }; |
96 | 97 | ||
98 | static struct dsa_platform_data rd88f5181l_ge_switch_data = { | ||
99 | .port_names[0] = "lan2", | ||
100 | .port_names[1] = "lan1", | ||
101 | .port_names[2] = "wan", | ||
102 | .port_names[3] = "cpu", | ||
103 | .port_names[5] = "lan4", | ||
104 | .port_names[7] = "lan3", | ||
105 | }; | ||
106 | |||
97 | static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = { | 107 | static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = { |
98 | I2C_BOARD_INFO("ds1338", 0x68), | 108 | I2C_BOARD_INFO("ds1338", 0x68), |
99 | }; | 109 | }; |
@@ -112,6 +122,7 @@ static void __init rd88f5181l_ge_init(void) | |||
112 | */ | 122 | */ |
113 | orion5x_ehci0_init(); | 123 | orion5x_ehci0_init(); |
114 | orion5x_eth_init(&rd88f5181l_ge_eth_data); | 124 | orion5x_eth_init(&rd88f5181l_ge_eth_data); |
125 | orion5x_eth_switch_init(&rd88f5181l_ge_switch_data, gpio_to_irq(8)); | ||
115 | orion5x_i2c_init(); | 126 | orion5x_i2c_init(); |
116 | orion5x_uart0_init(); | 127 | orion5x_uart0_init(); |
117 | 128 | ||
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c index 40e049539091..262e25e4dace 100644 --- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/spi/orion_spi.h> | 19 | #include <linux/spi/orion_spi.h> |
20 | #include <linux/spi/flash.h> | 20 | #include <linux/spi/flash.h> |
21 | #include <linux/ethtool.h> | 21 | #include <linux/ethtool.h> |
22 | #include <net/dsa.h> | ||
22 | #include <asm/mach-types.h> | 23 | #include <asm/mach-types.h> |
23 | #include <asm/gpio.h> | 24 | #include <asm/gpio.h> |
24 | #include <asm/leds.h> | 25 | #include <asm/leds.h> |
@@ -34,6 +35,15 @@ static struct mv643xx_eth_platform_data rd88f6183ap_ge_eth_data = { | |||
34 | .duplex = DUPLEX_FULL, | 35 | .duplex = DUPLEX_FULL, |
35 | }; | 36 | }; |
36 | 37 | ||
38 | static struct dsa_platform_data rd88f6183ap_ge_switch_data = { | ||
39 | .port_names[0] = "lan1", | ||
40 | .port_names[1] = "lan2", | ||
41 | .port_names[2] = "lan3", | ||
42 | .port_names[3] = "lan4", | ||
43 | .port_names[4] = "wan", | ||
44 | .port_names[5] = "cpu", | ||
45 | }; | ||
46 | |||
37 | static struct mtd_partition rd88f6183ap_ge_partitions[] = { | 47 | static struct mtd_partition rd88f6183ap_ge_partitions[] = { |
38 | { | 48 | { |
39 | .name = "kernel", | 49 | .name = "kernel", |
@@ -79,6 +89,7 @@ static void __init rd88f6183ap_ge_init(void) | |||
79 | */ | 89 | */ |
80 | orion5x_ehci0_init(); | 90 | orion5x_ehci0_init(); |
81 | orion5x_eth_init(&rd88f6183ap_ge_eth_data); | 91 | orion5x_eth_init(&rd88f6183ap_ge_eth_data); |
92 | orion5x_eth_switch_init(&rd88f6183ap_ge_switch_data, gpio_to_irq(3)); | ||
82 | spi_register_board_info(rd88f6183ap_ge_spi_slave_info, | 93 | spi_register_board_info(rd88f6183ap_ge_spi_slave_info, |
83 | ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info)); | 94 | ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info)); |
84 | orion5x_spi_init(); | 95 | orion5x_spi_init(); |
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c index 9a4fd5256462..cc8f89200865 100644 --- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c +++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/mtd/physmap.h> | 15 | #include <linux/mtd/physmap.h> |
16 | #include <linux/mv643xx_eth.h> | 16 | #include <linux/mv643xx_eth.h> |
17 | #include <linux/ethtool.h> | 17 | #include <linux/ethtool.h> |
18 | #include <net/dsa.h> | ||
18 | #include <asm/mach-types.h> | 19 | #include <asm/mach-types.h> |
19 | #include <asm/gpio.h> | 20 | #include <asm/gpio.h> |
20 | #include <asm/mach/arch.h> | 21 | #include <asm/mach/arch.h> |
@@ -105,6 +106,15 @@ static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = { | |||
105 | .duplex = DUPLEX_FULL, | 106 | .duplex = DUPLEX_FULL, |
106 | }; | 107 | }; |
107 | 108 | ||
109 | static struct dsa_platform_data wrt350n_v2_switch_data = { | ||
110 | .port_names[0] = "lan2", | ||
111 | .port_names[1] = "lan1", | ||
112 | .port_names[2] = "wan", | ||
113 | .port_names[3] = "cpu", | ||
114 | .port_names[5] = "lan3", | ||
115 | .port_names[7] = "lan4", | ||
116 | }; | ||
117 | |||
108 | static void __init wrt350n_v2_init(void) | 118 | static void __init wrt350n_v2_init(void) |
109 | { | 119 | { |
110 | /* | 120 | /* |
@@ -119,6 +129,7 @@ static void __init wrt350n_v2_init(void) | |||
119 | */ | 129 | */ |
120 | orion5x_ehci0_init(); | 130 | orion5x_ehci0_init(); |
121 | orion5x_eth_init(&wrt350n_v2_eth_data); | 131 | orion5x_eth_init(&wrt350n_v2_eth_data); |
132 | orion5x_eth_switch_init(&wrt350n_v2_switch_data, NO_IRQ); | ||
122 | orion5x_uart0_init(); | 133 | orion5x_uart0_init(); |
123 | 134 | ||
124 | orion5x_setup_dev_boot_win(WRT350N_V2_NOR_BOOT_BASE, | 135 | orion5x_setup_dev_boot_win(WRT350N_V2_NOR_BOOT_BASE, |
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index f27f6b3d6e6f..f781873431f3 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig | |||
@@ -257,7 +257,6 @@ config MACH_ARMCORE | |||
257 | bool "CompuLab CM-X255/CM-X270 modules" | 257 | bool "CompuLab CM-X255/CM-X270 modules" |
258 | select PXA27x | 258 | select PXA27x |
259 | select IWMMXT | 259 | select IWMMXT |
260 | select ZONE_DMA if PCI | ||
261 | select PXA25x | 260 | select PXA25x |
262 | select PXA_SSP | 261 | select PXA_SSP |
263 | 262 | ||
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h index 9c163e19ada9..32bb4a2eb7f1 100644 --- a/arch/arm/mach-pxa/include/mach/irqs.h +++ b/arch/arm/mach-pxa/include/mach/irqs.h | |||
@@ -9,7 +9,8 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | #ifndef __ASM_MACH_IRQS_H | |
13 | #define __ASM_MACH_IRQS_H | ||
13 | 14 | ||
14 | #ifdef CONFIG_PXA_HAVE_ISA_IRQS | 15 | #ifdef CONFIG_PXA_HAVE_ISA_IRQS |
15 | #define PXA_ISA_IRQ(x) (x) | 16 | #define PXA_ISA_IRQ(x) (x) |
@@ -264,3 +265,5 @@ | |||
264 | #endif | 265 | #endif |
265 | 266 | ||
266 | #endif /* CONFIG_PCI_HOST_ITE8152 */ | 267 | #endif /* CONFIG_PCI_HOST_ITE8152 */ |
268 | |||
269 | #endif /* __ASM_MACH_IRQS_H */ | ||
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h index 31ac26b55bc1..e8488dfb7e91 100644 --- a/arch/arm/mach-pxa/include/mach/spitz.h +++ b/arch/arm/mach-pxa/include/mach/spitz.h | |||
@@ -142,7 +142,7 @@ | |||
142 | 142 | ||
143 | #define SPITZ_SCP2_GPIO_BASE (NR_BUILTIN_GPIO + 12) | 143 | #define SPITZ_SCP2_GPIO_BASE (NR_BUILTIN_GPIO + 12) |
144 | #define SPITZ_GPIO_IR_ON (SPITZ_SCP2_GPIO_BASE + 0) | 144 | #define SPITZ_GPIO_IR_ON (SPITZ_SCP2_GPIO_BASE + 0) |
145 | #define SPITZ_GPIO_AKIN_PULLUP (SPITZ_SCP2_GPIO_BASE + 1 | 145 | #define SPITZ_GPIO_AKIN_PULLUP (SPITZ_SCP2_GPIO_BASE + 1) |
146 | #define SPITZ_GPIO_RESERVED_1 (SPITZ_SCP2_GPIO_BASE + 2) | 146 | #define SPITZ_GPIO_RESERVED_1 (SPITZ_SCP2_GPIO_BASE + 2) |
147 | #define SPITZ_GPIO_RESERVED_2 (SPITZ_SCP2_GPIO_BASE + 3) | 147 | #define SPITZ_GPIO_RESERVED_2 (SPITZ_SCP2_GPIO_BASE + 3) |
148 | #define SPITZ_GPIO_RESERVED_3 (SPITZ_SCP2_GPIO_BASE + 4) | 148 | #define SPITZ_GPIO_RESERVED_3 (SPITZ_SCP2_GPIO_BASE + 4) |
diff --git a/arch/arm/mach-pxa/pwm.c b/arch/arm/mach-pxa/pwm.c index 316cd986da5c..74e2ead8cee8 100644 --- a/arch/arm/mach-pxa/pwm.c +++ b/arch/arm/mach-pxa/pwm.c | |||
@@ -60,7 +60,7 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) | |||
60 | do_div(c, 1000000000); | 60 | do_div(c, 1000000000); |
61 | period_cycles = c; | 61 | period_cycles = c; |
62 | 62 | ||
63 | if (period_cycles < 0) | 63 | if (period_cycles < 1) |
64 | period_cycles = 1; | 64 | period_cycles = 1; |
65 | prescale = (period_cycles - 1) / 1024; | 65 | prescale = (period_cycles - 1) / 1024; |
66 | pv = period_cycles / (prescale + 1) - 1; | 66 | pv = period_cycles / (prescale + 1) - 1; |
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c index a13dbf3c2c05..a72e3add743c 100644 --- a/arch/arm/mach-pxa/trizeps4.c +++ b/arch/arm/mach-pxa/trizeps4.c | |||
@@ -399,7 +399,7 @@ static void trizeps4_irda_transceiver_mode(struct device *dev, int mode) | |||
399 | /* Switch mode */ | 399 | /* Switch mode */ |
400 | if (mode & IR_SIRMODE) | 400 | if (mode & IR_SIRMODE) |
401 | trizeps_conxs_ircr &= ~ConXS_IRCR_MODE; /* Slow mode */ | 401 | trizeps_conxs_ircr &= ~ConXS_IRCR_MODE; /* Slow mode */ |
402 | else if (mode & IR_FIRMODE) { | 402 | else if (mode & IR_FIRMODE) |
403 | trizeps_conxs_ircr |= ConXS_IRCR_MODE; /* Fast mode */ | 403 | trizeps_conxs_ircr |= ConXS_IRCR_MODE; /* Fast mode */ |
404 | 404 | ||
405 | /* Switch power */ | 405 | /* Switch power */ |
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c index 2f60bf6b8d43..f854e7385e3c 100644 --- a/arch/arm/mach-s3c2443/clock.c +++ b/arch/arm/mach-s3c2443/clock.c | |||
@@ -1033,8 +1033,7 @@ void __init s3c2443_init_clocks(int xtal) | |||
1033 | 1033 | ||
1034 | fclk = pll / s3c2443_fclk_div(clkdiv0); | 1034 | fclk = pll / s3c2443_fclk_div(clkdiv0); |
1035 | hclk = s3c2443_prediv_getrate(&clk_prediv); | 1035 | hclk = s3c2443_prediv_getrate(&clk_prediv); |
1036 | hclk = hclk / s3c2443_get_hdiv(clkdiv0); | 1036 | hclk /= s3c2443_get_hdiv(clkdiv0); |
1037 | hclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_HCLK) ? 2 : 1); | ||
1038 | pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1); | 1037 | pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1); |
1039 | 1038 | ||
1040 | s3c24xx_setup_clocks(xtal, fclk, hclk, pclk); | 1039 | s3c24xx_setup_clocks(xtal, fclk, hclk, pclk); |
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 33926c9fcda6..5786adf10040 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S | |||
@@ -29,7 +29,7 @@ ENTRY(v4_flush_user_cache_all) | |||
29 | * Clean and invalidate the entire cache. | 29 | * Clean and invalidate the entire cache. |
30 | */ | 30 | */ |
31 | ENTRY(v4_flush_kern_cache_all) | 31 | ENTRY(v4_flush_kern_cache_all) |
32 | #ifdef CPU_CP15 | 32 | #ifdef CONFIG_CPU_CP15 |
33 | mov r0, #0 | 33 | mov r0, #0 |
34 | mcr p15, 0, r0, c7, c7, 0 @ flush ID cache | 34 | mcr p15, 0, r0, c7, c7, 0 @ flush ID cache |
35 | mov pc, lr | 35 | mov pc, lr |
@@ -48,7 +48,7 @@ ENTRY(v4_flush_kern_cache_all) | |||
48 | * - flags - vma_area_struct flags describing address space | 48 | * - flags - vma_area_struct flags describing address space |
49 | */ | 49 | */ |
50 | ENTRY(v4_flush_user_cache_range) | 50 | ENTRY(v4_flush_user_cache_range) |
51 | #ifdef CPU_CP15 | 51 | #ifdef CONFIG_CPU_CP15 |
52 | mov ip, #0 | 52 | mov ip, #0 |
53 | mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache | 53 | mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache |
54 | mov pc, lr | 54 | mov pc, lr |
@@ -116,7 +116,7 @@ ENTRY(v4_dma_inv_range) | |||
116 | * - end - virtual end address | 116 | * - end - virtual end address |
117 | */ | 117 | */ |
118 | ENTRY(v4_dma_flush_range) | 118 | ENTRY(v4_dma_flush_range) |
119 | #ifdef CPU_CP15 | 119 | #ifdef CONFIG_CPU_CP15 |
120 | mov r0, #0 | 120 | mov r0, #0 |
121 | mcr p15, 0, r0, c7, c7, 0 @ flush ID cache | 121 | mcr p15, 0, r0, c7, c7, 0 @ flush ID cache |
122 | #endif | 122 | #endif |
diff --git a/arch/arm/plat-s3c24xx/pwm-clock.c b/arch/arm/plat-s3c24xx/pwm-clock.c index b8e854f1b1d5..3fad68a1e6bc 100644 --- a/arch/arm/plat-s3c24xx/pwm-clock.c +++ b/arch/arm/plat-s3c24xx/pwm-clock.c | |||
@@ -315,7 +315,7 @@ static int clk_pwm_tin_set_parent(struct clk *clk, struct clk *parent) | |||
315 | if (parent == s3c24xx_pwmclk_tclk(id)) | 315 | if (parent == s3c24xx_pwmclk_tclk(id)) |
316 | bits = S3C2410_TCFG1_MUX_TCLK << shift; | 316 | bits = S3C2410_TCFG1_MUX_TCLK << shift; |
317 | else if (parent == s3c24xx_pwmclk_tdiv(id)) | 317 | else if (parent == s3c24xx_pwmclk_tdiv(id)) |
318 | bits = clk_pwm_tdiv_bits(to_tdiv(clk)) << shift; | 318 | bits = clk_pwm_tdiv_bits(to_tdiv(parent)) << shift; |
319 | else | 319 | else |
320 | return -EINVAL; | 320 | return -EINVAL; |
321 | 321 | ||
diff --git a/arch/arm/plat-s3c24xx/pwm.c b/arch/arm/plat-s3c24xx/pwm.c index feb770f2e84e..ec56b88866c4 100644 --- a/arch/arm/plat-s3c24xx/pwm.c +++ b/arch/arm/plat-s3c24xx/pwm.c | |||
@@ -56,7 +56,7 @@ static struct clk *clk_scaler[2]; | |||
56 | } \ | 56 | } \ |
57 | } | 57 | } |
58 | 58 | ||
59 | #define DEFINE_TIMER(_tmr_no, _irq) \ | 59 | #define DEFINE_S3C_TIMER(_tmr_no, _irq) \ |
60 | .name = "s3c24xx-pwm", \ | 60 | .name = "s3c24xx-pwm", \ |
61 | .id = _tmr_no, \ | 61 | .id = _tmr_no, \ |
62 | .num_resources = TIMER_RESOURCE_SIZE, \ | 62 | .num_resources = TIMER_RESOURCE_SIZE, \ |
@@ -67,11 +67,11 @@ static struct clk *clk_scaler[2]; | |||
67 | */ | 67 | */ |
68 | 68 | ||
69 | struct platform_device s3c_device_timer[] = { | 69 | struct platform_device s3c_device_timer[] = { |
70 | [0] = { DEFINE_TIMER(0, IRQ_TIMER0) }, | 70 | [0] = { DEFINE_S3C_TIMER(0, IRQ_TIMER0) }, |
71 | [1] = { DEFINE_TIMER(1, IRQ_TIMER1) }, | 71 | [1] = { DEFINE_S3C_TIMER(1, IRQ_TIMER1) }, |
72 | [2] = { DEFINE_TIMER(2, IRQ_TIMER2) }, | 72 | [2] = { DEFINE_S3C_TIMER(2, IRQ_TIMER2) }, |
73 | [3] = { DEFINE_TIMER(3, IRQ_TIMER3) }, | 73 | [3] = { DEFINE_S3C_TIMER(3, IRQ_TIMER3) }, |
74 | [4] = { DEFINE_TIMER(4, IRQ_TIMER4) }, | 74 | [4] = { DEFINE_S3C_TIMER(4, IRQ_TIMER4) }, |
75 | }; | 75 | }; |
76 | 76 | ||
77 | static inline int pwm_is_tdiv(struct pwm_device *pwm) | 77 | static inline int pwm_is_tdiv(struct pwm_device *pwm) |
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 07335e719bf8..b17aeea8d620 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig | |||
@@ -679,6 +679,8 @@ source "fs/Kconfig" | |||
679 | 679 | ||
680 | source "drivers/usb/Kconfig" | 680 | source "drivers/usb/Kconfig" |
681 | 681 | ||
682 | source "drivers/uwb/Kconfig" | ||
683 | |||
682 | source "arch/cris/Kconfig.debug" | 684 | source "arch/cris/Kconfig.debug" |
683 | 685 | ||
684 | source "security/Kconfig" | 686 | source "security/Kconfig" |
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index bd1995403c67..28f06fd9b7b7 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig | |||
@@ -216,6 +216,8 @@ source "drivers/hwmon/Kconfig" | |||
216 | 216 | ||
217 | source "drivers/usb/Kconfig" | 217 | source "drivers/usb/Kconfig" |
218 | 218 | ||
219 | source "drivers/uwb/Kconfig" | ||
220 | |||
219 | endmenu | 221 | endmenu |
220 | 222 | ||
221 | source "fs/Kconfig" | 223 | source "fs/Kconfig" |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 912c57db2d21..27eec71429b0 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -23,6 +23,7 @@ config IA64 | |||
23 | select HAVE_KRETPROBES | 23 | select HAVE_KRETPROBES |
24 | select HAVE_DMA_ATTRS | 24 | select HAVE_DMA_ATTRS |
25 | select HAVE_KVM | 25 | select HAVE_KVM |
26 | select HAVE_ARCH_TRACEHOOK | ||
26 | default y | 27 | default y |
27 | help | 28 | help |
28 | The Itanium Processor Family is Intel's 64-bit successor to | 29 | The Itanium Processor Family is Intel's 64-bit successor to |
@@ -110,6 +111,33 @@ config AUDIT_ARCH | |||
110 | bool | 111 | bool |
111 | default y | 112 | default y |
112 | 113 | ||
114 | menuconfig PARAVIRT_GUEST | ||
115 | bool "Paravirtualized guest support" | ||
116 | help | ||
117 | Say Y here to get to see options related to running Linux under | ||
118 | various hypervisors. This option alone does not add any kernel code. | ||
119 | |||
120 | If you say N, all options in this submenu will be skipped and disabled. | ||
121 | |||
122 | if PARAVIRT_GUEST | ||
123 | |||
124 | config PARAVIRT | ||
125 | bool "Enable paravirtualization code" | ||
126 | depends on PARAVIRT_GUEST | ||
127 | default y | ||
128 | bool | ||
129 | default y | ||
130 | help | ||
131 | This changes the kernel so it can modify itself when it is run | ||
132 | under a hypervisor, potentially improving performance significantly | ||
133 | over full virtualization. However, when run without a hypervisor | ||
134 | the kernel is theoretically slower and slightly larger. | ||
135 | |||
136 | |||
137 | source "arch/ia64/xen/Kconfig" | ||
138 | |||
139 | endif | ||
140 | |||
113 | choice | 141 | choice |
114 | prompt "System type" | 142 | prompt "System type" |
115 | default IA64_GENERIC | 143 | default IA64_GENERIC |
@@ -119,6 +147,7 @@ config IA64_GENERIC | |||
119 | select NUMA | 147 | select NUMA |
120 | select ACPI_NUMA | 148 | select ACPI_NUMA |
121 | select SWIOTLB | 149 | select SWIOTLB |
150 | select PCI_MSI | ||
122 | help | 151 | help |
123 | This selects the system type of your hardware. A "generic" kernel | 152 | This selects the system type of your hardware. A "generic" kernel |
124 | will run on any supported IA-64 system. However, if you configure | 153 | will run on any supported IA-64 system. However, if you configure |
@@ -126,11 +155,13 @@ config IA64_GENERIC | |||
126 | 155 | ||
127 | generic For any supported IA-64 system | 156 | generic For any supported IA-64 system |
128 | DIG-compliant For DIG ("Developer's Interface Guide") compliant systems | 157 | DIG-compliant For DIG ("Developer's Interface Guide") compliant systems |
158 | DIG+Intel+IOMMU For DIG systems with Intel IOMMU | ||
129 | HP-zx1/sx1000 For HP systems | 159 | HP-zx1/sx1000 For HP systems |
130 | HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices. | 160 | HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices. |
131 | SGI-SN2 For SGI Altix systems | 161 | SGI-SN2 For SGI Altix systems |
132 | SGI-UV For SGI UV systems | 162 | SGI-UV For SGI UV systems |
133 | Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> | 163 | Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> |
164 | Xen-domU For xen domU system | ||
134 | 165 | ||
135 | If you don't know what to do, choose "generic". | 166 | If you don't know what to do, choose "generic". |
136 | 167 | ||
@@ -138,6 +169,11 @@ config IA64_DIG | |||
138 | bool "DIG-compliant" | 169 | bool "DIG-compliant" |
139 | select SWIOTLB | 170 | select SWIOTLB |
140 | 171 | ||
172 | config IA64_DIG_VTD | ||
173 | bool "DIG+Intel+IOMMU" | ||
174 | select DMAR | ||
175 | select PCI_MSI | ||
176 | |||
141 | config IA64_HP_ZX1 | 177 | config IA64_HP_ZX1 |
142 | bool "HP-zx1/sx1000" | 178 | bool "HP-zx1/sx1000" |
143 | help | 179 | help |
@@ -181,6 +217,10 @@ config IA64_HP_SIM | |||
181 | bool "Ski-simulator" | 217 | bool "Ski-simulator" |
182 | select SWIOTLB | 218 | select SWIOTLB |
183 | 219 | ||
220 | config IA64_XEN_GUEST | ||
221 | bool "Xen guest" | ||
222 | depends on XEN | ||
223 | |||
184 | endchoice | 224 | endchoice |
185 | 225 | ||
186 | choice | 226 | choice |
@@ -583,6 +623,16 @@ source "drivers/pci/hotplug/Kconfig" | |||
583 | 623 | ||
584 | source "drivers/pcmcia/Kconfig" | 624 | source "drivers/pcmcia/Kconfig" |
585 | 625 | ||
626 | config DMAR | ||
627 | bool "Support for DMA Remapping Devices (EXPERIMENTAL)" | ||
628 | depends on IA64_GENERIC && ACPI && EXPERIMENTAL | ||
629 | help | ||
630 | DMA remapping (DMAR) devices support enables independent address | ||
631 | translations for Direct Memory Access (DMA) from devices. | ||
632 | These DMA remapping devices are reported via ACPI tables | ||
633 | and include PCI device scope covered by these DMA | ||
634 | remapping devices. | ||
635 | |||
586 | endmenu | 636 | endmenu |
587 | 637 | ||
588 | endif | 638 | endif |
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 905d25b13d5a..58a7e46affda 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile | |||
@@ -53,12 +53,15 @@ libs-y += arch/ia64/lib/ | |||
53 | core-y += arch/ia64/kernel/ arch/ia64/mm/ | 53 | core-y += arch/ia64/kernel/ arch/ia64/mm/ |
54 | core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/ | 54 | core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/ |
55 | core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ | 55 | core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ |
56 | core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ | ||
56 | core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ | 57 | core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ |
57 | core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ | 58 | core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ |
58 | core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ | 59 | core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ |
60 | core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/ | ||
59 | core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ | 61 | core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ |
60 | core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ | 62 | core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ |
61 | core-$(CONFIG_KVM) += arch/ia64/kvm/ | 63 | core-$(CONFIG_KVM) += arch/ia64/kvm/ |
64 | core-$(CONFIG_XEN) += arch/ia64/xen/ | ||
62 | 65 | ||
63 | drivers-$(CONFIG_PCI) += arch/ia64/pci/ | 66 | drivers-$(CONFIG_PCI) += arch/ia64/pci/ |
64 | drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ | 67 | drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ |
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 9f483976228f..e05f9e1d3faa 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig | |||
@@ -233,6 +233,8 @@ CONFIG_DMIID=y | |||
233 | CONFIG_BINFMT_ELF=y | 233 | CONFIG_BINFMT_ELF=y |
234 | CONFIG_BINFMT_MISC=m | 234 | CONFIG_BINFMT_MISC=m |
235 | 235 | ||
236 | # CONFIG_DMAR is not set | ||
237 | |||
236 | # | 238 | # |
237 | # Power management and ACPI | 239 | # Power management and ACPI |
238 | # | 240 | # |
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index 797acf9066c1..c522edf23c62 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig | |||
@@ -172,6 +172,8 @@ CONFIG_DMIID=y | |||
172 | CONFIG_BINFMT_ELF=y | 172 | CONFIG_BINFMT_ELF=y |
173 | CONFIG_BINFMT_MISC=m | 173 | CONFIG_BINFMT_MISC=m |
174 | 174 | ||
175 | # CONFIG_DMAR is not set | ||
176 | |||
175 | # | 177 | # |
176 | # Power management and ACPI | 178 | # Power management and ACPI |
177 | # | 179 | # |
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile index 971cd7870dd4..5c0283830bd6 100644 --- a/arch/ia64/dig/Makefile +++ b/arch/ia64/dig/Makefile | |||
@@ -6,4 +6,9 @@ | |||
6 | # | 6 | # |
7 | 7 | ||
8 | obj-y := setup.o | 8 | obj-y := setup.o |
9 | ifeq ($(CONFIG_DMAR), y) | ||
10 | obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o | ||
11 | else | ||
9 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | 12 | obj-$(CONFIG_IA64_GENERIC) += machvec.o |
13 | endif | ||
14 | obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o | ||
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c new file mode 100644 index 000000000000..1c8a079017a3 --- /dev/null +++ b/arch/ia64/dig/dig_vtd_iommu.c | |||
@@ -0,0 +1,59 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/intel-iommu.h> | ||
5 | |||
6 | void * | ||
7 | vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
8 | gfp_t flags) | ||
9 | { | ||
10 | return intel_alloc_coherent(dev, size, dma_handle, flags); | ||
11 | } | ||
12 | EXPORT_SYMBOL_GPL(vtd_alloc_coherent); | ||
13 | |||
14 | void | ||
15 | vtd_free_coherent(struct device *dev, size_t size, void *vaddr, | ||
16 | dma_addr_t dma_handle) | ||
17 | { | ||
18 | intel_free_coherent(dev, size, vaddr, dma_handle); | ||
19 | } | ||
20 | EXPORT_SYMBOL_GPL(vtd_free_coherent); | ||
21 | |||
22 | dma_addr_t | ||
23 | vtd_map_single_attrs(struct device *dev, void *addr, size_t size, | ||
24 | int dir, struct dma_attrs *attrs) | ||
25 | { | ||
26 | return intel_map_single(dev, (phys_addr_t)addr, size, dir); | ||
27 | } | ||
28 | EXPORT_SYMBOL_GPL(vtd_map_single_attrs); | ||
29 | |||
30 | void | ||
31 | vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | ||
32 | int dir, struct dma_attrs *attrs) | ||
33 | { | ||
34 | intel_unmap_single(dev, iova, size, dir); | ||
35 | } | ||
36 | EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs); | ||
37 | |||
38 | int | ||
39 | vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, | ||
40 | int dir, struct dma_attrs *attrs) | ||
41 | { | ||
42 | return intel_map_sg(dev, sglist, nents, dir); | ||
43 | } | ||
44 | EXPORT_SYMBOL_GPL(vtd_map_sg_attrs); | ||
45 | |||
46 | void | ||
47 | vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | ||
48 | int nents, int dir, struct dma_attrs *attrs) | ||
49 | { | ||
50 | intel_unmap_sg(dev, sglist, nents, dir); | ||
51 | } | ||
52 | EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs); | ||
53 | |||
54 | int | ||
55 | vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
56 | { | ||
57 | return 0; | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(vtd_dma_mapping_error); | ||
diff --git a/arch/ia64/dig/machvec_vtd.c b/arch/ia64/dig/machvec_vtd.c new file mode 100644 index 000000000000..7cd3eb471cad --- /dev/null +++ b/arch/ia64/dig/machvec_vtd.c | |||
@@ -0,0 +1,3 @@ | |||
1 | #define MACHVEC_PLATFORM_NAME dig_vtd | ||
2 | #define MACHVEC_PLATFORM_HEADER <asm/machvec_dig_vtd.h> | ||
3 | #include <asm/machvec_init.h> | ||
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index 53505bb04771..a8cf19958850 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S | |||
@@ -108,6 +108,11 @@ GLOBAL_ENTRY(ia32_trace_syscall) | |||
108 | ;; | 108 | ;; |
109 | st8 [r2]=r3 // initialize return code to -ENOSYS | 109 | st8 [r2]=r3 // initialize return code to -ENOSYS |
110 | br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args | 110 | br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args |
111 | cmp.lt p6,p0=r8,r0 // check tracehook | ||
112 | adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 | ||
113 | ;; | ||
114 | (p6) st8.spill [r2]=r8 // store return value in slot for r8 | ||
115 | (p6) br.spnt.few .ret4 | ||
111 | .ret2: // Need to reload arguments (they may be changed by the tracing process) | 116 | .ret2: // Need to reload arguments (they may be changed by the tracing process) |
112 | adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1 | 117 | adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1 |
113 | adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13 | 118 | adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13 |
@@ -199,10 +204,10 @@ ia32_syscall_table: | |||
199 | data8 sys_setuid /* 16-bit version */ | 204 | data8 sys_setuid /* 16-bit version */ |
200 | data8 sys_getuid /* 16-bit version */ | 205 | data8 sys_getuid /* 16-bit version */ |
201 | data8 compat_sys_stime /* 25 */ | 206 | data8 compat_sys_stime /* 25 */ |
202 | data8 sys32_ptrace | 207 | data8 compat_sys_ptrace |
203 | data8 sys32_alarm | 208 | data8 sys32_alarm |
204 | data8 sys_ni_syscall | 209 | data8 sys_ni_syscall |
205 | data8 sys32_pause | 210 | data8 sys_pause |
206 | data8 compat_sys_utime /* 30 */ | 211 | data8 compat_sys_utime /* 30 */ |
207 | data8 sys_ni_syscall /* old stty syscall holder */ | 212 | data8 sys_ni_syscall /* old stty syscall holder */ |
208 | data8 sys_ni_syscall /* old gtty syscall holder */ | 213 | data8 sys_ni_syscall /* old gtty syscall holder */ |
@@ -215,7 +220,7 @@ ia32_syscall_table: | |||
215 | data8 sys_mkdir | 220 | data8 sys_mkdir |
216 | data8 sys_rmdir /* 40 */ | 221 | data8 sys_rmdir /* 40 */ |
217 | data8 sys_dup | 222 | data8 sys_dup |
218 | data8 sys32_pipe | 223 | data8 sys_pipe |
219 | data8 compat_sys_times | 224 | data8 compat_sys_times |
220 | data8 sys_ni_syscall /* old prof syscall holder */ | 225 | data8 sys_ni_syscall /* old prof syscall holder */ |
221 | data8 sys32_brk /* 45 */ | 226 | data8 sys32_brk /* 45 */ |
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index f4430bb4bbdc..5e92ae00bdbb 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c | |||
@@ -1098,21 +1098,6 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len, | |||
1098 | return ret; | 1098 | return ret; |
1099 | } | 1099 | } |
1100 | 1100 | ||
1101 | asmlinkage long | ||
1102 | sys32_pipe (int __user *fd) | ||
1103 | { | ||
1104 | int retval; | ||
1105 | int fds[2]; | ||
1106 | |||
1107 | retval = do_pipe_flags(fds, 0); | ||
1108 | if (retval) | ||
1109 | goto out; | ||
1110 | if (copy_to_user(fd, fds, sizeof(fds))) | ||
1111 | retval = -EFAULT; | ||
1112 | out: | ||
1113 | return retval; | ||
1114 | } | ||
1115 | |||
1116 | asmlinkage unsigned long | 1101 | asmlinkage unsigned long |
1117 | sys32_alarm (unsigned int seconds) | 1102 | sys32_alarm (unsigned int seconds) |
1118 | { | 1103 | { |
@@ -1209,25 +1194,6 @@ sys32_waitpid (int pid, unsigned int *stat_addr, int options) | |||
1209 | return compat_sys_wait4(pid, stat_addr, options, NULL); | 1194 | return compat_sys_wait4(pid, stat_addr, options, NULL); |
1210 | } | 1195 | } |
1211 | 1196 | ||
1212 | static unsigned int | ||
1213 | ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val) | ||
1214 | { | ||
1215 | size_t copied; | ||
1216 | unsigned int ret; | ||
1217 | |||
1218 | copied = access_process_vm(child, addr, val, sizeof(*val), 0); | ||
1219 | return (copied != sizeof(ret)) ? -EIO : 0; | ||
1220 | } | ||
1221 | |||
1222 | static unsigned int | ||
1223 | ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val) | ||
1224 | { | ||
1225 | |||
1226 | if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) | ||
1227 | return -EIO; | ||
1228 | return 0; | ||
1229 | } | ||
1230 | |||
1231 | /* | 1197 | /* |
1232 | * The order in which registers are stored in the ptrace regs structure | 1198 | * The order in which registers are stored in the ptrace regs structure |
1233 | */ | 1199 | */ |
@@ -1525,49 +1491,15 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u | |||
1525 | return 0; | 1491 | return 0; |
1526 | } | 1492 | } |
1527 | 1493 | ||
1528 | asmlinkage long | 1494 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, |
1529 | sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data) | 1495 | compat_ulong_t caddr, compat_ulong_t cdata) |
1530 | { | 1496 | { |
1531 | struct task_struct *child; | 1497 | unsigned long addr = caddr; |
1532 | unsigned int value, tmp; | 1498 | unsigned long data = cdata; |
1499 | unsigned int tmp; | ||
1533 | long i, ret; | 1500 | long i, ret; |
1534 | 1501 | ||
1535 | lock_kernel(); | ||
1536 | if (request == PTRACE_TRACEME) { | ||
1537 | ret = ptrace_traceme(); | ||
1538 | goto out; | ||
1539 | } | ||
1540 | |||
1541 | child = ptrace_get_task_struct(pid); | ||
1542 | if (IS_ERR(child)) { | ||
1543 | ret = PTR_ERR(child); | ||
1544 | goto out; | ||
1545 | } | ||
1546 | |||
1547 | if (request == PTRACE_ATTACH) { | ||
1548 | ret = sys_ptrace(request, pid, addr, data); | ||
1549 | goto out_tsk; | ||
1550 | } | ||
1551 | |||
1552 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | ||
1553 | if (ret < 0) | ||
1554 | goto out_tsk; | ||
1555 | |||
1556 | switch (request) { | 1502 | switch (request) { |
1557 | case PTRACE_PEEKTEXT: | ||
1558 | case PTRACE_PEEKDATA: /* read word at location addr */ | ||
1559 | ret = ia32_peek(child, addr, &value); | ||
1560 | if (ret == 0) | ||
1561 | ret = put_user(value, (unsigned int __user *) compat_ptr(data)); | ||
1562 | else | ||
1563 | ret = -EIO; | ||
1564 | goto out_tsk; | ||
1565 | |||
1566 | case PTRACE_POKETEXT: | ||
1567 | case PTRACE_POKEDATA: /* write the word at location addr */ | ||
1568 | ret = ia32_poke(child, addr, data); | ||
1569 | goto out_tsk; | ||
1570 | |||
1571 | case PTRACE_PEEKUSR: /* read word at addr in USER area */ | 1503 | case PTRACE_PEEKUSR: /* read word at addr in USER area */ |
1572 | ret = -EIO; | 1504 | ret = -EIO; |
1573 | if ((addr & 3) || addr > 17*sizeof(int)) | 1505 | if ((addr & 3) || addr > 17*sizeof(int)) |
@@ -1632,27 +1564,9 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data) | |||
1632 | compat_ptr(data)); | 1564 | compat_ptr(data)); |
1633 | break; | 1565 | break; |
1634 | 1566 | ||
1635 | case PTRACE_GETEVENTMSG: | ||
1636 | ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data)); | ||
1637 | break; | ||
1638 | |||
1639 | case PTRACE_SYSCALL: /* continue, stop after next syscall */ | ||
1640 | case PTRACE_CONT: /* restart after signal. */ | ||
1641 | case PTRACE_KILL: | ||
1642 | case PTRACE_SINGLESTEP: /* execute chile for one instruction */ | ||
1643 | case PTRACE_DETACH: /* detach a process */ | ||
1644 | ret = sys_ptrace(request, pid, addr, data); | ||
1645 | break; | ||
1646 | |||
1647 | default: | 1567 | default: |
1648 | ret = ptrace_request(child, request, addr, data); | 1568 | return compat_ptrace_request(child, request, caddr, cdata); |
1649 | break; | ||
1650 | |||
1651 | } | 1569 | } |
1652 | out_tsk: | ||
1653 | put_task_struct(child); | ||
1654 | out: | ||
1655 | unlock_kernel(); | ||
1656 | return ret; | 1570 | return ret; |
1657 | } | 1571 | } |
1658 | 1572 | ||
@@ -1704,14 +1618,6 @@ out: | |||
1704 | } | 1618 | } |
1705 | 1619 | ||
1706 | asmlinkage int | 1620 | asmlinkage int |
1707 | sys32_pause (void) | ||
1708 | { | ||
1709 | current->state = TASK_INTERRUPTIBLE; | ||
1710 | schedule(); | ||
1711 | return -ERESTARTNOHAND; | ||
1712 | } | ||
1713 | |||
1714 | asmlinkage int | ||
1715 | sys32_msync (unsigned int start, unsigned int len, int flags) | 1621 | sys32_msync (unsigned int start, unsigned int len, int flags) |
1716 | { | 1622 | { |
1717 | unsigned int addr; | 1623 | unsigned int addr; |
diff --git a/arch/ia64/include/asm/break.h b/arch/ia64/include/asm/break.h index f03402039896..e90c40ec9edf 100644 --- a/arch/ia64/include/asm/break.h +++ b/arch/ia64/include/asm/break.h | |||
@@ -20,4 +20,13 @@ | |||
20 | */ | 20 | */ |
21 | #define __IA64_BREAK_SYSCALL 0x100000 | 21 | #define __IA64_BREAK_SYSCALL 0x100000 |
22 | 22 | ||
23 | /* | ||
24 | * Xen specific break numbers: | ||
25 | */ | ||
26 | #define __IA64_XEN_HYPERCALL 0x1000 | ||
27 | /* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used | ||
28 | for xen hyperprivops */ | ||
29 | #define __IA64_XEN_HYPERPRIVOP_START 0x1 | ||
30 | #define __IA64_XEN_HYPERPRIVOP_MAX 0x1a | ||
31 | |||
23 | #endif /* _ASM_IA64_BREAK_H */ | 32 | #endif /* _ASM_IA64_BREAK_H */ |
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h index afcfbda76e20..c8ce2719fee8 100644 --- a/arch/ia64/include/asm/cacheflush.h +++ b/arch/ia64/include/asm/cacheflush.h | |||
@@ -34,6 +34,8 @@ do { \ | |||
34 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 34 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
35 | 35 | ||
36 | extern void flush_icache_range (unsigned long start, unsigned long end); | 36 | extern void flush_icache_range (unsigned long start, unsigned long end); |
37 | extern void clflush_cache_range(void *addr, int size); | ||
38 | |||
37 | 39 | ||
38 | #define flush_icache_user_range(vma, page, user_addr, len) \ | 40 | #define flush_icache_user_range(vma, page, user_addr, len) \ |
39 | do { \ | 41 | do { \ |
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h index 3db6daf7f251..41ab85d66f33 100644 --- a/arch/ia64/include/asm/device.h +++ b/arch/ia64/include/asm/device.h | |||
@@ -10,6 +10,9 @@ struct dev_archdata { | |||
10 | #ifdef CONFIG_ACPI | 10 | #ifdef CONFIG_ACPI |
11 | void *acpi_handle; | 11 | void *acpi_handle; |
12 | #endif | 12 | #endif |
13 | #ifdef CONFIG_DMAR | ||
14 | void *iommu; /* hook for IOMMU specific extension */ | ||
15 | #endif | ||
13 | }; | 16 | }; |
14 | 17 | ||
15 | #endif /* _ASM_IA64_DEVICE_H */ | 18 | #endif /* _ASM_IA64_DEVICE_H */ |
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 06ff1ba21465..bbab7e2b0fc9 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h | |||
@@ -7,6 +7,49 @@ | |||
7 | */ | 7 | */ |
8 | #include <asm/machvec.h> | 8 | #include <asm/machvec.h> |
9 | #include <linux/scatterlist.h> | 9 | #include <linux/scatterlist.h> |
10 | #include <asm/swiotlb.h> | ||
11 | |||
12 | struct dma_mapping_ops { | ||
13 | int (*mapping_error)(struct device *dev, | ||
14 | dma_addr_t dma_addr); | ||
15 | void* (*alloc_coherent)(struct device *dev, size_t size, | ||
16 | dma_addr_t *dma_handle, gfp_t gfp); | ||
17 | void (*free_coherent)(struct device *dev, size_t size, | ||
18 | void *vaddr, dma_addr_t dma_handle); | ||
19 | dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr, | ||
20 | size_t size, int direction); | ||
21 | void (*unmap_single)(struct device *dev, dma_addr_t addr, | ||
22 | size_t size, int direction); | ||
23 | void (*sync_single_for_cpu)(struct device *hwdev, | ||
24 | dma_addr_t dma_handle, size_t size, | ||
25 | int direction); | ||
26 | void (*sync_single_for_device)(struct device *hwdev, | ||
27 | dma_addr_t dma_handle, size_t size, | ||
28 | int direction); | ||
29 | void (*sync_single_range_for_cpu)(struct device *hwdev, | ||
30 | dma_addr_t dma_handle, unsigned long offset, | ||
31 | size_t size, int direction); | ||
32 | void (*sync_single_range_for_device)(struct device *hwdev, | ||
33 | dma_addr_t dma_handle, unsigned long offset, | ||
34 | size_t size, int direction); | ||
35 | void (*sync_sg_for_cpu)(struct device *hwdev, | ||
36 | struct scatterlist *sg, int nelems, | ||
37 | int direction); | ||
38 | void (*sync_sg_for_device)(struct device *hwdev, | ||
39 | struct scatterlist *sg, int nelems, | ||
40 | int direction); | ||
41 | int (*map_sg)(struct device *hwdev, struct scatterlist *sg, | ||
42 | int nents, int direction); | ||
43 | void (*unmap_sg)(struct device *hwdev, | ||
44 | struct scatterlist *sg, int nents, | ||
45 | int direction); | ||
46 | int (*dma_supported_op)(struct device *hwdev, u64 mask); | ||
47 | int is_phys; | ||
48 | }; | ||
49 | |||
50 | extern struct dma_mapping_ops *dma_ops; | ||
51 | extern struct ia64_machine_vector ia64_mv; | ||
52 | extern void set_iommu_machvec(void); | ||
10 | 53 | ||
11 | #define dma_alloc_coherent(dev, size, handle, gfp) \ | 54 | #define dma_alloc_coherent(dev, size, handle, gfp) \ |
12 | platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA) | 55 | platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA) |
@@ -96,4 +139,11 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size, | |||
96 | 139 | ||
97 | #define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ | 140 | #define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ |
98 | 141 | ||
142 | static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) | ||
143 | { | ||
144 | return dma_ops; | ||
145 | } | ||
146 | |||
147 | |||
148 | |||
99 | #endif /* _ASM_IA64_DMA_MAPPING_H */ | 149 | #endif /* _ASM_IA64_DMA_MAPPING_H */ |
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h new file mode 100644 index 000000000000..5fb2bb93de3b --- /dev/null +++ b/arch/ia64/include/asm/iommu.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _ASM_IA64_IOMMU_H | ||
2 | #define _ASM_IA64_IOMMU_H 1 | ||
3 | |||
4 | #define cpu_has_x2apic 0 | ||
5 | /* 10 seconds */ | ||
6 | #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) | ||
7 | |||
8 | extern void pci_iommu_shutdown(void); | ||
9 | extern void no_iommu_init(void); | ||
10 | extern int force_iommu, no_iommu; | ||
11 | extern int iommu_detected; | ||
12 | extern void iommu_dma_init(void); | ||
13 | extern void machvec_init(const char *name); | ||
14 | extern int forbid_dac; | ||
15 | |||
16 | #endif | ||
diff --git a/arch/ia64/include/asm/kregs.h b/arch/ia64/include/asm/kregs.h index aefcdfee7f23..39e65f6639f5 100644 --- a/arch/ia64/include/asm/kregs.h +++ b/arch/ia64/include/asm/kregs.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */ | 32 | #define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */ |
33 | 33 | ||
34 | #define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/ | 34 | #define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/ |
35 | #define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use*/ | 35 | #define IA64_TR_ALLOC_MAX 64 /* Max number for dynamic use*/ |
36 | 36 | ||
37 | /* Processor status register bits: */ | 37 | /* Processor status register bits: */ |
38 | #define IA64_PSR_BE_BIT 1 | 38 | #define IA64_PSR_BE_BIT 1 |
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 2b850ccafef5..1ea28bcee33b 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h | |||
@@ -120,6 +120,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); | |||
120 | # include <asm/machvec_hpsim.h> | 120 | # include <asm/machvec_hpsim.h> |
121 | # elif defined (CONFIG_IA64_DIG) | 121 | # elif defined (CONFIG_IA64_DIG) |
122 | # include <asm/machvec_dig.h> | 122 | # include <asm/machvec_dig.h> |
123 | # elif defined(CONFIG_IA64_DIG_VTD) | ||
124 | # include <asm/machvec_dig_vtd.h> | ||
123 | # elif defined (CONFIG_IA64_HP_ZX1) | 125 | # elif defined (CONFIG_IA64_HP_ZX1) |
124 | # include <asm/machvec_hpzx1.h> | 126 | # include <asm/machvec_hpzx1.h> |
125 | # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) | 127 | # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) |
@@ -128,6 +130,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); | |||
128 | # include <asm/machvec_sn2.h> | 130 | # include <asm/machvec_sn2.h> |
129 | # elif defined (CONFIG_IA64_SGI_UV) | 131 | # elif defined (CONFIG_IA64_SGI_UV) |
130 | # include <asm/machvec_uv.h> | 132 | # include <asm/machvec_uv.h> |
133 | # elif defined (CONFIG_IA64_XEN_GUEST) | ||
134 | # include <asm/machvec_xen.h> | ||
131 | # elif defined (CONFIG_IA64_GENERIC) | 135 | # elif defined (CONFIG_IA64_GENERIC) |
132 | 136 | ||
133 | # ifdef MACHVEC_PLATFORM_HEADER | 137 | # ifdef MACHVEC_PLATFORM_HEADER |
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h new file mode 100644 index 000000000000..3400b561e711 --- /dev/null +++ b/arch/ia64/include/asm/machvec_dig_vtd.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef _ASM_IA64_MACHVEC_DIG_VTD_h | ||
2 | #define _ASM_IA64_MACHVEC_DIG_VTD_h | ||
3 | |||
4 | extern ia64_mv_setup_t dig_setup; | ||
5 | extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent; | ||
6 | extern ia64_mv_dma_free_coherent vtd_free_coherent; | ||
7 | extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs; | ||
8 | extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs; | ||
9 | extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs; | ||
10 | extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs; | ||
11 | extern ia64_mv_dma_supported iommu_dma_supported; | ||
12 | extern ia64_mv_dma_mapping_error vtd_dma_mapping_error; | ||
13 | extern ia64_mv_dma_init pci_iommu_alloc; | ||
14 | |||
15 | /* | ||
16 | * This stuff has dual use! | ||
17 | * | ||
18 | * For a generic kernel, the macros are used to initialize the | ||
19 | * platform's machvec structure. When compiling a non-generic kernel, | ||
20 | * the macros are used directly. | ||
21 | */ | ||
22 | #define platform_name "dig_vtd" | ||
23 | #define platform_setup dig_setup | ||
24 | #define platform_dma_init pci_iommu_alloc | ||
25 | #define platform_dma_alloc_coherent vtd_alloc_coherent | ||
26 | #define platform_dma_free_coherent vtd_free_coherent | ||
27 | #define platform_dma_map_single_attrs vtd_map_single_attrs | ||
28 | #define platform_dma_unmap_single_attrs vtd_unmap_single_attrs | ||
29 | #define platform_dma_map_sg_attrs vtd_map_sg_attrs | ||
30 | #define platform_dma_unmap_sg_attrs vtd_unmap_sg_attrs | ||
31 | #define platform_dma_sync_single_for_cpu machvec_dma_sync_single | ||
32 | #define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg | ||
33 | #define platform_dma_sync_single_for_device machvec_dma_sync_single | ||
34 | #define platform_dma_sync_sg_for_device machvec_dma_sync_sg | ||
35 | #define platform_dma_supported iommu_dma_supported | ||
36 | #define platform_dma_mapping_error vtd_dma_mapping_error | ||
37 | |||
38 | #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */ | ||
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h index 7f21249fba3f..ef964b286842 100644 --- a/arch/ia64/include/asm/machvec_init.h +++ b/arch/ia64/include/asm/machvec_init.h | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <asm/iommu.h> | ||
1 | #include <asm/machvec.h> | 2 | #include <asm/machvec.h> |
2 | 3 | ||
3 | extern ia64_mv_send_ipi_t ia64_send_ipi; | 4 | extern ia64_mv_send_ipi_t ia64_send_ipi; |
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h new file mode 100644 index 000000000000..55f9228056cd --- /dev/null +++ b/arch/ia64/include/asm/machvec_xen.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef _ASM_IA64_MACHVEC_XEN_h | ||
2 | #define _ASM_IA64_MACHVEC_XEN_h | ||
3 | |||
4 | extern ia64_mv_setup_t dig_setup; | ||
5 | extern ia64_mv_cpu_init_t xen_cpu_init; | ||
6 | extern ia64_mv_irq_init_t xen_irq_init; | ||
7 | extern ia64_mv_send_ipi_t xen_platform_send_ipi; | ||
8 | |||
9 | /* | ||
10 | * This stuff has dual use! | ||
11 | * | ||
12 | * For a generic kernel, the macros are used to initialize the | ||
13 | * platform's machvec structure. When compiling a non-generic kernel, | ||
14 | * the macros are used directly. | ||
15 | */ | ||
16 | #define platform_name "xen" | ||
17 | #define platform_setup dig_setup | ||
18 | #define platform_cpu_init xen_cpu_init | ||
19 | #define platform_irq_init xen_irq_init | ||
20 | #define platform_send_ipi xen_platform_send_ipi | ||
21 | |||
22 | #endif /* _ASM_IA64_MACHVEC_XEN_h */ | ||
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index 7245a5781594..6bc96ee54327 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h | |||
@@ -18,10 +18,11 @@ | |||
18 | * - crash dumping code reserved region | 18 | * - crash dumping code reserved region |
19 | * - Kernel memory map built from EFI memory map | 19 | * - Kernel memory map built from EFI memory map |
20 | * - ELF core header | 20 | * - ELF core header |
21 | * - xen start info if CONFIG_XEN | ||
21 | * | 22 | * |
22 | * More could be added if necessary | 23 | * More could be added if necessary |
23 | */ | 24 | */ |
24 | #define IA64_MAX_RSVD_REGIONS 8 | 25 | #define IA64_MAX_RSVD_REGIONS 9 |
25 | 26 | ||
26 | struct rsvd_region { | 27 | struct rsvd_region { |
27 | unsigned long start; /* virtual address of beginning of element */ | 28 | unsigned long start; /* virtual address of beginning of element */ |
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h index c8efbf7b849e..0a1026cca4fa 100644 --- a/arch/ia64/include/asm/native/inst.h +++ b/arch/ia64/include/asm/native/inst.h | |||
@@ -36,8 +36,13 @@ | |||
36 | ;; \ | 36 | ;; \ |
37 | movl clob = PARAVIRT_POISON; \ | 37 | movl clob = PARAVIRT_POISON; \ |
38 | ;; | 38 | ;; |
39 | # define CLOBBER_PRED(pred_clob) \ | ||
40 | ;; \ | ||
41 | cmp.eq pred_clob, p0 = r0, r0 \ | ||
42 | ;; | ||
39 | #else | 43 | #else |
40 | # define CLOBBER(clob) /* nothing */ | 44 | # define CLOBBER(clob) /* nothing */ |
45 | # define CLOBBER_PRED(pred_clob) /* nothing */ | ||
41 | #endif | 46 | #endif |
42 | 47 | ||
43 | #define MOV_FROM_IFA(reg) \ | 48 | #define MOV_FROM_IFA(reg) \ |
@@ -136,7 +141,8 @@ | |||
136 | 141 | ||
137 | #define SSM_PSR_I(pred, pred_clob, clob) \ | 142 | #define SSM_PSR_I(pred, pred_clob, clob) \ |
138 | (pred) ssm psr.i \ | 143 | (pred) ssm psr.i \ |
139 | CLOBBER(clob) | 144 | CLOBBER(clob) \ |
145 | CLOBBER_PRED(pred_clob) | ||
140 | 146 | ||
141 | #define RSM_PSR_I(pred, clob0, clob1) \ | 147 | #define RSM_PSR_I(pred, clob0, clob1) \ |
142 | (pred) rsm psr.i \ | 148 | (pred) rsm psr.i \ |
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h new file mode 100644 index 000000000000..b8e6eb1090d7 --- /dev/null +++ b/arch/ia64/include/asm/native/pvchk_inst.h | |||
@@ -0,0 +1,263 @@ | |||
1 | #ifndef _ASM_NATIVE_PVCHK_INST_H | ||
2 | #define _ASM_NATIVE_PVCHK_INST_H | ||
3 | |||
4 | /****************************************************************************** | ||
5 | * arch/ia64/include/asm/native/pvchk_inst.h | ||
6 | * Checker for paravirtualizations of privileged operations. | ||
7 | * | ||
8 | * Copyright (C) 2005 Hewlett-Packard Co | ||
9 | * Dan Magenheimer <dan.magenheimer@hp.com> | ||
10 | * | ||
11 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
12 | * VA Linux Systems Japan K.K. | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | /********************************************** | ||
31 | * Instructions paravirtualized for correctness | ||
32 | **********************************************/ | ||
33 | |||
34 | /* "fc" and "thash" are privilege-sensitive instructions, meaning they | ||
35 | * may have different semantics depending on whether they are executed | ||
36 | * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't | ||
37 | * be allowed to execute directly, lest incorrect semantics result. | ||
38 | */ | ||
39 | |||
40 | #define fc .error "fc should not be used directly." | ||
41 | #define thash .error "thash should not be used directly." | ||
42 | |||
43 | /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" | ||
44 | * is not currently used (though it may be in a long-format VHPT system!) | ||
45 | * and the semantics of cover only change if psr.ic is off which is very | ||
46 | * rare (and currently non-existent outside of assembly code | ||
47 | */ | ||
48 | #define ttag .error "ttag should not be used directly." | ||
49 | #define cover .error "cover should not be used directly." | ||
50 | |||
51 | /* There are also privilege-sensitive registers. These registers are | ||
52 | * readable at any privilege level but only writable at PL0. | ||
53 | */ | ||
54 | #define cpuid .error "cpuid should not be used directly." | ||
55 | #define pmd .error "pmd should not be used directly." | ||
56 | |||
57 | /* | ||
58 | * mov ar.eflag = | ||
59 | * mov = ar.eflag | ||
60 | */ | ||
61 | |||
62 | /********************************************** | ||
63 | * Instructions paravirtualized for performance | ||
64 | **********************************************/ | ||
65 | /* | ||
66 | * Those instructions include '.' which can't be handled by cpp. | ||
67 | * or can't be handled by cpp easily. | ||
68 | * They are handled by sed instead of cpp. | ||
69 | */ | ||
70 | |||
71 | /* for .S | ||
72 | * itc.i | ||
73 | * itc.d | ||
74 | * | ||
75 | * bsw.0 | ||
76 | * bsw.1 | ||
77 | * | ||
78 | * ssm psr.ic | PSR_DEFAULT_BITS | ||
79 | * ssm psr.ic | ||
80 | * rsm psr.ic | ||
81 | * ssm psr.i | ||
82 | * rsm psr.i | ||
83 | * rsm psr.i | psr.ic | ||
84 | * rsm psr.dt | ||
85 | * ssm psr.dt | ||
86 | * | ||
87 | * mov = cr.ifa | ||
88 | * mov = cr.itir | ||
89 | * mov = cr.isr | ||
90 | * mov = cr.iha | ||
91 | * mov = cr.ipsr | ||
92 | * mov = cr.iim | ||
93 | * mov = cr.iip | ||
94 | * mov = cr.ivr | ||
95 | * mov = psr | ||
96 | * | ||
97 | * mov cr.ifa = | ||
98 | * mov cr.itir = | ||
99 | * mov cr.iha = | ||
100 | * mov cr.ipsr = | ||
101 | * mov cr.ifs = | ||
102 | * mov cr.iip = | ||
103 | * mov cr.kr = | ||
104 | */ | ||
105 | |||
106 | /* for intrinsics | ||
107 | * ssm psr.i | ||
108 | * rsm psr.i | ||
109 | * mov = psr | ||
110 | * mov = ivr | ||
111 | * mov = tpr | ||
112 | * mov cr.itm = | ||
113 | * mov eoi = | ||
114 | * mov rr[] = | ||
115 | * mov = rr[] | ||
116 | * mov = kr | ||
117 | * mov kr = | ||
118 | * ptc.ga | ||
119 | */ | ||
120 | |||
121 | /************************************************************* | ||
122 | * define paravirtualized instrcution macros as nop to ingore. | ||
123 | * and check whether arguments are appropriate. | ||
124 | *************************************************************/ | ||
125 | |||
126 | /* check whether reg is a regular register */ | ||
127 | .macro is_rreg_in reg | ||
128 | .ifc "\reg", "r0" | ||
129 | nop 0 | ||
130 | .exitm | ||
131 | .endif | ||
132 | ;; | ||
133 | mov \reg = r0 | ||
134 | ;; | ||
135 | .endm | ||
136 | #define IS_RREG_IN(reg) is_rreg_in reg ; | ||
137 | |||
138 | #define IS_RREG_OUT(reg) \ | ||
139 | ;; \ | ||
140 | mov reg = r0 \ | ||
141 | ;; | ||
142 | |||
143 | #define IS_RREG_CLOB(reg) IS_RREG_OUT(reg) | ||
144 | |||
145 | /* check whether pred is a predicate register */ | ||
146 | #define IS_PRED_IN(pred) \ | ||
147 | ;; \ | ||
148 | (pred) nop 0 \ | ||
149 | ;; | ||
150 | |||
151 | #define IS_PRED_OUT(pred) \ | ||
152 | ;; \ | ||
153 | cmp.eq pred, p0 = r0, r0 \ | ||
154 | ;; | ||
155 | |||
156 | #define IS_PRED_CLOB(pred) IS_PRED_OUT(pred) | ||
157 | |||
158 | |||
159 | #define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \ | ||
160 | nop 0 | ||
161 | #define MOV_FROM_IFA(reg) \ | ||
162 | IS_RREG_OUT(reg) | ||
163 | #define MOV_FROM_ITIR(reg) \ | ||
164 | IS_RREG_OUT(reg) | ||
165 | #define MOV_FROM_ISR(reg) \ | ||
166 | IS_RREG_OUT(reg) | ||
167 | #define MOV_FROM_IHA(reg) \ | ||
168 | IS_RREG_OUT(reg) | ||
169 | #define MOV_FROM_IPSR(pred, reg) \ | ||
170 | IS_PRED_IN(pred) \ | ||
171 | IS_RREG_OUT(reg) | ||
172 | #define MOV_FROM_IIM(reg) \ | ||
173 | IS_RREG_OUT(reg) | ||
174 | #define MOV_FROM_IIP(reg) \ | ||
175 | IS_RREG_OUT(reg) | ||
176 | #define MOV_FROM_IVR(reg, clob) \ | ||
177 | IS_RREG_OUT(reg) \ | ||
178 | IS_RREG_CLOB(clob) | ||
179 | #define MOV_FROM_PSR(pred, reg, clob) \ | ||
180 | IS_PRED_IN(pred) \ | ||
181 | IS_RREG_OUT(reg) \ | ||
182 | IS_RREG_CLOB(clob) | ||
183 | #define MOV_TO_IFA(reg, clob) \ | ||
184 | IS_RREG_IN(reg) \ | ||
185 | IS_RREG_CLOB(clob) | ||
186 | #define MOV_TO_ITIR(pred, reg, clob) \ | ||
187 | IS_PRED_IN(pred) \ | ||
188 | IS_RREG_IN(reg) \ | ||
189 | IS_RREG_CLOB(clob) | ||
190 | #define MOV_TO_IHA(pred, reg, clob) \ | ||
191 | IS_PRED_IN(pred) \ | ||
192 | IS_RREG_IN(reg) \ | ||
193 | IS_RREG_CLOB(clob) | ||
194 | #define MOV_TO_IPSR(pred, reg, clob) \ | ||
195 | IS_PRED_IN(pred) \ | ||
196 | IS_RREG_IN(reg) \ | ||
197 | IS_RREG_CLOB(clob) | ||
198 | #define MOV_TO_IFS(pred, reg, clob) \ | ||
199 | IS_PRED_IN(pred) \ | ||
200 | IS_RREG_IN(reg) \ | ||
201 | IS_RREG_CLOB(clob) | ||
202 | #define MOV_TO_IIP(reg, clob) \ | ||
203 | IS_RREG_IN(reg) \ | ||
204 | IS_RREG_CLOB(clob) | ||
205 | #define MOV_TO_KR(kr, reg, clob0, clob1) \ | ||
206 | IS_RREG_IN(reg) \ | ||
207 | IS_RREG_CLOB(clob0) \ | ||
208 | IS_RREG_CLOB(clob1) | ||
209 | #define ITC_I(pred, reg, clob) \ | ||
210 | IS_PRED_IN(pred) \ | ||
211 | IS_RREG_IN(reg) \ | ||
212 | IS_RREG_CLOB(clob) | ||
213 | #define ITC_D(pred, reg, clob) \ | ||
214 | IS_PRED_IN(pred) \ | ||
215 | IS_RREG_IN(reg) \ | ||
216 | IS_RREG_CLOB(clob) | ||
217 | #define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ | ||
218 | IS_PRED_IN(pred_i) \ | ||
219 | IS_PRED_IN(pred_d) \ | ||
220 | IS_RREG_IN(reg) \ | ||
221 | IS_RREG_CLOB(clob) | ||
222 | #define THASH(pred, reg0, reg1, clob) \ | ||
223 | IS_PRED_IN(pred) \ | ||
224 | IS_RREG_OUT(reg0) \ | ||
225 | IS_RREG_IN(reg1) \ | ||
226 | IS_RREG_CLOB(clob) | ||
227 | #define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ | ||
228 | IS_RREG_CLOB(clob0) \ | ||
229 | IS_RREG_CLOB(clob1) | ||
230 | #define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ | ||
231 | IS_RREG_CLOB(clob0) \ | ||
232 | IS_RREG_CLOB(clob1) | ||
233 | #define RSM_PSR_IC(clob) \ | ||
234 | IS_RREG_CLOB(clob) | ||
235 | #define SSM_PSR_I(pred, pred_clob, clob) \ | ||
236 | IS_PRED_IN(pred) \ | ||
237 | IS_PRED_CLOB(pred_clob) \ | ||
238 | IS_RREG_CLOB(clob) | ||
239 | #define RSM_PSR_I(pred, clob0, clob1) \ | ||
240 | IS_PRED_IN(pred) \ | ||
241 | IS_RREG_CLOB(clob0) \ | ||
242 | IS_RREG_CLOB(clob1) | ||
243 | #define RSM_PSR_I_IC(clob0, clob1, clob2) \ | ||
244 | IS_RREG_CLOB(clob0) \ | ||
245 | IS_RREG_CLOB(clob1) \ | ||
246 | IS_RREG_CLOB(clob2) | ||
247 | #define RSM_PSR_DT \ | ||
248 | nop 0 | ||
249 | #define SSM_PSR_DT_AND_SRLZ_I \ | ||
250 | nop 0 | ||
251 | #define BSW_0(clob0, clob1, clob2) \ | ||
252 | IS_RREG_CLOB(clob0) \ | ||
253 | IS_RREG_CLOB(clob1) \ | ||
254 | IS_RREG_CLOB(clob2) | ||
255 | #define BSW_1(clob0, clob1) \ | ||
256 | IS_RREG_CLOB(clob0) \ | ||
257 | IS_RREG_CLOB(clob1) | ||
258 | #define COVER \ | ||
259 | nop 0 | ||
260 | #define RFI \ | ||
261 | br.ret.sptk.many rp /* defining nop causes dependency error */ | ||
262 | |||
263 | #endif /* _ASM_NATIVE_PVCHK_INST_H */ | ||
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index 660cab044834..2bf3636473fe 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h | |||
@@ -117,7 +117,7 @@ static inline void paravirt_post_smp_prepare_boot_cpu(void) | |||
117 | struct pv_iosapic_ops { | 117 | struct pv_iosapic_ops { |
118 | void (*pcat_compat_init)(void); | 118 | void (*pcat_compat_init)(void); |
119 | 119 | ||
120 | struct irq_chip *(*get_irq_chip)(unsigned long trigger); | 120 | struct irq_chip *(*__get_irq_chip)(unsigned long trigger); |
121 | 121 | ||
122 | unsigned int (*__read)(char __iomem *iosapic, unsigned int reg); | 122 | unsigned int (*__read)(char __iomem *iosapic, unsigned int reg); |
123 | void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val); | 123 | void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val); |
@@ -135,7 +135,7 @@ iosapic_pcat_compat_init(void) | |||
135 | static inline struct irq_chip* | 135 | static inline struct irq_chip* |
136 | iosapic_get_irq_chip(unsigned long trigger) | 136 | iosapic_get_irq_chip(unsigned long trigger) |
137 | { | 137 | { |
138 | return pv_iosapic_ops.get_irq_chip(trigger); | 138 | return pv_iosapic_ops.__get_irq_chip(trigger); |
139 | } | 139 | } |
140 | 140 | ||
141 | static inline unsigned int | 141 | static inline unsigned int |
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index ce342fb74246..1d660d89db0d 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h | |||
@@ -156,4 +156,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | |||
156 | return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14); | 156 | return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14); |
157 | } | 157 | } |
158 | 158 | ||
159 | #ifdef CONFIG_DMAR | ||
160 | extern void pci_iommu_alloc(void); | ||
161 | #endif | ||
159 | #endif /* _ASM_IA64_PCI_H */ | 162 | #endif /* _ASM_IA64_PCI_H */ |
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index 15f8dcfe6eee..6417c1ecb44e 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h | |||
@@ -240,6 +240,12 @@ struct switch_stack { | |||
240 | */ | 240 | */ |
241 | # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri) | 241 | # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri) |
242 | 242 | ||
243 | static inline unsigned long user_stack_pointer(struct pt_regs *regs) | ||
244 | { | ||
245 | /* FIXME: should this be bspstore + nr_dirty regs? */ | ||
246 | return regs->ar_bspstore; | ||
247 | } | ||
248 | |||
243 | #define regs_return_value(regs) ((regs)->r8) | 249 | #define regs_return_value(regs) ((regs)->r8) |
244 | 250 | ||
245 | /* Conserve space in histogram by encoding slot bits in address | 251 | /* Conserve space in histogram by encoding slot bits in address |
@@ -319,6 +325,8 @@ struct switch_stack { | |||
319 | #define arch_has_block_step() (1) | 325 | #define arch_has_block_step() (1) |
320 | extern void user_enable_block_step(struct task_struct *); | 326 | extern void user_enable_block_step(struct task_struct *); |
321 | 327 | ||
328 | #define __ARCH_WANT_COMPAT_SYS_PTRACE | ||
329 | |||
322 | #endif /* !__KERNEL__ */ | 330 | #endif /* !__KERNEL__ */ |
323 | 331 | ||
324 | /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */ | 332 | /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */ |
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h new file mode 100644 index 000000000000..44ef9ef8f5b3 --- /dev/null +++ b/arch/ia64/include/asm/pvclock-abi.h | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * same structure to x86's | ||
3 | * Hopefully asm-x86/pvclock-abi.h would be moved to somewhere more generic. | ||
4 | * For now, define same duplicated definitions. | ||
5 | */ | ||
6 | |||
7 | #ifndef _ASM_IA64__PVCLOCK_ABI_H | ||
8 | #define _ASM_IA64__PVCLOCK_ABI_H | ||
9 | #ifndef __ASSEMBLY__ | ||
10 | |||
11 | /* | ||
12 | * These structs MUST NOT be changed. | ||
13 | * They are the ABI between hypervisor and guest OS. | ||
14 | * Both Xen and KVM are using this. | ||
15 | * | ||
16 | * pvclock_vcpu_time_info holds the system time and the tsc timestamp | ||
17 | * of the last update. So the guest can use the tsc delta to get a | ||
18 | * more precise system time. There is one per virtual cpu. | ||
19 | * | ||
20 | * pvclock_wall_clock references the point in time when the system | ||
21 | * time was zero (usually boot time), thus the guest calculates the | ||
22 | * current wall clock by adding the system time. | ||
23 | * | ||
24 | * Protocol for the "version" fields is: hypervisor raises it (making | ||
25 | * it uneven) before it starts updating the fields and raises it again | ||
26 | * (making it even) when it is done. Thus the guest can make sure the | ||
27 | * time values it got are consistent by checking the version before | ||
28 | * and after reading them. | ||
29 | */ | ||
30 | |||
31 | struct pvclock_vcpu_time_info { | ||
32 | u32 version; | ||
33 | u32 pad0; | ||
34 | u64 tsc_timestamp; | ||
35 | u64 system_time; | ||
36 | u32 tsc_to_system_mul; | ||
37 | s8 tsc_shift; | ||
38 | u8 pad[3]; | ||
39 | } __attribute__((__packed__)); /* 32 bytes */ | ||
40 | |||
41 | struct pvclock_wall_clock { | ||
42 | u32 version; | ||
43 | u32 sec; | ||
44 | u32 nsec; | ||
45 | } __attribute__((__packed__)); | ||
46 | |||
47 | #endif /* __ASSEMBLY__ */ | ||
48 | #endif /* _ASM_IA64__PVCLOCK_ABI_H */ | ||
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h new file mode 100644 index 000000000000..fb79423834d0 --- /dev/null +++ b/arch/ia64/include/asm/swiotlb.h | |||
@@ -0,0 +1,56 @@ | |||
1 | #ifndef ASM_IA64__SWIOTLB_H | ||
2 | #define ASM_IA64__SWIOTLB_H | ||
3 | |||
4 | #include <linux/dma-mapping.h> | ||
5 | |||
6 | /* SWIOTLB interface */ | ||
7 | |||
8 | extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, | ||
9 | size_t size, int dir); | ||
10 | extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | ||
11 | dma_addr_t *dma_handle, gfp_t flags); | ||
12 | extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | ||
13 | size_t size, int dir); | ||
14 | extern void swiotlb_sync_single_for_cpu(struct device *hwdev, | ||
15 | dma_addr_t dev_addr, | ||
16 | size_t size, int dir); | ||
17 | extern void swiotlb_sync_single_for_device(struct device *hwdev, | ||
18 | dma_addr_t dev_addr, | ||
19 | size_t size, int dir); | ||
20 | extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, | ||
21 | dma_addr_t dev_addr, | ||
22 | unsigned long offset, | ||
23 | size_t size, int dir); | ||
24 | extern void swiotlb_sync_single_range_for_device(struct device *hwdev, | ||
25 | dma_addr_t dev_addr, | ||
26 | unsigned long offset, | ||
27 | size_t size, int dir); | ||
28 | extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, | ||
29 | struct scatterlist *sg, int nelems, | ||
30 | int dir); | ||
31 | extern void swiotlb_sync_sg_for_device(struct device *hwdev, | ||
32 | struct scatterlist *sg, int nelems, | ||
33 | int dir); | ||
34 | extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, | ||
35 | int nents, int direction); | ||
36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | ||
37 | int nents, int direction); | ||
38 | extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); | ||
39 | extern void swiotlb_free_coherent(struct device *hwdev, size_t size, | ||
40 | void *vaddr, dma_addr_t dma_handle); | ||
41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); | ||
42 | extern void swiotlb_init(void); | ||
43 | |||
44 | extern int swiotlb_force; | ||
45 | |||
46 | #ifdef CONFIG_SWIOTLB | ||
47 | extern int swiotlb; | ||
48 | extern void pci_swiotlb_init(void); | ||
49 | #else | ||
50 | #define swiotlb 0 | ||
51 | static inline void pci_swiotlb_init(void) | ||
52 | { | ||
53 | } | ||
54 | #endif | ||
55 | |||
56 | #endif /* ASM_IA64__SWIOTLB_H */ | ||
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h new file mode 100644 index 000000000000..593c12eeb270 --- /dev/null +++ b/arch/ia64/include/asm/sync_bitops.h | |||
@@ -0,0 +1,51 @@ | |||
1 | #ifndef _ASM_IA64_SYNC_BITOPS_H | ||
2 | #define _ASM_IA64_SYNC_BITOPS_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
6 | * | ||
7 | * Based on synch_bitops.h which Dan Magenhaimer wrote. | ||
8 | * | ||
9 | * bit operations which provide guaranteed strong synchronisation | ||
10 | * when communicating with Xen or other guest OSes running on other CPUs. | ||
11 | */ | ||
12 | |||
13 | static inline void sync_set_bit(int nr, volatile void *addr) | ||
14 | { | ||
15 | set_bit(nr, addr); | ||
16 | } | ||
17 | |||
18 | static inline void sync_clear_bit(int nr, volatile void *addr) | ||
19 | { | ||
20 | clear_bit(nr, addr); | ||
21 | } | ||
22 | |||
23 | static inline void sync_change_bit(int nr, volatile void *addr) | ||
24 | { | ||
25 | change_bit(nr, addr); | ||
26 | } | ||
27 | |||
28 | static inline int sync_test_and_set_bit(int nr, volatile void *addr) | ||
29 | { | ||
30 | return test_and_set_bit(nr, addr); | ||
31 | } | ||
32 | |||
33 | static inline int sync_test_and_clear_bit(int nr, volatile void *addr) | ||
34 | { | ||
35 | return test_and_clear_bit(nr, addr); | ||
36 | } | ||
37 | |||
38 | static inline int sync_test_and_change_bit(int nr, volatile void *addr) | ||
39 | { | ||
40 | return test_and_change_bit(nr, addr); | ||
41 | } | ||
42 | |||
43 | static inline int sync_test_bit(int nr, const volatile void *addr) | ||
44 | { | ||
45 | return test_bit(nr, addr); | ||
46 | } | ||
47 | |||
48 | #define sync_cmpxchg(ptr, old, new) \ | ||
49 | ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new))) | ||
50 | |||
51 | #endif /* _ASM_IA64_SYNC_BITOPS_H */ | ||
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h new file mode 100644 index 000000000000..2f758a42f94b --- /dev/null +++ b/arch/ia64/include/asm/syscall.h | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * Access to user system call parameters and results | ||
3 | * | ||
4 | * Copyright (C) 2008 Intel Corp. Shaohua Li <shaohua.li@intel.com> | ||
5 | * | ||
6 | * This copyrighted material is made available to anyone wishing to use, | ||
7 | * modify, copy, or redistribute it subject to the terms and conditions | ||
8 | * of the GNU General Public License v.2. | ||
9 | * | ||
10 | * See asm-generic/syscall.h for descriptions of what we must do here. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_SYSCALL_H | ||
14 | #define _ASM_SYSCALL_H 1 | ||
15 | |||
16 | #include <linux/sched.h> | ||
17 | #include <linux/err.h> | ||
18 | |||
19 | static inline long syscall_get_nr(struct task_struct *task, | ||
20 | struct pt_regs *regs) | ||
21 | { | ||
22 | if ((long)regs->cr_ifs < 0) /* Not a syscall */ | ||
23 | return -1; | ||
24 | |||
25 | #ifdef CONFIG_IA32_SUPPORT | ||
26 | if (IS_IA32_PROCESS(regs)) | ||
27 | return regs->r1; | ||
28 | #endif | ||
29 | |||
30 | return regs->r15; | ||
31 | } | ||
32 | |||
33 | static inline void syscall_rollback(struct task_struct *task, | ||
34 | struct pt_regs *regs) | ||
35 | { | ||
36 | #ifdef CONFIG_IA32_SUPPORT | ||
37 | if (IS_IA32_PROCESS(regs)) | ||
38 | regs->r8 = regs->r1; | ||
39 | #endif | ||
40 | |||
41 | /* do nothing */ | ||
42 | } | ||
43 | |||
44 | static inline long syscall_get_error(struct task_struct *task, | ||
45 | struct pt_regs *regs) | ||
46 | { | ||
47 | #ifdef CONFIG_IA32_SUPPORT | ||
48 | if (IS_IA32_PROCESS(regs)) | ||
49 | return regs->r8; | ||
50 | #endif | ||
51 | |||
52 | return regs->r10 == -1 ? regs->r8:0; | ||
53 | } | ||
54 | |||
55 | static inline long syscall_get_return_value(struct task_struct *task, | ||
56 | struct pt_regs *regs) | ||
57 | { | ||
58 | return regs->r8; | ||
59 | } | ||
60 | |||
61 | static inline void syscall_set_return_value(struct task_struct *task, | ||
62 | struct pt_regs *regs, | ||
63 | int error, long val) | ||
64 | { | ||
65 | #ifdef CONFIG_IA32_SUPPORT | ||
66 | if (IS_IA32_PROCESS(regs)) { | ||
67 | regs->r8 = (long) error ? error : val; | ||
68 | return; | ||
69 | } | ||
70 | #endif | ||
71 | |||
72 | if (error) { | ||
73 | /* error < 0, but ia64 uses > 0 return value */ | ||
74 | regs->r8 = -error; | ||
75 | regs->r10 = -1; | ||
76 | } else { | ||
77 | regs->r8 = val; | ||
78 | regs->r10 = 0; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | extern void ia64_syscall_get_set_arguments(struct task_struct *task, | ||
83 | struct pt_regs *regs, unsigned int i, unsigned int n, | ||
84 | unsigned long *args, int rw); | ||
85 | static inline void syscall_get_arguments(struct task_struct *task, | ||
86 | struct pt_regs *regs, | ||
87 | unsigned int i, unsigned int n, | ||
88 | unsigned long *args) | ||
89 | { | ||
90 | BUG_ON(i + n > 6); | ||
91 | |||
92 | #ifdef CONFIG_IA32_SUPPORT | ||
93 | if (IS_IA32_PROCESS(regs)) { | ||
94 | switch (i + n) { | ||
95 | case 6: | ||
96 | if (!n--) break; | ||
97 | *args++ = regs->r13; | ||
98 | case 5: | ||
99 | if (!n--) break; | ||
100 | *args++ = regs->r15; | ||
101 | case 4: | ||
102 | if (!n--) break; | ||
103 | *args++ = regs->r14; | ||
104 | case 3: | ||
105 | if (!n--) break; | ||
106 | *args++ = regs->r10; | ||
107 | case 2: | ||
108 | if (!n--) break; | ||
109 | *args++ = regs->r9; | ||
110 | case 1: | ||
111 | if (!n--) break; | ||
112 | *args++ = regs->r11; | ||
113 | case 0: | ||
114 | if (!n--) break; | ||
115 | default: | ||
116 | BUG(); | ||
117 | break; | ||
118 | } | ||
119 | |||
120 | return; | ||
121 | } | ||
122 | #endif | ||
123 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); | ||
124 | } | ||
125 | |||
126 | static inline void syscall_set_arguments(struct task_struct *task, | ||
127 | struct pt_regs *regs, | ||
128 | unsigned int i, unsigned int n, | ||
129 | unsigned long *args) | ||
130 | { | ||
131 | BUG_ON(i + n > 6); | ||
132 | |||
133 | #ifdef CONFIG_IA32_SUPPORT | ||
134 | if (IS_IA32_PROCESS(regs)) { | ||
135 | switch (i + n) { | ||
136 | case 6: | ||
137 | if (!n--) break; | ||
138 | regs->r13 = *args++; | ||
139 | case 5: | ||
140 | if (!n--) break; | ||
141 | regs->r15 = *args++; | ||
142 | case 4: | ||
143 | if (!n--) break; | ||
144 | regs->r14 = *args++; | ||
145 | case 3: | ||
146 | if (!n--) break; | ||
147 | regs->r10 = *args++; | ||
148 | case 2: | ||
149 | if (!n--) break; | ||
150 | regs->r9 = *args++; | ||
151 | case 1: | ||
152 | if (!n--) break; | ||
153 | regs->r11 = *args++; | ||
154 | case 0: | ||
155 | if (!n--) break; | ||
156 | } | ||
157 | |||
158 | return; | ||
159 | } | ||
160 | #endif | ||
161 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); | ||
162 | } | ||
163 | #endif /* _ASM_SYSCALL_H */ | ||
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 7c60fcdd2efd..ae6922626bf4 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h | |||
@@ -87,9 +87,6 @@ struct thread_info { | |||
87 | #define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) | 87 | #define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) |
88 | #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) | 88 | #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) |
89 | 89 | ||
90 | #define tsk_set_notify_resume(tsk) \ | ||
91 | set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME) | ||
92 | extern void tsk_clear_notify_resume(struct task_struct *tsk); | ||
93 | #endif /* !__ASSEMBLY */ | 90 | #endif /* !__ASSEMBLY */ |
94 | 91 | ||
95 | /* | 92 | /* |
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h index 05a6baf8a472..4e03cfe74a0c 100644 --- a/arch/ia64/include/asm/timex.h +++ b/arch/ia64/include/asm/timex.h | |||
@@ -39,4 +39,6 @@ get_cycles (void) | |||
39 | return ret; | 39 | return ret; |
40 | } | 40 | } |
41 | 41 | ||
42 | extern void ia64_cpu_local_tick (void); | ||
43 | |||
42 | #endif /* _ASM_IA64_TIMEX_H */ | 44 | #endif /* _ASM_IA64_TIMEX_H */ |
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index d535833aab5e..f791576355ad 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h | |||
@@ -337,6 +337,7 @@ | |||
337 | # define __ARCH_WANT_SYS_NICE | 337 | # define __ARCH_WANT_SYS_NICE |
338 | # define __ARCH_WANT_SYS_OLD_GETRLIMIT | 338 | # define __ARCH_WANT_SYS_OLD_GETRLIMIT |
339 | # define __ARCH_WANT_SYS_OLDUMOUNT | 339 | # define __ARCH_WANT_SYS_OLDUMOUNT |
340 | # define __ARCH_WANT_SYS_PAUSE | ||
340 | # define __ARCH_WANT_SYS_SIGPENDING | 341 | # define __ARCH_WANT_SYS_SIGPENDING |
341 | # define __ARCH_WANT_SYS_SIGPROCMASK | 342 | # define __ARCH_WANT_SYS_SIGPROCMASK |
342 | # define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND | 343 | # define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND |
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h new file mode 100644 index 000000000000..73248781fba8 --- /dev/null +++ b/arch/ia64/include/asm/xen/events.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/events.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | #ifndef _ASM_IA64_XEN_EVENTS_H | ||
23 | #define _ASM_IA64_XEN_EVENTS_H | ||
24 | |||
25 | enum ipi_vector { | ||
26 | XEN_RESCHEDULE_VECTOR, | ||
27 | XEN_IPI_VECTOR, | ||
28 | XEN_CMCP_VECTOR, | ||
29 | XEN_CPEP_VECTOR, | ||
30 | |||
31 | XEN_NR_IPIS, | ||
32 | }; | ||
33 | |||
34 | static inline int xen_irqs_disabled(struct pt_regs *regs) | ||
35 | { | ||
36 | return !(ia64_psr(regs)->i); | ||
37 | } | ||
38 | |||
39 | static inline void xen_do_IRQ(int irq, struct pt_regs *regs) | ||
40 | { | ||
41 | struct pt_regs *old_regs; | ||
42 | old_regs = set_irq_regs(regs); | ||
43 | irq_enter(); | ||
44 | __do_IRQ(irq); | ||
45 | irq_exit(); | ||
46 | set_irq_regs(old_regs); | ||
47 | } | ||
48 | #define irq_ctx_init(cpu) do { } while (0) | ||
49 | |||
50 | #endif /* _ASM_IA64_XEN_EVENTS_H */ | ||
diff --git a/arch/ia64/include/asm/xen/grant_table.h b/arch/ia64/include/asm/xen/grant_table.h new file mode 100644 index 000000000000..2b1fae0e2d11 --- /dev/null +++ b/arch/ia64/include/asm/xen/grant_table.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/grant_table.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_IA64_XEN_GRANT_TABLE_H | ||
24 | #define _ASM_IA64_XEN_GRANT_TABLE_H | ||
25 | |||
26 | struct vm_struct *xen_alloc_vm_area(unsigned long size); | ||
27 | void xen_free_vm_area(struct vm_struct *area); | ||
28 | |||
29 | #endif /* _ASM_IA64_XEN_GRANT_TABLE_H */ | ||
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h new file mode 100644 index 000000000000..96fc62366aa4 --- /dev/null +++ b/arch/ia64/include/asm/xen/hypercall.h | |||
@@ -0,0 +1,265 @@ | |||
1 | /****************************************************************************** | ||
2 | * hypercall.h | ||
3 | * | ||
4 | * Linux-specific hypervisor handling. | ||
5 | * | ||
6 | * Copyright (c) 2002-2004, K A Fraser | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef _ASM_IA64_XEN_HYPERCALL_H | ||
34 | #define _ASM_IA64_XEN_HYPERCALL_H | ||
35 | |||
36 | #include <xen/interface/xen.h> | ||
37 | #include <xen/interface/physdev.h> | ||
38 | #include <xen/interface/sched.h> | ||
39 | #include <asm/xen/xcom_hcall.h> | ||
40 | struct xencomm_handle; | ||
41 | extern unsigned long __hypercall(unsigned long a1, unsigned long a2, | ||
42 | unsigned long a3, unsigned long a4, | ||
43 | unsigned long a5, unsigned long cmd); | ||
44 | |||
45 | /* | ||
46 | * Assembler stubs for hyper-calls. | ||
47 | */ | ||
48 | |||
49 | #define _hypercall0(type, name) \ | ||
50 | ({ \ | ||
51 | long __res; \ | ||
52 | __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\ | ||
53 | (type)__res; \ | ||
54 | }) | ||
55 | |||
56 | #define _hypercall1(type, name, a1) \ | ||
57 | ({ \ | ||
58 | long __res; \ | ||
59 | __res = __hypercall((unsigned long)a1, \ | ||
60 | 0, 0, 0, 0, __HYPERVISOR_##name); \ | ||
61 | (type)__res; \ | ||
62 | }) | ||
63 | |||
64 | #define _hypercall2(type, name, a1, a2) \ | ||
65 | ({ \ | ||
66 | long __res; \ | ||
67 | __res = __hypercall((unsigned long)a1, \ | ||
68 | (unsigned long)a2, \ | ||
69 | 0, 0, 0, __HYPERVISOR_##name); \ | ||
70 | (type)__res; \ | ||
71 | }) | ||
72 | |||
73 | #define _hypercall3(type, name, a1, a2, a3) \ | ||
74 | ({ \ | ||
75 | long __res; \ | ||
76 | __res = __hypercall((unsigned long)a1, \ | ||
77 | (unsigned long)a2, \ | ||
78 | (unsigned long)a3, \ | ||
79 | 0, 0, __HYPERVISOR_##name); \ | ||
80 | (type)__res; \ | ||
81 | }) | ||
82 | |||
83 | #define _hypercall4(type, name, a1, a2, a3, a4) \ | ||
84 | ({ \ | ||
85 | long __res; \ | ||
86 | __res = __hypercall((unsigned long)a1, \ | ||
87 | (unsigned long)a2, \ | ||
88 | (unsigned long)a3, \ | ||
89 | (unsigned long)a4, \ | ||
90 | 0, __HYPERVISOR_##name); \ | ||
91 | (type)__res; \ | ||
92 | }) | ||
93 | |||
94 | #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ | ||
95 | ({ \ | ||
96 | long __res; \ | ||
97 | __res = __hypercall((unsigned long)a1, \ | ||
98 | (unsigned long)a2, \ | ||
99 | (unsigned long)a3, \ | ||
100 | (unsigned long)a4, \ | ||
101 | (unsigned long)a5, \ | ||
102 | __HYPERVISOR_##name); \ | ||
103 | (type)__res; \ | ||
104 | }) | ||
105 | |||
106 | |||
107 | static inline int | ||
108 | xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) | ||
109 | { | ||
110 | return _hypercall2(int, sched_op_new, cmd, arg); | ||
111 | } | ||
112 | |||
113 | static inline long | ||
114 | HYPERVISOR_set_timer_op(u64 timeout) | ||
115 | { | ||
116 | unsigned long timeout_hi = (unsigned long)(timeout >> 32); | ||
117 | unsigned long timeout_lo = (unsigned long)timeout; | ||
118 | return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); | ||
119 | } | ||
120 | |||
121 | static inline int | ||
122 | xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list, | ||
123 | int nr_calls) | ||
124 | { | ||
125 | return _hypercall2(int, multicall, call_list, nr_calls); | ||
126 | } | ||
127 | |||
128 | static inline int | ||
129 | xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg) | ||
130 | { | ||
131 | return _hypercall2(int, memory_op, cmd, arg); | ||
132 | } | ||
133 | |||
134 | static inline int | ||
135 | xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg) | ||
136 | { | ||
137 | return _hypercall2(int, event_channel_op, cmd, arg); | ||
138 | } | ||
139 | |||
140 | static inline int | ||
141 | xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg) | ||
142 | { | ||
143 | return _hypercall2(int, xen_version, cmd, arg); | ||
144 | } | ||
145 | |||
146 | static inline int | ||
147 | xencomm_arch_hypercall_console_io(int cmd, int count, | ||
148 | struct xencomm_handle *str) | ||
149 | { | ||
150 | return _hypercall3(int, console_io, cmd, count, str); | ||
151 | } | ||
152 | |||
153 | static inline int | ||
154 | xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg) | ||
155 | { | ||
156 | return _hypercall2(int, physdev_op, cmd, arg); | ||
157 | } | ||
158 | |||
159 | static inline int | ||
160 | xencomm_arch_hypercall_grant_table_op(unsigned int cmd, | ||
161 | struct xencomm_handle *uop, | ||
162 | unsigned int count) | ||
163 | { | ||
164 | return _hypercall3(int, grant_table_op, cmd, uop, count); | ||
165 | } | ||
166 | |||
167 | int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); | ||
168 | |||
169 | extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg); | ||
170 | |||
171 | static inline int | ||
172 | xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg) | ||
173 | { | ||
174 | return _hypercall2(int, callback_op, cmd, arg); | ||
175 | } | ||
176 | |||
177 | static inline long | ||
178 | xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg) | ||
179 | { | ||
180 | return _hypercall3(long, vcpu_op, cmd, cpu, arg); | ||
181 | } | ||
182 | |||
183 | static inline int | ||
184 | HYPERVISOR_physdev_op(int cmd, void *arg) | ||
185 | { | ||
186 | switch (cmd) { | ||
187 | case PHYSDEVOP_eoi: | ||
188 | return _hypercall1(int, ia64_fast_eoi, | ||
189 | ((struct physdev_eoi *)arg)->irq); | ||
190 | default: | ||
191 | return xencomm_hypercall_physdev_op(cmd, arg); | ||
192 | } | ||
193 | } | ||
194 | |||
195 | static inline long | ||
196 | xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg) | ||
197 | { | ||
198 | return _hypercall1(long, opt_feature, arg); | ||
199 | } | ||
200 | |||
201 | /* for balloon driver */ | ||
202 | #define HYPERVISOR_update_va_mapping(va, new_val, flags) (0) | ||
203 | |||
204 | /* Use xencomm to do hypercalls. */ | ||
205 | #define HYPERVISOR_sched_op xencomm_hypercall_sched_op | ||
206 | #define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op | ||
207 | #define HYPERVISOR_callback_op xencomm_hypercall_callback_op | ||
208 | #define HYPERVISOR_multicall xencomm_hypercall_multicall | ||
209 | #define HYPERVISOR_xen_version xencomm_hypercall_xen_version | ||
210 | #define HYPERVISOR_console_io xencomm_hypercall_console_io | ||
211 | #define HYPERVISOR_memory_op xencomm_hypercall_memory_op | ||
212 | #define HYPERVISOR_suspend xencomm_hypercall_suspend | ||
213 | #define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op | ||
214 | #define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature | ||
215 | |||
216 | /* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */ | ||
217 | #define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; }) | ||
218 | |||
219 | static inline int | ||
220 | HYPERVISOR_shutdown( | ||
221 | unsigned int reason) | ||
222 | { | ||
223 | struct sched_shutdown sched_shutdown = { | ||
224 | .reason = reason | ||
225 | }; | ||
226 | |||
227 | int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); | ||
228 | |||
229 | return rc; | ||
230 | } | ||
231 | |||
232 | /* for netfront.c, netback.c */ | ||
233 | #define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */ | ||
234 | |||
235 | static inline void | ||
236 | MULTI_update_va_mapping( | ||
237 | struct multicall_entry *mcl, unsigned long va, | ||
238 | pte_t new_val, unsigned long flags) | ||
239 | { | ||
240 | mcl->op = __HYPERVISOR_update_va_mapping; | ||
241 | mcl->result = 0; | ||
242 | } | ||
243 | |||
244 | static inline void | ||
245 | MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd, | ||
246 | void *uop, unsigned int count) | ||
247 | { | ||
248 | mcl->op = __HYPERVISOR_grant_table_op; | ||
249 | mcl->args[0] = cmd; | ||
250 | mcl->args[1] = (unsigned long)uop; | ||
251 | mcl->args[2] = count; | ||
252 | } | ||
253 | |||
254 | static inline void | ||
255 | MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, | ||
256 | int count, int *success_count, domid_t domid) | ||
257 | { | ||
258 | mcl->op = __HYPERVISOR_mmu_update; | ||
259 | mcl->args[0] = (unsigned long)req; | ||
260 | mcl->args[1] = count; | ||
261 | mcl->args[2] = (unsigned long)success_count; | ||
262 | mcl->args[3] = domid; | ||
263 | } | ||
264 | |||
265 | #endif /* _ASM_IA64_XEN_HYPERCALL_H */ | ||
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h new file mode 100644 index 000000000000..7a804e80fc67 --- /dev/null +++ b/arch/ia64/include/asm/xen/hypervisor.h | |||
@@ -0,0 +1,89 @@ | |||
1 | /****************************************************************************** | ||
2 | * hypervisor.h | ||
3 | * | ||
4 | * Linux-specific hypervisor handling. | ||
5 | * | ||
6 | * Copyright (c) 2002-2004, K A Fraser | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef _ASM_IA64_XEN_HYPERVISOR_H | ||
34 | #define _ASM_IA64_XEN_HYPERVISOR_H | ||
35 | |||
36 | #ifdef CONFIG_XEN | ||
37 | |||
38 | #include <linux/init.h> | ||
39 | #include <xen/interface/xen.h> | ||
40 | #include <xen/interface/version.h> /* to compile feature.c */ | ||
41 | #include <xen/features.h> /* to comiple xen-netfront.c */ | ||
42 | #include <asm/xen/hypercall.h> | ||
43 | |||
44 | /* xen_domain_type is set before executing any C code by early_xen_setup */ | ||
45 | enum xen_domain_type { | ||
46 | XEN_NATIVE, | ||
47 | XEN_PV_DOMAIN, | ||
48 | XEN_HVM_DOMAIN, | ||
49 | }; | ||
50 | |||
51 | extern enum xen_domain_type xen_domain_type; | ||
52 | |||
53 | #define xen_domain() (xen_domain_type != XEN_NATIVE) | ||
54 | #define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN) | ||
55 | #define xen_initial_domain() (xen_pv_domain() && \ | ||
56 | (xen_start_info->flags & SIF_INITDOMAIN)) | ||
57 | #define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) | ||
58 | |||
59 | /* deprecated. remove this */ | ||
60 | #define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN) | ||
61 | |||
62 | extern struct shared_info *HYPERVISOR_shared_info; | ||
63 | extern struct start_info *xen_start_info; | ||
64 | |||
65 | void __init xen_setup_vcpu_info_placement(void); | ||
66 | void force_evtchn_callback(void); | ||
67 | |||
68 | /* for drivers/xen/balloon/balloon.c */ | ||
69 | #ifdef CONFIG_XEN_SCRUB_PAGES | ||
70 | #define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT) | ||
71 | #else | ||
72 | #define scrub_pages(_p, _n) ((void)0) | ||
73 | #endif | ||
74 | |||
75 | /* For setup_arch() in arch/ia64/kernel/setup.c */ | ||
76 | void xen_ia64_enable_opt_feature(void); | ||
77 | |||
78 | #else /* CONFIG_XEN */ | ||
79 | |||
80 | #define xen_domain() (0) | ||
81 | #define xen_pv_domain() (0) | ||
82 | #define xen_initial_domain() (0) | ||
83 | #define xen_hvm_domain() (0) | ||
84 | #define is_running_on_xen() (0) /* deprecated. remove this */ | ||
85 | #endif | ||
86 | |||
87 | #define is_initial_xendomain() (0) /* deprecated. remove this */ | ||
88 | |||
89 | #endif /* _ASM_IA64_XEN_HYPERVISOR_H */ | ||
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h new file mode 100644 index 000000000000..19c2ae1d878a --- /dev/null +++ b/arch/ia64/include/asm/xen/inst.h | |||
@@ -0,0 +1,458 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/inst.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <asm/xen/privop.h> | ||
24 | |||
25 | #define ia64_ivt xen_ivt | ||
26 | #define DO_SAVE_MIN XEN_DO_SAVE_MIN | ||
27 | |||
28 | #define __paravirt_switch_to xen_switch_to | ||
29 | #define __paravirt_leave_syscall xen_leave_syscall | ||
30 | #define __paravirt_work_processed_syscall xen_work_processed_syscall | ||
31 | #define __paravirt_leave_kernel xen_leave_kernel | ||
32 | #define __paravirt_pending_syscall_end xen_work_pending_syscall_end | ||
33 | #define __paravirt_work_processed_syscall_target \ | ||
34 | xen_work_processed_syscall | ||
35 | |||
36 | #define MOV_FROM_IFA(reg) \ | ||
37 | movl reg = XSI_IFA; \ | ||
38 | ;; \ | ||
39 | ld8 reg = [reg] | ||
40 | |||
41 | #define MOV_FROM_ITIR(reg) \ | ||
42 | movl reg = XSI_ITIR; \ | ||
43 | ;; \ | ||
44 | ld8 reg = [reg] | ||
45 | |||
46 | #define MOV_FROM_ISR(reg) \ | ||
47 | movl reg = XSI_ISR; \ | ||
48 | ;; \ | ||
49 | ld8 reg = [reg] | ||
50 | |||
51 | #define MOV_FROM_IHA(reg) \ | ||
52 | movl reg = XSI_IHA; \ | ||
53 | ;; \ | ||
54 | ld8 reg = [reg] | ||
55 | |||
56 | #define MOV_FROM_IPSR(pred, reg) \ | ||
57 | (pred) movl reg = XSI_IPSR; \ | ||
58 | ;; \ | ||
59 | (pred) ld8 reg = [reg] | ||
60 | |||
61 | #define MOV_FROM_IIM(reg) \ | ||
62 | movl reg = XSI_IIM; \ | ||
63 | ;; \ | ||
64 | ld8 reg = [reg] | ||
65 | |||
66 | #define MOV_FROM_IIP(reg) \ | ||
67 | movl reg = XSI_IIP; \ | ||
68 | ;; \ | ||
69 | ld8 reg = [reg] | ||
70 | |||
71 | .macro __MOV_FROM_IVR reg, clob | ||
72 | .ifc "\reg", "r8" | ||
73 | XEN_HYPER_GET_IVR | ||
74 | .exitm | ||
75 | .endif | ||
76 | .ifc "\clob", "r8" | ||
77 | XEN_HYPER_GET_IVR | ||
78 | ;; | ||
79 | mov \reg = r8 | ||
80 | .exitm | ||
81 | .endif | ||
82 | |||
83 | mov \clob = r8 | ||
84 | ;; | ||
85 | XEN_HYPER_GET_IVR | ||
86 | ;; | ||
87 | mov \reg = r8 | ||
88 | ;; | ||
89 | mov r8 = \clob | ||
90 | .endm | ||
91 | #define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob | ||
92 | |||
93 | .macro __MOV_FROM_PSR pred, reg, clob | ||
94 | .ifc "\reg", "r8" | ||
95 | (\pred) XEN_HYPER_GET_PSR; | ||
96 | .exitm | ||
97 | .endif | ||
98 | .ifc "\clob", "r8" | ||
99 | (\pred) XEN_HYPER_GET_PSR | ||
100 | ;; | ||
101 | (\pred) mov \reg = r8 | ||
102 | .exitm | ||
103 | .endif | ||
104 | |||
105 | (\pred) mov \clob = r8 | ||
106 | (\pred) XEN_HYPER_GET_PSR | ||
107 | ;; | ||
108 | (\pred) mov \reg = r8 | ||
109 | (\pred) mov r8 = \clob | ||
110 | .endm | ||
111 | #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob | ||
112 | |||
113 | |||
114 | #define MOV_TO_IFA(reg, clob) \ | ||
115 | movl clob = XSI_IFA; \ | ||
116 | ;; \ | ||
117 | st8 [clob] = reg \ | ||
118 | |||
119 | #define MOV_TO_ITIR(pred, reg, clob) \ | ||
120 | (pred) movl clob = XSI_ITIR; \ | ||
121 | ;; \ | ||
122 | (pred) st8 [clob] = reg | ||
123 | |||
124 | #define MOV_TO_IHA(pred, reg, clob) \ | ||
125 | (pred) movl clob = XSI_IHA; \ | ||
126 | ;; \ | ||
127 | (pred) st8 [clob] = reg | ||
128 | |||
129 | #define MOV_TO_IPSR(pred, reg, clob) \ | ||
130 | (pred) movl clob = XSI_IPSR; \ | ||
131 | ;; \ | ||
132 | (pred) st8 [clob] = reg; \ | ||
133 | ;; | ||
134 | |||
135 | #define MOV_TO_IFS(pred, reg, clob) \ | ||
136 | (pred) movl clob = XSI_IFS; \ | ||
137 | ;; \ | ||
138 | (pred) st8 [clob] = reg; \ | ||
139 | ;; | ||
140 | |||
141 | #define MOV_TO_IIP(reg, clob) \ | ||
142 | movl clob = XSI_IIP; \ | ||
143 | ;; \ | ||
144 | st8 [clob] = reg | ||
145 | |||
146 | .macro ____MOV_TO_KR kr, reg, clob0, clob1 | ||
147 | .ifc "\clob0", "r9" | ||
148 | .error "clob0 \clob0 must not be r9" | ||
149 | .endif | ||
150 | .ifc "\clob1", "r8" | ||
151 | .error "clob1 \clob1 must not be r8" | ||
152 | .endif | ||
153 | |||
154 | .ifnc "\reg", "r9" | ||
155 | .ifnc "\clob1", "r9" | ||
156 | mov \clob1 = r9 | ||
157 | .endif | ||
158 | mov r9 = \reg | ||
159 | .endif | ||
160 | .ifnc "\clob0", "r8" | ||
161 | mov \clob0 = r8 | ||
162 | .endif | ||
163 | mov r8 = \kr | ||
164 | ;; | ||
165 | XEN_HYPER_SET_KR | ||
166 | |||
167 | .ifnc "\reg", "r9" | ||
168 | .ifnc "\clob1", "r9" | ||
169 | mov r9 = \clob1 | ||
170 | .endif | ||
171 | .endif | ||
172 | .ifnc "\clob0", "r8" | ||
173 | mov r8 = \clob0 | ||
174 | .endif | ||
175 | .endm | ||
176 | |||
177 | .macro __MOV_TO_KR kr, reg, clob0, clob1 | ||
178 | .ifc "\clob0", "r9" | ||
179 | ____MOV_TO_KR \kr, \reg, \clob1, \clob0 | ||
180 | .exitm | ||
181 | .endif | ||
182 | .ifc "\clob1", "r8" | ||
183 | ____MOV_TO_KR \kr, \reg, \clob1, \clob0 | ||
184 | .exitm | ||
185 | .endif | ||
186 | |||
187 | ____MOV_TO_KR \kr, \reg, \clob0, \clob1 | ||
188 | .endm | ||
189 | |||
190 | #define MOV_TO_KR(kr, reg, clob0, clob1) \ | ||
191 | __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1 | ||
192 | |||
193 | |||
194 | .macro __ITC_I pred, reg, clob | ||
195 | .ifc "\reg", "r8" | ||
196 | (\pred) XEN_HYPER_ITC_I | ||
197 | .exitm | ||
198 | .endif | ||
199 | .ifc "\clob", "r8" | ||
200 | (\pred) mov r8 = \reg | ||
201 | ;; | ||
202 | (\pred) XEN_HYPER_ITC_I | ||
203 | .exitm | ||
204 | .endif | ||
205 | |||
206 | (\pred) mov \clob = r8 | ||
207 | (\pred) mov r8 = \reg | ||
208 | ;; | ||
209 | (\pred) XEN_HYPER_ITC_I | ||
210 | ;; | ||
211 | (\pred) mov r8 = \clob | ||
212 | ;; | ||
213 | .endm | ||
214 | #define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob | ||
215 | |||
216 | .macro __ITC_D pred, reg, clob | ||
217 | .ifc "\reg", "r8" | ||
218 | (\pred) XEN_HYPER_ITC_D | ||
219 | ;; | ||
220 | .exitm | ||
221 | .endif | ||
222 | .ifc "\clob", "r8" | ||
223 | (\pred) mov r8 = \reg | ||
224 | ;; | ||
225 | (\pred) XEN_HYPER_ITC_D | ||
226 | ;; | ||
227 | .exitm | ||
228 | .endif | ||
229 | |||
230 | (\pred) mov \clob = r8 | ||
231 | (\pred) mov r8 = \reg | ||
232 | ;; | ||
233 | (\pred) XEN_HYPER_ITC_D | ||
234 | ;; | ||
235 | (\pred) mov r8 = \clob | ||
236 | ;; | ||
237 | .endm | ||
238 | #define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob | ||
239 | |||
240 | .macro __ITC_I_AND_D pred_i, pred_d, reg, clob | ||
241 | .ifc "\reg", "r8" | ||
242 | (\pred_i)XEN_HYPER_ITC_I | ||
243 | ;; | ||
244 | (\pred_d)XEN_HYPER_ITC_D | ||
245 | ;; | ||
246 | .exitm | ||
247 | .endif | ||
248 | .ifc "\clob", "r8" | ||
249 | mov r8 = \reg | ||
250 | ;; | ||
251 | (\pred_i)XEN_HYPER_ITC_I | ||
252 | ;; | ||
253 | (\pred_d)XEN_HYPER_ITC_D | ||
254 | ;; | ||
255 | .exitm | ||
256 | .endif | ||
257 | |||
258 | mov \clob = r8 | ||
259 | mov r8 = \reg | ||
260 | ;; | ||
261 | (\pred_i)XEN_HYPER_ITC_I | ||
262 | ;; | ||
263 | (\pred_d)XEN_HYPER_ITC_D | ||
264 | ;; | ||
265 | mov r8 = \clob | ||
266 | ;; | ||
267 | .endm | ||
268 | #define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ | ||
269 | __ITC_I_AND_D pred_i, pred_d, reg, clob | ||
270 | |||
271 | .macro __THASH pred, reg0, reg1, clob | ||
272 | .ifc "\reg0", "r8" | ||
273 | (\pred) mov r8 = \reg1 | ||
274 | (\pred) XEN_HYPER_THASH | ||
275 | .exitm | ||
276 | .endc | ||
277 | .ifc "\reg1", "r8" | ||
278 | (\pred) XEN_HYPER_THASH | ||
279 | ;; | ||
280 | (\pred) mov \reg0 = r8 | ||
281 | ;; | ||
282 | .exitm | ||
283 | .endif | ||
284 | .ifc "\clob", "r8" | ||
285 | (\pred) mov r8 = \reg1 | ||
286 | (\pred) XEN_HYPER_THASH | ||
287 | ;; | ||
288 | (\pred) mov \reg0 = r8 | ||
289 | ;; | ||
290 | .exitm | ||
291 | .endif | ||
292 | |||
293 | (\pred) mov \clob = r8 | ||
294 | (\pred) mov r8 = \reg1 | ||
295 | (\pred) XEN_HYPER_THASH | ||
296 | ;; | ||
297 | (\pred) mov \reg0 = r8 | ||
298 | (\pred) mov r8 = \clob | ||
299 | ;; | ||
300 | .endm | ||
301 | #define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob | ||
302 | |||
303 | #define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ | ||
304 | mov clob0 = 1; \ | ||
305 | movl clob1 = XSI_PSR_IC; \ | ||
306 | ;; \ | ||
307 | st4 [clob1] = clob0 \ | ||
308 | ;; | ||
309 | |||
310 | #define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ | ||
311 | ;; \ | ||
312 | srlz.d; \ | ||
313 | mov clob1 = 1; \ | ||
314 | movl clob0 = XSI_PSR_IC; \ | ||
315 | ;; \ | ||
316 | st4 [clob0] = clob1 | ||
317 | |||
318 | #define RSM_PSR_IC(clob) \ | ||
319 | movl clob = XSI_PSR_IC; \ | ||
320 | ;; \ | ||
321 | st4 [clob] = r0; \ | ||
322 | ;; | ||
323 | |||
324 | /* pred will be clobbered */ | ||
325 | #define MASK_TO_PEND_OFS (-1) | ||
326 | #define SSM_PSR_I(pred, pred_clob, clob) \ | ||
327 | (pred) movl clob = XSI_PSR_I_ADDR \ | ||
328 | ;; \ | ||
329 | (pred) ld8 clob = [clob] \ | ||
330 | ;; \ | ||
331 | /* if (pred) vpsr.i = 1 */ \ | ||
332 | /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \ | ||
333 | (pred) st1 [clob] = r0, MASK_TO_PEND_OFS \ | ||
334 | ;; \ | ||
335 | /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \ | ||
336 | (pred) ld1 clob = [clob] \ | ||
337 | ;; \ | ||
338 | (pred) cmp.ne.unc pred_clob, p0 = clob, r0 \ | ||
339 | ;; \ | ||
340 | (pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */ | ||
341 | |||
342 | #define RSM_PSR_I(pred, clob0, clob1) \ | ||
343 | movl clob0 = XSI_PSR_I_ADDR; \ | ||
344 | mov clob1 = 1; \ | ||
345 | ;; \ | ||
346 | ld8 clob0 = [clob0]; \ | ||
347 | ;; \ | ||
348 | (pred) st1 [clob0] = clob1 | ||
349 | |||
350 | #define RSM_PSR_I_IC(clob0, clob1, clob2) \ | ||
351 | movl clob0 = XSI_PSR_I_ADDR; \ | ||
352 | movl clob1 = XSI_PSR_IC; \ | ||
353 | ;; \ | ||
354 | ld8 clob0 = [clob0]; \ | ||
355 | mov clob2 = 1; \ | ||
356 | ;; \ | ||
357 | /* note: clears both vpsr.i and vpsr.ic! */ \ | ||
358 | st1 [clob0] = clob2; \ | ||
359 | st4 [clob1] = r0; \ | ||
360 | ;; | ||
361 | |||
362 | #define RSM_PSR_DT \ | ||
363 | XEN_HYPER_RSM_PSR_DT | ||
364 | |||
365 | #define SSM_PSR_DT_AND_SRLZ_I \ | ||
366 | XEN_HYPER_SSM_PSR_DT | ||
367 | |||
368 | #define BSW_0(clob0, clob1, clob2) \ | ||
369 | ;; \ | ||
370 | /* r16-r31 all now hold bank1 values */ \ | ||
371 | mov clob2 = ar.unat; \ | ||
372 | movl clob0 = XSI_BANK1_R16; \ | ||
373 | movl clob1 = XSI_BANK1_R16 + 8; \ | ||
374 | ;; \ | ||
375 | .mem.offset 0, 0; st8.spill [clob0] = r16, 16; \ | ||
376 | .mem.offset 8, 0; st8.spill [clob1] = r17, 16; \ | ||
377 | ;; \ | ||
378 | .mem.offset 0, 0; st8.spill [clob0] = r18, 16; \ | ||
379 | .mem.offset 8, 0; st8.spill [clob1] = r19, 16; \ | ||
380 | ;; \ | ||
381 | .mem.offset 0, 0; st8.spill [clob0] = r20, 16; \ | ||
382 | .mem.offset 8, 0; st8.spill [clob1] = r21, 16; \ | ||
383 | ;; \ | ||
384 | .mem.offset 0, 0; st8.spill [clob0] = r22, 16; \ | ||
385 | .mem.offset 8, 0; st8.spill [clob1] = r23, 16; \ | ||
386 | ;; \ | ||
387 | .mem.offset 0, 0; st8.spill [clob0] = r24, 16; \ | ||
388 | .mem.offset 8, 0; st8.spill [clob1] = r25, 16; \ | ||
389 | ;; \ | ||
390 | .mem.offset 0, 0; st8.spill [clob0] = r26, 16; \ | ||
391 | .mem.offset 8, 0; st8.spill [clob1] = r27, 16; \ | ||
392 | ;; \ | ||
393 | .mem.offset 0, 0; st8.spill [clob0] = r28, 16; \ | ||
394 | .mem.offset 8, 0; st8.spill [clob1] = r29, 16; \ | ||
395 | ;; \ | ||
396 | .mem.offset 0, 0; st8.spill [clob0] = r30, 16; \ | ||
397 | .mem.offset 8, 0; st8.spill [clob1] = r31, 16; \ | ||
398 | ;; \ | ||
399 | mov clob1 = ar.unat; \ | ||
400 | movl clob0 = XSI_B1NAT; \ | ||
401 | ;; \ | ||
402 | st8 [clob0] = clob1; \ | ||
403 | mov ar.unat = clob2; \ | ||
404 | movl clob0 = XSI_BANKNUM; \ | ||
405 | ;; \ | ||
406 | st4 [clob0] = r0 | ||
407 | |||
408 | |||
409 | /* FIXME: THIS CODE IS NOT NaT SAFE! */ | ||
410 | #define XEN_BSW_1(clob) \ | ||
411 | mov clob = ar.unat; \ | ||
412 | movl r30 = XSI_B1NAT; \ | ||
413 | ;; \ | ||
414 | ld8 r30 = [r30]; \ | ||
415 | mov r31 = 1; \ | ||
416 | ;; \ | ||
417 | mov ar.unat = r30; \ | ||
418 | movl r30 = XSI_BANKNUM; \ | ||
419 | ;; \ | ||
420 | st4 [r30] = r31; \ | ||
421 | movl r30 = XSI_BANK1_R16; \ | ||
422 | movl r31 = XSI_BANK1_R16+8; \ | ||
423 | ;; \ | ||
424 | ld8.fill r16 = [r30], 16; \ | ||
425 | ld8.fill r17 = [r31], 16; \ | ||
426 | ;; \ | ||
427 | ld8.fill r18 = [r30], 16; \ | ||
428 | ld8.fill r19 = [r31], 16; \ | ||
429 | ;; \ | ||
430 | ld8.fill r20 = [r30], 16; \ | ||
431 | ld8.fill r21 = [r31], 16; \ | ||
432 | ;; \ | ||
433 | ld8.fill r22 = [r30], 16; \ | ||
434 | ld8.fill r23 = [r31], 16; \ | ||
435 | ;; \ | ||
436 | ld8.fill r24 = [r30], 16; \ | ||
437 | ld8.fill r25 = [r31], 16; \ | ||
438 | ;; \ | ||
439 | ld8.fill r26 = [r30], 16; \ | ||
440 | ld8.fill r27 = [r31], 16; \ | ||
441 | ;; \ | ||
442 | ld8.fill r28 = [r30], 16; \ | ||
443 | ld8.fill r29 = [r31], 16; \ | ||
444 | ;; \ | ||
445 | ld8.fill r30 = [r30]; \ | ||
446 | ld8.fill r31 = [r31]; \ | ||
447 | ;; \ | ||
448 | mov ar.unat = clob | ||
449 | |||
450 | #define BSW_1(clob0, clob1) XEN_BSW_1(clob1) | ||
451 | |||
452 | |||
453 | #define COVER \ | ||
454 | XEN_HYPER_COVER | ||
455 | |||
456 | #define RFI \ | ||
457 | XEN_HYPER_RFI; \ | ||
458 | dv_serialize_data | ||
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h new file mode 100644 index 000000000000..f00fab40854d --- /dev/null +++ b/arch/ia64/include/asm/xen/interface.h | |||
@@ -0,0 +1,346 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch-ia64/hypervisor-if.h | ||
3 | * | ||
4 | * Guest OS interface to IA64 Xen. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
7 | * of this software and associated documentation files (the "Software"), to | ||
8 | * deal in the Software without restriction, including without limitation the | ||
9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
10 | * sell copies of the Software, and to permit persons to whom the Software is | ||
11 | * furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Copyright by those who contributed. (in alphabetical order) | ||
25 | * | ||
26 | * Anthony Xu <anthony.xu@intel.com> | ||
27 | * Eddie Dong <eddie.dong@intel.com> | ||
28 | * Fred Yang <fred.yang@intel.com> | ||
29 | * Kevin Tian <kevin.tian@intel.com> | ||
30 | * Alex Williamson <alex.williamson@hp.com> | ||
31 | * Chris Wright <chrisw@sous-sol.org> | ||
32 | * Christian Limpach <Christian.Limpach@cl.cam.ac.uk> | ||
33 | * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com> | ||
34 | * Hollis Blanchard <hollisb@us.ibm.com> | ||
35 | * Isaku Yamahata <yamahata@valinux.co.jp> | ||
36 | * Jan Beulich <jbeulich@novell.com> | ||
37 | * John Levon <john.levon@sun.com> | ||
38 | * Kazuhiro Suzuki <kaz@jp.fujitsu.com> | ||
39 | * Keir Fraser <keir.fraser@citrix.com> | ||
40 | * Kouya Shimura <kouya@jp.fujitsu.com> | ||
41 | * Masaki Kanno <kanno.masaki@jp.fujitsu.com> | ||
42 | * Matt Chapman <matthewc@hp.com> | ||
43 | * Matthew Chapman <matthewc@hp.com> | ||
44 | * Samuel Thibault <samuel.thibault@eu.citrix.com> | ||
45 | * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com> | ||
46 | * Tristan Gingold <tgingold@free.fr> | ||
47 | * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com> | ||
48 | * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com> | ||
49 | * Zhang Xin <xing.z.zhang@intel.com> | ||
50 | * Zhang xiantao <xiantao.zhang@intel.com> | ||
51 | * dan.magenheimer@hp.com | ||
52 | * ian.pratt@cl.cam.ac.uk | ||
53 | * michael.fetterman@cl.cam.ac.uk | ||
54 | */ | ||
55 | |||
56 | #ifndef _ASM_IA64_XEN_INTERFACE_H | ||
57 | #define _ASM_IA64_XEN_INTERFACE_H | ||
58 | |||
59 | #define __DEFINE_GUEST_HANDLE(name, type) \ | ||
60 | typedef struct { type *p; } __guest_handle_ ## name | ||
61 | |||
62 | #define DEFINE_GUEST_HANDLE_STRUCT(name) \ | ||
63 | __DEFINE_GUEST_HANDLE(name, struct name) | ||
64 | #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) | ||
65 | #define GUEST_HANDLE(name) __guest_handle_ ## name | ||
66 | #define GUEST_HANDLE_64(name) GUEST_HANDLE(name) | ||
67 | #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) | ||
68 | |||
69 | #ifndef __ASSEMBLY__ | ||
70 | /* Guest handles for primitive C types. */ | ||
71 | __DEFINE_GUEST_HANDLE(uchar, unsigned char); | ||
72 | __DEFINE_GUEST_HANDLE(uint, unsigned int); | ||
73 | __DEFINE_GUEST_HANDLE(ulong, unsigned long); | ||
74 | __DEFINE_GUEST_HANDLE(u64, unsigned long); | ||
75 | DEFINE_GUEST_HANDLE(char); | ||
76 | DEFINE_GUEST_HANDLE(int); | ||
77 | DEFINE_GUEST_HANDLE(long); | ||
78 | DEFINE_GUEST_HANDLE(void); | ||
79 | |||
80 | typedef unsigned long xen_pfn_t; | ||
81 | DEFINE_GUEST_HANDLE(xen_pfn_t); | ||
82 | #define PRI_xen_pfn "lx" | ||
83 | #endif | ||
84 | |||
85 | /* Arch specific VIRQs definition */ | ||
86 | #define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ | ||
87 | #define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ | ||
88 | #define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ | ||
89 | |||
90 | /* Maximum number of virtual CPUs in multi-processor guests. */ | ||
91 | /* keep sizeof(struct shared_page) <= PAGE_SIZE. | ||
92 | * this is checked in arch/ia64/xen/hypervisor.c. */ | ||
93 | #define MAX_VIRT_CPUS 64 | ||
94 | |||
95 | #ifndef __ASSEMBLY__ | ||
96 | |||
97 | #define INVALID_MFN (~0UL) | ||
98 | |||
99 | union vac { | ||
100 | unsigned long value; | ||
101 | struct { | ||
102 | int a_int:1; | ||
103 | int a_from_int_cr:1; | ||
104 | int a_to_int_cr:1; | ||
105 | int a_from_psr:1; | ||
106 | int a_from_cpuid:1; | ||
107 | int a_cover:1; | ||
108 | int a_bsw:1; | ||
109 | long reserved:57; | ||
110 | }; | ||
111 | }; | ||
112 | |||
113 | union vdc { | ||
114 | unsigned long value; | ||
115 | struct { | ||
116 | int d_vmsw:1; | ||
117 | int d_extint:1; | ||
118 | int d_ibr_dbr:1; | ||
119 | int d_pmc:1; | ||
120 | int d_to_pmd:1; | ||
121 | int d_itm:1; | ||
122 | long reserved:58; | ||
123 | }; | ||
124 | }; | ||
125 | |||
126 | struct mapped_regs { | ||
127 | union vac vac; | ||
128 | union vdc vdc; | ||
129 | unsigned long virt_env_vaddr; | ||
130 | unsigned long reserved1[29]; | ||
131 | unsigned long vhpi; | ||
132 | unsigned long reserved2[95]; | ||
133 | union { | ||
134 | unsigned long vgr[16]; | ||
135 | unsigned long bank1_regs[16]; /* bank1 regs (r16-r31) | ||
136 | when bank0 active */ | ||
137 | }; | ||
138 | union { | ||
139 | unsigned long vbgr[16]; | ||
140 | unsigned long bank0_regs[16]; /* bank0 regs (r16-r31) | ||
141 | when bank1 active */ | ||
142 | }; | ||
143 | unsigned long vnat; | ||
144 | unsigned long vbnat; | ||
145 | unsigned long vcpuid[5]; | ||
146 | unsigned long reserved3[11]; | ||
147 | unsigned long vpsr; | ||
148 | unsigned long vpr; | ||
149 | unsigned long reserved4[76]; | ||
150 | union { | ||
151 | unsigned long vcr[128]; | ||
152 | struct { | ||
153 | unsigned long dcr; /* CR0 */ | ||
154 | unsigned long itm; | ||
155 | unsigned long iva; | ||
156 | unsigned long rsv1[5]; | ||
157 | unsigned long pta; /* CR8 */ | ||
158 | unsigned long rsv2[7]; | ||
159 | unsigned long ipsr; /* CR16 */ | ||
160 | unsigned long isr; | ||
161 | unsigned long rsv3; | ||
162 | unsigned long iip; | ||
163 | unsigned long ifa; | ||
164 | unsigned long itir; | ||
165 | unsigned long iipa; | ||
166 | unsigned long ifs; | ||
167 | unsigned long iim; /* CR24 */ | ||
168 | unsigned long iha; | ||
169 | unsigned long rsv4[38]; | ||
170 | unsigned long lid; /* CR64 */ | ||
171 | unsigned long ivr; | ||
172 | unsigned long tpr; | ||
173 | unsigned long eoi; | ||
174 | unsigned long irr[4]; | ||
175 | unsigned long itv; /* CR72 */ | ||
176 | unsigned long pmv; | ||
177 | unsigned long cmcv; | ||
178 | unsigned long rsv5[5]; | ||
179 | unsigned long lrr0; /* CR80 */ | ||
180 | unsigned long lrr1; | ||
181 | unsigned long rsv6[46]; | ||
182 | }; | ||
183 | }; | ||
184 | union { | ||
185 | unsigned long reserved5[128]; | ||
186 | struct { | ||
187 | unsigned long precover_ifs; | ||
188 | unsigned long unat; /* not sure if this is needed | ||
189 | until NaT arch is done */ | ||
190 | int interrupt_collection_enabled; /* virtual psr.ic */ | ||
191 | |||
192 | /* virtual interrupt deliverable flag is | ||
193 | * evtchn_upcall_mask in shared info area now. | ||
194 | * interrupt_mask_addr is the address | ||
195 | * of evtchn_upcall_mask for current vcpu | ||
196 | */ | ||
197 | unsigned char *interrupt_mask_addr; | ||
198 | int pending_interruption; | ||
199 | unsigned char vpsr_pp; | ||
200 | unsigned char vpsr_dfh; | ||
201 | unsigned char hpsr_dfh; | ||
202 | unsigned char hpsr_mfh; | ||
203 | unsigned long reserved5_1[4]; | ||
204 | int metaphysical_mode; /* 1 = use metaphys mapping | ||
205 | 0 = use virtual */ | ||
206 | int banknum; /* 0 or 1, which virtual | ||
207 | register bank is active */ | ||
208 | unsigned long rrs[8]; /* region registers */ | ||
209 | unsigned long krs[8]; /* kernel registers */ | ||
210 | unsigned long tmp[16]; /* temp registers | ||
211 | (e.g. for hyperprivops) */ | ||
212 | }; | ||
213 | }; | ||
214 | }; | ||
215 | |||
216 | struct arch_vcpu_info { | ||
217 | /* nothing */ | ||
218 | }; | ||
219 | |||
220 | /* | ||
221 | * This structure is used for magic page in domain pseudo physical address | ||
222 | * space and the result of XENMEM_machine_memory_map. | ||
223 | * As the XENMEM_machine_memory_map result, | ||
224 | * xen_memory_map::nr_entries indicates the size in bytes | ||
225 | * including struct xen_ia64_memmap_info. Not the number of entries. | ||
226 | */ | ||
227 | struct xen_ia64_memmap_info { | ||
228 | uint64_t efi_memmap_size; /* size of EFI memory map */ | ||
229 | uint64_t efi_memdesc_size; /* size of an EFI memory map | ||
230 | * descriptor */ | ||
231 | uint32_t efi_memdesc_version; /* memory descriptor version */ | ||
232 | void *memdesc[0]; /* array of efi_memory_desc_t */ | ||
233 | }; | ||
234 | |||
235 | struct arch_shared_info { | ||
236 | /* PFN of the start_info page. */ | ||
237 | unsigned long start_info_pfn; | ||
238 | |||
239 | /* Interrupt vector for event channel. */ | ||
240 | int evtchn_vector; | ||
241 | |||
242 | /* PFN of memmap_info page */ | ||
243 | unsigned int memmap_info_num_pages; /* currently only = 1 case is | ||
244 | supported. */ | ||
245 | unsigned long memmap_info_pfn; | ||
246 | |||
247 | uint64_t pad[31]; | ||
248 | }; | ||
249 | |||
250 | struct xen_callback { | ||
251 | unsigned long ip; | ||
252 | }; | ||
253 | typedef struct xen_callback xen_callback_t; | ||
254 | |||
255 | #endif /* !__ASSEMBLY__ */ | ||
256 | |||
257 | /* Size of the shared_info area (this is not related to page size). */ | ||
258 | #define XSI_SHIFT 14 | ||
259 | #define XSI_SIZE (1 << XSI_SHIFT) | ||
260 | /* Log size of mapped_regs area (64 KB - only 4KB is used). */ | ||
261 | #define XMAPPEDREGS_SHIFT 12 | ||
262 | #define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) | ||
263 | /* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ | ||
264 | #define XMAPPEDREGS_OFS XSI_SIZE | ||
265 | |||
266 | /* Hyperprivops. */ | ||
267 | #define HYPERPRIVOP_START 0x1 | ||
268 | #define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) | ||
269 | #define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) | ||
270 | #define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) | ||
271 | #define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) | ||
272 | #define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) | ||
273 | #define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) | ||
274 | #define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) | ||
275 | #define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) | ||
276 | #define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) | ||
277 | #define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) | ||
278 | #define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) | ||
279 | #define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) | ||
280 | #define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) | ||
281 | #define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) | ||
282 | #define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) | ||
283 | #define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) | ||
284 | #define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) | ||
285 | #define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) | ||
286 | #define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) | ||
287 | #define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) | ||
288 | #define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) | ||
289 | #define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) | ||
290 | #define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) | ||
291 | #define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) | ||
292 | #define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) | ||
293 | #define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) | ||
294 | #define HYPERPRIVOP_MAX (0x1a) | ||
295 | |||
296 | /* Fast and light hypercalls. */ | ||
297 | #define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 | ||
298 | |||
299 | /* Xencomm macros. */ | ||
300 | #define XENCOMM_INLINE_MASK 0xf800000000000000UL | ||
301 | #define XENCOMM_INLINE_FLAG 0x8000000000000000UL | ||
302 | |||
303 | #ifndef __ASSEMBLY__ | ||
304 | |||
305 | /* | ||
306 | * Optimization features. | ||
307 | * The hypervisor may do some special optimizations for guests. This hypercall | ||
308 | * can be used to switch on/of these special optimizations. | ||
309 | */ | ||
310 | #define __HYPERVISOR_opt_feature 0x700UL | ||
311 | |||
312 | #define XEN_IA64_OPTF_OFF 0x0 | ||
313 | #define XEN_IA64_OPTF_ON 0x1 | ||
314 | |||
315 | /* | ||
316 | * If this feature is switched on, the hypervisor inserts the | ||
317 | * tlb entries without calling the guests traphandler. | ||
318 | * This is useful in guests using region 7 for identity mapping | ||
319 | * like the linux kernel does. | ||
320 | */ | ||
321 | #define XEN_IA64_OPTF_IDENT_MAP_REG7 1 | ||
322 | |||
323 | /* Identity mapping of region 4 addresses in HVM. */ | ||
324 | #define XEN_IA64_OPTF_IDENT_MAP_REG4 2 | ||
325 | |||
326 | /* Identity mapping of region 5 addresses in HVM. */ | ||
327 | #define XEN_IA64_OPTF_IDENT_MAP_REG5 3 | ||
328 | |||
329 | #define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) | ||
330 | |||
331 | struct xen_ia64_opt_feature { | ||
332 | unsigned long cmd; /* Which feature */ | ||
333 | unsigned char on; /* Switch feature on/off */ | ||
334 | union { | ||
335 | struct { | ||
336 | /* The page protection bit mask of the pte. | ||
337 | * This will be or'ed with the pte. */ | ||
338 | unsigned long pgprot; | ||
339 | unsigned long key; /* A protection key for itir.*/ | ||
340 | }; | ||
341 | }; | ||
342 | }; | ||
343 | |||
344 | #endif /* __ASSEMBLY__ */ | ||
345 | |||
346 | #endif /* _ASM_IA64_XEN_INTERFACE_H */ | ||
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h new file mode 100644 index 000000000000..a90450983003 --- /dev/null +++ b/arch/ia64/include/asm/xen/irq.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/irq.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_IA64_XEN_IRQ_H | ||
24 | #define _ASM_IA64_XEN_IRQ_H | ||
25 | |||
26 | /* | ||
27 | * The flat IRQ space is divided into two regions: | ||
28 | * 1. A one-to-one mapping of real physical IRQs. This space is only used | ||
29 | * if we have physical device-access privilege. This region is at the | ||
30 | * start of the IRQ space so that existing device drivers do not need | ||
31 | * to be modified to translate physical IRQ numbers into our IRQ space. | ||
32 | * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These | ||
33 | * are bound using the provided bind/unbind functions. | ||
34 | */ | ||
35 | |||
36 | #define XEN_PIRQ_BASE 0 | ||
37 | #define XEN_NR_PIRQS 256 | ||
38 | |||
39 | #define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS) | ||
40 | #define XEN_NR_DYNIRQS (NR_CPUS * 8) | ||
41 | |||
42 | #define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS) | ||
43 | |||
44 | #endif /* _ASM_IA64_XEN_IRQ_H */ | ||
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h new file mode 100644 index 000000000000..4d92d9bbda7b --- /dev/null +++ b/arch/ia64/include/asm/xen/minstate.h | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | ||
3 | * the minimum state necessary that allows us to turn psr.ic back | ||
4 | * on. | ||
5 | * | ||
6 | * Assumed state upon entry: | ||
7 | * psr.ic: off | ||
8 | * r31: contains saved predicates (pr) | ||
9 | * | ||
10 | * Upon exit, the state is as follows: | ||
11 | * psr.ic: off | ||
12 | * r2 = points to &pt_regs.r16 | ||
13 | * r8 = contents of ar.ccv | ||
14 | * r9 = contents of ar.csd | ||
15 | * r10 = contents of ar.ssd | ||
16 | * r11 = FPSR_DEFAULT | ||
17 | * r12 = kernel sp (kernel virtual address) | ||
18 | * r13 = points to current task_struct (kernel virtual address) | ||
19 | * p15 = TRUE if psr.i is set in cr.ipsr | ||
20 | * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: | ||
21 | * preserved | ||
22 | * CONFIG_XEN note: p6/p7 are not preserved | ||
23 | * | ||
24 | * Note that psr.ic is NOT turned on by this macro. This is so that | ||
25 | * we can pass interruption state as arguments to a handler. | ||
26 | */ | ||
27 | #define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \ | ||
28 | mov r16=IA64_KR(CURRENT); /* M */ \ | ||
29 | mov r27=ar.rsc; /* M */ \ | ||
30 | mov r20=r1; /* A */ \ | ||
31 | mov r25=ar.unat; /* M */ \ | ||
32 | MOV_FROM_IPSR(p0,r29); /* M */ \ | ||
33 | MOV_FROM_IIP(r28); /* M */ \ | ||
34 | mov r21=ar.fpsr; /* M */ \ | ||
35 | mov r26=ar.pfs; /* I */ \ | ||
36 | __COVER; /* B;; (or nothing) */ \ | ||
37 | adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ | ||
38 | ;; \ | ||
39 | ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \ | ||
40 | st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \ | ||
41 | adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \ | ||
42 | /* switch from user to kernel RBS: */ \ | ||
43 | ;; \ | ||
44 | invala; /* M */ \ | ||
45 | /* SAVE_IFS;*/ /* see xen special handling below */ \ | ||
46 | cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ | ||
47 | ;; \ | ||
48 | (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ | ||
49 | ;; \ | ||
50 | (pUStk) mov.m r24=ar.rnat; \ | ||
51 | (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ | ||
52 | (pKStk) mov r1=sp; /* get sp */ \ | ||
53 | ;; \ | ||
54 | (pUStk) lfetch.fault.excl.nt1 [r22]; \ | ||
55 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | ||
56 | (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ | ||
57 | ;; \ | ||
58 | (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ | ||
59 | (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ | ||
60 | ;; \ | ||
61 | (pUStk) mov r18=ar.bsp; \ | ||
62 | (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ | ||
63 | adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ | ||
64 | adds r16=PT(CR_IPSR),r1; \ | ||
65 | ;; \ | ||
66 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ | ||
67 | st8 [r16]=r29; /* save cr.ipsr */ \ | ||
68 | ;; \ | ||
69 | lfetch.fault.excl.nt1 [r17]; \ | ||
70 | tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ | ||
71 | mov r29=b0 \ | ||
72 | ;; \ | ||
73 | WORKAROUND; \ | ||
74 | adds r16=PT(R8),r1; /* initialize first base pointer */ \ | ||
75 | adds r17=PT(R9),r1; /* initialize second base pointer */ \ | ||
76 | (pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \ | ||
77 | ;; \ | ||
78 | .mem.offset 0,0; st8.spill [r16]=r8,16; \ | ||
79 | .mem.offset 8,0; st8.spill [r17]=r9,16; \ | ||
80 | ;; \ | ||
81 | .mem.offset 0,0; st8.spill [r16]=r10,24; \ | ||
82 | movl r8=XSI_PRECOVER_IFS; \ | ||
83 | .mem.offset 8,0; st8.spill [r17]=r11,24; \ | ||
84 | ;; \ | ||
85 | /* xen special handling for possibly lazy cover */ \ | ||
86 | /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \ | ||
87 | ld8 r30=[r8]; \ | ||
88 | (pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ | ||
89 | st8 [r16]=r28,16; /* save cr.iip */ \ | ||
90 | ;; \ | ||
91 | st8 [r17]=r30,16; /* save cr.ifs */ \ | ||
92 | mov r8=ar.ccv; \ | ||
93 | mov r9=ar.csd; \ | ||
94 | mov r10=ar.ssd; \ | ||
95 | movl r11=FPSR_DEFAULT; /* L-unit */ \ | ||
96 | ;; \ | ||
97 | st8 [r16]=r25,16; /* save ar.unat */ \ | ||
98 | st8 [r17]=r26,16; /* save ar.pfs */ \ | ||
99 | shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ | ||
100 | ;; \ | ||
101 | st8 [r16]=r27,16; /* save ar.rsc */ \ | ||
102 | (pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \ | ||
103 | (pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \ | ||
104 | ;; /* avoid RAW on r16 & r17 */ \ | ||
105 | (pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \ | ||
106 | st8 [r17]=r31,16; /* save predicates */ \ | ||
107 | (pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \ | ||
108 | ;; \ | ||
109 | st8 [r16]=r29,16; /* save b0 */ \ | ||
110 | st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ | ||
111 | cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ | ||
112 | ;; \ | ||
113 | .mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ | ||
114 | .mem.offset 8,0; st8.spill [r17]=r12,16; \ | ||
115 | adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ | ||
116 | ;; \ | ||
117 | .mem.offset 0,0; st8.spill [r16]=r13,16; \ | ||
118 | .mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \ | ||
119 | mov r13=IA64_KR(CURRENT); /* establish `current' */ \ | ||
120 | ;; \ | ||
121 | .mem.offset 0,0; st8.spill [r16]=r15,16; \ | ||
122 | .mem.offset 8,0; st8.spill [r17]=r14,16; \ | ||
123 | ;; \ | ||
124 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ | ||
125 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ | ||
126 | ACCOUNT_GET_STAMP \ | ||
127 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ | ||
128 | ;; \ | ||
129 | EXTRA; \ | ||
130 | movl r1=__gp; /* establish kernel global pointer */ \ | ||
131 | ;; \ | ||
132 | ACCOUNT_SYS_ENTER \ | ||
133 | BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \ | ||
134 | ;; | ||
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h new file mode 100644 index 000000000000..03441a780b5b --- /dev/null +++ b/arch/ia64/include/asm/xen/page.h | |||
@@ -0,0 +1,65 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/page.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_IA64_XEN_PAGE_H | ||
24 | #define _ASM_IA64_XEN_PAGE_H | ||
25 | |||
26 | #define INVALID_P2M_ENTRY (~0UL) | ||
27 | |||
28 | static inline unsigned long mfn_to_pfn(unsigned long mfn) | ||
29 | { | ||
30 | return mfn; | ||
31 | } | ||
32 | |||
33 | static inline unsigned long pfn_to_mfn(unsigned long pfn) | ||
34 | { | ||
35 | return pfn; | ||
36 | } | ||
37 | |||
38 | #define phys_to_machine_mapping_valid(_x) (1) | ||
39 | |||
40 | static inline void *mfn_to_virt(unsigned long mfn) | ||
41 | { | ||
42 | return __va(mfn << PAGE_SHIFT); | ||
43 | } | ||
44 | |||
45 | static inline unsigned long virt_to_mfn(void *virt) | ||
46 | { | ||
47 | return __pa(virt) >> PAGE_SHIFT; | ||
48 | } | ||
49 | |||
50 | /* for tpmfront.c */ | ||
51 | static inline unsigned long virt_to_machine(void *virt) | ||
52 | { | ||
53 | return __pa(virt); | ||
54 | } | ||
55 | |||
56 | static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | ||
57 | { | ||
58 | /* nothing */ | ||
59 | } | ||
60 | |||
61 | #define pte_mfn(_x) pte_pfn(_x) | ||
62 | #define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */ | ||
63 | #define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */ | ||
64 | |||
65 | #endif /* _ASM_IA64_XEN_PAGE_H */ | ||
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h new file mode 100644 index 000000000000..71ec7546e100 --- /dev/null +++ b/arch/ia64/include/asm/xen/privop.h | |||
@@ -0,0 +1,129 @@ | |||
1 | #ifndef _ASM_IA64_XEN_PRIVOP_H | ||
2 | #define _ASM_IA64_XEN_PRIVOP_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 2005 Hewlett-Packard Co | ||
6 | * Dan Magenheimer <dan.magenheimer@hp.com> | ||
7 | * | ||
8 | * Paravirtualizations of privileged operations for Xen/ia64 | ||
9 | * | ||
10 | * | ||
11 | * inline privop and paravirt_alt support | ||
12 | * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> | ||
13 | * VA Linux Systems Japan K.K. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef __ASSEMBLY__ | ||
18 | #include <linux/types.h> /* arch-ia64.h requires uint64_t */ | ||
19 | #endif | ||
20 | #include <asm/xen/interface.h> | ||
21 | |||
22 | /* At 1 MB, before per-cpu space but still addressable using addl instead | ||
23 | of movl. */ | ||
24 | #define XSI_BASE 0xfffffffffff00000 | ||
25 | |||
26 | /* Address of mapped regs. */ | ||
27 | #define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE) | ||
28 | |||
29 | #ifdef __ASSEMBLY__ | ||
30 | #define XEN_HYPER_RFI break HYPERPRIVOP_RFI | ||
31 | #define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT | ||
32 | #define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT | ||
33 | #define XEN_HYPER_COVER break HYPERPRIVOP_COVER | ||
34 | #define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D | ||
35 | #define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I | ||
36 | #define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I | ||
37 | #define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR | ||
38 | #define XEN_HYPER_THASH break HYPERPRIVOP_THASH | ||
39 | #define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D | ||
40 | #define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR | ||
41 | #define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR | ||
42 | #define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4 | ||
43 | |||
44 | #define XSI_IFS (XSI_BASE + XSI_IFS_OFS) | ||
45 | #define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS) | ||
46 | #define XSI_IFA (XSI_BASE + XSI_IFA_OFS) | ||
47 | #define XSI_ISR (XSI_BASE + XSI_ISR_OFS) | ||
48 | #define XSI_IIM (XSI_BASE + XSI_IIM_OFS) | ||
49 | #define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS) | ||
50 | #define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) | ||
51 | #define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS) | ||
52 | #define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS) | ||
53 | #define XSI_IIP (XSI_BASE + XSI_IIP_OFS) | ||
54 | #define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS) | ||
55 | #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) | ||
56 | #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) | ||
57 | #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) | ||
58 | #endif | ||
59 | |||
60 | #ifndef __ASSEMBLY__ | ||
61 | |||
62 | /************************************************/ | ||
63 | /* Instructions paravirtualized for correctness */ | ||
64 | /************************************************/ | ||
65 | |||
66 | /* "fc" and "thash" are privilege-sensitive instructions, meaning they | ||
67 | * may have different semantics depending on whether they are executed | ||
68 | * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't | ||
69 | * be allowed to execute directly, lest incorrect semantics result. */ | ||
70 | extern void xen_fc(unsigned long addr); | ||
71 | extern unsigned long xen_thash(unsigned long addr); | ||
72 | |||
73 | /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" | ||
74 | * is not currently used (though it may be in a long-format VHPT system!) | ||
75 | * and the semantics of cover only change if psr.ic is off which is very | ||
76 | * rare (and currently non-existent outside of assembly code */ | ||
77 | |||
78 | /* There are also privilege-sensitive registers. These registers are | ||
79 | * readable at any privilege level but only writable at PL0. */ | ||
80 | extern unsigned long xen_get_cpuid(int index); | ||
81 | extern unsigned long xen_get_pmd(int index); | ||
82 | |||
83 | extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ | ||
84 | extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ | ||
85 | |||
86 | /************************************************/ | ||
87 | /* Instructions paravirtualized for performance */ | ||
88 | /************************************************/ | ||
89 | |||
90 | /* Xen uses memory-mapped virtual privileged registers for access to many | ||
91 | * performance-sensitive privileged registers. Some, like the processor | ||
92 | * status register (psr), are broken up into multiple memory locations. | ||
93 | * Others, like "pend", are abstractions based on privileged registers. | ||
94 | * "Pend" is guaranteed to be set if reading cr.ivr would return a | ||
95 | * (non-spurious) interrupt. */ | ||
96 | #define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE) | ||
97 | |||
98 | #define XSI_PSR_I \ | ||
99 | (*XEN_MAPPEDREGS->interrupt_mask_addr) | ||
100 | #define xen_get_virtual_psr_i() \ | ||
101 | (!XSI_PSR_I) | ||
102 | #define xen_set_virtual_psr_i(_val) \ | ||
103 | ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; }) | ||
104 | #define xen_set_virtual_psr_ic(_val) \ | ||
105 | ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; }) | ||
106 | #define xen_get_virtual_pend() \ | ||
107 | (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) | ||
108 | |||
109 | /* Although all privileged operations can be left to trap and will | ||
110 | * be properly handled by Xen, some are frequent enough that we use | ||
111 | * hyperprivops for performance. */ | ||
112 | extern unsigned long xen_get_psr(void); | ||
113 | extern unsigned long xen_get_ivr(void); | ||
114 | extern unsigned long xen_get_tpr(void); | ||
115 | extern void xen_hyper_ssm_i(void); | ||
116 | extern void xen_set_itm(unsigned long); | ||
117 | extern void xen_set_tpr(unsigned long); | ||
118 | extern void xen_eoi(unsigned long); | ||
119 | extern unsigned long xen_get_rr(unsigned long index); | ||
120 | extern void xen_set_rr(unsigned long index, unsigned long val); | ||
121 | extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, | ||
122 | unsigned long val2, unsigned long val3, | ||
123 | unsigned long val4); | ||
124 | extern void xen_set_kr(unsigned long index, unsigned long val); | ||
125 | extern void xen_ptcga(unsigned long addr, unsigned long size); | ||
126 | |||
127 | #endif /* !__ASSEMBLY__ */ | ||
128 | |||
129 | #endif /* _ASM_IA64_XEN_PRIVOP_H */ | ||
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h new file mode 100644 index 000000000000..20b2950c71b6 --- /dev/null +++ b/arch/ia64/include/asm/xen/xcom_hcall.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_IA64_XEN_XCOM_HCALL_H | ||
20 | #define _ASM_IA64_XEN_XCOM_HCALL_H | ||
21 | |||
22 | /* These function creates inline or mini descriptor for the parameters and | ||
23 | calls the corresponding xencomm_arch_hypercall_X. | ||
24 | Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless | ||
25 | they want to use their own wrapper. */ | ||
26 | extern int xencomm_hypercall_console_io(int cmd, int count, char *str); | ||
27 | |||
28 | extern int xencomm_hypercall_event_channel_op(int cmd, void *op); | ||
29 | |||
30 | extern int xencomm_hypercall_xen_version(int cmd, void *arg); | ||
31 | |||
32 | extern int xencomm_hypercall_physdev_op(int cmd, void *op); | ||
33 | |||
34 | extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, | ||
35 | unsigned int count); | ||
36 | |||
37 | extern int xencomm_hypercall_sched_op(int cmd, void *arg); | ||
38 | |||
39 | extern int xencomm_hypercall_multicall(void *call_list, int nr_calls); | ||
40 | |||
41 | extern int xencomm_hypercall_callback_op(int cmd, void *arg); | ||
42 | |||
43 | extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg); | ||
44 | |||
45 | extern int xencomm_hypercall_suspend(unsigned long srec); | ||
46 | |||
47 | extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg); | ||
48 | |||
49 | extern long xencomm_hypercall_opt_feature(void *arg); | ||
50 | |||
51 | #endif /* _ASM_IA64_XEN_XCOM_HCALL_H */ | ||
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h new file mode 100644 index 000000000000..cded677bebf2 --- /dev/null +++ b/arch/ia64/include/asm/xen/xencomm.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_IA64_XEN_XENCOMM_H | ||
20 | #define _ASM_IA64_XEN_XENCOMM_H | ||
21 | |||
22 | #include <xen/xencomm.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | |||
25 | /* Must be called before any hypercall. */ | ||
26 | extern void xencomm_initialize(void); | ||
27 | extern int xencomm_is_initialized(void); | ||
28 | |||
29 | /* Check if virtual contiguity means physical contiguity | ||
30 | * where the passed address is a pointer value in virtual address. | ||
31 | * On ia64, identity mapping area in region 7 or the piece of region 5 | ||
32 | * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL] | ||
33 | */ | ||
34 | static inline int xencomm_is_phys_contiguous(unsigned long addr) | ||
35 | { | ||
36 | return (PAGE_OFFSET <= addr && | ||
37 | addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) || | ||
38 | (KERNEL_START <= addr && | ||
39 | addr < KERNEL_START + KERNEL_TR_PAGE_SIZE); | ||
40 | } | ||
41 | |||
42 | #endif /* _ASM_IA64_XEN_XENCOMM_H */ | ||
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 87fea11aecb7..c381ea954892 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
@@ -42,6 +42,10 @@ obj-$(CONFIG_IA64_ESI) += esi.o | |||
42 | ifneq ($(CONFIG_IA64_ESI),) | 42 | ifneq ($(CONFIG_IA64_ESI),) |
43 | obj-y += esi_stub.o # must be in kernel proper | 43 | obj-y += esi_stub.o # must be in kernel proper |
44 | endif | 44 | endif |
45 | obj-$(CONFIG_DMAR) += pci-dma.o | ||
46 | ifeq ($(CONFIG_DMAR), y) | ||
47 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | ||
48 | endif | ||
45 | 49 | ||
46 | # The gate DSO image is built using a special linker script. | 50 | # The gate DSO image is built using a special linker script. |
47 | targets += gate.so gate-syms.o | 51 | targets += gate.so gate-syms.o |
@@ -112,5 +116,23 @@ clean-files += $(objtree)/include/asm-ia64/nr-irqs.h | |||
112 | ASM_PARAVIRT_OBJS = ivt.o entry.o | 116 | ASM_PARAVIRT_OBJS = ivt.o entry.o |
113 | define paravirtualized_native | 117 | define paravirtualized_native |
114 | AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE | 118 | AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE |
119 | AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK | ||
120 | extra-y += pvchk-$(1) | ||
115 | endef | 121 | endef |
116 | $(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj)))) | 122 | $(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj)))) |
123 | |||
124 | # | ||
125 | # Checker for paravirtualizations of privileged operations. | ||
126 | # | ||
127 | quiet_cmd_pv_check_sed = PVCHK $@ | ||
128 | define cmd_pv_check_sed | ||
129 | sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@ | ||
130 | endef | ||
131 | |||
132 | $(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE | ||
133 | $(call if_changed_dep,as_s_S) | ||
134 | $(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE | ||
135 | $(call if_changed,pv_check_sed) | ||
136 | $(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE | ||
137 | $(call if_changed,as_o_S) | ||
138 | .PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o | ||
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 5d1eb7ee2bf6..0635015d0aaa 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <asm/numa.h> | 52 | #include <asm/numa.h> |
53 | #include <asm/sal.h> | 53 | #include <asm/sal.h> |
54 | #include <asm/cyclone.h> | 54 | #include <asm/cyclone.h> |
55 | #include <asm/xen/hypervisor.h> | ||
55 | 56 | ||
56 | #define BAD_MADT_ENTRY(entry, end) ( \ | 57 | #define BAD_MADT_ENTRY(entry, end) ( \ |
57 | (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ | 58 | (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ |
@@ -91,6 +92,9 @@ acpi_get_sysname(void) | |||
91 | struct acpi_table_rsdp *rsdp; | 92 | struct acpi_table_rsdp *rsdp; |
92 | struct acpi_table_xsdt *xsdt; | 93 | struct acpi_table_xsdt *xsdt; |
93 | struct acpi_table_header *hdr; | 94 | struct acpi_table_header *hdr; |
95 | #ifdef CONFIG_DMAR | ||
96 | u64 i, nentries; | ||
97 | #endif | ||
94 | 98 | ||
95 | rsdp_phys = acpi_find_rsdp(); | 99 | rsdp_phys = acpi_find_rsdp(); |
96 | if (!rsdp_phys) { | 100 | if (!rsdp_phys) { |
@@ -121,7 +125,21 @@ acpi_get_sysname(void) | |||
121 | return "uv"; | 125 | return "uv"; |
122 | else | 126 | else |
123 | return "sn2"; | 127 | return "sn2"; |
128 | } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) { | ||
129 | return "xen"; | ||
130 | } | ||
131 | |||
132 | #ifdef CONFIG_DMAR | ||
133 | /* Look for Intel IOMMU */ | ||
134 | nentries = (hdr->length - sizeof(*hdr)) / | ||
135 | sizeof(xsdt->table_offset_entry[0]); | ||
136 | for (i = 0; i < nentries; i++) { | ||
137 | hdr = __va(xsdt->table_offset_entry[i]); | ||
138 | if (strncmp(hdr->signature, ACPI_SIG_DMAR, | ||
139 | sizeof(ACPI_SIG_DMAR) - 1) == 0) | ||
140 | return "dig_vtd"; | ||
124 | } | 141 | } |
142 | #endif | ||
125 | 143 | ||
126 | return "dig"; | 144 | return "dig"; |
127 | #else | 145 | #else |
@@ -137,6 +155,10 @@ acpi_get_sysname(void) | |||
137 | return "uv"; | 155 | return "uv"; |
138 | # elif defined (CONFIG_IA64_DIG) | 156 | # elif defined (CONFIG_IA64_DIG) |
139 | return "dig"; | 157 | return "dig"; |
158 | # elif defined (CONFIG_IA64_XEN_GUEST) | ||
159 | return "xen"; | ||
160 | # elif defined(CONFIG_IA64_DIG_VTD) | ||
161 | return "dig_vtd"; | ||
140 | # else | 162 | # else |
141 | # error Unknown platform. Fix acpi.c. | 163 | # error Unknown platform. Fix acpi.c. |
142 | # endif | 164 | # endif |
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 94c44b1ccfd0..742dbb1d5a4f 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
@@ -16,6 +16,9 @@ | |||
16 | #include <asm/sigcontext.h> | 16 | #include <asm/sigcontext.h> |
17 | #include <asm/mca.h> | 17 | #include <asm/mca.h> |
18 | 18 | ||
19 | #include <asm/xen/interface.h> | ||
20 | #include <asm/xen/hypervisor.h> | ||
21 | |||
19 | #include "../kernel/sigframe.h" | 22 | #include "../kernel/sigframe.h" |
20 | #include "../kernel/fsyscall_gtod_data.h" | 23 | #include "../kernel/fsyscall_gtod_data.h" |
21 | 24 | ||
@@ -286,4 +289,32 @@ void foo(void) | |||
286 | offsetof (struct itc_jitter_data_t, itc_jitter)); | 289 | offsetof (struct itc_jitter_data_t, itc_jitter)); |
287 | DEFINE(IA64_ITC_LASTCYCLE_OFFSET, | 290 | DEFINE(IA64_ITC_LASTCYCLE_OFFSET, |
288 | offsetof (struct itc_jitter_data_t, itc_lastcycle)); | 291 | offsetof (struct itc_jitter_data_t, itc_lastcycle)); |
292 | |||
293 | #ifdef CONFIG_XEN | ||
294 | BLANK(); | ||
295 | |||
296 | DEFINE(XEN_NATIVE_ASM, XEN_NATIVE); | ||
297 | DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN); | ||
298 | |||
299 | #define DEFINE_MAPPED_REG_OFS(sym, field) \ | ||
300 | DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field))) | ||
301 | |||
302 | DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr); | ||
303 | DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr); | ||
304 | DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip); | ||
305 | DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs); | ||
306 | DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs); | ||
307 | DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr); | ||
308 | DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa); | ||
309 | DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa); | ||
310 | DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim); | ||
311 | DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha); | ||
312 | DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir); | ||
313 | DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled); | ||
314 | DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum); | ||
315 | DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]); | ||
316 | DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); | ||
317 | DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); | ||
318 | DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); | ||
319 | #endif /* CONFIG_XEN */ | ||
289 | } | 320 | } |
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 0dd6c1419d8d..7ef0c594f5ed 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -534,6 +534,11 @@ GLOBAL_ENTRY(ia64_trace_syscall) | |||
534 | stf.spill [r16]=f10 | 534 | stf.spill [r16]=f10 |
535 | stf.spill [r17]=f11 | 535 | stf.spill [r17]=f11 |
536 | br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args | 536 | br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args |
537 | cmp.lt p6,p0=r8,r0 // check tracehook | ||
538 | adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 | ||
539 | adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 | ||
540 | mov r10=0 | ||
541 | (p6) br.cond.sptk strace_error // syscall failed -> | ||
537 | adds r16=PT(F6)+16,sp | 542 | adds r16=PT(F6)+16,sp |
538 | adds r17=PT(F7)+16,sp | 543 | adds r17=PT(F7)+16,sp |
539 | ;; | 544 | ;; |
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 416a952b19bd..f675d8e33853 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
@@ -580,7 +580,7 @@ ENTRY(dirty_bit) | |||
580 | mov b0=r29 // restore b0 | 580 | mov b0=r29 // restore b0 |
581 | ;; | 581 | ;; |
582 | st8 [r17]=r18 // store back updated PTE | 582 | st8 [r17]=r18 // store back updated PTE |
583 | itc.d r18 // install updated PTE | 583 | ITC_D(p0, r18, r16) // install updated PTE |
584 | #endif | 584 | #endif |
585 | mov pr=r31,-1 // restore pr | 585 | mov pr=r31,-1 // restore pr |
586 | RFI | 586 | RFI |
@@ -646,7 +646,7 @@ ENTRY(iaccess_bit) | |||
646 | mov b0=r29 // restore b0 | 646 | mov b0=r29 // restore b0 |
647 | ;; | 647 | ;; |
648 | st8 [r17]=r18 // store back updated PTE | 648 | st8 [r17]=r18 // store back updated PTE |
649 | itc.i r18 // install updated PTE | 649 | ITC_I(p0, r18, r16) // install updated PTE |
650 | #endif /* !CONFIG_SMP */ | 650 | #endif /* !CONFIG_SMP */ |
651 | mov pr=r31,-1 | 651 | mov pr=r31,-1 |
652 | RFI | 652 | RFI |
@@ -698,7 +698,7 @@ ENTRY(daccess_bit) | |||
698 | or r18=_PAGE_A,r18 // set the accessed bit | 698 | or r18=_PAGE_A,r18 // set the accessed bit |
699 | ;; | 699 | ;; |
700 | st8 [r17]=r18 // store back updated PTE | 700 | st8 [r17]=r18 // store back updated PTE |
701 | itc.d r18 // install updated PTE | 701 | ITC_D(p0, r18, r16) // install updated PTE |
702 | #endif | 702 | #endif |
703 | mov b0=r29 // restore b0 | 703 | mov b0=r29 // restore b0 |
704 | mov pr=r31,-1 | 704 | mov pr=r31,-1 |
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 60c6ef67ebb2..702a09c13238 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include <linux/irq.h> | 6 | #include <linux/irq.h> |
7 | #include <linux/msi.h> | 7 | #include <linux/msi.h> |
8 | #include <linux/dmar.h> | ||
8 | #include <asm/smp.h> | 9 | #include <asm/smp.h> |
9 | 10 | ||
10 | /* | 11 | /* |
@@ -162,3 +163,82 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
162 | 163 | ||
163 | return ia64_teardown_msi_irq(irq); | 164 | return ia64_teardown_msi_irq(irq); |
164 | } | 165 | } |
166 | |||
167 | #ifdef CONFIG_DMAR | ||
168 | #ifdef CONFIG_SMP | ||
169 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | ||
170 | { | ||
171 | struct irq_cfg *cfg = irq_cfg + irq; | ||
172 | struct msi_msg msg; | ||
173 | int cpu = first_cpu(mask); | ||
174 | |||
175 | |||
176 | if (!cpu_online(cpu)) | ||
177 | return; | ||
178 | |||
179 | if (irq_prepare_move(irq, cpu)) | ||
180 | return; | ||
181 | |||
182 | dmar_msi_read(irq, &msg); | ||
183 | |||
184 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
185 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | ||
186 | msg.address_lo &= ~MSI_ADDR_DESTID_MASK; | ||
187 | msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); | ||
188 | |||
189 | dmar_msi_write(irq, &msg); | ||
190 | irq_desc[irq].affinity = mask; | ||
191 | } | ||
192 | #endif /* CONFIG_SMP */ | ||
193 | |||
194 | struct irq_chip dmar_msi_type = { | ||
195 | .name = "DMAR_MSI", | ||
196 | .unmask = dmar_msi_unmask, | ||
197 | .mask = dmar_msi_mask, | ||
198 | .ack = ia64_ack_msi_irq, | ||
199 | #ifdef CONFIG_SMP | ||
200 | .set_affinity = dmar_msi_set_affinity, | ||
201 | #endif | ||
202 | .retrigger = ia64_msi_retrigger_irq, | ||
203 | }; | ||
204 | |||
205 | static int | ||
206 | msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) | ||
207 | { | ||
208 | struct irq_cfg *cfg = irq_cfg + irq; | ||
209 | unsigned dest; | ||
210 | cpumask_t mask; | ||
211 | |||
212 | cpus_and(mask, irq_to_domain(irq), cpu_online_map); | ||
213 | dest = cpu_physical_id(first_cpu(mask)); | ||
214 | |||
215 | msg->address_hi = 0; | ||
216 | msg->address_lo = | ||
217 | MSI_ADDR_HEADER | | ||
218 | MSI_ADDR_DESTMODE_PHYS | | ||
219 | MSI_ADDR_REDIRECTION_CPU | | ||
220 | MSI_ADDR_DESTID_CPU(dest); | ||
221 | |||
222 | msg->data = | ||
223 | MSI_DATA_TRIGGER_EDGE | | ||
224 | MSI_DATA_LEVEL_ASSERT | | ||
225 | MSI_DATA_DELIVERY_FIXED | | ||
226 | MSI_DATA_VECTOR(cfg->vector); | ||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | int arch_setup_dmar_msi(unsigned int irq) | ||
231 | { | ||
232 | int ret; | ||
233 | struct msi_msg msg; | ||
234 | |||
235 | ret = msi_compose_msg(NULL, irq, &msg); | ||
236 | if (ret < 0) | ||
237 | return ret; | ||
238 | dmar_msi_write(irq, &msg); | ||
239 | set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, | ||
240 | "edge"); | ||
241 | return 0; | ||
242 | } | ||
243 | #endif /* CONFIG_DMAR */ | ||
244 | |||
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c index 8273afc32db8..ee564575148e 100644 --- a/arch/ia64/kernel/nr-irqs.c +++ b/arch/ia64/kernel/nr-irqs.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kbuild.h> | 10 | #include <linux/kbuild.h> |
11 | #include <linux/threads.h> | 11 | #include <linux/threads.h> |
12 | #include <asm/native/irq.h> | 12 | #include <asm/native/irq.h> |
13 | #include <asm/xen/irq.h> | ||
13 | 14 | ||
14 | void foo(void) | 15 | void foo(void) |
15 | { | 16 | { |
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c index afaf5b9a2cf0..de35d8e8b7d2 100644 --- a/arch/ia64/kernel/paravirt.c +++ b/arch/ia64/kernel/paravirt.c | |||
@@ -332,7 +332,7 @@ ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) | |||
332 | 332 | ||
333 | struct pv_iosapic_ops pv_iosapic_ops = { | 333 | struct pv_iosapic_ops pv_iosapic_ops = { |
334 | .pcat_compat_init = ia64_native_iosapic_pcat_compat_init, | 334 | .pcat_compat_init = ia64_native_iosapic_pcat_compat_init, |
335 | .get_irq_chip = ia64_native_iosapic_get_irq_chip, | 335 | .__get_irq_chip = ia64_native_iosapic_get_irq_chip, |
336 | 336 | ||
337 | .__read = ia64_native_iosapic_read, | 337 | .__read = ia64_native_iosapic_read, |
338 | .__write = ia64_native_iosapic_write, | 338 | .__write = ia64_native_iosapic_write, |
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h index 5cad6fb2ed19..64d6d810c64b 100644 --- a/arch/ia64/kernel/paravirt_inst.h +++ b/arch/ia64/kernel/paravirt_inst.h | |||
@@ -20,7 +20,9 @@ | |||
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #ifdef __IA64_ASM_PARAVIRTUALIZED_XEN | 23 | #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK |
24 | #include <asm/native/pvchk_inst.h> | ||
25 | #elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN) | ||
24 | #include <asm/xen/inst.h> | 26 | #include <asm/xen/inst.h> |
25 | #include <asm/xen/minstate.h> | 27 | #include <asm/xen/minstate.h> |
26 | #else | 28 | #else |
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c new file mode 100644 index 000000000000..10a75b557650 --- /dev/null +++ b/arch/ia64/kernel/pci-dma.c | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * Dynamic DMA mapping support. | ||
3 | */ | ||
4 | |||
5 | #include <linux/types.h> | ||
6 | #include <linux/mm.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/pci.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/dmar.h> | ||
11 | #include <asm/iommu.h> | ||
12 | #include <asm/machvec.h> | ||
13 | #include <linux/dma-mapping.h> | ||
14 | |||
15 | #include <asm/machvec.h> | ||
16 | #include <asm/system.h> | ||
17 | |||
18 | #ifdef CONFIG_DMAR | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/string.h> | ||
22 | |||
23 | #include <asm/page.h> | ||
24 | #include <asm/iommu.h> | ||
25 | |||
26 | dma_addr_t bad_dma_address __read_mostly; | ||
27 | EXPORT_SYMBOL(bad_dma_address); | ||
28 | |||
29 | static int iommu_sac_force __read_mostly; | ||
30 | |||
31 | int no_iommu __read_mostly; | ||
32 | #ifdef CONFIG_IOMMU_DEBUG | ||
33 | int force_iommu __read_mostly = 1; | ||
34 | #else | ||
35 | int force_iommu __read_mostly; | ||
36 | #endif | ||
37 | |||
38 | /* Set this to 1 if there is a HW IOMMU in the system */ | ||
39 | int iommu_detected __read_mostly; | ||
40 | |||
41 | /* Dummy device used for NULL arguments (normally ISA). Better would | ||
42 | be probably a smaller DMA mask, but this is bug-to-bug compatible | ||
43 | to i386. */ | ||
44 | struct device fallback_dev = { | ||
45 | .bus_id = "fallback device", | ||
46 | .coherent_dma_mask = DMA_32BIT_MASK, | ||
47 | .dma_mask = &fallback_dev.coherent_dma_mask, | ||
48 | }; | ||
49 | |||
50 | void __init pci_iommu_alloc(void) | ||
51 | { | ||
52 | /* | ||
53 | * The order of these functions is important for | ||
54 | * fall-back/fail-over reasons | ||
55 | */ | ||
56 | detect_intel_iommu(); | ||
57 | |||
58 | #ifdef CONFIG_SWIOTLB | ||
59 | pci_swiotlb_init(); | ||
60 | #endif | ||
61 | } | ||
62 | |||
63 | static int __init pci_iommu_init(void) | ||
64 | { | ||
65 | if (iommu_detected) | ||
66 | intel_iommu_init(); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | /* Must execute after PCI subsystem */ | ||
72 | fs_initcall(pci_iommu_init); | ||
73 | |||
74 | void pci_iommu_shutdown(void) | ||
75 | { | ||
76 | return; | ||
77 | } | ||
78 | |||
79 | void __init | ||
80 | iommu_dma_init(void) | ||
81 | { | ||
82 | return; | ||
83 | } | ||
84 | |||
85 | struct dma_mapping_ops *dma_ops; | ||
86 | EXPORT_SYMBOL(dma_ops); | ||
87 | |||
88 | int iommu_dma_supported(struct device *dev, u64 mask) | ||
89 | { | ||
90 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
91 | |||
92 | #ifdef CONFIG_PCI | ||
93 | if (mask > 0xffffffff && forbid_dac > 0) { | ||
94 | dev_info(dev, "Disallowing DAC for device\n"); | ||
95 | return 0; | ||
96 | } | ||
97 | #endif | ||
98 | |||
99 | if (ops->dma_supported_op) | ||
100 | return ops->dma_supported_op(dev, mask); | ||
101 | |||
102 | /* Copied from i386. Doesn't make much sense, because it will | ||
103 | only work for pci_alloc_coherent. | ||
104 | The caller just has to use GFP_DMA in this case. */ | ||
105 | if (mask < DMA_24BIT_MASK) | ||
106 | return 0; | ||
107 | |||
108 | /* Tell the device to use SAC when IOMMU force is on. This | ||
109 | allows the driver to use cheaper accesses in some cases. | ||
110 | |||
111 | Problem with this is that if we overflow the IOMMU area and | ||
112 | return DAC as fallback address the device may not handle it | ||
113 | correctly. | ||
114 | |||
115 | As a special case some controllers have a 39bit address | ||
116 | mode that is as efficient as 32bit (aic79xx). Don't force | ||
117 | SAC for these. Assume all masks <= 40 bits are of this | ||
118 | type. Normally this doesn't make any difference, but gives | ||
119 | more gentle handling of IOMMU overflow. */ | ||
120 | if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) { | ||
121 | dev_info(dev, "Force SAC with mask %lx\n", mask); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | return 1; | ||
126 | } | ||
127 | EXPORT_SYMBOL(iommu_dma_supported); | ||
128 | |||
129 | #endif | ||
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c new file mode 100644 index 000000000000..16c50516dbc1 --- /dev/null +++ b/arch/ia64/kernel/pci-swiotlb.c | |||
@@ -0,0 +1,46 @@ | |||
1 | /* Glue code to lib/swiotlb.c */ | ||
2 | |||
3 | #include <linux/pci.h> | ||
4 | #include <linux/cache.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/dma-mapping.h> | ||
7 | |||
8 | #include <asm/swiotlb.h> | ||
9 | #include <asm/dma.h> | ||
10 | #include <asm/iommu.h> | ||
11 | #include <asm/machvec.h> | ||
12 | |||
13 | int swiotlb __read_mostly; | ||
14 | EXPORT_SYMBOL(swiotlb); | ||
15 | |||
16 | struct dma_mapping_ops swiotlb_dma_ops = { | ||
17 | .mapping_error = swiotlb_dma_mapping_error, | ||
18 | .alloc_coherent = swiotlb_alloc_coherent, | ||
19 | .free_coherent = swiotlb_free_coherent, | ||
20 | .map_single = swiotlb_map_single, | ||
21 | .unmap_single = swiotlb_unmap_single, | ||
22 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, | ||
23 | .sync_single_for_device = swiotlb_sync_single_for_device, | ||
24 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | ||
25 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | ||
26 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | ||
27 | .sync_sg_for_device = swiotlb_sync_sg_for_device, | ||
28 | .map_sg = swiotlb_map_sg, | ||
29 | .unmap_sg = swiotlb_unmap_sg, | ||
30 | .dma_supported_op = swiotlb_dma_supported, | ||
31 | }; | ||
32 | |||
33 | void __init pci_swiotlb_init(void) | ||
34 | { | ||
35 | if (!iommu_detected) { | ||
36 | #ifdef CONFIG_IA64_GENERIC | ||
37 | swiotlb = 1; | ||
38 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); | ||
39 | machvec_init("dig"); | ||
40 | swiotlb_init(); | ||
41 | dma_ops = &swiotlb_dma_ops; | ||
42 | #else | ||
43 | panic("Unable to find Intel IOMMU"); | ||
44 | #endif | ||
45 | } | ||
46 | } | ||
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index fc8f3509df27..ada4605d1223 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/capability.h> | 40 | #include <linux/capability.h> |
41 | #include <linux/rcupdate.h> | 41 | #include <linux/rcupdate.h> |
42 | #include <linux/completion.h> | 42 | #include <linux/completion.h> |
43 | #include <linux/tracehook.h> | ||
43 | 44 | ||
44 | #include <asm/errno.h> | 45 | #include <asm/errno.h> |
45 | #include <asm/intrinsics.h> | 46 | #include <asm/intrinsics.h> |
@@ -3684,7 +3685,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |||
3684 | 3685 | ||
3685 | PFM_SET_WORK_PENDING(task, 1); | 3686 | PFM_SET_WORK_PENDING(task, 1); |
3686 | 3687 | ||
3687 | tsk_set_notify_resume(task); | 3688 | set_notify_resume(task); |
3688 | 3689 | ||
3689 | /* | 3690 | /* |
3690 | * XXX: send reschedule if task runs on another CPU | 3691 | * XXX: send reschedule if task runs on another CPU |
@@ -5044,8 +5045,6 @@ pfm_handle_work(void) | |||
5044 | 5045 | ||
5045 | PFM_SET_WORK_PENDING(current, 0); | 5046 | PFM_SET_WORK_PENDING(current, 0); |
5046 | 5047 | ||
5047 | tsk_clear_notify_resume(current); | ||
5048 | |||
5049 | regs = task_pt_regs(current); | 5048 | regs = task_pt_regs(current); |
5050 | 5049 | ||
5051 | /* | 5050 | /* |
@@ -5414,7 +5413,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str | |||
5414 | * when coming from ctxsw, current still points to the | 5413 | * when coming from ctxsw, current still points to the |
5415 | * previous task, therefore we must work with task and not current. | 5414 | * previous task, therefore we must work with task and not current. |
5416 | */ | 5415 | */ |
5417 | tsk_set_notify_resume(task); | 5416 | set_notify_resume(task); |
5418 | } | 5417 | } |
5419 | /* | 5418 | /* |
5420 | * defer until state is changed (shorten spin window). the context is locked | 5419 | * defer until state is changed (shorten spin window). the context is locked |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 3ab8373103ec..c57162705147 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/kdebug.h> | 29 | #include <linux/kdebug.h> |
30 | #include <linux/utsname.h> | 30 | #include <linux/utsname.h> |
31 | #include <linux/tracehook.h> | ||
31 | 32 | ||
32 | #include <asm/cpu.h> | 33 | #include <asm/cpu.h> |
33 | #include <asm/delay.h> | 34 | #include <asm/delay.h> |
@@ -160,21 +161,6 @@ show_regs (struct pt_regs *regs) | |||
160 | show_stack(NULL, NULL); | 161 | show_stack(NULL, NULL); |
161 | } | 162 | } |
162 | 163 | ||
163 | void tsk_clear_notify_resume(struct task_struct *tsk) | ||
164 | { | ||
165 | #ifdef CONFIG_PERFMON | ||
166 | if (tsk->thread.pfm_needs_checking) | ||
167 | return; | ||
168 | #endif | ||
169 | if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE)) | ||
170 | return; | ||
171 | clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME); | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * do_notify_resume_user(): | ||
176 | * Called from notify_resume_user at entry.S, with interrupts disabled. | ||
177 | */ | ||
178 | void | 164 | void |
179 | do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) | 165 | do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) |
180 | { | 166 | { |
@@ -203,6 +189,11 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) | |||
203 | ia64_do_signal(scr, in_syscall); | 189 | ia64_do_signal(scr, in_syscall); |
204 | } | 190 | } |
205 | 191 | ||
192 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { | ||
193 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
194 | tracehook_notify_resume(&scr->pt); | ||
195 | } | ||
196 | |||
206 | /* copy user rbs to kernel rbs */ | 197 | /* copy user rbs to kernel rbs */ |
207 | if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) { | 198 | if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) { |
208 | local_irq_enable(); /* force interrupt enable */ | 199 | local_irq_enable(); /* force interrupt enable */ |
@@ -251,7 +242,6 @@ default_idle (void) | |||
251 | /* We don't actually take CPU down, just spin without interrupts. */ | 242 | /* We don't actually take CPU down, just spin without interrupts. */ |
252 | static inline void play_dead(void) | 243 | static inline void play_dead(void) |
253 | { | 244 | { |
254 | extern void ia64_cpu_local_tick (void); | ||
255 | unsigned int this_cpu = smp_processor_id(); | 245 | unsigned int this_cpu = smp_processor_id(); |
256 | 246 | ||
257 | /* Ack it */ | 247 | /* Ack it */ |
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 2a9943b5947f..92c9689b7d97 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/signal.h> | 22 | #include <linux/signal.h> |
23 | #include <linux/regset.h> | 23 | #include <linux/regset.h> |
24 | #include <linux/elf.h> | 24 | #include <linux/elf.h> |
25 | #include <linux/tracehook.h> | ||
25 | 26 | ||
26 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
27 | #include <asm/processor.h> | 28 | #include <asm/processor.h> |
@@ -603,7 +604,7 @@ void ia64_ptrace_stop(void) | |||
603 | { | 604 | { |
604 | if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE)) | 605 | if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE)) |
605 | return; | 606 | return; |
606 | tsk_set_notify_resume(current); | 607 | set_notify_resume(current); |
607 | unw_init_running(do_sync_rbs, ia64_sync_user_rbs); | 608 | unw_init_running(do_sync_rbs, ia64_sync_user_rbs); |
608 | } | 609 | } |
609 | 610 | ||
@@ -613,7 +614,6 @@ void ia64_ptrace_stop(void) | |||
613 | void ia64_sync_krbs(void) | 614 | void ia64_sync_krbs(void) |
614 | { | 615 | { |
615 | clear_tsk_thread_flag(current, TIF_RESTORE_RSE); | 616 | clear_tsk_thread_flag(current, TIF_RESTORE_RSE); |
616 | tsk_clear_notify_resume(current); | ||
617 | 617 | ||
618 | unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs); | 618 | unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs); |
619 | } | 619 | } |
@@ -644,7 +644,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) | |||
644 | spin_lock_irq(&child->sighand->siglock); | 644 | spin_lock_irq(&child->sighand->siglock); |
645 | if (child->state == TASK_STOPPED && | 645 | if (child->state == TASK_STOPPED && |
646 | !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { | 646 | !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { |
647 | tsk_set_notify_resume(child); | 647 | set_notify_resume(child); |
648 | 648 | ||
649 | child->state = TASK_TRACED; | 649 | child->state = TASK_TRACED; |
650 | stopped = 1; | 650 | stopped = 1; |
@@ -1232,37 +1232,16 @@ arch_ptrace (struct task_struct *child, long request, long addr, long data) | |||
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | 1234 | ||
1235 | static void | ||
1236 | syscall_trace (void) | ||
1237 | { | ||
1238 | /* | ||
1239 | * The 0x80 provides a way for the tracing parent to | ||
1240 | * distinguish between a syscall stop and SIGTRAP delivery. | ||
1241 | */ | ||
1242 | ptrace_notify(SIGTRAP | ||
1243 | | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); | ||
1244 | |||
1245 | /* | ||
1246 | * This isn't the same as continuing with a signal, but it | ||
1247 | * will do for normal use. strace only continues with a | ||
1248 | * signal if the stopping signal is not SIGTRAP. -brl | ||
1249 | */ | ||
1250 | if (current->exit_code) { | ||
1251 | send_sig(current->exit_code, current, 1); | ||
1252 | current->exit_code = 0; | ||
1253 | } | ||
1254 | } | ||
1255 | |||
1256 | /* "asmlinkage" so the input arguments are preserved... */ | 1235 | /* "asmlinkage" so the input arguments are preserved... */ |
1257 | 1236 | ||
1258 | asmlinkage void | 1237 | asmlinkage long |
1259 | syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | 1238 | syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, |
1260 | long arg4, long arg5, long arg6, long arg7, | 1239 | long arg4, long arg5, long arg6, long arg7, |
1261 | struct pt_regs regs) | 1240 | struct pt_regs regs) |
1262 | { | 1241 | { |
1263 | if (test_thread_flag(TIF_SYSCALL_TRACE) | 1242 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
1264 | && (current->ptrace & PT_PTRACED)) | 1243 | if (tracehook_report_syscall_entry(®s)) |
1265 | syscall_trace(); | 1244 | return -ENOSYS; |
1266 | 1245 | ||
1267 | /* copy user rbs to kernel rbs */ | 1246 | /* copy user rbs to kernel rbs */ |
1268 | if (test_thread_flag(TIF_RESTORE_RSE)) | 1247 | if (test_thread_flag(TIF_RESTORE_RSE)) |
@@ -1283,6 +1262,7 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | |||
1283 | audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); | 1262 | audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); |
1284 | } | 1263 | } |
1285 | 1264 | ||
1265 | return 0; | ||
1286 | } | 1266 | } |
1287 | 1267 | ||
1288 | /* "asmlinkage" so the input arguments are preserved... */ | 1268 | /* "asmlinkage" so the input arguments are preserved... */ |
@@ -1292,6 +1272,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | |||
1292 | long arg4, long arg5, long arg6, long arg7, | 1272 | long arg4, long arg5, long arg6, long arg7, |
1293 | struct pt_regs regs) | 1273 | struct pt_regs regs) |
1294 | { | 1274 | { |
1275 | int step; | ||
1276 | |||
1295 | if (unlikely(current->audit_context)) { | 1277 | if (unlikely(current->audit_context)) { |
1296 | int success = AUDITSC_RESULT(regs.r10); | 1278 | int success = AUDITSC_RESULT(regs.r10); |
1297 | long result = regs.r8; | 1279 | long result = regs.r8; |
@@ -1301,10 +1283,9 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | |||
1301 | audit_syscall_exit(success, result); | 1283 | audit_syscall_exit(success, result); |
1302 | } | 1284 | } |
1303 | 1285 | ||
1304 | if ((test_thread_flag(TIF_SYSCALL_TRACE) | 1286 | step = test_thread_flag(TIF_SINGLESTEP); |
1305 | || test_thread_flag(TIF_SINGLESTEP)) | 1287 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
1306 | && (current->ptrace & PT_PTRACED)) | 1288 | tracehook_report_syscall_exit(®s, step); |
1307 | syscall_trace(); | ||
1308 | 1289 | ||
1309 | /* copy user rbs to kernel rbs */ | 1290 | /* copy user rbs to kernel rbs */ |
1310 | if (test_thread_flag(TIF_RESTORE_RSE)) | 1291 | if (test_thread_flag(TIF_RESTORE_RSE)) |
@@ -1940,7 +1921,7 @@ gpregs_writeback(struct task_struct *target, | |||
1940 | { | 1921 | { |
1941 | if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) | 1922 | if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) |
1942 | return 0; | 1923 | return 0; |
1943 | tsk_set_notify_resume(target); | 1924 | set_notify_resume(target); |
1944 | return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, | 1925 | return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, |
1945 | NULL, NULL); | 1926 | NULL, NULL); |
1946 | } | 1927 | } |
@@ -2199,3 +2180,68 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) | |||
2199 | #endif | 2180 | #endif |
2200 | return &user_ia64_view; | 2181 | return &user_ia64_view; |
2201 | } | 2182 | } |
2183 | |||
2184 | struct syscall_get_set_args { | ||
2185 | unsigned int i; | ||
2186 | unsigned int n; | ||
2187 | unsigned long *args; | ||
2188 | struct pt_regs *regs; | ||
2189 | int rw; | ||
2190 | }; | ||
2191 | |||
2192 | static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) | ||
2193 | { | ||
2194 | struct syscall_get_set_args *args = data; | ||
2195 | struct pt_regs *pt = args->regs; | ||
2196 | unsigned long *krbs, cfm, ndirty; | ||
2197 | int i, count; | ||
2198 | |||
2199 | if (unw_unwind_to_user(info) < 0) | ||
2200 | return; | ||
2201 | |||
2202 | cfm = pt->cr_ifs; | ||
2203 | krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8; | ||
2204 | ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); | ||
2205 | |||
2206 | count = 0; | ||
2207 | if (in_syscall(pt)) | ||
2208 | count = min_t(int, args->n, cfm & 0x7f); | ||
2209 | |||
2210 | for (i = 0; i < count; i++) { | ||
2211 | if (args->rw) | ||
2212 | *ia64_rse_skip_regs(krbs, ndirty + i + args->i) = | ||
2213 | args->args[i]; | ||
2214 | else | ||
2215 | args->args[i] = *ia64_rse_skip_regs(krbs, | ||
2216 | ndirty + i + args->i); | ||
2217 | } | ||
2218 | |||
2219 | if (!args->rw) { | ||
2220 | while (i < args->n) { | ||
2221 | args->args[i] = 0; | ||
2222 | i++; | ||
2223 | } | ||
2224 | } | ||
2225 | } | ||
2226 | |||
2227 | void ia64_syscall_get_set_arguments(struct task_struct *task, | ||
2228 | struct pt_regs *regs, unsigned int i, unsigned int n, | ||
2229 | unsigned long *args, int rw) | ||
2230 | { | ||
2231 | struct syscall_get_set_args data = { | ||
2232 | .i = i, | ||
2233 | .n = n, | ||
2234 | .args = args, | ||
2235 | .regs = regs, | ||
2236 | .rw = rw, | ||
2237 | }; | ||
2238 | |||
2239 | if (task == current) | ||
2240 | unw_init_running(syscall_get_set_args_cb, &data); | ||
2241 | else { | ||
2242 | struct unw_frame_info ufi; | ||
2243 | memset(&ufi, 0, sizeof(ufi)); | ||
2244 | unw_init_from_blocked_task(&ufi, task); | ||
2245 | syscall_get_set_args_cb(&ufi, &data); | ||
2246 | } | ||
2247 | } | ||
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 916ba898237f..ae7911702bf8 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -116,6 +116,13 @@ unsigned int num_io_spaces; | |||
116 | */ | 116 | */ |
117 | #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */ | 117 | #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */ |
118 | unsigned long ia64_i_cache_stride_shift = ~0; | 118 | unsigned long ia64_i_cache_stride_shift = ~0; |
119 | /* | ||
120 | * "clflush_cache_range()" needs to know what processor dependent stride size to | ||
121 | * use when it flushes cache lines including both d-cache and i-cache. | ||
122 | */ | ||
123 | /* Safest way to go: 32 bytes by 32 bytes */ | ||
124 | #define CACHE_STRIDE_SHIFT 5 | ||
125 | unsigned long ia64_cache_stride_shift = ~0; | ||
119 | 126 | ||
120 | /* | 127 | /* |
121 | * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This | 128 | * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This |
@@ -852,13 +859,14 @@ setup_per_cpu_areas (void) | |||
852 | } | 859 | } |
853 | 860 | ||
854 | /* | 861 | /* |
855 | * Calculate the max. cache line size. | 862 | * Do the following calculations: |
856 | * | 863 | * |
857 | * In addition, the minimum of the i-cache stride sizes is calculated for | 864 | * 1. the max. cache line size. |
858 | * "flush_icache_range()". | 865 | * 2. the minimum of the i-cache stride sizes for "flush_icache_range()". |
866 | * 3. the minimum of the cache stride sizes for "clflush_cache_range()". | ||
859 | */ | 867 | */ |
860 | static void __cpuinit | 868 | static void __cpuinit |
861 | get_max_cacheline_size (void) | 869 | get_cache_info(void) |
862 | { | 870 | { |
863 | unsigned long line_size, max = 1; | 871 | unsigned long line_size, max = 1; |
864 | u64 l, levels, unique_caches; | 872 | u64 l, levels, unique_caches; |
@@ -872,12 +880,14 @@ get_max_cacheline_size (void) | |||
872 | max = SMP_CACHE_BYTES; | 880 | max = SMP_CACHE_BYTES; |
873 | /* Safest setup for "flush_icache_range()" */ | 881 | /* Safest setup for "flush_icache_range()" */ |
874 | ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; | 882 | ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; |
883 | /* Safest setup for "clflush_cache_range()" */ | ||
884 | ia64_cache_stride_shift = CACHE_STRIDE_SHIFT; | ||
875 | goto out; | 885 | goto out; |
876 | } | 886 | } |
877 | 887 | ||
878 | for (l = 0; l < levels; ++l) { | 888 | for (l = 0; l < levels; ++l) { |
879 | status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2, | 889 | /* cache_type (data_or_unified)=2 */ |
880 | &cci); | 890 | status = ia64_pal_cache_config_info(l, 2, &cci); |
881 | if (status != 0) { | 891 | if (status != 0) { |
882 | printk(KERN_ERR | 892 | printk(KERN_ERR |
883 | "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", | 893 | "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", |
@@ -885,15 +895,21 @@ get_max_cacheline_size (void) | |||
885 | max = SMP_CACHE_BYTES; | 895 | max = SMP_CACHE_BYTES; |
886 | /* The safest setup for "flush_icache_range()" */ | 896 | /* The safest setup for "flush_icache_range()" */ |
887 | cci.pcci_stride = I_CACHE_STRIDE_SHIFT; | 897 | cci.pcci_stride = I_CACHE_STRIDE_SHIFT; |
898 | /* The safest setup for "clflush_cache_range()" */ | ||
899 | ia64_cache_stride_shift = CACHE_STRIDE_SHIFT; | ||
888 | cci.pcci_unified = 1; | 900 | cci.pcci_unified = 1; |
901 | } else { | ||
902 | if (cci.pcci_stride < ia64_cache_stride_shift) | ||
903 | ia64_cache_stride_shift = cci.pcci_stride; | ||
904 | |||
905 | line_size = 1 << cci.pcci_line_size; | ||
906 | if (line_size > max) | ||
907 | max = line_size; | ||
889 | } | 908 | } |
890 | line_size = 1 << cci.pcci_line_size; | 909 | |
891 | if (line_size > max) | ||
892 | max = line_size; | ||
893 | if (!cci.pcci_unified) { | 910 | if (!cci.pcci_unified) { |
894 | status = ia64_pal_cache_config_info(l, | 911 | /* cache_type (instruction)=1*/ |
895 | /* cache_type (instruction)= */ 1, | 912 | status = ia64_pal_cache_config_info(l, 1, &cci); |
896 | &cci); | ||
897 | if (status != 0) { | 913 | if (status != 0) { |
898 | printk(KERN_ERR | 914 | printk(KERN_ERR |
899 | "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", | 915 | "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", |
@@ -947,7 +963,7 @@ cpu_init (void) | |||
947 | } | 963 | } |
948 | #endif | 964 | #endif |
949 | 965 | ||
950 | get_max_cacheline_size(); | 966 | get_cache_info(); |
951 | 967 | ||
952 | /* | 968 | /* |
953 | * We can't pass "local_cpu_data" to identify_cpu() because we haven't called | 969 | * We can't pass "local_cpu_data" to identify_cpu() because we haven't called |
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 19c5a78636fc..e12500a9c443 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/ptrace.h> | 13 | #include <linux/ptrace.h> |
14 | #include <linux/tracehook.h> | ||
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
15 | #include <linux/signal.h> | 16 | #include <linux/signal.h> |
16 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
@@ -439,6 +440,13 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse | |||
439 | sigaddset(¤t->blocked, sig); | 440 | sigaddset(¤t->blocked, sig); |
440 | recalc_sigpending(); | 441 | recalc_sigpending(); |
441 | spin_unlock_irq(¤t->sighand->siglock); | 442 | spin_unlock_irq(¤t->sighand->siglock); |
443 | |||
444 | /* | ||
445 | * Let tracing know that we've done the handler setup. | ||
446 | */ | ||
447 | tracehook_signal_handler(sig, info, ka, &scr->pt, | ||
448 | test_thread_flag(TIF_SINGLESTEP)); | ||
449 | |||
442 | return 1; | 450 | return 1; |
443 | } | 451 | } |
444 | 452 | ||
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S index 2a0d27f2f21b..1d8c88860063 100644 --- a/arch/ia64/lib/flush.S +++ b/arch/ia64/lib/flush.S | |||
@@ -60,3 +60,58 @@ GLOBAL_ENTRY(flush_icache_range) | |||
60 | mov ar.lc=r3 // restore ar.lc | 60 | mov ar.lc=r3 // restore ar.lc |
61 | br.ret.sptk.many rp | 61 | br.ret.sptk.many rp |
62 | END(flush_icache_range) | 62 | END(flush_icache_range) |
63 | |||
64 | /* | ||
65 | * clflush_cache_range(start,size) | ||
66 | * | ||
67 | * Flush cache lines from start to start+size-1. | ||
68 | * | ||
69 | * Must deal with range from start to start+size-1 but nothing else | ||
70 | * (need to be careful not to touch addresses that may be | ||
71 | * unmapped). | ||
72 | * | ||
73 | * Note: "in0" and "in1" are preserved for debugging purposes. | ||
74 | */ | ||
75 | .section .kprobes.text,"ax" | ||
76 | GLOBAL_ENTRY(clflush_cache_range) | ||
77 | |||
78 | .prologue | ||
79 | alloc r2=ar.pfs,2,0,0,0 | ||
80 | movl r3=ia64_cache_stride_shift | ||
81 | mov r21=1 | ||
82 | add r22=in1,in0 | ||
83 | ;; | ||
84 | ld8 r20=[r3] // r20: stride shift | ||
85 | sub r22=r22,r0,1 // last byte address | ||
86 | ;; | ||
87 | shr.u r23=in0,r20 // start / (stride size) | ||
88 | shr.u r22=r22,r20 // (last byte address) / (stride size) | ||
89 | shl r21=r21,r20 // r21: stride size of the i-cache(s) | ||
90 | ;; | ||
91 | sub r8=r22,r23 // number of strides - 1 | ||
92 | shl r24=r23,r20 // r24: addresses for "fc" = | ||
93 | // "start" rounded down to stride | ||
94 | // boundary | ||
95 | .save ar.lc,r3 | ||
96 | mov r3=ar.lc // save ar.lc | ||
97 | ;; | ||
98 | |||
99 | .body | ||
100 | mov ar.lc=r8 | ||
101 | ;; | ||
102 | /* | ||
103 | * 32 byte aligned loop, even number of (actually 2) bundles | ||
104 | */ | ||
105 | .Loop_fc: | ||
106 | fc r24 // issuable on M0 only | ||
107 | add r24=r21,r24 // we flush "stride size" bytes per iteration | ||
108 | nop.i 0 | ||
109 | br.cloop.sptk.few .Loop_fc | ||
110 | ;; | ||
111 | sync.i | ||
112 | ;; | ||
113 | srlz.i | ||
114 | ;; | ||
115 | mov ar.lc=r3 // restore ar.lc | ||
116 | br.ret.sptk.many rp | ||
117 | END(clflush_cache_range) | ||
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 8caf42471f0d..bd9818a36b47 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c | |||
@@ -362,9 +362,13 @@ ia64_tlb_init (void) | |||
362 | per_cpu(ia64_tr_num, cpu) = | 362 | per_cpu(ia64_tr_num, cpu) = |
363 | vm_info_1.pal_vm_info_1_s.max_dtr_entry+1; | 363 | vm_info_1.pal_vm_info_1_s.max_dtr_entry+1; |
364 | if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { | 364 | if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { |
365 | static int justonce = 1; | ||
365 | per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; | 366 | per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; |
366 | printk(KERN_DEBUG "TR register number exceeds IA64_TR_ALLOC_MAX!" | 367 | if (justonce) { |
367 | "IA64_TR_ALLOC_MAX should be extended\n"); | 368 | justonce = 0; |
369 | printk(KERN_DEBUG "TR register number exceeds " | ||
370 | "IA64_TR_ALLOC_MAX!\n"); | ||
371 | } | ||
368 | } | 372 | } |
369 | } | 373 | } |
370 | 374 | ||
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed new file mode 100644 index 000000000000..ba66ac2e4c60 --- /dev/null +++ b/arch/ia64/scripts/pvcheck.sed | |||
@@ -0,0 +1,32 @@ | |||
1 | # | ||
2 | # Checker for paravirtualizations of privileged operations. | ||
3 | # | ||
4 | s/ssm.*psr\.ic.*/.warning \"ssm psr.ic should not be used directly\"/g | ||
5 | s/rsm.*psr\.ic.*/.warning \"rsm psr.ic should not be used directly\"/g | ||
6 | s/ssm.*psr\.i.*/.warning \"ssm psr.i should not be used directly\"/g | ||
7 | s/rsm.*psr\.i.*/.warning \"rsm psr.i should not be used directly\"/g | ||
8 | s/ssm.*psr\.dt.*/.warning \"ssm psr.dt should not be used directly\"/g | ||
9 | s/rsm.*psr\.dt.*/.warning \"rsm psr.dt should not be used directly\"/g | ||
10 | s/mov.*=.*cr\.ifa/.warning \"cr.ifa should not used directly\"/g | ||
11 | s/mov.*=.*cr\.itir/.warning \"cr.itir should not used directly\"/g | ||
12 | s/mov.*=.*cr\.isr/.warning \"cr.isr should not used directly\"/g | ||
13 | s/mov.*=.*cr\.iha/.warning \"cr.iha should not used directly\"/g | ||
14 | s/mov.*=.*cr\.ipsr/.warning \"cr.ipsr should not used directly\"/g | ||
15 | s/mov.*=.*cr\.iim/.warning \"cr.iim should not used directly\"/g | ||
16 | s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g | ||
17 | s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g | ||
18 | s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr | ||
19 | s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g | ||
20 | s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g | ||
21 | s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g | ||
22 | s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g | ||
23 | s/mov.*cr\.ipsr.*=.*/.warning \"cr.ipsr should not used directly\"/g | ||
24 | s/mov.*cr\.ifs.*=.*/.warning \"cr.ifs should not used directly\"/g | ||
25 | s/mov.*cr\.iip.*=.*/.warning \"cr.iip should not used directly\"/g | ||
26 | s/mov.*cr\.kr.*=.*/.warning \"cr.kr should not used directly\"/g | ||
27 | s/mov.*ar\.eflags.*=.*/.warning \"ar.eflags should not used directly\"/g | ||
28 | s/itc\.i.*/.warning \"itc.i should not be used directly.\"/g | ||
29 | s/itc\.d.*/.warning \"itc.d should not be used directly.\"/g | ||
30 | s/bsw\.0/.warning \"bsw.0 should not be used directly.\"/g | ||
31 | s/bsw\.1/.warning \"bsw.1 should not be used directly.\"/g | ||
32 | s/ptc\.ga.*/.warning \"ptc.ga should not be used directly.\"/g | ||
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig new file mode 100644 index 000000000000..f1683a20275b --- /dev/null +++ b/arch/ia64/xen/Kconfig | |||
@@ -0,0 +1,26 @@ | |||
1 | # | ||
2 | # This Kconfig describes xen/ia64 options | ||
3 | # | ||
4 | |||
5 | config XEN | ||
6 | bool "Xen hypervisor support" | ||
7 | default y | ||
8 | depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL | ||
9 | select XEN_XENCOMM | ||
10 | select NO_IDLE_HZ | ||
11 | |||
12 | # those are required to save/restore. | ||
13 | select ARCH_SUSPEND_POSSIBLE | ||
14 | select SUSPEND | ||
15 | select PM_SLEEP | ||
16 | help | ||
17 | Enable Xen hypervisor support. Resulting kernel runs | ||
18 | both as a guest OS on Xen and natively on hardware. | ||
19 | |||
20 | config XEN_XENCOMM | ||
21 | depends on XEN | ||
22 | bool | ||
23 | |||
24 | config NO_IDLE_HZ | ||
25 | depends on XEN | ||
26 | bool | ||
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile new file mode 100644 index 000000000000..0ad0224693d9 --- /dev/null +++ b/arch/ia64/xen/Makefile | |||
@@ -0,0 +1,22 @@ | |||
1 | # | ||
2 | # Makefile for Xen components | ||
3 | # | ||
4 | |||
5 | obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ | ||
6 | hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o | ||
7 | |||
8 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | ||
9 | |||
10 | AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN | ||
11 | |||
12 | # xen multi compile | ||
13 | ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S | ||
14 | ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) | ||
15 | obj-y += $(ASM_PARAVIRT_OBJS) | ||
16 | define paravirtualized_xen | ||
17 | AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN | ||
18 | endef | ||
19 | $(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o)))) | ||
20 | |||
21 | $(obj)/xen-%.o: $(src)/../kernel/%.S FORCE | ||
22 | $(call if_changed_dep,as_o_S) | ||
diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c new file mode 100644 index 000000000000..777dd9a9108b --- /dev/null +++ b/arch/ia64/xen/grant-table.c | |||
@@ -0,0 +1,155 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/grant-table.c | ||
3 | * | ||
4 | * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/mm.h> | ||
26 | |||
27 | #include <xen/interface/xen.h> | ||
28 | #include <xen/interface/memory.h> | ||
29 | #include <xen/grant_table.h> | ||
30 | |||
31 | #include <asm/xen/hypervisor.h> | ||
32 | |||
33 | struct vm_struct *xen_alloc_vm_area(unsigned long size) | ||
34 | { | ||
35 | int order; | ||
36 | unsigned long virt; | ||
37 | unsigned long nr_pages; | ||
38 | struct vm_struct *area; | ||
39 | |||
40 | order = get_order(size); | ||
41 | virt = __get_free_pages(GFP_KERNEL, order); | ||
42 | if (virt == 0) | ||
43 | goto err0; | ||
44 | nr_pages = 1 << order; | ||
45 | scrub_pages(virt, nr_pages); | ||
46 | |||
47 | area = kmalloc(sizeof(*area), GFP_KERNEL); | ||
48 | if (area == NULL) | ||
49 | goto err1; | ||
50 | |||
51 | area->flags = VM_IOREMAP; | ||
52 | area->addr = (void *)virt; | ||
53 | area->size = size; | ||
54 | area->pages = NULL; | ||
55 | area->nr_pages = nr_pages; | ||
56 | area->phys_addr = 0; /* xenbus_map_ring_valloc uses this field! */ | ||
57 | |||
58 | return area; | ||
59 | |||
60 | err1: | ||
61 | free_pages(virt, order); | ||
62 | err0: | ||
63 | return NULL; | ||
64 | } | ||
65 | EXPORT_SYMBOL_GPL(xen_alloc_vm_area); | ||
66 | |||
67 | void xen_free_vm_area(struct vm_struct *area) | ||
68 | { | ||
69 | unsigned int order = get_order(area->size); | ||
70 | unsigned long i; | ||
71 | unsigned long phys_addr = __pa(area->addr); | ||
72 | |||
73 | /* This area is used for foreign page mappping. | ||
74 | * So underlying machine page may not be assigned. */ | ||
75 | for (i = 0; i < (1 << order); i++) { | ||
76 | unsigned long ret; | ||
77 | unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i; | ||
78 | struct xen_memory_reservation reservation = { | ||
79 | .nr_extents = 1, | ||
80 | .address_bits = 0, | ||
81 | .extent_order = 0, | ||
82 | .domid = DOMID_SELF | ||
83 | }; | ||
84 | set_xen_guest_handle(reservation.extent_start, &gpfn); | ||
85 | ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, | ||
86 | &reservation); | ||
87 | BUG_ON(ret != 1); | ||
88 | } | ||
89 | free_pages((unsigned long)area->addr, order); | ||
90 | kfree(area); | ||
91 | } | ||
92 | EXPORT_SYMBOL_GPL(xen_free_vm_area); | ||
93 | |||
94 | |||
95 | /**************************************************************************** | ||
96 | * grant table hack | ||
97 | * cmd: GNTTABOP_xxx | ||
98 | */ | ||
99 | |||
100 | int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, | ||
101 | unsigned long max_nr_gframes, | ||
102 | struct grant_entry **__shared) | ||
103 | { | ||
104 | *__shared = __va(frames[0] << PAGE_SHIFT); | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | void arch_gnttab_unmap_shared(struct grant_entry *shared, | ||
109 | unsigned long nr_gframes) | ||
110 | { | ||
111 | /* nothing */ | ||
112 | } | ||
113 | |||
114 | static void | ||
115 | gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop) | ||
116 | { | ||
117 | uint32_t flags; | ||
118 | |||
119 | flags = uop->flags; | ||
120 | |||
121 | if (flags & GNTMAP_host_map) { | ||
122 | if (flags & GNTMAP_application_map) { | ||
123 | printk(KERN_DEBUG | ||
124 | "GNTMAP_application_map is not supported yet: " | ||
125 | "flags 0x%x\n", flags); | ||
126 | BUG(); | ||
127 | } | ||
128 | if (flags & GNTMAP_contains_pte) { | ||
129 | printk(KERN_DEBUG | ||
130 | "GNTMAP_contains_pte is not supported yet: " | ||
131 | "flags 0x%x\n", flags); | ||
132 | BUG(); | ||
133 | } | ||
134 | } else if (flags & GNTMAP_device_map) { | ||
135 | printk("GNTMAP_device_map is not supported yet 0x%x\n", flags); | ||
136 | BUG(); /* not yet. actually this flag is not used. */ | ||
137 | } else { | ||
138 | BUG(); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | int | ||
143 | HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) | ||
144 | { | ||
145 | if (cmd == GNTTABOP_map_grant_ref) { | ||
146 | unsigned int i; | ||
147 | for (i = 0; i < count; i++) { | ||
148 | gnttab_map_grant_ref_pre( | ||
149 | (struct gnttab_map_grant_ref *)uop + i); | ||
150 | } | ||
151 | } | ||
152 | return xencomm_hypercall_grant_table_op(cmd, uop, count); | ||
153 | } | ||
154 | |||
155 | EXPORT_SYMBOL(HYPERVISOR_grant_table_op); | ||
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S new file mode 100644 index 000000000000..d4ff0b9e79f1 --- /dev/null +++ b/arch/ia64/xen/hypercall.S | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Support routines for Xen hypercalls | ||
3 | * | ||
4 | * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> | ||
5 | * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com> | ||
6 | */ | ||
7 | |||
8 | #include <asm/asmmacro.h> | ||
9 | #include <asm/intrinsics.h> | ||
10 | #include <asm/xen/privop.h> | ||
11 | |||
12 | /* | ||
13 | * Hypercalls without parameter. | ||
14 | */ | ||
15 | #define __HCALL0(name,hcall) \ | ||
16 | GLOBAL_ENTRY(name); \ | ||
17 | break hcall; \ | ||
18 | br.ret.sptk.many rp; \ | ||
19 | END(name) | ||
20 | |||
21 | /* | ||
22 | * Hypercalls with 1 parameter. | ||
23 | */ | ||
24 | #define __HCALL1(name,hcall) \ | ||
25 | GLOBAL_ENTRY(name); \ | ||
26 | mov r8=r32; \ | ||
27 | break hcall; \ | ||
28 | br.ret.sptk.many rp; \ | ||
29 | END(name) | ||
30 | |||
31 | /* | ||
32 | * Hypercalls with 2 parameters. | ||
33 | */ | ||
34 | #define __HCALL2(name,hcall) \ | ||
35 | GLOBAL_ENTRY(name); \ | ||
36 | mov r8=r32; \ | ||
37 | mov r9=r33; \ | ||
38 | break hcall; \ | ||
39 | br.ret.sptk.many rp; \ | ||
40 | END(name) | ||
41 | |||
42 | __HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR) | ||
43 | __HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR) | ||
44 | __HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR) | ||
45 | __HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I) | ||
46 | |||
47 | __HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR) | ||
48 | __HCALL1(xen_eoi, HYPERPRIVOP_EOI) | ||
49 | __HCALL1(xen_thash, HYPERPRIVOP_THASH) | ||
50 | __HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM) | ||
51 | __HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR) | ||
52 | __HCALL1(xen_fc, HYPERPRIVOP_FC) | ||
53 | __HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID) | ||
54 | __HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD) | ||
55 | |||
56 | __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) | ||
57 | __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) | ||
58 | __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) | ||
59 | |||
60 | #ifdef CONFIG_IA32_SUPPORT | ||
61 | __HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG) | ||
62 | __HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8 | ||
63 | #endif /* CONFIG_IA32_SUPPORT */ | ||
64 | |||
65 | GLOBAL_ENTRY(xen_set_rr0_to_rr4) | ||
66 | mov r8=r32 | ||
67 | mov r9=r33 | ||
68 | mov r10=r34 | ||
69 | mov r11=r35 | ||
70 | mov r14=r36 | ||
71 | XEN_HYPER_SET_RR0_TO_RR4 | ||
72 | br.ret.sptk.many rp | ||
73 | ;; | ||
74 | END(xen_set_rr0_to_rr4) | ||
75 | |||
76 | GLOBAL_ENTRY(xen_send_ipi) | ||
77 | mov r14=r32 | ||
78 | mov r15=r33 | ||
79 | mov r2=0x400 | ||
80 | break 0x1000 | ||
81 | ;; | ||
82 | br.ret.sptk.many rp | ||
83 | ;; | ||
84 | END(xen_send_ipi) | ||
85 | |||
86 | GLOBAL_ENTRY(__hypercall) | ||
87 | mov r2=r37 | ||
88 | break 0x1000 | ||
89 | br.ret.sptk.many b0 | ||
90 | ;; | ||
91 | END(__hypercall) | ||
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c new file mode 100644 index 000000000000..cac4d97c0b5a --- /dev/null +++ b/arch/ia64/xen/hypervisor.c | |||
@@ -0,0 +1,96 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/hypervisor.c | ||
3 | * | ||
4 | * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/efi.h> | ||
24 | #include <asm/xen/hypervisor.h> | ||
25 | #include <asm/xen/privop.h> | ||
26 | |||
27 | #include "irq_xen.h" | ||
28 | |||
29 | struct shared_info *HYPERVISOR_shared_info __read_mostly = | ||
30 | (struct shared_info *)XSI_BASE; | ||
31 | EXPORT_SYMBOL(HYPERVISOR_shared_info); | ||
32 | |||
33 | DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); | ||
34 | |||
35 | struct start_info *xen_start_info; | ||
36 | EXPORT_SYMBOL(xen_start_info); | ||
37 | |||
38 | EXPORT_SYMBOL(xen_domain_type); | ||
39 | |||
40 | EXPORT_SYMBOL(__hypercall); | ||
41 | |||
42 | /* Stolen from arch/x86/xen/enlighten.c */ | ||
43 | /* | ||
44 | * Flag to determine whether vcpu info placement is available on all | ||
45 | * VCPUs. We assume it is to start with, and then set it to zero on | ||
46 | * the first failure. This is because it can succeed on some VCPUs | ||
47 | * and not others, since it can involve hypervisor memory allocation, | ||
48 | * or because the guest failed to guarantee all the appropriate | ||
49 | * constraints on all VCPUs (ie buffer can't cross a page boundary). | ||
50 | * | ||
51 | * Note that any particular CPU may be using a placed vcpu structure, | ||
52 | * but we can only optimise if the all are. | ||
53 | * | ||
54 | * 0: not available, 1: available | ||
55 | */ | ||
56 | |||
57 | static void __init xen_vcpu_setup(int cpu) | ||
58 | { | ||
59 | /* | ||
60 | * WARNING: | ||
61 | * before changing MAX_VIRT_CPUS, | ||
62 | * check that shared_info fits on a page | ||
63 | */ | ||
64 | BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE); | ||
65 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; | ||
66 | } | ||
67 | |||
68 | void __init xen_setup_vcpu_info_placement(void) | ||
69 | { | ||
70 | int cpu; | ||
71 | |||
72 | for_each_possible_cpu(cpu) | ||
73 | xen_vcpu_setup(cpu); | ||
74 | } | ||
75 | |||
76 | void __cpuinit | ||
77 | xen_cpu_init(void) | ||
78 | { | ||
79 | xen_smp_intr_init(); | ||
80 | } | ||
81 | |||
82 | /************************************************************************** | ||
83 | * opt feature | ||
84 | */ | ||
85 | void | ||
86 | xen_ia64_enable_opt_feature(void) | ||
87 | { | ||
88 | /* Enable region 7 identity map optimizations in Xen */ | ||
89 | struct xen_ia64_opt_feature optf; | ||
90 | |||
91 | optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7; | ||
92 | optf.on = XEN_IA64_OPTF_ON; | ||
93 | optf.pgprot = pgprot_val(PAGE_KERNEL); | ||
94 | optf.key = 0; /* No key on linux. */ | ||
95 | HYPERVISOR_opt_feature(&optf); | ||
96 | } | ||
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c new file mode 100644 index 000000000000..af93aadb68bb --- /dev/null +++ b/arch/ia64/xen/irq_xen.c | |||
@@ -0,0 +1,435 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/irq_xen.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/cpu.h> | ||
24 | |||
25 | #include <xen/interface/xen.h> | ||
26 | #include <xen/interface/callback.h> | ||
27 | #include <xen/events.h> | ||
28 | |||
29 | #include <asm/xen/privop.h> | ||
30 | |||
31 | #include "irq_xen.h" | ||
32 | |||
33 | /*************************************************************************** | ||
34 | * pv_irq_ops | ||
35 | * irq operations | ||
36 | */ | ||
37 | |||
38 | static int | ||
39 | xen_assign_irq_vector(int irq) | ||
40 | { | ||
41 | struct physdev_irq irq_op; | ||
42 | |||
43 | irq_op.irq = irq; | ||
44 | if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) | ||
45 | return -ENOSPC; | ||
46 | |||
47 | return irq_op.vector; | ||
48 | } | ||
49 | |||
50 | static void | ||
51 | xen_free_irq_vector(int vector) | ||
52 | { | ||
53 | struct physdev_irq irq_op; | ||
54 | |||
55 | if (vector < IA64_FIRST_DEVICE_VECTOR || | ||
56 | vector > IA64_LAST_DEVICE_VECTOR) | ||
57 | return; | ||
58 | |||
59 | irq_op.vector = vector; | ||
60 | if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) | ||
61 | printk(KERN_WARNING "%s: xen_free_irq_vecotr fail vector=%d\n", | ||
62 | __func__, vector); | ||
63 | } | ||
64 | |||
65 | |||
66 | static DEFINE_PER_CPU(int, timer_irq) = -1; | ||
67 | static DEFINE_PER_CPU(int, ipi_irq) = -1; | ||
68 | static DEFINE_PER_CPU(int, resched_irq) = -1; | ||
69 | static DEFINE_PER_CPU(int, cmc_irq) = -1; | ||
70 | static DEFINE_PER_CPU(int, cmcp_irq) = -1; | ||
71 | static DEFINE_PER_CPU(int, cpep_irq) = -1; | ||
72 | #define NAME_SIZE 15 | ||
73 | static DEFINE_PER_CPU(char[NAME_SIZE], timer_name); | ||
74 | static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name); | ||
75 | static DEFINE_PER_CPU(char[NAME_SIZE], resched_name); | ||
76 | static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name); | ||
77 | static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name); | ||
78 | static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name); | ||
79 | #undef NAME_SIZE | ||
80 | |||
81 | struct saved_irq { | ||
82 | unsigned int irq; | ||
83 | struct irqaction *action; | ||
84 | }; | ||
85 | /* 16 should be far optimistic value, since only several percpu irqs | ||
86 | * are registered early. | ||
87 | */ | ||
88 | #define MAX_LATE_IRQ 16 | ||
89 | static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ]; | ||
90 | static unsigned short late_irq_cnt; | ||
91 | static unsigned short saved_irq_cnt; | ||
92 | static int xen_slab_ready; | ||
93 | |||
94 | #ifdef CONFIG_SMP | ||
95 | /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, | ||
96 | * it ends up to issue several memory accesses upon percpu data and | ||
97 | * thus adds unnecessary traffic to other paths. | ||
98 | */ | ||
99 | static irqreturn_t | ||
100 | xen_dummy_handler(int irq, void *dev_id) | ||
101 | { | ||
102 | |||
103 | return IRQ_HANDLED; | ||
104 | } | ||
105 | |||
106 | static struct irqaction xen_ipi_irqaction = { | ||
107 | .handler = handle_IPI, | ||
108 | .flags = IRQF_DISABLED, | ||
109 | .name = "IPI" | ||
110 | }; | ||
111 | |||
112 | static struct irqaction xen_resched_irqaction = { | ||
113 | .handler = xen_dummy_handler, | ||
114 | .flags = IRQF_DISABLED, | ||
115 | .name = "resched" | ||
116 | }; | ||
117 | |||
118 | static struct irqaction xen_tlb_irqaction = { | ||
119 | .handler = xen_dummy_handler, | ||
120 | .flags = IRQF_DISABLED, | ||
121 | .name = "tlb_flush" | ||
122 | }; | ||
123 | #endif | ||
124 | |||
125 | /* | ||
126 | * This is xen version percpu irq registration, which needs bind | ||
127 | * to xen specific evtchn sub-system. One trick here is that xen | ||
128 | * evtchn binding interface depends on kmalloc because related | ||
129 | * port needs to be freed at device/cpu down. So we cache the | ||
130 | * registration on BSP before slab is ready and then deal them | ||
131 | * at later point. For rest instances happening after slab ready, | ||
132 | * we hook them to xen evtchn immediately. | ||
133 | * | ||
134 | * FIXME: MCA is not supported by far, and thus "nomca" boot param is | ||
135 | * required. | ||
136 | */ | ||
137 | static void | ||
138 | __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, | ||
139 | struct irqaction *action, int save) | ||
140 | { | ||
141 | irq_desc_t *desc; | ||
142 | int irq = 0; | ||
143 | |||
144 | if (xen_slab_ready) { | ||
145 | switch (vec) { | ||
146 | case IA64_TIMER_VECTOR: | ||
147 | snprintf(per_cpu(timer_name, cpu), | ||
148 | sizeof(per_cpu(timer_name, cpu)), | ||
149 | "%s%d", action->name, cpu); | ||
150 | irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, | ||
151 | action->handler, action->flags, | ||
152 | per_cpu(timer_name, cpu), action->dev_id); | ||
153 | per_cpu(timer_irq, cpu) = irq; | ||
154 | break; | ||
155 | case IA64_IPI_RESCHEDULE: | ||
156 | snprintf(per_cpu(resched_name, cpu), | ||
157 | sizeof(per_cpu(resched_name, cpu)), | ||
158 | "%s%d", action->name, cpu); | ||
159 | irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, | ||
160 | action->handler, action->flags, | ||
161 | per_cpu(resched_name, cpu), action->dev_id); | ||
162 | per_cpu(resched_irq, cpu) = irq; | ||
163 | break; | ||
164 | case IA64_IPI_VECTOR: | ||
165 | snprintf(per_cpu(ipi_name, cpu), | ||
166 | sizeof(per_cpu(ipi_name, cpu)), | ||
167 | "%s%d", action->name, cpu); | ||
168 | irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, | ||
169 | action->handler, action->flags, | ||
170 | per_cpu(ipi_name, cpu), action->dev_id); | ||
171 | per_cpu(ipi_irq, cpu) = irq; | ||
172 | break; | ||
173 | case IA64_CMC_VECTOR: | ||
174 | snprintf(per_cpu(cmc_name, cpu), | ||
175 | sizeof(per_cpu(cmc_name, cpu)), | ||
176 | "%s%d", action->name, cpu); | ||
177 | irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, | ||
178 | action->handler, | ||
179 | action->flags, | ||
180 | per_cpu(cmc_name, cpu), | ||
181 | action->dev_id); | ||
182 | per_cpu(cmc_irq, cpu) = irq; | ||
183 | break; | ||
184 | case IA64_CMCP_VECTOR: | ||
185 | snprintf(per_cpu(cmcp_name, cpu), | ||
186 | sizeof(per_cpu(cmcp_name, cpu)), | ||
187 | "%s%d", action->name, cpu); | ||
188 | irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, | ||
189 | action->handler, | ||
190 | action->flags, | ||
191 | per_cpu(cmcp_name, cpu), | ||
192 | action->dev_id); | ||
193 | per_cpu(cmcp_irq, cpu) = irq; | ||
194 | break; | ||
195 | case IA64_CPEP_VECTOR: | ||
196 | snprintf(per_cpu(cpep_name, cpu), | ||
197 | sizeof(per_cpu(cpep_name, cpu)), | ||
198 | "%s%d", action->name, cpu); | ||
199 | irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, | ||
200 | action->handler, | ||
201 | action->flags, | ||
202 | per_cpu(cpep_name, cpu), | ||
203 | action->dev_id); | ||
204 | per_cpu(cpep_irq, cpu) = irq; | ||
205 | break; | ||
206 | case IA64_CPE_VECTOR: | ||
207 | case IA64_MCA_RENDEZ_VECTOR: | ||
208 | case IA64_PERFMON_VECTOR: | ||
209 | case IA64_MCA_WAKEUP_VECTOR: | ||
210 | case IA64_SPURIOUS_INT_VECTOR: | ||
211 | /* No need to complain, these aren't supported. */ | ||
212 | break; | ||
213 | default: | ||
214 | printk(KERN_WARNING "Percpu irq %d is unsupported " | ||
215 | "by xen!\n", vec); | ||
216 | break; | ||
217 | } | ||
218 | BUG_ON(irq < 0); | ||
219 | |||
220 | if (irq > 0) { | ||
221 | /* | ||
222 | * Mark percpu. Without this, migrate_irqs() will | ||
223 | * mark the interrupt for migrations and trigger it | ||
224 | * on cpu hotplug. | ||
225 | */ | ||
226 | desc = irq_desc + irq; | ||
227 | desc->status |= IRQ_PER_CPU; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | /* For BSP, we cache registered percpu irqs, and then re-walk | ||
232 | * them when initializing APs | ||
233 | */ | ||
234 | if (!cpu && save) { | ||
235 | BUG_ON(saved_irq_cnt == MAX_LATE_IRQ); | ||
236 | saved_percpu_irqs[saved_irq_cnt].irq = vec; | ||
237 | saved_percpu_irqs[saved_irq_cnt].action = action; | ||
238 | saved_irq_cnt++; | ||
239 | if (!xen_slab_ready) | ||
240 | late_irq_cnt++; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | static void | ||
245 | xen_register_percpu_irq(ia64_vector vec, struct irqaction *action) | ||
246 | { | ||
247 | __xen_register_percpu_irq(smp_processor_id(), vec, action, 1); | ||
248 | } | ||
249 | |||
250 | static void | ||
251 | xen_bind_early_percpu_irq(void) | ||
252 | { | ||
253 | int i; | ||
254 | |||
255 | xen_slab_ready = 1; | ||
256 | /* There's no race when accessing this cached array, since only | ||
257 | * BSP will face with such step shortly | ||
258 | */ | ||
259 | for (i = 0; i < late_irq_cnt; i++) | ||
260 | __xen_register_percpu_irq(smp_processor_id(), | ||
261 | saved_percpu_irqs[i].irq, | ||
262 | saved_percpu_irqs[i].action, 0); | ||
263 | } | ||
264 | |||
265 | /* FIXME: There's no obvious point to check whether slab is ready. So | ||
266 | * a hack is used here by utilizing a late time hook. | ||
267 | */ | ||
268 | |||
269 | #ifdef CONFIG_HOTPLUG_CPU | ||
270 | static int __devinit | ||
271 | unbind_evtchn_callback(struct notifier_block *nfb, | ||
272 | unsigned long action, void *hcpu) | ||
273 | { | ||
274 | unsigned int cpu = (unsigned long)hcpu; | ||
275 | |||
276 | if (action == CPU_DEAD) { | ||
277 | /* Unregister evtchn. */ | ||
278 | if (per_cpu(cpep_irq, cpu) >= 0) { | ||
279 | unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL); | ||
280 | per_cpu(cpep_irq, cpu) = -1; | ||
281 | } | ||
282 | if (per_cpu(cmcp_irq, cpu) >= 0) { | ||
283 | unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL); | ||
284 | per_cpu(cmcp_irq, cpu) = -1; | ||
285 | } | ||
286 | if (per_cpu(cmc_irq, cpu) >= 0) { | ||
287 | unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL); | ||
288 | per_cpu(cmc_irq, cpu) = -1; | ||
289 | } | ||
290 | if (per_cpu(ipi_irq, cpu) >= 0) { | ||
291 | unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL); | ||
292 | per_cpu(ipi_irq, cpu) = -1; | ||
293 | } | ||
294 | if (per_cpu(resched_irq, cpu) >= 0) { | ||
295 | unbind_from_irqhandler(per_cpu(resched_irq, cpu), | ||
296 | NULL); | ||
297 | per_cpu(resched_irq, cpu) = -1; | ||
298 | } | ||
299 | if (per_cpu(timer_irq, cpu) >= 0) { | ||
300 | unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL); | ||
301 | per_cpu(timer_irq, cpu) = -1; | ||
302 | } | ||
303 | } | ||
304 | return NOTIFY_OK; | ||
305 | } | ||
306 | |||
307 | static struct notifier_block unbind_evtchn_notifier = { | ||
308 | .notifier_call = unbind_evtchn_callback, | ||
309 | .priority = 0 | ||
310 | }; | ||
311 | #endif | ||
312 | |||
313 | void xen_smp_intr_init_early(unsigned int cpu) | ||
314 | { | ||
315 | #ifdef CONFIG_SMP | ||
316 | unsigned int i; | ||
317 | |||
318 | for (i = 0; i < saved_irq_cnt; i++) | ||
319 | __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq, | ||
320 | saved_percpu_irqs[i].action, 0); | ||
321 | #endif | ||
322 | } | ||
323 | |||
324 | void xen_smp_intr_init(void) | ||
325 | { | ||
326 | #ifdef CONFIG_SMP | ||
327 | unsigned int cpu = smp_processor_id(); | ||
328 | struct callback_register event = { | ||
329 | .type = CALLBACKTYPE_event, | ||
330 | .address = { .ip = (unsigned long)&xen_event_callback }, | ||
331 | }; | ||
332 | |||
333 | if (cpu == 0) { | ||
334 | /* Initialization was already done for boot cpu. */ | ||
335 | #ifdef CONFIG_HOTPLUG_CPU | ||
336 | /* Register the notifier only once. */ | ||
337 | register_cpu_notifier(&unbind_evtchn_notifier); | ||
338 | #endif | ||
339 | return; | ||
340 | } | ||
341 | |||
342 | /* This should be piggyback when setup vcpu guest context */ | ||
343 | BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); | ||
344 | #endif /* CONFIG_SMP */ | ||
345 | } | ||
346 | |||
347 | void __init | ||
348 | xen_irq_init(void) | ||
349 | { | ||
350 | struct callback_register event = { | ||
351 | .type = CALLBACKTYPE_event, | ||
352 | .address = { .ip = (unsigned long)&xen_event_callback }, | ||
353 | }; | ||
354 | |||
355 | xen_init_IRQ(); | ||
356 | BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); | ||
357 | late_time_init = xen_bind_early_percpu_irq; | ||
358 | } | ||
359 | |||
360 | void | ||
361 | xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect) | ||
362 | { | ||
363 | #ifdef CONFIG_SMP | ||
364 | /* TODO: we need to call vcpu_up here */ | ||
365 | if (unlikely(vector == ap_wakeup_vector)) { | ||
366 | /* XXX | ||
367 | * This should be in __cpu_up(cpu) in ia64 smpboot.c | ||
368 | * like x86. But don't want to modify it, | ||
369 | * keep it untouched. | ||
370 | */ | ||
371 | xen_smp_intr_init_early(cpu); | ||
372 | |||
373 | xen_send_ipi(cpu, vector); | ||
374 | /* vcpu_prepare_and_up(cpu); */ | ||
375 | return; | ||
376 | } | ||
377 | #endif | ||
378 | |||
379 | switch (vector) { | ||
380 | case IA64_IPI_VECTOR: | ||
381 | xen_send_IPI_one(cpu, XEN_IPI_VECTOR); | ||
382 | break; | ||
383 | case IA64_IPI_RESCHEDULE: | ||
384 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); | ||
385 | break; | ||
386 | case IA64_CMCP_VECTOR: | ||
387 | xen_send_IPI_one(cpu, XEN_CMCP_VECTOR); | ||
388 | break; | ||
389 | case IA64_CPEP_VECTOR: | ||
390 | xen_send_IPI_one(cpu, XEN_CPEP_VECTOR); | ||
391 | break; | ||
392 | case IA64_TIMER_VECTOR: { | ||
393 | /* this is used only once by check_sal_cache_flush() | ||
394 | at boot time */ | ||
395 | static int used = 0; | ||
396 | if (!used) { | ||
397 | xen_send_ipi(cpu, IA64_TIMER_VECTOR); | ||
398 | used = 1; | ||
399 | break; | ||
400 | } | ||
401 | /* fallthrough */ | ||
402 | } | ||
403 | default: | ||
404 | printk(KERN_WARNING "Unsupported IPI type 0x%x\n", | ||
405 | vector); | ||
406 | notify_remote_via_irq(0); /* defaults to 0 irq */ | ||
407 | break; | ||
408 | } | ||
409 | } | ||
410 | |||
411 | static void __init | ||
412 | xen_register_ipi(void) | ||
413 | { | ||
414 | #ifdef CONFIG_SMP | ||
415 | register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction); | ||
416 | register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction); | ||
417 | register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction); | ||
418 | #endif | ||
419 | } | ||
420 | |||
421 | static void | ||
422 | xen_resend_irq(unsigned int vector) | ||
423 | { | ||
424 | (void)resend_irq_on_evtchn(vector); | ||
425 | } | ||
426 | |||
427 | const struct pv_irq_ops xen_irq_ops __initdata = { | ||
428 | .register_ipi = xen_register_ipi, | ||
429 | |||
430 | .assign_irq_vector = xen_assign_irq_vector, | ||
431 | .free_irq_vector = xen_free_irq_vector, | ||
432 | .register_percpu_irq = xen_register_percpu_irq, | ||
433 | |||
434 | .resend_irq = xen_resend_irq, | ||
435 | }; | ||
diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h new file mode 100644 index 000000000000..26110f330c87 --- /dev/null +++ b/arch/ia64/xen/irq_xen.h | |||
@@ -0,0 +1,34 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/irq_xen.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef IRQ_XEN_H | ||
24 | #define IRQ_XEN_H | ||
25 | |||
26 | extern void (*late_time_init)(void); | ||
27 | extern char xen_event_callback; | ||
28 | void __init xen_init_IRQ(void); | ||
29 | |||
30 | extern const struct pv_irq_ops xen_irq_ops __initdata; | ||
31 | extern void xen_smp_intr_init(void); | ||
32 | extern void xen_send_ipi(int cpu, int vec); | ||
33 | |||
34 | #endif /* IRQ_XEN_H */ | ||
diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c new file mode 100644 index 000000000000..4ad588a7c279 --- /dev/null +++ b/arch/ia64/xen/machvec.c | |||
@@ -0,0 +1,4 @@ | |||
1 | #define MACHVEC_PLATFORM_NAME xen | ||
2 | #define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h> | ||
3 | #include <asm/machvec_init.h> | ||
4 | |||
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c new file mode 100644 index 000000000000..fd66b048c6fa --- /dev/null +++ b/arch/ia64/xen/suspend.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/suspend.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | * suspend/resume | ||
22 | */ | ||
23 | |||
24 | #include <xen/xen-ops.h> | ||
25 | #include <asm/xen/hypervisor.h> | ||
26 | #include "time.h" | ||
27 | |||
28 | void | ||
29 | xen_mm_pin_all(void) | ||
30 | { | ||
31 | /* nothing */ | ||
32 | } | ||
33 | |||
34 | void | ||
35 | xen_mm_unpin_all(void) | ||
36 | { | ||
37 | /* nothing */ | ||
38 | } | ||
39 | |||
40 | void xen_pre_device_suspend(void) | ||
41 | { | ||
42 | /* nothing */ | ||
43 | } | ||
44 | |||
45 | void | ||
46 | xen_pre_suspend() | ||
47 | { | ||
48 | /* nothing */ | ||
49 | } | ||
50 | |||
51 | void | ||
52 | xen_post_suspend(int suspend_cancelled) | ||
53 | { | ||
54 | if (suspend_cancelled) | ||
55 | return; | ||
56 | |||
57 | xen_ia64_enable_opt_feature(); | ||
58 | /* add more if necessary */ | ||
59 | } | ||
60 | |||
61 | void xen_arch_resume(void) | ||
62 | { | ||
63 | xen_timer_resume_on_aps(); | ||
64 | } | ||
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c new file mode 100644 index 000000000000..d15a94c330fb --- /dev/null +++ b/arch/ia64/xen/time.c | |||
@@ -0,0 +1,213 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/time.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/delay.h> | ||
24 | #include <linux/kernel_stat.h> | ||
25 | #include <linux/posix-timers.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/clocksource.h> | ||
28 | |||
29 | #include <asm/timex.h> | ||
30 | |||
31 | #include <asm/xen/hypervisor.h> | ||
32 | |||
33 | #include <xen/interface/vcpu.h> | ||
34 | |||
35 | #include "../kernel/fsyscall_gtod_data.h" | ||
36 | |||
37 | DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); | ||
38 | DEFINE_PER_CPU(unsigned long, processed_stolen_time); | ||
39 | DEFINE_PER_CPU(unsigned long, processed_blocked_time); | ||
40 | |||
41 | /* taken from i386/kernel/time-xen.c */ | ||
42 | static void xen_init_missing_ticks_accounting(int cpu) | ||
43 | { | ||
44 | struct vcpu_register_runstate_memory_area area; | ||
45 | struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu); | ||
46 | int rc; | ||
47 | |||
48 | memset(runstate, 0, sizeof(*runstate)); | ||
49 | |||
50 | area.addr.v = runstate; | ||
51 | rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, | ||
52 | &area); | ||
53 | WARN_ON(rc && rc != -ENOSYS); | ||
54 | |||
55 | per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; | ||
56 | per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] | ||
57 | + runstate->time[RUNSTATE_offline]; | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Runstate accounting | ||
62 | */ | ||
63 | /* stolen from arch/x86/xen/time.c */ | ||
64 | static void get_runstate_snapshot(struct vcpu_runstate_info *res) | ||
65 | { | ||
66 | u64 state_time; | ||
67 | struct vcpu_runstate_info *state; | ||
68 | |||
69 | BUG_ON(preemptible()); | ||
70 | |||
71 | state = &__get_cpu_var(runstate); | ||
72 | |||
73 | /* | ||
74 | * The runstate info is always updated by the hypervisor on | ||
75 | * the current CPU, so there's no need to use anything | ||
76 | * stronger than a compiler barrier when fetching it. | ||
77 | */ | ||
78 | do { | ||
79 | state_time = state->state_entry_time; | ||
80 | rmb(); | ||
81 | *res = *state; | ||
82 | rmb(); | ||
83 | } while (state->state_entry_time != state_time); | ||
84 | } | ||
85 | |||
86 | #define NS_PER_TICK (1000000000LL/HZ) | ||
87 | |||
88 | static unsigned long | ||
89 | consider_steal_time(unsigned long new_itm) | ||
90 | { | ||
91 | unsigned long stolen, blocked; | ||
92 | unsigned long delta_itm = 0, stolentick = 0; | ||
93 | int cpu = smp_processor_id(); | ||
94 | struct vcpu_runstate_info runstate; | ||
95 | struct task_struct *p = current; | ||
96 | |||
97 | get_runstate_snapshot(&runstate); | ||
98 | |||
99 | /* | ||
100 | * Check for vcpu migration effect | ||
101 | * In this case, itc value is reversed. | ||
102 | * This causes huge stolen value. | ||
103 | * This function just checks and reject this effect. | ||
104 | */ | ||
105 | if (!time_after_eq(runstate.time[RUNSTATE_blocked], | ||
106 | per_cpu(processed_blocked_time, cpu))) | ||
107 | blocked = 0; | ||
108 | |||
109 | if (!time_after_eq(runstate.time[RUNSTATE_runnable] + | ||
110 | runstate.time[RUNSTATE_offline], | ||
111 | per_cpu(processed_stolen_time, cpu))) | ||
112 | stolen = 0; | ||
113 | |||
114 | if (!time_after(delta_itm + new_itm, ia64_get_itc())) | ||
115 | stolentick = ia64_get_itc() - new_itm; | ||
116 | |||
117 | do_div(stolentick, NS_PER_TICK); | ||
118 | stolentick++; | ||
119 | |||
120 | do_div(stolen, NS_PER_TICK); | ||
121 | |||
122 | if (stolen > stolentick) | ||
123 | stolen = stolentick; | ||
124 | |||
125 | stolentick -= stolen; | ||
126 | do_div(blocked, NS_PER_TICK); | ||
127 | |||
128 | if (blocked > stolentick) | ||
129 | blocked = stolentick; | ||
130 | |||
131 | if (stolen > 0 || blocked > 0) { | ||
132 | account_steal_time(NULL, jiffies_to_cputime(stolen)); | ||
133 | account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked)); | ||
134 | run_local_timers(); | ||
135 | |||
136 | if (rcu_pending(cpu)) | ||
137 | rcu_check_callbacks(cpu, user_mode(get_irq_regs())); | ||
138 | |||
139 | scheduler_tick(); | ||
140 | run_posix_cpu_timers(p); | ||
141 | delta_itm += local_cpu_data->itm_delta * (stolen + blocked); | ||
142 | |||
143 | if (cpu == time_keeper_id) { | ||
144 | write_seqlock(&xtime_lock); | ||
145 | do_timer(stolen + blocked); | ||
146 | local_cpu_data->itm_next = delta_itm + new_itm; | ||
147 | write_sequnlock(&xtime_lock); | ||
148 | } else { | ||
149 | local_cpu_data->itm_next = delta_itm + new_itm; | ||
150 | } | ||
151 | per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen; | ||
152 | per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked; | ||
153 | } | ||
154 | return delta_itm; | ||
155 | } | ||
156 | |||
157 | static int xen_do_steal_accounting(unsigned long *new_itm) | ||
158 | { | ||
159 | unsigned long delta_itm; | ||
160 | delta_itm = consider_steal_time(*new_itm); | ||
161 | *new_itm += delta_itm; | ||
162 | if (time_after(*new_itm, ia64_get_itc()) && delta_itm) | ||
163 | return 1; | ||
164 | |||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static void xen_itc_jitter_data_reset(void) | ||
169 | { | ||
170 | u64 lcycle, ret; | ||
171 | |||
172 | do { | ||
173 | lcycle = itc_jitter_data.itc_lastcycle; | ||
174 | ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0); | ||
175 | } while (unlikely(ret != lcycle)); | ||
176 | } | ||
177 | |||
178 | struct pv_time_ops xen_time_ops __initdata = { | ||
179 | .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, | ||
180 | .do_steal_accounting = xen_do_steal_accounting, | ||
181 | .clocksource_resume = xen_itc_jitter_data_reset, | ||
182 | }; | ||
183 | |||
184 | /* Called after suspend, to resume time. */ | ||
185 | static void xen_local_tick_resume(void) | ||
186 | { | ||
187 | /* Just trigger a tick. */ | ||
188 | ia64_cpu_local_tick(); | ||
189 | touch_softlockup_watchdog(); | ||
190 | } | ||
191 | |||
192 | void | ||
193 | xen_timer_resume(void) | ||
194 | { | ||
195 | unsigned int cpu; | ||
196 | |||
197 | xen_local_tick_resume(); | ||
198 | |||
199 | for_each_online_cpu(cpu) | ||
200 | xen_init_missing_ticks_accounting(cpu); | ||
201 | } | ||
202 | |||
203 | static void ia64_cpu_local_tick_fn(void *unused) | ||
204 | { | ||
205 | xen_local_tick_resume(); | ||
206 | xen_init_missing_ticks_accounting(smp_processor_id()); | ||
207 | } | ||
208 | |||
209 | void | ||
210 | xen_timer_resume_on_aps(void) | ||
211 | { | ||
212 | smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1); | ||
213 | } | ||
diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h new file mode 100644 index 000000000000..f98d7e1a42f0 --- /dev/null +++ b/arch/ia64/xen/time.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/time.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | extern struct pv_time_ops xen_time_ops __initdata; | ||
24 | void xen_timer_resume_on_aps(void); | ||
diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c new file mode 100644 index 000000000000..ccaf7431f7c8 --- /dev/null +++ b/arch/ia64/xen/xcom_hcall.c | |||
@@ -0,0 +1,441 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
15 | * | ||
16 | * Tristan Gingold <tristan.gingold@bull.net> | ||
17 | * | ||
18 | * Copyright (c) 2007 | ||
19 | * Isaku Yamahata <yamahata at valinux co jp> | ||
20 | * VA Linux Systems Japan K.K. | ||
21 | * consolidate mini and inline version. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <xen/interface/xen.h> | ||
26 | #include <xen/interface/memory.h> | ||
27 | #include <xen/interface/grant_table.h> | ||
28 | #include <xen/interface/callback.h> | ||
29 | #include <xen/interface/vcpu.h> | ||
30 | #include <asm/xen/hypervisor.h> | ||
31 | #include <asm/xen/xencomm.h> | ||
32 | |||
33 | /* Xencomm notes: | ||
34 | * This file defines hypercalls to be used by xencomm. The hypercalls simply | ||
35 | * create inlines or mini descriptors for pointers and then call the raw arch | ||
36 | * hypercall xencomm_arch_hypercall_XXX | ||
37 | * | ||
38 | * If the arch wants to directly use these hypercalls, simply define macros | ||
39 | * in asm/xen/hypercall.h, eg: | ||
40 | * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op | ||
41 | * | ||
42 | * The arch may also define HYPERVISOR_xxx as a function and do more operations | ||
43 | * before/after doing the hypercall. | ||
44 | * | ||
45 | * Note: because only inline or mini descriptors are created these functions | ||
46 | * must only be called with in kernel memory parameters. | ||
47 | */ | ||
48 | |||
49 | int | ||
50 | xencomm_hypercall_console_io(int cmd, int count, char *str) | ||
51 | { | ||
52 | /* xen early printk uses console io hypercall before | ||
53 | * xencomm initialization. In that case, we just ignore it. | ||
54 | */ | ||
55 | if (!xencomm_is_initialized()) | ||
56 | return 0; | ||
57 | |||
58 | return xencomm_arch_hypercall_console_io | ||
59 | (cmd, count, xencomm_map_no_alloc(str, count)); | ||
60 | } | ||
61 | EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io); | ||
62 | |||
63 | int | ||
64 | xencomm_hypercall_event_channel_op(int cmd, void *op) | ||
65 | { | ||
66 | struct xencomm_handle *desc; | ||
67 | desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op)); | ||
68 | if (desc == NULL) | ||
69 | return -EINVAL; | ||
70 | |||
71 | return xencomm_arch_hypercall_event_channel_op(cmd, desc); | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op); | ||
74 | |||
75 | int | ||
76 | xencomm_hypercall_xen_version(int cmd, void *arg) | ||
77 | { | ||
78 | struct xencomm_handle *desc; | ||
79 | unsigned int argsize; | ||
80 | |||
81 | switch (cmd) { | ||
82 | case XENVER_version: | ||
83 | /* do not actually pass an argument */ | ||
84 | return xencomm_arch_hypercall_xen_version(cmd, 0); | ||
85 | case XENVER_extraversion: | ||
86 | argsize = sizeof(struct xen_extraversion); | ||
87 | break; | ||
88 | case XENVER_compile_info: | ||
89 | argsize = sizeof(struct xen_compile_info); | ||
90 | break; | ||
91 | case XENVER_capabilities: | ||
92 | argsize = sizeof(struct xen_capabilities_info); | ||
93 | break; | ||
94 | case XENVER_changeset: | ||
95 | argsize = sizeof(struct xen_changeset_info); | ||
96 | break; | ||
97 | case XENVER_platform_parameters: | ||
98 | argsize = sizeof(struct xen_platform_parameters); | ||
99 | break; | ||
100 | case XENVER_get_features: | ||
101 | argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info); | ||
102 | break; | ||
103 | |||
104 | default: | ||
105 | printk(KERN_DEBUG | ||
106 | "%s: unknown version op %d\n", __func__, cmd); | ||
107 | return -ENOSYS; | ||
108 | } | ||
109 | |||
110 | desc = xencomm_map_no_alloc(arg, argsize); | ||
111 | if (desc == NULL) | ||
112 | return -EINVAL; | ||
113 | |||
114 | return xencomm_arch_hypercall_xen_version(cmd, desc); | ||
115 | } | ||
116 | EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version); | ||
117 | |||
118 | int | ||
119 | xencomm_hypercall_physdev_op(int cmd, void *op) | ||
120 | { | ||
121 | unsigned int argsize; | ||
122 | |||
123 | switch (cmd) { | ||
124 | case PHYSDEVOP_apic_read: | ||
125 | case PHYSDEVOP_apic_write: | ||
126 | argsize = sizeof(struct physdev_apic); | ||
127 | break; | ||
128 | case PHYSDEVOP_alloc_irq_vector: | ||
129 | case PHYSDEVOP_free_irq_vector: | ||
130 | argsize = sizeof(struct physdev_irq); | ||
131 | break; | ||
132 | case PHYSDEVOP_irq_status_query: | ||
133 | argsize = sizeof(struct physdev_irq_status_query); | ||
134 | break; | ||
135 | |||
136 | default: | ||
137 | printk(KERN_DEBUG | ||
138 | "%s: unknown physdev op %d\n", __func__, cmd); | ||
139 | return -ENOSYS; | ||
140 | } | ||
141 | |||
142 | return xencomm_arch_hypercall_physdev_op | ||
143 | (cmd, xencomm_map_no_alloc(op, argsize)); | ||
144 | } | ||
145 | |||
146 | static int | ||
147 | xencommize_grant_table_op(struct xencomm_mini **xc_area, | ||
148 | unsigned int cmd, void *op, unsigned int count, | ||
149 | struct xencomm_handle **desc) | ||
150 | { | ||
151 | struct xencomm_handle *desc1; | ||
152 | unsigned int argsize; | ||
153 | |||
154 | switch (cmd) { | ||
155 | case GNTTABOP_map_grant_ref: | ||
156 | argsize = sizeof(struct gnttab_map_grant_ref); | ||
157 | break; | ||
158 | case GNTTABOP_unmap_grant_ref: | ||
159 | argsize = sizeof(struct gnttab_unmap_grant_ref); | ||
160 | break; | ||
161 | case GNTTABOP_setup_table: | ||
162 | { | ||
163 | struct gnttab_setup_table *setup = op; | ||
164 | |||
165 | argsize = sizeof(*setup); | ||
166 | |||
167 | if (count != 1) | ||
168 | return -EINVAL; | ||
169 | desc1 = __xencomm_map_no_alloc | ||
170 | (xen_guest_handle(setup->frame_list), | ||
171 | setup->nr_frames * | ||
172 | sizeof(*xen_guest_handle(setup->frame_list)), | ||
173 | *xc_area); | ||
174 | if (desc1 == NULL) | ||
175 | return -EINVAL; | ||
176 | (*xc_area)++; | ||
177 | set_xen_guest_handle(setup->frame_list, (void *)desc1); | ||
178 | break; | ||
179 | } | ||
180 | case GNTTABOP_dump_table: | ||
181 | argsize = sizeof(struct gnttab_dump_table); | ||
182 | break; | ||
183 | case GNTTABOP_transfer: | ||
184 | argsize = sizeof(struct gnttab_transfer); | ||
185 | break; | ||
186 | case GNTTABOP_copy: | ||
187 | argsize = sizeof(struct gnttab_copy); | ||
188 | break; | ||
189 | case GNTTABOP_query_size: | ||
190 | argsize = sizeof(struct gnttab_query_size); | ||
191 | break; | ||
192 | default: | ||
193 | printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n", | ||
194 | __func__, cmd); | ||
195 | BUG(); | ||
196 | } | ||
197 | |||
198 | *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area); | ||
199 | if (*desc == NULL) | ||
200 | return -EINVAL; | ||
201 | (*xc_area)++; | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | int | ||
207 | xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, | ||
208 | unsigned int count) | ||
209 | { | ||
210 | int rc; | ||
211 | struct xencomm_handle *desc; | ||
212 | XENCOMM_MINI_ALIGNED(xc_area, 2); | ||
213 | |||
214 | rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc); | ||
215 | if (rc) | ||
216 | return rc; | ||
217 | |||
218 | return xencomm_arch_hypercall_grant_table_op(cmd, desc, count); | ||
219 | } | ||
220 | EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op); | ||
221 | |||
222 | int | ||
223 | xencomm_hypercall_sched_op(int cmd, void *arg) | ||
224 | { | ||
225 | struct xencomm_handle *desc; | ||
226 | unsigned int argsize; | ||
227 | |||
228 | switch (cmd) { | ||
229 | case SCHEDOP_yield: | ||
230 | case SCHEDOP_block: | ||
231 | argsize = 0; | ||
232 | break; | ||
233 | case SCHEDOP_shutdown: | ||
234 | argsize = sizeof(struct sched_shutdown); | ||
235 | break; | ||
236 | case SCHEDOP_poll: | ||
237 | { | ||
238 | struct sched_poll *poll = arg; | ||
239 | struct xencomm_handle *ports; | ||
240 | |||
241 | argsize = sizeof(struct sched_poll); | ||
242 | ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports), | ||
243 | sizeof(*xen_guest_handle(poll->ports))); | ||
244 | |||
245 | set_xen_guest_handle(poll->ports, (void *)ports); | ||
246 | break; | ||
247 | } | ||
248 | default: | ||
249 | printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd); | ||
250 | return -ENOSYS; | ||
251 | } | ||
252 | |||
253 | desc = xencomm_map_no_alloc(arg, argsize); | ||
254 | if (desc == NULL) | ||
255 | return -EINVAL; | ||
256 | |||
257 | return xencomm_arch_hypercall_sched_op(cmd, desc); | ||
258 | } | ||
259 | EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op); | ||
260 | |||
261 | int | ||
262 | xencomm_hypercall_multicall(void *call_list, int nr_calls) | ||
263 | { | ||
264 | int rc; | ||
265 | int i; | ||
266 | struct multicall_entry *mce; | ||
267 | struct xencomm_handle *desc; | ||
268 | XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2); | ||
269 | |||
270 | for (i = 0; i < nr_calls; i++) { | ||
271 | mce = (struct multicall_entry *)call_list + i; | ||
272 | |||
273 | switch (mce->op) { | ||
274 | case __HYPERVISOR_update_va_mapping: | ||
275 | case __HYPERVISOR_mmu_update: | ||
276 | /* No-op on ia64. */ | ||
277 | break; | ||
278 | case __HYPERVISOR_grant_table_op: | ||
279 | rc = xencommize_grant_table_op | ||
280 | (&xc_area, | ||
281 | mce->args[0], (void *)mce->args[1], | ||
282 | mce->args[2], &desc); | ||
283 | if (rc) | ||
284 | return rc; | ||
285 | mce->args[1] = (unsigned long)desc; | ||
286 | break; | ||
287 | case __HYPERVISOR_memory_op: | ||
288 | default: | ||
289 | printk(KERN_DEBUG | ||
290 | "%s: unhandled multicall op entry op %lu\n", | ||
291 | __func__, mce->op); | ||
292 | return -ENOSYS; | ||
293 | } | ||
294 | } | ||
295 | |||
296 | desc = xencomm_map_no_alloc(call_list, | ||
297 | nr_calls * sizeof(struct multicall_entry)); | ||
298 | if (desc == NULL) | ||
299 | return -EINVAL; | ||
300 | |||
301 | return xencomm_arch_hypercall_multicall(desc, nr_calls); | ||
302 | } | ||
303 | EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall); | ||
304 | |||
305 | int | ||
306 | xencomm_hypercall_callback_op(int cmd, void *arg) | ||
307 | { | ||
308 | unsigned int argsize; | ||
309 | switch (cmd) { | ||
310 | case CALLBACKOP_register: | ||
311 | argsize = sizeof(struct callback_register); | ||
312 | break; | ||
313 | case CALLBACKOP_unregister: | ||
314 | argsize = sizeof(struct callback_unregister); | ||
315 | break; | ||
316 | default: | ||
317 | printk(KERN_DEBUG | ||
318 | "%s: unknown callback op %d\n", __func__, cmd); | ||
319 | return -ENOSYS; | ||
320 | } | ||
321 | |||
322 | return xencomm_arch_hypercall_callback_op | ||
323 | (cmd, xencomm_map_no_alloc(arg, argsize)); | ||
324 | } | ||
325 | |||
326 | static int | ||
327 | xencommize_memory_reservation(struct xencomm_mini *xc_area, | ||
328 | struct xen_memory_reservation *mop) | ||
329 | { | ||
330 | struct xencomm_handle *desc; | ||
331 | |||
332 | desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start), | ||
333 | mop->nr_extents * | ||
334 | sizeof(*xen_guest_handle(mop->extent_start)), | ||
335 | xc_area); | ||
336 | if (desc == NULL) | ||
337 | return -EINVAL; | ||
338 | |||
339 | set_xen_guest_handle(mop->extent_start, (void *)desc); | ||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | int | ||
344 | xencomm_hypercall_memory_op(unsigned int cmd, void *arg) | ||
345 | { | ||
346 | GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} }; | ||
347 | struct xen_memory_reservation *xmr = NULL; | ||
348 | int rc; | ||
349 | struct xencomm_handle *desc; | ||
350 | unsigned int argsize; | ||
351 | XENCOMM_MINI_ALIGNED(xc_area, 2); | ||
352 | |||
353 | switch (cmd) { | ||
354 | case XENMEM_increase_reservation: | ||
355 | case XENMEM_decrease_reservation: | ||
356 | case XENMEM_populate_physmap: | ||
357 | xmr = (struct xen_memory_reservation *)arg; | ||
358 | set_xen_guest_handle(extent_start_va[0], | ||
359 | xen_guest_handle(xmr->extent_start)); | ||
360 | |||
361 | argsize = sizeof(*xmr); | ||
362 | rc = xencommize_memory_reservation(xc_area, xmr); | ||
363 | if (rc) | ||
364 | return rc; | ||
365 | xc_area++; | ||
366 | break; | ||
367 | |||
368 | case XENMEM_maximum_ram_page: | ||
369 | argsize = 0; | ||
370 | break; | ||
371 | |||
372 | case XENMEM_add_to_physmap: | ||
373 | argsize = sizeof(struct xen_add_to_physmap); | ||
374 | break; | ||
375 | |||
376 | default: | ||
377 | printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd); | ||
378 | return -ENOSYS; | ||
379 | } | ||
380 | |||
381 | desc = xencomm_map_no_alloc(arg, argsize); | ||
382 | if (desc == NULL) | ||
383 | return -EINVAL; | ||
384 | |||
385 | rc = xencomm_arch_hypercall_memory_op(cmd, desc); | ||
386 | |||
387 | switch (cmd) { | ||
388 | case XENMEM_increase_reservation: | ||
389 | case XENMEM_decrease_reservation: | ||
390 | case XENMEM_populate_physmap: | ||
391 | set_xen_guest_handle(xmr->extent_start, | ||
392 | xen_guest_handle(extent_start_va[0])); | ||
393 | break; | ||
394 | } | ||
395 | |||
396 | return rc; | ||
397 | } | ||
398 | EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op); | ||
399 | |||
400 | int | ||
401 | xencomm_hypercall_suspend(unsigned long srec) | ||
402 | { | ||
403 | struct sched_shutdown arg; | ||
404 | |||
405 | arg.reason = SHUTDOWN_suspend; | ||
406 | |||
407 | return xencomm_arch_hypercall_sched_op( | ||
408 | SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg))); | ||
409 | } | ||
410 | |||
411 | long | ||
412 | xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg) | ||
413 | { | ||
414 | unsigned int argsize; | ||
415 | switch (cmd) { | ||
416 | case VCPUOP_register_runstate_memory_area: { | ||
417 | struct vcpu_register_runstate_memory_area *area = | ||
418 | (struct vcpu_register_runstate_memory_area *)arg; | ||
419 | argsize = sizeof(*arg); | ||
420 | set_xen_guest_handle(area->addr.h, | ||
421 | (void *)xencomm_map_no_alloc(area->addr.v, | ||
422 | sizeof(area->addr.v))); | ||
423 | break; | ||
424 | } | ||
425 | |||
426 | default: | ||
427 | printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd); | ||
428 | return -ENOSYS; | ||
429 | } | ||
430 | |||
431 | return xencomm_arch_hypercall_vcpu_op(cmd, cpu, | ||
432 | xencomm_map_no_alloc(arg, argsize)); | ||
433 | } | ||
434 | |||
435 | long | ||
436 | xencomm_hypercall_opt_feature(void *arg) | ||
437 | { | ||
438 | return xencomm_arch_hypercall_opt_feature( | ||
439 | xencomm_map_no_alloc(arg, | ||
440 | sizeof(struct xen_ia64_opt_feature))); | ||
441 | } | ||
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c new file mode 100644 index 000000000000..04cd12350455 --- /dev/null +++ b/arch/ia64/xen/xen_pv_ops.c | |||
@@ -0,0 +1,364 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/xen_pv_ops.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/console.h> | ||
24 | #include <linux/irq.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/pm.h> | ||
27 | |||
28 | #include <asm/xen/hypervisor.h> | ||
29 | #include <asm/xen/xencomm.h> | ||
30 | #include <asm/xen/privop.h> | ||
31 | |||
32 | #include "irq_xen.h" | ||
33 | #include "time.h" | ||
34 | |||
35 | /*************************************************************************** | ||
36 | * general info | ||
37 | */ | ||
38 | static struct pv_info xen_info __initdata = { | ||
39 | .kernel_rpl = 2, /* or 1: determin at runtime */ | ||
40 | .paravirt_enabled = 1, | ||
41 | .name = "Xen/ia64", | ||
42 | }; | ||
43 | |||
44 | #define IA64_RSC_PL_SHIFT 2 | ||
45 | #define IA64_RSC_PL_BIT_SIZE 2 | ||
46 | #define IA64_RSC_PL_MASK \ | ||
47 | (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT) | ||
48 | |||
49 | static void __init | ||
50 | xen_info_init(void) | ||
51 | { | ||
52 | /* Xenified Linux/ia64 may run on pl = 1 or 2. | ||
53 | * determin at run time. */ | ||
54 | unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC); | ||
55 | unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT; | ||
56 | xen_info.kernel_rpl = rpl; | ||
57 | } | ||
58 | |||
59 | /*************************************************************************** | ||
60 | * pv_init_ops | ||
61 | * initialization hooks. | ||
62 | */ | ||
63 | |||
64 | static void | ||
65 | xen_panic_hypercall(struct unw_frame_info *info, void *arg) | ||
66 | { | ||
67 | current->thread.ksp = (__u64)info->sw - 16; | ||
68 | HYPERVISOR_shutdown(SHUTDOWN_crash); | ||
69 | /* we're never actually going to get here... */ | ||
70 | } | ||
71 | |||
72 | static int | ||
73 | xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) | ||
74 | { | ||
75 | unw_init_running(xen_panic_hypercall, NULL); | ||
76 | /* we're never actually going to get here... */ | ||
77 | return NOTIFY_DONE; | ||
78 | } | ||
79 | |||
80 | static struct notifier_block xen_panic_block = { | ||
81 | xen_panic_event, NULL, 0 /* try to go last */ | ||
82 | }; | ||
83 | |||
84 | static void xen_pm_power_off(void) | ||
85 | { | ||
86 | local_irq_disable(); | ||
87 | HYPERVISOR_shutdown(SHUTDOWN_poweroff); | ||
88 | } | ||
89 | |||
90 | static void __init | ||
91 | xen_banner(void) | ||
92 | { | ||
93 | printk(KERN_INFO | ||
94 | "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld " | ||
95 | "flags=0x%x\n", | ||
96 | xen_info.kernel_rpl, | ||
97 | HYPERVISOR_shared_info->arch.start_info_pfn, | ||
98 | xen_start_info->nr_pages, xen_start_info->flags); | ||
99 | } | ||
100 | |||
101 | static int __init | ||
102 | xen_reserve_memory(struct rsvd_region *region) | ||
103 | { | ||
104 | region->start = (unsigned long)__va( | ||
105 | (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT)); | ||
106 | region->end = region->start + PAGE_SIZE; | ||
107 | return 1; | ||
108 | } | ||
109 | |||
110 | static void __init | ||
111 | xen_arch_setup_early(void) | ||
112 | { | ||
113 | struct shared_info *s; | ||
114 | BUG_ON(!xen_pv_domain()); | ||
115 | |||
116 | s = HYPERVISOR_shared_info; | ||
117 | xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT); | ||
118 | |||
119 | /* Must be done before any hypercall. */ | ||
120 | xencomm_initialize(); | ||
121 | |||
122 | xen_setup_features(); | ||
123 | /* Register a call for panic conditions. */ | ||
124 | atomic_notifier_chain_register(&panic_notifier_list, | ||
125 | &xen_panic_block); | ||
126 | pm_power_off = xen_pm_power_off; | ||
127 | |||
128 | xen_ia64_enable_opt_feature(); | ||
129 | } | ||
130 | |||
131 | static void __init | ||
132 | xen_arch_setup_console(char **cmdline_p) | ||
133 | { | ||
134 | add_preferred_console("xenboot", 0, NULL); | ||
135 | add_preferred_console("tty", 0, NULL); | ||
136 | /* use hvc_xen */ | ||
137 | add_preferred_console("hvc", 0, NULL); | ||
138 | |||
139 | #if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE) | ||
140 | conswitchp = NULL; | ||
141 | #endif | ||
142 | } | ||
143 | |||
144 | static int __init | ||
145 | xen_arch_setup_nomca(void) | ||
146 | { | ||
147 | return 1; | ||
148 | } | ||
149 | |||
150 | static void __init | ||
151 | xen_post_smp_prepare_boot_cpu(void) | ||
152 | { | ||
153 | xen_setup_vcpu_info_placement(); | ||
154 | } | ||
155 | |||
156 | static const struct pv_init_ops xen_init_ops __initdata = { | ||
157 | .banner = xen_banner, | ||
158 | |||
159 | .reserve_memory = xen_reserve_memory, | ||
160 | |||
161 | .arch_setup_early = xen_arch_setup_early, | ||
162 | .arch_setup_console = xen_arch_setup_console, | ||
163 | .arch_setup_nomca = xen_arch_setup_nomca, | ||
164 | |||
165 | .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, | ||
166 | }; | ||
167 | |||
168 | /*************************************************************************** | ||
169 | * pv_cpu_ops | ||
170 | * intrinsics hooks. | ||
171 | */ | ||
172 | |||
173 | static void xen_setreg(int regnum, unsigned long val) | ||
174 | { | ||
175 | switch (regnum) { | ||
176 | case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: | ||
177 | xen_set_kr(regnum - _IA64_REG_AR_KR0, val); | ||
178 | break; | ||
179 | #ifdef CONFIG_IA32_SUPPORT | ||
180 | case _IA64_REG_AR_EFLAG: | ||
181 | xen_set_eflag(val); | ||
182 | break; | ||
183 | #endif | ||
184 | case _IA64_REG_CR_TPR: | ||
185 | xen_set_tpr(val); | ||
186 | break; | ||
187 | case _IA64_REG_CR_ITM: | ||
188 | xen_set_itm(val); | ||
189 | break; | ||
190 | case _IA64_REG_CR_EOI: | ||
191 | xen_eoi(val); | ||
192 | break; | ||
193 | default: | ||
194 | ia64_native_setreg_func(regnum, val); | ||
195 | break; | ||
196 | } | ||
197 | } | ||
198 | |||
199 | static unsigned long xen_getreg(int regnum) | ||
200 | { | ||
201 | unsigned long res; | ||
202 | |||
203 | switch (regnum) { | ||
204 | case _IA64_REG_PSR: | ||
205 | res = xen_get_psr(); | ||
206 | break; | ||
207 | #ifdef CONFIG_IA32_SUPPORT | ||
208 | case _IA64_REG_AR_EFLAG: | ||
209 | res = xen_get_eflag(); | ||
210 | break; | ||
211 | #endif | ||
212 | case _IA64_REG_CR_IVR: | ||
213 | res = xen_get_ivr(); | ||
214 | break; | ||
215 | case _IA64_REG_CR_TPR: | ||
216 | res = xen_get_tpr(); | ||
217 | break; | ||
218 | default: | ||
219 | res = ia64_native_getreg_func(regnum); | ||
220 | break; | ||
221 | } | ||
222 | return res; | ||
223 | } | ||
224 | |||
225 | /* turning on interrupts is a bit more complicated.. write to the | ||
226 | * memory-mapped virtual psr.i bit first (to avoid race condition), | ||
227 | * then if any interrupts were pending, we have to execute a hyperprivop | ||
228 | * to ensure the pending interrupt gets delivered; else we're done! */ | ||
229 | static void | ||
230 | xen_ssm_i(void) | ||
231 | { | ||
232 | int old = xen_get_virtual_psr_i(); | ||
233 | xen_set_virtual_psr_i(1); | ||
234 | barrier(); | ||
235 | if (!old && xen_get_virtual_pend()) | ||
236 | xen_hyper_ssm_i(); | ||
237 | } | ||
238 | |||
239 | /* turning off interrupts can be paravirtualized simply by writing | ||
240 | * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */ | ||
241 | static void | ||
242 | xen_rsm_i(void) | ||
243 | { | ||
244 | xen_set_virtual_psr_i(0); | ||
245 | barrier(); | ||
246 | } | ||
247 | |||
248 | static unsigned long | ||
249 | xen_get_psr_i(void) | ||
250 | { | ||
251 | return xen_get_virtual_psr_i() ? IA64_PSR_I : 0; | ||
252 | } | ||
253 | |||
254 | static void | ||
255 | xen_intrin_local_irq_restore(unsigned long mask) | ||
256 | { | ||
257 | if (mask & IA64_PSR_I) | ||
258 | xen_ssm_i(); | ||
259 | else | ||
260 | xen_rsm_i(); | ||
261 | } | ||
262 | |||
263 | static const struct pv_cpu_ops xen_cpu_ops __initdata = { | ||
264 | .fc = xen_fc, | ||
265 | .thash = xen_thash, | ||
266 | .get_cpuid = xen_get_cpuid, | ||
267 | .get_pmd = xen_get_pmd, | ||
268 | .getreg = xen_getreg, | ||
269 | .setreg = xen_setreg, | ||
270 | .ptcga = xen_ptcga, | ||
271 | .get_rr = xen_get_rr, | ||
272 | .set_rr = xen_set_rr, | ||
273 | .set_rr0_to_rr4 = xen_set_rr0_to_rr4, | ||
274 | .ssm_i = xen_ssm_i, | ||
275 | .rsm_i = xen_rsm_i, | ||
276 | .get_psr_i = xen_get_psr_i, | ||
277 | .intrin_local_irq_restore | ||
278 | = xen_intrin_local_irq_restore, | ||
279 | }; | ||
280 | |||
281 | /****************************************************************************** | ||
282 | * replacement of hand written assembly codes. | ||
283 | */ | ||
284 | |||
285 | extern char xen_switch_to; | ||
286 | extern char xen_leave_syscall; | ||
287 | extern char xen_work_processed_syscall; | ||
288 | extern char xen_leave_kernel; | ||
289 | |||
290 | const struct pv_cpu_asm_switch xen_cpu_asm_switch = { | ||
291 | .switch_to = (unsigned long)&xen_switch_to, | ||
292 | .leave_syscall = (unsigned long)&xen_leave_syscall, | ||
293 | .work_processed_syscall = (unsigned long)&xen_work_processed_syscall, | ||
294 | .leave_kernel = (unsigned long)&xen_leave_kernel, | ||
295 | }; | ||
296 | |||
297 | /*************************************************************************** | ||
298 | * pv_iosapic_ops | ||
299 | * iosapic read/write hooks. | ||
300 | */ | ||
301 | static void | ||
302 | xen_pcat_compat_init(void) | ||
303 | { | ||
304 | /* nothing */ | ||
305 | } | ||
306 | |||
307 | static struct irq_chip* | ||
308 | xen_iosapic_get_irq_chip(unsigned long trigger) | ||
309 | { | ||
310 | return NULL; | ||
311 | } | ||
312 | |||
313 | static unsigned int | ||
314 | xen_iosapic_read(char __iomem *iosapic, unsigned int reg) | ||
315 | { | ||
316 | struct physdev_apic apic_op; | ||
317 | int ret; | ||
318 | |||
319 | apic_op.apic_physbase = (unsigned long)iosapic - | ||
320 | __IA64_UNCACHED_OFFSET; | ||
321 | apic_op.reg = reg; | ||
322 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); | ||
323 | if (ret) | ||
324 | return ret; | ||
325 | return apic_op.value; | ||
326 | } | ||
327 | |||
328 | static void | ||
329 | xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) | ||
330 | { | ||
331 | struct physdev_apic apic_op; | ||
332 | |||
333 | apic_op.apic_physbase = (unsigned long)iosapic - | ||
334 | __IA64_UNCACHED_OFFSET; | ||
335 | apic_op.reg = reg; | ||
336 | apic_op.value = val; | ||
337 | HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); | ||
338 | } | ||
339 | |||
340 | static const struct pv_iosapic_ops xen_iosapic_ops __initdata = { | ||
341 | .pcat_compat_init = xen_pcat_compat_init, | ||
342 | .__get_irq_chip = xen_iosapic_get_irq_chip, | ||
343 | |||
344 | .__read = xen_iosapic_read, | ||
345 | .__write = xen_iosapic_write, | ||
346 | }; | ||
347 | |||
348 | /*************************************************************************** | ||
349 | * pv_ops initialization | ||
350 | */ | ||
351 | |||
352 | void __init | ||
353 | xen_setup_pv_ops(void) | ||
354 | { | ||
355 | xen_info_init(); | ||
356 | pv_info = xen_info; | ||
357 | pv_init_ops = xen_init_ops; | ||
358 | pv_cpu_ops = xen_cpu_ops; | ||
359 | pv_iosapic_ops = xen_iosapic_ops; | ||
360 | pv_irq_ops = xen_irq_ops; | ||
361 | pv_time_ops = xen_time_ops; | ||
362 | |||
363 | paravirt_cpu_asm_init(&xen_cpu_asm_switch); | ||
364 | } | ||
diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c new file mode 100644 index 000000000000..1f5d7ac82e97 --- /dev/null +++ b/arch/ia64/xen/xencomm.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/mm.h> | ||
20 | |||
21 | static unsigned long kernel_virtual_offset; | ||
22 | static int is_xencomm_initialized; | ||
23 | |||
24 | /* for xen early printk. It uses console io hypercall which uses xencomm. | ||
25 | * However early printk may use it before xencomm initialization. | ||
26 | */ | ||
27 | int | ||
28 | xencomm_is_initialized(void) | ||
29 | { | ||
30 | return is_xencomm_initialized; | ||
31 | } | ||
32 | |||
33 | void | ||
34 | xencomm_initialize(void) | ||
35 | { | ||
36 | kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START); | ||
37 | is_xencomm_initialized = 1; | ||
38 | } | ||
39 | |||
40 | /* Translate virtual address to physical address. */ | ||
41 | unsigned long | ||
42 | xencomm_vtop(unsigned long vaddr) | ||
43 | { | ||
44 | struct page *page; | ||
45 | struct vm_area_struct *vma; | ||
46 | |||
47 | if (vaddr == 0) | ||
48 | return 0UL; | ||
49 | |||
50 | if (REGION_NUMBER(vaddr) == 5) { | ||
51 | pgd_t *pgd; | ||
52 | pud_t *pud; | ||
53 | pmd_t *pmd; | ||
54 | pte_t *ptep; | ||
55 | |||
56 | /* On ia64, TASK_SIZE refers to current. It is not initialized | ||
57 | during boot. | ||
58 | Furthermore the kernel is relocatable and __pa() doesn't | ||
59 | work on addresses. */ | ||
60 | if (vaddr >= KERNEL_START | ||
61 | && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) | ||
62 | return vaddr - kernel_virtual_offset; | ||
63 | |||
64 | /* In kernel area -- virtually mapped. */ | ||
65 | pgd = pgd_offset_k(vaddr); | ||
66 | if (pgd_none(*pgd) || pgd_bad(*pgd)) | ||
67 | return ~0UL; | ||
68 | |||
69 | pud = pud_offset(pgd, vaddr); | ||
70 | if (pud_none(*pud) || pud_bad(*pud)) | ||
71 | return ~0UL; | ||
72 | |||
73 | pmd = pmd_offset(pud, vaddr); | ||
74 | if (pmd_none(*pmd) || pmd_bad(*pmd)) | ||
75 | return ~0UL; | ||
76 | |||
77 | ptep = pte_offset_kernel(pmd, vaddr); | ||
78 | if (!ptep) | ||
79 | return ~0UL; | ||
80 | |||
81 | return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK); | ||
82 | } | ||
83 | |||
84 | if (vaddr > TASK_SIZE) { | ||
85 | /* percpu variables */ | ||
86 | if (REGION_NUMBER(vaddr) == 7 && | ||
87 | REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS)) | ||
88 | ia64_tpa(vaddr); | ||
89 | |||
90 | /* kernel address */ | ||
91 | return __pa(vaddr); | ||
92 | } | ||
93 | |||
94 | /* XXX double-check (lack of) locking */ | ||
95 | vma = find_extend_vma(current->mm, vaddr); | ||
96 | if (!vma) | ||
97 | return ~0UL; | ||
98 | |||
99 | /* We assume the page is modified. */ | ||
100 | page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH); | ||
101 | if (!page) | ||
102 | return ~0UL; | ||
103 | |||
104 | return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); | ||
105 | } | ||
diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S new file mode 100644 index 000000000000..3e71d50584d9 --- /dev/null +++ b/arch/ia64/xen/xenivt.S | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * arch/ia64/xen/ivt.S | ||
3 | * | ||
4 | * Copyright (C) 2005 Hewlett-Packard Co | ||
5 | * Dan Magenheimer <dan.magenheimer@hp.com> | ||
6 | * | ||
7 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
8 | * VA Linux Systems Japan K.K. | ||
9 | * pv_ops. | ||
10 | */ | ||
11 | |||
12 | #include <asm/asmmacro.h> | ||
13 | #include <asm/kregs.h> | ||
14 | #include <asm/pgtable.h> | ||
15 | |||
16 | #include "../kernel/minstate.h" | ||
17 | |||
18 | .section .text,"ax" | ||
19 | GLOBAL_ENTRY(xen_event_callback) | ||
20 | mov r31=pr // prepare to save predicates | ||
21 | ;; | ||
22 | SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 | ||
23 | ;; | ||
24 | movl r3=XSI_PSR_IC | ||
25 | mov r14=1 | ||
26 | ;; | ||
27 | st4 [r3]=r14 | ||
28 | ;; | ||
29 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
30 | srlz.i // ensure everybody knows psr.ic is back on | ||
31 | ;; | ||
32 | SAVE_REST | ||
33 | ;; | ||
34 | 1: | ||
35 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | ||
36 | add out0=16,sp // pass pointer to pt_regs as first arg | ||
37 | ;; | ||
38 | br.call.sptk.many b0=xen_evtchn_do_upcall | ||
39 | ;; | ||
40 | movl r20=XSI_PSR_I_ADDR | ||
41 | ;; | ||
42 | ld8 r20=[r20] | ||
43 | ;; | ||
44 | adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending | ||
45 | ;; | ||
46 | ld1 r20=[r20] | ||
47 | ;; | ||
48 | cmp.ne p6,p0=r20,r0 // if there are pending events, | ||
49 | (p6) br.spnt.few 1b // call evtchn_do_upcall again. | ||
50 | br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is | ||
51 | // paravirtualized as xen_leave_kernel | ||
52 | END(xen_event_callback) | ||
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S new file mode 100644 index 000000000000..28fed1fcc079 --- /dev/null +++ b/arch/ia64/xen/xensetup.S | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * Support routines for Xen | ||
3 | * | ||
4 | * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> | ||
5 | */ | ||
6 | |||
7 | #include <asm/processor.h> | ||
8 | #include <asm/asmmacro.h> | ||
9 | #include <asm/pgtable.h> | ||
10 | #include <asm/system.h> | ||
11 | #include <asm/paravirt.h> | ||
12 | #include <asm/xen/privop.h> | ||
13 | #include <linux/elfnote.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <xen/interface/elfnote.h> | ||
16 | |||
17 | .section .data.read_mostly | ||
18 | .align 8 | ||
19 | .global xen_domain_type | ||
20 | xen_domain_type: | ||
21 | data4 XEN_NATIVE_ASM | ||
22 | .previous | ||
23 | |||
24 | __INIT | ||
25 | ENTRY(startup_xen) | ||
26 | // Calculate load offset. | ||
27 | // The constant, LOAD_OFFSET, can't be used because the boot | ||
28 | // loader doesn't always load to the LMA specified by the vmlinux.lds. | ||
29 | mov r9=ip // must be the first instruction to make sure | ||
30 | // that r9 = the physical address of startup_xen. | ||
31 | // Usually r9 = startup_xen - LOAD_OFFSET | ||
32 | movl r8=startup_xen | ||
33 | ;; | ||
34 | sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET. | ||
35 | |||
36 | mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN | ||
37 | movl r11=_start | ||
38 | ;; | ||
39 | add r11=r11,r9 | ||
40 | movl r8=hypervisor_type | ||
41 | ;; | ||
42 | add r8=r8,r9 | ||
43 | mov b0=r11 | ||
44 | ;; | ||
45 | st8 [r8]=r10 | ||
46 | br.cond.sptk.many b0 | ||
47 | ;; | ||
48 | END(startup_xen) | ||
49 | |||
50 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") | ||
51 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") | ||
52 | ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") | ||
53 | ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET) | ||
54 | |||
55 | #define isBP p3 // are we the Bootstrap Processor? | ||
56 | |||
57 | .text | ||
58 | |||
59 | GLOBAL_ENTRY(xen_setup_hook) | ||
60 | mov r8=XEN_PV_DOMAIN_ASM | ||
61 | (isBP) movl r9=xen_domain_type;; | ||
62 | (isBP) st4 [r9]=r8 | ||
63 | movl r10=xen_ivt;; | ||
64 | |||
65 | mov cr.iva=r10 | ||
66 | |||
67 | /* Set xsi base. */ | ||
68 | #define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600 | ||
69 | (isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA | ||
70 | (isBP) movl r28=XSI_BASE;; | ||
71 | (isBP) break 0x1000;; | ||
72 | |||
73 | /* setup pv_ops */ | ||
74 | (isBP) mov r4=rp | ||
75 | ;; | ||
76 | (isBP) br.call.sptk.many rp=xen_setup_pv_ops | ||
77 | ;; | ||
78 | (isBP) mov rp=r4 | ||
79 | ;; | ||
80 | |||
81 | br.ret.sptk.many rp | ||
82 | ;; | ||
83 | END(xen_setup_hook) | ||
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9391199d9e77..5b1527883fcb 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -19,9 +19,6 @@ config WORD_SIZE | |||
19 | default 64 if PPC64 | 19 | default 64 if PPC64 |
20 | default 32 if !PPC64 | 20 | default 32 if !PPC64 |
21 | 21 | ||
22 | config PPC_MERGE | ||
23 | def_bool y | ||
24 | |||
25 | config ARCH_PHYS_ADDR_T_64BIT | 22 | config ARCH_PHYS_ADDR_T_64BIT |
26 | def_bool PPC64 || PHYS_64BIT | 23 | def_bool PPC64 || PHYS_64BIT |
27 | 24 | ||
@@ -326,13 +323,11 @@ config KEXEC | |||
326 | 323 | ||
327 | config CRASH_DUMP | 324 | config CRASH_DUMP |
328 | bool "Build a kdump crash kernel" | 325 | bool "Build a kdump crash kernel" |
329 | depends on PPC_MULTIPLATFORM && PPC64 | 326 | depends on PPC_MULTIPLATFORM && PPC64 && RELOCATABLE |
330 | help | 327 | help |
331 | Build a kernel suitable for use as a kdump capture kernel. | 328 | Build a kernel suitable for use as a kdump capture kernel. |
332 | The kernel will be linked at a different address than normal, and | 329 | The same kernel binary can be used as production kernel and dump |
333 | so can only be used for Kdump. | 330 | capture kernel. |
334 | |||
335 | Don't change this unless you know what you are doing. | ||
336 | 331 | ||
337 | config PHYP_DUMP | 332 | config PHYP_DUMP |
338 | bool "Hypervisor-assisted dump (EXPERIMENTAL)" | 333 | bool "Hypervisor-assisted dump (EXPERIMENTAL)" |
@@ -832,11 +827,9 @@ config PAGE_OFFSET | |||
832 | default "0xc000000000000000" | 827 | default "0xc000000000000000" |
833 | config KERNEL_START | 828 | config KERNEL_START |
834 | hex | 829 | hex |
835 | default "0xc000000002000000" if CRASH_DUMP | ||
836 | default "0xc000000000000000" | 830 | default "0xc000000000000000" |
837 | config PHYSICAL_START | 831 | config PHYSICAL_START |
838 | hex | 832 | hex |
839 | default "0x02000000" if CRASH_DUMP | ||
840 | default "0x00000000" | 833 | default "0x00000000" |
841 | endif | 834 | endif |
842 | 835 | ||
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index aac1406ccba5..8fc6d72849ae 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile | |||
@@ -68,7 +68,8 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c | |||
68 | fixed-head.S ep88xc.c ep405.c cuboot-c2k.c \ | 68 | fixed-head.S ep88xc.c ep405.c cuboot-c2k.c \ |
69 | cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \ | 69 | cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \ |
70 | cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \ | 70 | cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \ |
71 | virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c | 71 | virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \ |
72 | cuboot-acadia.c | ||
72 | src-boot := $(src-wlib) $(src-plat) empty.c | 73 | src-boot := $(src-wlib) $(src-plat) empty.c |
73 | 74 | ||
74 | src-boot := $(addprefix $(obj)/, $(src-boot)) | 75 | src-boot := $(addprefix $(obj)/, $(src-boot)) |
@@ -211,6 +212,7 @@ image-$(CONFIG_DEFAULT_UIMAGE) += uImage | |||
211 | # Board ports in arch/powerpc/platform/40x/Kconfig | 212 | # Board ports in arch/powerpc/platform/40x/Kconfig |
212 | image-$(CONFIG_EP405) += dtbImage.ep405 | 213 | image-$(CONFIG_EP405) += dtbImage.ep405 |
213 | image-$(CONFIG_WALNUT) += treeImage.walnut | 214 | image-$(CONFIG_WALNUT) += treeImage.walnut |
215 | image-$(CONFIG_ACADIA) += cuImage.acadia | ||
214 | 216 | ||
215 | # Board ports in arch/powerpc/platform/44x/Kconfig | 217 | # Board ports in arch/powerpc/platform/44x/Kconfig |
216 | image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony | 218 | image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony |
@@ -319,6 +321,9 @@ $(obj)/zImage.iseries: vmlinux | |||
319 | $(obj)/uImage: vmlinux $(wrapperbits) | 321 | $(obj)/uImage: vmlinux $(wrapperbits) |
320 | $(call if_changed,wrap,uboot) | 322 | $(call if_changed,wrap,uboot) |
321 | 323 | ||
324 | $(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) | ||
325 | $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) | ||
326 | |||
322 | $(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) | 327 | $(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) |
323 | $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb) | 328 | $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb) |
324 | 329 | ||
diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c index dcc9ab2ca823..3091d1d21aef 100644 --- a/arch/powerpc/boot/addnote.c +++ b/arch/powerpc/boot/addnote.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * as published by the Free Software Foundation; either version | 11 | * as published by the Free Software Foundation; either version |
12 | * 2 of the License, or (at your option) any later version. | 12 | * 2 of the License, or (at your option) any later version. |
13 | * | 13 | * |
14 | * Usage: addnote zImage [note.elf] | 14 | * Usage: addnote [-r realbase] zImage [note.elf] |
15 | * | 15 | * |
16 | * If note.elf is supplied, it is the name of an ELF file that contains | 16 | * If note.elf is supplied, it is the name of an ELF file that contains |
17 | * an RPA note to use instead of the built-in one. Alternatively, the | 17 | * an RPA note to use instead of the built-in one. Alternatively, the |
@@ -153,18 +153,31 @@ unsigned char *read_rpanote(const char *fname, int *nnp) | |||
153 | int | 153 | int |
154 | main(int ac, char **av) | 154 | main(int ac, char **av) |
155 | { | 155 | { |
156 | int fd, n, i; | 156 | int fd, n, i, ai; |
157 | int ph, ps, np; | 157 | int ph, ps, np; |
158 | int nnote, nnote2, ns; | 158 | int nnote, nnote2, ns; |
159 | unsigned char *rpap; | 159 | unsigned char *rpap; |
160 | 160 | char *p, *endp; | |
161 | if (ac != 2 && ac != 3) { | 161 | |
162 | fprintf(stderr, "Usage: %s elf-file [rpanote.elf]\n", av[0]); | 162 | ai = 1; |
163 | if (ac >= ai + 2 && strcmp(av[ai], "-r") == 0) { | ||
164 | /* process -r realbase */ | ||
165 | p = av[ai + 1]; | ||
166 | descr[1] = strtol(p, &endp, 16); | ||
167 | if (endp == p || *endp != 0) { | ||
168 | fprintf(stderr, "Can't parse -r argument '%s' as hex\n", | ||
169 | p); | ||
170 | exit(1); | ||
171 | } | ||
172 | ai += 2; | ||
173 | } | ||
174 | if (ac != ai + 1 && ac != ai + 2) { | ||
175 | fprintf(stderr, "Usage: %s [-r realbase] elf-file [rpanote.elf]\n", av[0]); | ||
163 | exit(1); | 176 | exit(1); |
164 | } | 177 | } |
165 | fd = open(av[1], O_RDWR); | 178 | fd = open(av[ai], O_RDWR); |
166 | if (fd < 0) { | 179 | if (fd < 0) { |
167 | perror(av[1]); | 180 | perror(av[ai]); |
168 | exit(1); | 181 | exit(1); |
169 | } | 182 | } |
170 | 183 | ||
@@ -184,12 +197,12 @@ main(int ac, char **av) | |||
184 | if (buf[E_IDENT+EI_CLASS] != ELFCLASS32 | 197 | if (buf[E_IDENT+EI_CLASS] != ELFCLASS32 |
185 | || buf[E_IDENT+EI_DATA] != ELFDATA2MSB) { | 198 | || buf[E_IDENT+EI_DATA] != ELFDATA2MSB) { |
186 | fprintf(stderr, "%s is not a big-endian 32-bit ELF image\n", | 199 | fprintf(stderr, "%s is not a big-endian 32-bit ELF image\n", |
187 | av[1]); | 200 | av[ai]); |
188 | exit(1); | 201 | exit(1); |
189 | } | 202 | } |
190 | 203 | ||
191 | if (ac == 3) | 204 | if (ac == ai + 2) |
192 | rpap = read_rpanote(av[2], &nnote2); | 205 | rpap = read_rpanote(av[ai + 1], &nnote2); |
193 | 206 | ||
194 | ph = GET_32BE(buf, E_PHOFF); | 207 | ph = GET_32BE(buf, E_PHOFF); |
195 | ps = GET_16BE(buf, E_PHENTSIZE); | 208 | ps = GET_16BE(buf, E_PHENTSIZE); |
@@ -202,7 +215,7 @@ main(int ac, char **av) | |||
202 | for (i = 0; i < np; ++i) { | 215 | for (i = 0; i < np; ++i) { |
203 | if (GET_32BE(buf, ph + PH_TYPE) == PT_NOTE) { | 216 | if (GET_32BE(buf, ph + PH_TYPE) == PT_NOTE) { |
204 | fprintf(stderr, "%s already has a note entry\n", | 217 | fprintf(stderr, "%s already has a note entry\n", |
205 | av[1]); | 218 | av[ai]); |
206 | exit(0); | 219 | exit(0); |
207 | } | 220 | } |
208 | ph += ps; | 221 | ph += ps; |
@@ -260,18 +273,18 @@ main(int ac, char **av) | |||
260 | exit(1); | 273 | exit(1); |
261 | } | 274 | } |
262 | if (i < n) { | 275 | if (i < n) { |
263 | fprintf(stderr, "%s: write truncated\n", av[1]); | 276 | fprintf(stderr, "%s: write truncated\n", av[ai]); |
264 | exit(1); | 277 | exit(1); |
265 | } | 278 | } |
266 | 279 | ||
267 | exit(0); | 280 | exit(0); |
268 | 281 | ||
269 | notelf: | 282 | notelf: |
270 | fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]); | 283 | fprintf(stderr, "%s does not appear to be an ELF file\n", av[ai]); |
271 | exit(1); | 284 | exit(1); |
272 | 285 | ||
273 | nospace: | 286 | nospace: |
274 | fprintf(stderr, "sorry, I can't find space in %s to put the note\n", | 287 | fprintf(stderr, "sorry, I can't find space in %s to put the note\n", |
275 | av[1]); | 288 | av[ai]); |
276 | exit(1); | 289 | exit(1); |
277 | } | 290 | } |
diff --git a/arch/powerpc/boot/cuboot-52xx.c b/arch/powerpc/boot/cuboot-52xx.c index a8611546a656..4c42ec8687be 100644 --- a/arch/powerpc/boot/cuboot-52xx.c +++ b/arch/powerpc/boot/cuboot-52xx.c | |||
@@ -37,6 +37,10 @@ static void platform_fixups(void) | |||
37 | * this can do a simple path lookup. | 37 | * this can do a simple path lookup. |
38 | */ | 38 | */ |
39 | soc = find_node_by_devtype(NULL, "soc"); | 39 | soc = find_node_by_devtype(NULL, "soc"); |
40 | if (!soc) | ||
41 | soc = find_node_by_compatible(NULL, "fsl,mpc5200-immr"); | ||
42 | if (!soc) | ||
43 | soc = find_node_by_compatible(NULL, "fsl,mpc5200b-immr"); | ||
40 | if (soc) { | 44 | if (soc) { |
41 | setprop(soc, "bus-frequency", &bd.bi_ipbfreq, | 45 | setprop(soc, "bus-frequency", &bd.bi_ipbfreq, |
42 | sizeof(bd.bi_ipbfreq)); | 46 | sizeof(bd.bi_ipbfreq)); |
diff --git a/arch/powerpc/boot/cuboot-acadia.c b/arch/powerpc/boot/cuboot-acadia.c new file mode 100644 index 000000000000..0634aba6348a --- /dev/null +++ b/arch/powerpc/boot/cuboot-acadia.c | |||
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * Old U-boot compatibility for Acadia | ||
3 | * | ||
4 | * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com> | ||
5 | * | ||
6 | * Copyright 2008 IBM Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License version 2 as published | ||
10 | * by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include "ops.h" | ||
14 | #include "io.h" | ||
15 | #include "dcr.h" | ||
16 | #include "stdio.h" | ||
17 | #include "4xx.h" | ||
18 | #include "44x.h" | ||
19 | #include "cuboot.h" | ||
20 | |||
21 | #define TARGET_4xx | ||
22 | #include "ppcboot.h" | ||
23 | |||
24 | static bd_t bd; | ||
25 | |||
26 | #define CPR_PERD0_SPIDV_MASK 0x000F0000 /* SPI Clock Divider */ | ||
27 | |||
28 | #define PLLC_SRC_MASK 0x20000000 /* PLL feedback source */ | ||
29 | |||
30 | #define PLLD_FBDV_MASK 0x1F000000 /* PLL feedback divider value */ | ||
31 | #define PLLD_FWDVA_MASK 0x000F0000 /* PLL forward divider A value */ | ||
32 | #define PLLD_FWDVB_MASK 0x00000700 /* PLL forward divider B value */ | ||
33 | |||
34 | #define PRIMAD_CPUDV_MASK 0x0F000000 /* CPU Clock Divisor Mask */ | ||
35 | #define PRIMAD_PLBDV_MASK 0x000F0000 /* PLB Clock Divisor Mask */ | ||
36 | #define PRIMAD_OPBDV_MASK 0x00000F00 /* OPB Clock Divisor Mask */ | ||
37 | #define PRIMAD_EBCDV_MASK 0x0000000F /* EBC Clock Divisor Mask */ | ||
38 | |||
39 | #define PERD0_PWMDV_MASK 0xFF000000 /* PWM Divider Mask */ | ||
40 | #define PERD0_SPIDV_MASK 0x000F0000 /* SPI Divider Mask */ | ||
41 | #define PERD0_U0DV_MASK 0x0000FF00 /* UART 0 Divider Mask */ | ||
42 | #define PERD0_U1DV_MASK 0x000000FF /* UART 1 Divider Mask */ | ||
43 | |||
44 | static void get_clocks(void) | ||
45 | { | ||
46 | unsigned long sysclk, cpr_plld, cpr_pllc, cpr_primad, plloutb, i; | ||
47 | unsigned long pllFwdDiv, pllFwdDivB, pllFbkDiv, pllPlbDiv, pllExtBusDiv; | ||
48 | unsigned long pllOpbDiv, freqEBC, freqUART, freqOPB; | ||
49 | unsigned long div; /* total divisor udiv * bdiv */ | ||
50 | unsigned long umin; /* minimum udiv */ | ||
51 | unsigned short diff; /* smallest diff */ | ||
52 | unsigned long udiv; /* best udiv */ | ||
53 | unsigned short idiff; /* current diff */ | ||
54 | unsigned short ibdiv; /* current bdiv */ | ||
55 | unsigned long est; /* current estimate */ | ||
56 | unsigned long baud; | ||
57 | void *np; | ||
58 | |||
59 | /* read the sysclk value from the CPLD */ | ||
60 | sysclk = (in_8((unsigned char *)0x80000000) == 0xc) ? 66666666 : 33333000; | ||
61 | |||
62 | /* | ||
63 | * Read PLL Mode registers | ||
64 | */ | ||
65 | cpr_plld = CPR0_READ(DCRN_CPR0_PLLD); | ||
66 | cpr_pllc = CPR0_READ(DCRN_CPR0_PLLC); | ||
67 | |||
68 | /* | ||
69 | * Determine forward divider A | ||
70 | */ | ||
71 | pllFwdDiv = ((cpr_plld & PLLD_FWDVA_MASK) >> 16); | ||
72 | |||
73 | /* | ||
74 | * Determine forward divider B | ||
75 | */ | ||
76 | pllFwdDivB = ((cpr_plld & PLLD_FWDVB_MASK) >> 8); | ||
77 | if (pllFwdDivB == 0) | ||
78 | pllFwdDivB = 8; | ||
79 | |||
80 | /* | ||
81 | * Determine FBK_DIV. | ||
82 | */ | ||
83 | pllFbkDiv = ((cpr_plld & PLLD_FBDV_MASK) >> 24); | ||
84 | if (pllFbkDiv == 0) | ||
85 | pllFbkDiv = 256; | ||
86 | |||
87 | /* | ||
88 | * Read CPR_PRIMAD register | ||
89 | */ | ||
90 | cpr_primad = CPR0_READ(DCRN_CPR0_PRIMAD); | ||
91 | |||
92 | /* | ||
93 | * Determine PLB_DIV. | ||
94 | */ | ||
95 | pllPlbDiv = ((cpr_primad & PRIMAD_PLBDV_MASK) >> 16); | ||
96 | if (pllPlbDiv == 0) | ||
97 | pllPlbDiv = 16; | ||
98 | |||
99 | /* | ||
100 | * Determine EXTBUS_DIV. | ||
101 | */ | ||
102 | pllExtBusDiv = (cpr_primad & PRIMAD_EBCDV_MASK); | ||
103 | if (pllExtBusDiv == 0) | ||
104 | pllExtBusDiv = 16; | ||
105 | |||
106 | /* | ||
107 | * Determine OPB_DIV. | ||
108 | */ | ||
109 | pllOpbDiv = ((cpr_primad & PRIMAD_OPBDV_MASK) >> 8); | ||
110 | if (pllOpbDiv == 0) | ||
111 | pllOpbDiv = 16; | ||
112 | |||
113 | /* There is a bug in U-Boot that prevents us from using | ||
114 | * bd.bi_opbfreq because U-Boot doesn't populate it for | ||
115 | * 405EZ. We get to calculate it, yay! | ||
116 | */ | ||
117 | freqOPB = (sysclk *pllFbkDiv) /pllOpbDiv; | ||
118 | |||
119 | freqEBC = (sysclk * pllFbkDiv) / pllExtBusDiv; | ||
120 | |||
121 | plloutb = ((sysclk * ((cpr_pllc & PLLC_SRC_MASK) ? | ||
122 | pllFwdDivB : pllFwdDiv) * | ||
123 | pllFbkDiv) / pllFwdDivB); | ||
124 | |||
125 | np = find_node_by_alias("serial0"); | ||
126 | if (getprop(np, "current-speed", &baud, sizeof(baud)) != sizeof(baud)) | ||
127 | fatal("no current-speed property\n\r"); | ||
128 | |||
129 | udiv = 256; /* Assume lowest possible serial clk */ | ||
130 | div = plloutb / (16 * baud); /* total divisor */ | ||
131 | umin = (plloutb / freqOPB) << 1; /* 2 x OPB divisor */ | ||
132 | diff = 256; /* highest possible */ | ||
133 | |||
134 | /* i is the test udiv value -- start with the largest | ||
135 | * possible (256) to minimize serial clock and constrain | ||
136 | * search to umin. | ||
137 | */ | ||
138 | for (i = 256; i > umin; i--) { | ||
139 | ibdiv = div / i; | ||
140 | est = i * ibdiv; | ||
141 | idiff = (est > div) ? (est-div) : (div-est); | ||
142 | if (idiff == 0) { | ||
143 | udiv = i; | ||
144 | break; /* can't do better */ | ||
145 | } else if (idiff < diff) { | ||
146 | udiv = i; /* best so far */ | ||
147 | diff = idiff; /* update lowest diff*/ | ||
148 | } | ||
149 | } | ||
150 | freqUART = plloutb / udiv; | ||
151 | |||
152 | dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_intfreq, bd.bi_plb_busfreq); | ||
153 | dt_fixup_clock("/plb/ebc", freqEBC); | ||
154 | dt_fixup_clock("/plb/opb", freqOPB); | ||
155 | dt_fixup_clock("/plb/opb/serial@ef600300", freqUART); | ||
156 | dt_fixup_clock("/plb/opb/serial@ef600400", freqUART); | ||
157 | } | ||
158 | |||
159 | static void acadia_fixups(void) | ||
160 | { | ||
161 | dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); | ||
162 | get_clocks(); | ||
163 | dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); | ||
164 | } | ||
165 | |||
166 | void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, | ||
167 | unsigned long r6, unsigned long r7) | ||
168 | { | ||
169 | CUBOOT_INIT(); | ||
170 | platform_ops.fixups = acadia_fixups; | ||
171 | platform_ops.exit = ibm40x_dbcr_reset; | ||
172 | fdt_init(_dtb_start); | ||
173 | serial_console_init(); | ||
174 | } | ||
diff --git a/arch/powerpc/boot/dts/acadia.dts b/arch/powerpc/boot/dts/acadia.dts new file mode 100644 index 000000000000..57291f61ffe7 --- /dev/null +++ b/arch/powerpc/boot/dts/acadia.dts | |||
@@ -0,0 +1,224 @@ | |||
1 | /* | ||
2 | * Device Tree Source for AMCC Acadia (405EZ) | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public License | ||
7 | * version 2. This program is licensed "as is" without any warranty of any | ||
8 | * kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | /dts-v1/; | ||
12 | |||
13 | / { | ||
14 | #address-cells = <1>; | ||
15 | #size-cells = <1>; | ||
16 | model = "amcc,acadia"; | ||
17 | compatible = "amcc,acadia"; | ||
18 | dcr-parent = <&{/cpus/cpu@0}>; | ||
19 | |||
20 | aliases { | ||
21 | ethernet0 = &EMAC0; | ||
22 | serial0 = &UART0; | ||
23 | serial1 = &UART1; | ||
24 | }; | ||
25 | |||
26 | cpus { | ||
27 | #address-cells = <1>; | ||
28 | #size-cells = <0>; | ||
29 | |||
30 | cpu@0 { | ||
31 | device_type = "cpu"; | ||
32 | model = "PowerPC,405EZ"; | ||
33 | reg = <0x0>; | ||
34 | clock-frequency = <0>; /* Filled in by wrapper */ | ||
35 | timebase-frequency = <0>; /* Filled in by wrapper */ | ||
36 | i-cache-line-size = <32>; | ||
37 | d-cache-line-size = <32>; | ||
38 | i-cache-size = <16384>; | ||
39 | d-cache-size = <16384>; | ||
40 | dcr-controller; | ||
41 | dcr-access-method = "native"; | ||
42 | }; | ||
43 | }; | ||
44 | |||
45 | memory { | ||
46 | device_type = "memory"; | ||
47 | reg = <0x0 0x0>; /* Filled in by wrapper */ | ||
48 | }; | ||
49 | |||
50 | UIC0: interrupt-controller { | ||
51 | compatible = "ibm,uic-405ez", "ibm,uic"; | ||
52 | interrupt-controller; | ||
53 | dcr-reg = <0x0c0 0x009>; | ||
54 | cell-index = <0>; | ||
55 | #address-cells = <0>; | ||
56 | #size-cells = <0>; | ||
57 | #interrupt-cells = <2>; | ||
58 | }; | ||
59 | |||
60 | plb { | ||
61 | compatible = "ibm,plb-405ez", "ibm,plb3"; | ||
62 | #address-cells = <1>; | ||
63 | #size-cells = <1>; | ||
64 | ranges; | ||
65 | clock-frequency = <0>; /* Filled in by wrapper */ | ||
66 | |||
67 | MAL0: mcmal { | ||
68 | compatible = "ibm,mcmal-405ez", "ibm,mcmal"; | ||
69 | dcr-reg = <0x380 0x62>; | ||
70 | num-tx-chans = <1>; | ||
71 | num-rx-chans = <1>; | ||
72 | interrupt-parent = <&UIC0>; | ||
73 | /* 405EZ has only 3 interrupts to the UIC, as | ||
74 | * SERR, TXDE, and RXDE are or'd together into | ||
75 | * one UIC bit | ||
76 | */ | ||
77 | interrupts = < | ||
78 | 0x13 0x4 /* TXEOB */ | ||
79 | 0x15 0x4 /* RXEOB */ | ||
80 | 0x12 0x4 /* SERR, TXDE, RXDE */>; | ||
81 | }; | ||
82 | |||
83 | POB0: opb { | ||
84 | compatible = "ibm,opb-405ez", "ibm,opb"; | ||
85 | #address-cells = <1>; | ||
86 | #size-cells = <1>; | ||
87 | ranges; | ||
88 | dcr-reg = <0x0a 0x05>; | ||
89 | clock-frequency = <0>; /* Filled in by wrapper */ | ||
90 | |||
91 | UART0: serial@ef600300 { | ||
92 | device_type = "serial"; | ||
93 | compatible = "ns16550"; | ||
94 | reg = <0xef600300 0x8>; | ||
95 | virtual-reg = <0xef600300>; | ||
96 | clock-frequency = <0>; /* Filled in by wrapper */ | ||
97 | current-speed = <115200>; | ||
98 | interrupt-parent = <&UIC0>; | ||
99 | interrupts = <0x5 0x4>; | ||
100 | }; | ||
101 | |||
102 | UART1: serial@ef600400 { | ||
103 | device_type = "serial"; | ||
104 | compatible = "ns16550"; | ||
105 | reg = <0xef600400 0x8>; | ||
106 | clock-frequency = <0>; /* Filled in by wrapper */ | ||
107 | current-speed = <115200>; | ||
108 | interrupt-parent = <&UIC0>; | ||
109 | interrupts = <0x6 0x4>; | ||
110 | }; | ||
111 | |||
112 | IIC: i2c@ef600500 { | ||
113 | compatible = "ibm,iic-405ez", "ibm,iic"; | ||
114 | reg = <0xef600500 0x11>; | ||
115 | interrupt-parent = <&UIC0>; | ||
116 | interrupts = <0xa 0x4>; | ||
117 | }; | ||
118 | |||
119 | GPIO0: gpio@ef600700 { | ||
120 | compatible = "ibm,gpio-405ez"; | ||
121 | reg = <0xef600700 0x20>; | ||
122 | }; | ||
123 | |||
124 | GPIO1: gpio@ef600800 { | ||
125 | compatible = "ibm,gpio-405ez"; | ||
126 | reg = <0xef600800 0x20>; | ||
127 | }; | ||
128 | |||
129 | EMAC0: ethernet@ef600900 { | ||
130 | device_type = "network"; | ||
131 | compatible = "ibm,emac-405ez", "ibm,emac"; | ||
132 | interrupt-parent = <&UIC0>; | ||
133 | interrupts = < | ||
134 | 0x10 0x4 /* Ethernet */ | ||
135 | 0x11 0x4 /* Ethernet Wake up */>; | ||
136 | local-mac-address = [000000000000]; /* Filled in by wrapper */ | ||
137 | reg = <0xef600900 0x70>; | ||
138 | mal-device = <&MAL0>; | ||
139 | mal-tx-channel = <0>; | ||
140 | mal-rx-channel = <0>; | ||
141 | cell-index = <0>; | ||
142 | max-frame-size = <1500>; | ||
143 | rx-fifo-size = <4096>; | ||
144 | tx-fifo-size = <2048>; | ||
145 | phy-mode = "mii"; | ||
146 | phy-map = <0x0>; | ||
147 | }; | ||
148 | |||
149 | CAN0: can@ef601000 { | ||
150 | compatible = "amcc,can-405ez"; | ||
151 | reg = <0xef601000 0x620>; | ||
152 | interrupt-parent = <&UIC0>; | ||
153 | interrupts = <0x7 0x4>; | ||
154 | }; | ||
155 | |||
156 | CAN1: can@ef601800 { | ||
157 | compatible = "amcc,can-405ez"; | ||
158 | reg = <0xef601800 0x620>; | ||
159 | interrupt-parent = <&UIC0>; | ||
160 | interrupts = <0x8 0x4>; | ||
161 | }; | ||
162 | |||
163 | cameleon@ef602000 { | ||
164 | compatible = "amcc,cameleon-405ez"; | ||
165 | reg = <0xef602000 0x800>; | ||
166 | interrupt-parent = <&UIC0>; | ||
167 | interrupts = <0xb 0x4 0xc 0x4>; | ||
168 | }; | ||
169 | |||
170 | ieee1588@ef602800 { | ||
171 | compatible = "amcc,ieee1588-405ez"; | ||
172 | reg = <0xef602800 0x60>; | ||
173 | interrupt-parent = <&UIC0>; | ||
174 | interrupts = <0x4 0x4>; | ||
175 | /* This thing is a bit weird. It has it's own UIC | ||
176 | * that it uses to generate snapshot triggers. We | ||
177 | * don't really support this device yet, and it needs | ||
178 | * work to figure this out. | ||
179 | */ | ||
180 | dcr-reg = <0xe0 0x9>; | ||
181 | }; | ||
182 | |||
183 | usb@ef603000 { | ||
184 | compatible = "ohci-be"; | ||
185 | reg = <0xef603000 0x80>; | ||
186 | interrupts-parent = <&UIC0>; | ||
187 | interrupts = <0xd 0x4 0xe 0x4>; | ||
188 | }; | ||
189 | |||
190 | dac@ef603300 { | ||
191 | compatible = "amcc,dac-405ez"; | ||
192 | reg = <0xef603300 0x40>; | ||
193 | interrupt-parent = <&UIC0>; | ||
194 | interrupts = <0x18 0x4>; | ||
195 | }; | ||
196 | |||
197 | adc@ef603400 { | ||
198 | compatible = "amcc,adc-405ez"; | ||
199 | reg = <0xef603400 0x40>; | ||
200 | interrupt-parent = <&UIC0>; | ||
201 | interrupts = <0x17 0x4>; | ||
202 | }; | ||
203 | |||
204 | spi@ef603500 { | ||
205 | compatible = "amcc,spi-405ez"; | ||
206 | reg = <0xef603500 0x100>; | ||
207 | interrupt-parent = <&UIC0>; | ||
208 | interrupts = <0x9 0x4>; | ||
209 | }; | ||
210 | }; | ||
211 | |||
212 | EBC0: ebc { | ||
213 | compatible = "ibm,ebc-405ez", "ibm,ebc"; | ||
214 | dcr-reg = <0x12 0x2>; | ||
215 | #address-cells = <2>; | ||
216 | #size-cells = <1>; | ||
217 | clock-frequency = <0>; /* Filled in by wrapper */ | ||
218 | }; | ||
219 | }; | ||
220 | |||
221 | chosen { | ||
222 | linux,stdout-path = "/plb/opb/serial@ef600300"; | ||
223 | }; | ||
224 | }; | ||
diff --git a/arch/powerpc/boot/dts/hcu4.dts b/arch/powerpc/boot/dts/hcu4.dts new file mode 100644 index 000000000000..7988598da4c9 --- /dev/null +++ b/arch/powerpc/boot/dts/hcu4.dts | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * Device Tree Source for Netstal Maschinen HCU4 | ||
3 | * based on the IBM Walnut | ||
4 | * | ||
5 | * Copyright 2008 | ||
6 | * Niklaus Giger <niklaus.giger@member.fsf.org> | ||
7 | * | ||
8 | * Copyright 2007 IBM Corp. | ||
9 | * Josh Boyer <jwboyer@linux.vnet.ibm.com> | ||
10 | * | ||
11 | * This file is licensed under the terms of the GNU General Public | ||
12 | * License version 2. This program is licensed "as is" without | ||
13 | * any warranty of any kind, whether express or implied. | ||
14 | */ | ||
15 | |||
16 | /dts-v1/; | ||
17 | |||
18 | / { | ||
19 | #address-cells = <0x1>; | ||
20 | #size-cells = <0x1>; | ||
21 | model = "netstal,hcu4"; | ||
22 | compatible = "netstal,hcu4"; | ||
23 | dcr-parent = <0x1>; | ||
24 | |||
25 | aliases { | ||
26 | ethernet0 = "/plb/opb/ethernet@ef600800"; | ||
27 | serial0 = "/plb/opb/serial@ef600300"; | ||
28 | }; | ||
29 | |||
30 | cpus { | ||
31 | #address-cells = <0x1>; | ||
32 | #size-cells = <0x0>; | ||
33 | |||
34 | cpu@0 { | ||
35 | device_type = "cpu"; | ||
36 | model = "PowerPC,405GPr"; | ||
37 | reg = <0x0>; | ||
38 | clock-frequency = <0>; /* Filled in by U-Boot */ | ||
39 | timebase-frequency = <0x0>; /* Filled in by U-Boot */ | ||
40 | i-cache-line-size = <0x20>; | ||
41 | d-cache-line-size = <0x20>; | ||
42 | i-cache-size = <0x4000>; | ||
43 | d-cache-size = <0x4000>; | ||
44 | dcr-controller; | ||
45 | dcr-access-method = "native"; | ||
46 | linux,phandle = <0x1>; | ||
47 | }; | ||
48 | }; | ||
49 | |||
50 | memory { | ||
51 | device_type = "memory"; | ||
52 | reg = <0x0 0x0>; /* Filled in by U-Boot */ | ||
53 | }; | ||
54 | |||
55 | UIC0: interrupt-controller { | ||
56 | compatible = "ibm,uic"; | ||
57 | interrupt-controller; | ||
58 | cell-index = <0x0>; | ||
59 | dcr-reg = <0xc0 0x9>; | ||
60 | #address-cells = <0x0>; | ||
61 | #size-cells = <0x0>; | ||
62 | #interrupt-cells = <0x2>; | ||
63 | linux,phandle = <0x2>; | ||
64 | }; | ||
65 | |||
66 | plb { | ||
67 | compatible = "ibm,plb3"; | ||
68 | #address-cells = <0x1>; | ||
69 | #size-cells = <0x1>; | ||
70 | ranges; | ||
71 | clock-frequency = <0x0>; /* Filled in by U-Boot */ | ||
72 | |||
73 | SDRAM0: memory-controller { | ||
74 | compatible = "ibm,sdram-405gp"; | ||
75 | dcr-reg = <0x10 0x2>; | ||
76 | }; | ||
77 | |||
78 | MAL: mcmal { | ||
79 | compatible = "ibm,mcmal-405gp", "ibm,mcmal"; | ||
80 | dcr-reg = <0x180 0x62>; | ||
81 | num-tx-chans = <0x1>; | ||
82 | num-rx-chans = <0x1>; | ||
83 | interrupt-parent = <0x2>; | ||
84 | interrupts = <0xb 0x4 0xc 0x4 0xa 0x4 0xd 0x4 0xe 0x4>; | ||
85 | linux,phandle = <0x3>; | ||
86 | }; | ||
87 | |||
88 | POB0: opb { | ||
89 | compatible = "ibm,opb-405gp", "ibm,opb"; | ||
90 | #address-cells = <0x1>; | ||
91 | #size-cells = <0x1>; | ||
92 | ranges = <0xef600000 0xef600000 0xa00000>; | ||
93 | dcr-reg = <0xa0 0x5>; | ||
94 | clock-frequency = <0x0>; /* Filled in by U-Boot */ | ||
95 | |||
96 | UART0: serial@ef600300 { | ||
97 | device_type = "serial"; | ||
98 | compatible = "ns16550"; | ||
99 | reg = <0xef600300 0x8>; | ||
100 | virtual-reg = <0xef600300>; | ||
101 | clock-frequency = <0x0>;/* Filled in by U-Boot */ | ||
102 | current-speed = <0>; /* Filled in by U-Boot */ | ||
103 | interrupt-parent = <0x2>; | ||
104 | interrupts = <0x0 0x4>; | ||
105 | }; | ||
106 | |||
107 | IIC: i2c@ef600500 { | ||
108 | compatible = "ibm,iic-405gp", "ibm,iic"; | ||
109 | reg = <0xef600500 0x11>; | ||
110 | interrupt-parent = <0x2>; | ||
111 | interrupts = <0x2 0x4>; | ||
112 | }; | ||
113 | |||
114 | GPIO: gpio@ef600700 { | ||
115 | compatible = "ibm,gpio-405gp"; | ||
116 | reg = <0xef600700 0x20>; | ||
117 | }; | ||
118 | |||
119 | EMAC: ethernet@ef600800 { | ||
120 | device_type = "network"; | ||
121 | compatible = "ibm,emac-405gp", "ibm,emac"; | ||
122 | interrupt-parent = <0x2>; | ||
123 | interrupts = <0xf 0x4 0x9 0x4>; | ||
124 | local-mac-address = [00 00 00 00 00 00]; | ||
125 | reg = <0xef600800 0x70>; | ||
126 | mal-device = <0x3>; | ||
127 | mal-tx-channel = <0x0>; | ||
128 | mal-rx-channel = <0x0>; | ||
129 | cell-index = <0x0>; | ||
130 | max-frame-size = <0x5dc>; | ||
131 | rx-fifo-size = <0x1000>; | ||
132 | tx-fifo-size = <0x800>; | ||
133 | phy-mode = "rmii"; | ||
134 | phy-map = <0x1>; | ||
135 | }; | ||
136 | }; | ||
137 | |||
138 | EBC0: ebc { | ||
139 | compatible = "ibm,ebc-405gp", "ibm,ebc"; | ||
140 | dcr-reg = <0x12 0x2>; | ||
141 | #address-cells = <0x2>; | ||
142 | #size-cells = <0x1>; | ||
143 | clock-frequency = <0x0>; /* Filled in by U-Boot */ | ||
144 | |||
145 | sram@0,0 { | ||
146 | reg = <0x0 0x0 0x80000>; | ||
147 | }; | ||
148 | |||
149 | flash@0,80000 { | ||
150 | compatible = "jedec-flash"; | ||
151 | bank-width = <0x1>; | ||
152 | reg = <0x0 0x80000 0x80000>; | ||
153 | #address-cells = <0x1>; | ||
154 | #size-cells = <0x1>; | ||
155 | |||
156 | partition@0 { | ||
157 | label = "OpenBIOS"; | ||
158 | reg = <0x0 0x80000>; | ||
159 | read-only; | ||
160 | }; | ||
161 | }; | ||
162 | }; | ||
163 | }; | ||
164 | |||
165 | chosen { | ||
166 | linux,stdout-path = "/plb/opb/serial@ef600300"; | ||
167 | }; | ||
168 | }; | ||
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts index 7449e54c1a90..6b850670de1d 100644 --- a/arch/powerpc/boot/dts/mpc8315erdb.dts +++ b/arch/powerpc/boot/dts/mpc8315erdb.dts | |||
@@ -121,6 +121,14 @@ | |||
121 | compatible = "dallas,ds1339"; | 121 | compatible = "dallas,ds1339"; |
122 | reg = <0x68>; | 122 | reg = <0x68>; |
123 | }; | 123 | }; |
124 | |||
125 | mcu_pio: mcu@a { | ||
126 | #gpio-cells = <2>; | ||
127 | compatible = "fsl,mc9s08qg8-mpc8315erdb", | ||
128 | "fsl,mcu-mpc8349emitx"; | ||
129 | reg = <0x0a>; | ||
130 | gpio-controller; | ||
131 | }; | ||
124 | }; | 132 | }; |
125 | 133 | ||
126 | spi@7000 { | 134 | spi@7000 { |
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts index e4cc1768f241..57c595bf1071 100644 --- a/arch/powerpc/boot/dts/mpc832x_mds.dts +++ b/arch/powerpc/boot/dts/mpc832x_mds.dts | |||
@@ -60,7 +60,7 @@ | |||
60 | }; | 60 | }; |
61 | 61 | ||
62 | bcsr@f8000000 { | 62 | bcsr@f8000000 { |
63 | device_type = "board-control"; | 63 | compatible = "fsl,mpc8323mds-bcsr"; |
64 | reg = <0xf8000000 0x8000>; | 64 | reg = <0xf8000000 0x8000>; |
65 | }; | 65 | }; |
66 | 66 | ||
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts index 5cedf373a1d8..2c9d54a35bc3 100644 --- a/arch/powerpc/boot/dts/mpc8349emitx.dts +++ b/arch/powerpc/boot/dts/mpc8349emitx.dts | |||
@@ -83,6 +83,14 @@ | |||
83 | interrupts = <15 0x8>; | 83 | interrupts = <15 0x8>; |
84 | interrupt-parent = <&ipic>; | 84 | interrupt-parent = <&ipic>; |
85 | dfsrr; | 85 | dfsrr; |
86 | |||
87 | rtc@68 { | ||
88 | device_type = "rtc"; | ||
89 | compatible = "dallas,ds1339"; | ||
90 | reg = <0x68>; | ||
91 | interrupts = <18 0x8>; | ||
92 | interrupt-parent = <&ipic>; | ||
93 | }; | ||
86 | }; | 94 | }; |
87 | 95 | ||
88 | spi@7000 { | 96 | spi@7000 { |
@@ -131,6 +139,14 @@ | |||
131 | interrupt-parent = <&ipic>; | 139 | interrupt-parent = <&ipic>; |
132 | interrupts = <71 8>; | 140 | interrupts = <71 8>; |
133 | }; | 141 | }; |
142 | |||
143 | mcu_pio: mcu@a { | ||
144 | #gpio-cells = <2>; | ||
145 | compatible = "fsl,mc9s08qg8-mpc8349emitx", | ||
146 | "fsl,mcu-mpc8349emitx"; | ||
147 | reg = <0x0a>; | ||
148 | gpio-controller; | ||
149 | }; | ||
134 | }; | 150 | }; |
135 | 151 | ||
136 | usb@22000 { | 152 | usb@22000 { |
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts index 81ae1d3e9440..fa40647ee62e 100644 --- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts +++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts | |||
@@ -81,6 +81,14 @@ | |||
81 | interrupts = <15 0x8>; | 81 | interrupts = <15 0x8>; |
82 | interrupt-parent = <&ipic>; | 82 | interrupt-parent = <&ipic>; |
83 | dfsrr; | 83 | dfsrr; |
84 | |||
85 | rtc@68 { | ||
86 | device_type = "rtc"; | ||
87 | compatible = "dallas,ds1339"; | ||
88 | reg = <0x68>; | ||
89 | interrupts = <18 0x8>; | ||
90 | interrupt-parent = <&ipic>; | ||
91 | }; | ||
84 | }; | 92 | }; |
85 | 93 | ||
86 | spi@7000 { | 94 | spi@7000 { |
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts index 04bfde3ea605..c986c541e9bb 100644 --- a/arch/powerpc/boot/dts/mpc834x_mds.dts +++ b/arch/powerpc/boot/dts/mpc834x_mds.dts | |||
@@ -49,7 +49,7 @@ | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | bcsr@e2400000 { | 51 | bcsr@e2400000 { |
52 | device_type = "board-control"; | 52 | compatible = "fsl,mpc8349mds-bcsr"; |
53 | reg = <0xe2400000 0x8000>; | 53 | reg = <0xe2400000 0x8000>; |
54 | }; | 54 | }; |
55 | 55 | ||
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts index 66a12d2631fb..14534d04e4db 100644 --- a/arch/powerpc/boot/dts/mpc836x_mds.dts +++ b/arch/powerpc/boot/dts/mpc836x_mds.dts | |||
@@ -69,7 +69,7 @@ | |||
69 | }; | 69 | }; |
70 | 70 | ||
71 | bcsr@1,0 { | 71 | bcsr@1,0 { |
72 | device_type = "board-control"; | 72 | compatible = "fsl,mpc8360mds-bcsr"; |
73 | reg = <1 0 0x8000>; | 73 | reg = <1 0 0x8000>; |
74 | }; | 74 | }; |
75 | }; | 75 | }; |
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts index 53191ba67aaa..435ef3dd022d 100644 --- a/arch/powerpc/boot/dts/mpc8377_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts | |||
@@ -121,6 +121,14 @@ | |||
121 | compatible = "dallas,ds1339"; | 121 | compatible = "dallas,ds1339"; |
122 | reg = <0x68>; | 122 | reg = <0x68>; |
123 | }; | 123 | }; |
124 | |||
125 | mcu_pio: mcu@a { | ||
126 | #gpio-cells = <2>; | ||
127 | compatible = "fsl,mc9s08qg8-mpc8377erdb", | ||
128 | "fsl,mcu-mpc8349emitx"; | ||
129 | reg = <0x0a>; | ||
130 | gpio-controller; | ||
131 | }; | ||
124 | }; | 132 | }; |
125 | 133 | ||
126 | i2c@3100 { | 134 | i2c@3100 { |
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts index 4a09153d160c..b11e68f56a06 100644 --- a/arch/powerpc/boot/dts/mpc8378_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts | |||
@@ -121,6 +121,14 @@ | |||
121 | compatible = "dallas,ds1339"; | 121 | compatible = "dallas,ds1339"; |
122 | reg = <0x68>; | 122 | reg = <0x68>; |
123 | }; | 123 | }; |
124 | |||
125 | mcu_pio: mcu@a { | ||
126 | #gpio-cells = <2>; | ||
127 | compatible = "fsl,mc9s08qg8-mpc8378erdb", | ||
128 | "fsl,mcu-mpc8349emitx"; | ||
129 | reg = <0x0a>; | ||
130 | gpio-controller; | ||
131 | }; | ||
124 | }; | 132 | }; |
125 | 133 | ||
126 | i2c@3100 { | 134 | i2c@3100 { |
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts index bbd884ac9dc0..337af6ea26d3 100644 --- a/arch/powerpc/boot/dts/mpc8379_rdb.dts +++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts | |||
@@ -121,6 +121,14 @@ | |||
121 | compatible = "dallas,ds1339"; | 121 | compatible = "dallas,ds1339"; |
122 | reg = <0x68>; | 122 | reg = <0x68>; |
123 | }; | 123 | }; |
124 | |||
125 | mcu_pio: mcu@a { | ||
126 | #gpio-cells = <2>; | ||
127 | compatible = "fsl,mc9s08qg8-mpc8379erdb", | ||
128 | "fsl,mcu-mpc8349emitx"; | ||
129 | reg = <0x0a>; | ||
130 | gpio-controller; | ||
131 | }; | ||
124 | }; | 132 | }; |
125 | 133 | ||
126 | i2c@3100 { | 134 | i2c@3100 { |
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts index 93fdd99901b6..35db1e5440c7 100644 --- a/arch/powerpc/boot/dts/mpc8536ds.dts +++ b/arch/powerpc/boot/dts/mpc8536ds.dts | |||
@@ -109,7 +109,7 @@ | |||
109 | reg = <0x0 0x80>; | 109 | reg = <0x0 0x80>; |
110 | cell-index = <0>; | 110 | cell-index = <0>; |
111 | interrupt-parent = <&mpic>; | 111 | interrupt-parent = <&mpic>; |
112 | interrupts = <14 0x2>; | 112 | interrupts = <20 2>; |
113 | }; | 113 | }; |
114 | dma-channel@80 { | 114 | dma-channel@80 { |
115 | compatible = "fsl,mpc8536-dma-channel", | 115 | compatible = "fsl,mpc8536-dma-channel", |
@@ -117,7 +117,7 @@ | |||
117 | reg = <0x80 0x80>; | 117 | reg = <0x80 0x80>; |
118 | cell-index = <1>; | 118 | cell-index = <1>; |
119 | interrupt-parent = <&mpic>; | 119 | interrupt-parent = <&mpic>; |
120 | interrupts = <15 0x2>; | 120 | interrupts = <21 2>; |
121 | }; | 121 | }; |
122 | dma-channel@100 { | 122 | dma-channel@100 { |
123 | compatible = "fsl,mpc8536-dma-channel", | 123 | compatible = "fsl,mpc8536-dma-channel", |
@@ -125,7 +125,7 @@ | |||
125 | reg = <0x100 0x80>; | 125 | reg = <0x100 0x80>; |
126 | cell-index = <2>; | 126 | cell-index = <2>; |
127 | interrupt-parent = <&mpic>; | 127 | interrupt-parent = <&mpic>; |
128 | interrupts = <16 0x2>; | 128 | interrupts = <22 2>; |
129 | }; | 129 | }; |
130 | dma-channel@180 { | 130 | dma-channel@180 { |
131 | compatible = "fsl,mpc8536-dma-channel", | 131 | compatible = "fsl,mpc8536-dma-channel", |
@@ -133,7 +133,7 @@ | |||
133 | reg = <0x180 0x80>; | 133 | reg = <0x180 0x80>; |
134 | cell-index = <3>; | 134 | cell-index = <3>; |
135 | interrupt-parent = <&mpic>; | 135 | interrupt-parent = <&mpic>; |
136 | interrupts = <17 0x2>; | 136 | interrupts = <23 2>; |
137 | }; | 137 | }; |
138 | }; | 138 | }; |
139 | 139 | ||
@@ -180,7 +180,7 @@ | |||
180 | enet0: ethernet@24000 { | 180 | enet0: ethernet@24000 { |
181 | cell-index = <0>; | 181 | cell-index = <0>; |
182 | device_type = "network"; | 182 | device_type = "network"; |
183 | model = "TSEC"; | 183 | model = "eTSEC"; |
184 | compatible = "gianfar"; | 184 | compatible = "gianfar"; |
185 | reg = <0x24000 0x1000>; | 185 | reg = <0x24000 0x1000>; |
186 | local-mac-address = [ 00 00 00 00 00 00 ]; | 186 | local-mac-address = [ 00 00 00 00 00 00 ]; |
@@ -193,7 +193,7 @@ | |||
193 | enet1: ethernet@26000 { | 193 | enet1: ethernet@26000 { |
194 | cell-index = <1>; | 194 | cell-index = <1>; |
195 | device_type = "network"; | 195 | device_type = "network"; |
196 | model = "TSEC"; | 196 | model = "eTSEC"; |
197 | compatible = "gianfar"; | 197 | compatible = "gianfar"; |
198 | reg = <0x26000 0x1000>; | 198 | reg = <0x26000 0x1000>; |
199 | local-mac-address = [ 00 00 00 00 00 00 ]; | 199 | local-mac-address = [ 00 00 00 00 00 00 ]; |
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts index a15f10343f53..c80158f7741d 100644 --- a/arch/powerpc/boot/dts/mpc8568mds.dts +++ b/arch/powerpc/boot/dts/mpc8568mds.dts | |||
@@ -52,7 +52,7 @@ | |||
52 | }; | 52 | }; |
53 | 53 | ||
54 | bcsr@f8000000 { | 54 | bcsr@f8000000 { |
55 | device_type = "board-control"; | 55 | compatible = "fsl,mpc8568mds-bcsr"; |
56 | reg = <0xf8000000 0x8000>; | 56 | reg = <0xf8000000 0x8000>; |
57 | }; | 57 | }; |
58 | 58 | ||
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts index e124dd18fb5a..cadd4652a695 100644 --- a/arch/powerpc/boot/dts/mpc8572ds.dts +++ b/arch/powerpc/boot/dts/mpc8572ds.dts | |||
@@ -13,8 +13,8 @@ | |||
13 | / { | 13 | / { |
14 | model = "fsl,MPC8572DS"; | 14 | model = "fsl,MPC8572DS"; |
15 | compatible = "fsl,MPC8572DS"; | 15 | compatible = "fsl,MPC8572DS"; |
16 | #address-cells = <1>; | 16 | #address-cells = <2>; |
17 | #size-cells = <1>; | 17 | #size-cells = <2>; |
18 | 18 | ||
19 | aliases { | 19 | aliases { |
20 | ethernet0 = &enet0; | 20 | ethernet0 = &enet0; |
@@ -61,7 +61,6 @@ | |||
61 | 61 | ||
62 | memory { | 62 | memory { |
63 | device_type = "memory"; | 63 | device_type = "memory"; |
64 | reg = <0x0 0x0>; // Filled by U-Boot | ||
65 | }; | 64 | }; |
66 | 65 | ||
67 | soc8572@ffe00000 { | 66 | soc8572@ffe00000 { |
@@ -69,8 +68,8 @@ | |||
69 | #size-cells = <1>; | 68 | #size-cells = <1>; |
70 | device_type = "soc"; | 69 | device_type = "soc"; |
71 | compatible = "simple-bus"; | 70 | compatible = "simple-bus"; |
72 | ranges = <0x0 0xffe00000 0x100000>; | 71 | ranges = <0x0 0 0xffe00000 0x100000>; |
73 | reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed | 72 | reg = <0 0xffe00000 0 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed |
74 | bus-frequency = <0>; // Filled out by uboot. | 73 | bus-frequency = <0>; // Filled out by uboot. |
75 | 74 | ||
76 | memory-controller@2000 { | 75 | memory-controller@2000 { |
@@ -351,10 +350,10 @@ | |||
351 | #interrupt-cells = <1>; | 350 | #interrupt-cells = <1>; |
352 | #size-cells = <2>; | 351 | #size-cells = <2>; |
353 | #address-cells = <3>; | 352 | #address-cells = <3>; |
354 | reg = <0xffe08000 0x1000>; | 353 | reg = <0 0xffe08000 0 0x1000>; |
355 | bus-range = <0 255>; | 354 | bus-range = <0 255>; |
356 | ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000 | 355 | ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 |
357 | 0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>; | 356 | 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>; |
358 | clock-frequency = <33333333>; | 357 | clock-frequency = <33333333>; |
359 | interrupt-parent = <&mpic>; | 358 | interrupt-parent = <&mpic>; |
360 | interrupts = <24 2>; | 359 | interrupts = <24 2>; |
@@ -561,10 +560,10 @@ | |||
561 | #interrupt-cells = <1>; | 560 | #interrupt-cells = <1>; |
562 | #size-cells = <2>; | 561 | #size-cells = <2>; |
563 | #address-cells = <3>; | 562 | #address-cells = <3>; |
564 | reg = <0xffe09000 0x1000>; | 563 | reg = <0 0xffe09000 0 0x1000>; |
565 | bus-range = <0 255>; | 564 | bus-range = <0 255>; |
566 | ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000 | 565 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 |
567 | 0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>; | 566 | 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>; |
568 | clock-frequency = <33333333>; | 567 | clock-frequency = <33333333>; |
569 | interrupt-parent = <&mpic>; | 568 | interrupt-parent = <&mpic>; |
570 | interrupts = <26 2>; | 569 | interrupts = <26 2>; |
@@ -598,10 +597,10 @@ | |||
598 | #interrupt-cells = <1>; | 597 | #interrupt-cells = <1>; |
599 | #size-cells = <2>; | 598 | #size-cells = <2>; |
600 | #address-cells = <3>; | 599 | #address-cells = <3>; |
601 | reg = <0xffe0a000 0x1000>; | 600 | reg = <0 0xffe0a000 0 0x1000>; |
602 | bus-range = <0 255>; | 601 | bus-range = <0 255>; |
603 | ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000 | 602 | ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 |
604 | 0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>; | 603 | 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>; |
605 | clock-frequency = <33333333>; | 604 | clock-frequency = <33333333>; |
606 | interrupt-parent = <&mpic>; | 605 | interrupt-parent = <&mpic>; |
607 | interrupts = <27 2>; | 606 | interrupts = <27 2>; |
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c index c541fd8a95d4..9276327bc2bb 100644 --- a/arch/powerpc/boot/libfdt-wrapper.c +++ b/arch/powerpc/boot/libfdt-wrapper.c | |||
@@ -105,6 +105,11 @@ static int fdt_wrapper_setprop(const void *devp, const char *name, | |||
105 | return check_err(rc); | 105 | return check_err(rc); |
106 | } | 106 | } |
107 | 107 | ||
108 | static int fdt_wrapper_del_node(const void *devp) | ||
109 | { | ||
110 | return fdt_del_node(fdt, devp_offset(devp)); | ||
111 | } | ||
112 | |||
108 | static void *fdt_wrapper_get_parent(const void *devp) | 113 | static void *fdt_wrapper_get_parent(const void *devp) |
109 | { | 114 | { |
110 | return offset_devp(fdt_parent_offset(fdt, devp_offset(devp))); | 115 | return offset_devp(fdt_parent_offset(fdt, devp_offset(devp))); |
@@ -165,6 +170,7 @@ static unsigned long fdt_wrapper_finalize(void) | |||
165 | void fdt_init(void *blob) | 170 | void fdt_init(void *blob) |
166 | { | 171 | { |
167 | int err; | 172 | int err; |
173 | int bufsize; | ||
168 | 174 | ||
169 | dt_ops.finddevice = fdt_wrapper_finddevice; | 175 | dt_ops.finddevice = fdt_wrapper_finddevice; |
170 | dt_ops.getprop = fdt_wrapper_getprop; | 176 | dt_ops.getprop = fdt_wrapper_getprop; |
@@ -173,21 +179,21 @@ void fdt_init(void *blob) | |||
173 | dt_ops.create_node = fdt_wrapper_create_node; | 179 | dt_ops.create_node = fdt_wrapper_create_node; |
174 | dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value; | 180 | dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value; |
175 | dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible; | 181 | dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible; |
182 | dt_ops.del_node = fdt_wrapper_del_node; | ||
176 | dt_ops.get_path = fdt_wrapper_get_path; | 183 | dt_ops.get_path = fdt_wrapper_get_path; |
177 | dt_ops.finalize = fdt_wrapper_finalize; | 184 | dt_ops.finalize = fdt_wrapper_finalize; |
178 | 185 | ||
179 | /* Make sure the dt blob is the right version and so forth */ | 186 | /* Make sure the dt blob is the right version and so forth */ |
180 | fdt = blob; | 187 | fdt = blob; |
181 | err = fdt_open_into(fdt, fdt, fdt_totalsize(blob)); | 188 | bufsize = fdt_totalsize(fdt) + 4; |
182 | if (err == -FDT_ERR_NOSPACE) { | 189 | buf = malloc(bufsize); |
183 | int bufsize = fdt_totalsize(fdt) + 4; | 190 | if(!buf) |
184 | buf = malloc(bufsize); | 191 | fatal("malloc failed. can't relocate the device tree\n\r"); |
185 | err = fdt_open_into(fdt, buf, bufsize); | 192 | |
186 | } | 193 | err = fdt_open_into(fdt, buf, bufsize); |
187 | 194 | ||
188 | if (err != 0) | 195 | if (err != 0) |
189 | fatal("fdt_init(): %s\n\r", fdt_strerror(err)); | 196 | fatal("fdt_init(): %s\n\r", fdt_strerror(err)); |
190 | 197 | ||
191 | if (buf) | 198 | fdt = buf; |
192 | fdt = buf; | ||
193 | } | 199 | } |
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index 9e7f3ddd9913..ae32801ebd69 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c | |||
@@ -56,9 +56,19 @@ static struct addr_range prep_kernel(void) | |||
56 | if (platform_ops.vmlinux_alloc) { | 56 | if (platform_ops.vmlinux_alloc) { |
57 | addr = platform_ops.vmlinux_alloc(ei.memsize); | 57 | addr = platform_ops.vmlinux_alloc(ei.memsize); |
58 | } else { | 58 | } else { |
59 | if ((unsigned long)_start < ei.memsize) | 59 | /* |
60 | * Check if the kernel image (without bss) would overwrite the | ||
61 | * bootwrapper. The device tree has been moved in fdt_init() | ||
62 | * to an area allocated with malloc() (somewhere past _end). | ||
63 | */ | ||
64 | if ((unsigned long)_start < ei.loadsize) | ||
60 | fatal("Insufficient memory for kernel at address 0!" | 65 | fatal("Insufficient memory for kernel at address 0!" |
61 | " (_start=%p)\n\r", _start); | 66 | " (_start=%p, uncomressed size=%08x)\n\r", |
67 | _start, ei.loadsize); | ||
68 | |||
69 | if ((unsigned long)_end < ei.memsize) | ||
70 | fatal("The final kernel image would overwrite the " | ||
71 | "device tree\n\r"); | ||
62 | } | 72 | } |
63 | 73 | ||
64 | /* Finally, gunzip the kernel */ | 74 | /* Finally, gunzip the kernel */ |
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index 321e2f5afe71..b3218ce451bb 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h | |||
@@ -40,6 +40,7 @@ struct dt_ops { | |||
40 | const int buflen); | 40 | const int buflen); |
41 | int (*setprop)(const void *phandle, const char *name, | 41 | int (*setprop)(const void *phandle, const char *name, |
42 | const void *buf, const int buflen); | 42 | const void *buf, const int buflen); |
43 | int (*del_node)(const void *phandle); | ||
43 | void *(*get_parent)(const void *phandle); | 44 | void *(*get_parent)(const void *phandle); |
44 | /* The node must not already exist. */ | 45 | /* The node must not already exist. */ |
45 | void *(*create_node)(const void *parent, const char *name); | 46 | void *(*create_node)(const void *parent, const char *name); |
@@ -126,6 +127,11 @@ static inline int setprop_str(void *devp, const char *name, const char *buf) | |||
126 | return -1; | 127 | return -1; |
127 | } | 128 | } |
128 | 129 | ||
130 | static inline int del_node(const void *devp) | ||
131 | { | ||
132 | return dt_ops.del_node ? dt_ops.del_node(devp) : -1; | ||
133 | } | ||
134 | |||
129 | static inline void *get_parent(const char *devp) | 135 | static inline void *get_parent(const char *devp) |
130 | { | 136 | { |
131 | return dt_ops.get_parent ? dt_ops.get_parent(devp) : NULL; | 137 | return dt_ops.get_parent ? dt_ops.get_parent(devp) : NULL; |
diff --git a/arch/powerpc/boot/string.S b/arch/powerpc/boot/string.S index 643e4cb2f11d..acc9428f2789 100644 --- a/arch/powerpc/boot/string.S +++ b/arch/powerpc/boot/string.S | |||
@@ -235,7 +235,7 @@ memchr: | |||
235 | .globl memcmp | 235 | .globl memcmp |
236 | memcmp: | 236 | memcmp: |
237 | cmpwi 0,r5,0 | 237 | cmpwi 0,r5,0 |
238 | blelr | 238 | ble 2f |
239 | mtctr r5 | 239 | mtctr r5 |
240 | addi r6,r3,-1 | 240 | addi r6,r3,-1 |
241 | addi r4,r4,-1 | 241 | addi r4,r4,-1 |
@@ -244,6 +244,8 @@ memcmp: | |||
244 | subf. r3,r0,r3 | 244 | subf. r3,r0,r3 |
245 | bdnzt 2,1b | 245 | bdnzt 2,1b |
246 | blr | 246 | blr |
247 | 2: li r3,0 | ||
248 | blr | ||
247 | 249 | ||
248 | 250 | ||
249 | /* | 251 | /* |
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index ee0dc41d7c56..f39073511a49 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper | |||
@@ -306,11 +306,14 @@ fi | |||
306 | 306 | ||
307 | # post-processing needed for some platforms | 307 | # post-processing needed for some platforms |
308 | case "$platform" in | 308 | case "$platform" in |
309 | pseries|chrp) | 309 | pseries) |
310 | ${CROSS}objcopy -O binary -j .fakeelf "$kernel" "$ofile".rpanote | 310 | ${CROSS}objcopy -O binary -j .fakeelf "$kernel" "$ofile".rpanote |
311 | $objbin/addnote "$ofile" "$ofile".rpanote | 311 | $objbin/addnote "$ofile" "$ofile".rpanote |
312 | rm -r "$ofile".rpanote | 312 | rm -r "$ofile".rpanote |
313 | ;; | 313 | ;; |
314 | chrp) | ||
315 | $objbin/addnote -r c00000 "$ofile" | ||
316 | ;; | ||
314 | coff) | 317 | coff) |
315 | ${CROSS}objcopy -O aixcoff-rs6000 --set-start "$entry" "$ofile" | 318 | ${CROSS}objcopy -O aixcoff-rs6000 --set-start "$entry" "$ofile" |
316 | $objbin/hack-coff "$ofile" | 319 | $objbin/hack-coff "$ofile" |
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig new file mode 100644 index 000000000000..39bd9eb453f0 --- /dev/null +++ b/arch/powerpc/configs/40x/acadia_defconfig | |||
@@ -0,0 +1,921 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.27-rc5 | ||
4 | # Mon Oct 13 13:47:16 2008 | ||
5 | # | ||
6 | # CONFIG_PPC64 is not set | ||
7 | |||
8 | # | ||
9 | # Processor support | ||
10 | # | ||
11 | # CONFIG_6xx is not set | ||
12 | # CONFIG_PPC_85xx is not set | ||
13 | # CONFIG_PPC_8xx is not set | ||
14 | CONFIG_40x=y | ||
15 | # CONFIG_44x is not set | ||
16 | # CONFIG_E200 is not set | ||
17 | CONFIG_4xx=y | ||
18 | # CONFIG_PPC_MM_SLICES is not set | ||
19 | CONFIG_NOT_COHERENT_CACHE=y | ||
20 | CONFIG_PPC32=y | ||
21 | CONFIG_WORD_SIZE=32 | ||
22 | CONFIG_PPC_MERGE=y | ||
23 | CONFIG_MMU=y | ||
24 | CONFIG_GENERIC_CMOS_UPDATE=y | ||
25 | CONFIG_GENERIC_TIME=y | ||
26 | CONFIG_GENERIC_TIME_VSYSCALL=y | ||
27 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
28 | CONFIG_GENERIC_HARDIRQS=y | ||
29 | # CONFIG_HAVE_SETUP_PER_CPU_AREA is not set | ||
30 | CONFIG_IRQ_PER_CPU=y | ||
31 | CONFIG_STACKTRACE_SUPPORT=y | ||
32 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | ||
33 | CONFIG_LOCKDEP_SUPPORT=y | ||
34 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | ||
35 | CONFIG_ARCH_HAS_ILOG2_U32=y | ||
36 | CONFIG_GENERIC_HWEIGHT=y | ||
37 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
38 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
39 | # CONFIG_ARCH_NO_VIRT_TO_BUS is not set | ||
40 | CONFIG_PPC=y | ||
41 | CONFIG_EARLY_PRINTK=y | ||
42 | CONFIG_GENERIC_NVRAM=y | ||
43 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | ||
44 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | ||
45 | CONFIG_PPC_OF=y | ||
46 | CONFIG_OF=y | ||
47 | CONFIG_PPC_UDBG_16550=y | ||
48 | # CONFIG_GENERIC_TBSYNC is not set | ||
49 | CONFIG_AUDIT_ARCH=y | ||
50 | CONFIG_GENERIC_BUG=y | ||
51 | # CONFIG_DEFAULT_UIMAGE is not set | ||
52 | CONFIG_PPC_DCR_NATIVE=y | ||
53 | # CONFIG_PPC_DCR_MMIO is not set | ||
54 | CONFIG_PPC_DCR=y | ||
55 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
56 | |||
57 | # | ||
58 | # General setup | ||
59 | # | ||
60 | CONFIG_EXPERIMENTAL=y | ||
61 | CONFIG_BROKEN_ON_SMP=y | ||
62 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
63 | CONFIG_LOCALVERSION="" | ||
64 | CONFIG_LOCALVERSION_AUTO=y | ||
65 | CONFIG_SWAP=y | ||
66 | CONFIG_SYSVIPC=y | ||
67 | CONFIG_SYSVIPC_SYSCTL=y | ||
68 | CONFIG_POSIX_MQUEUE=y | ||
69 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
70 | # CONFIG_TASKSTATS is not set | ||
71 | # CONFIG_AUDIT is not set | ||
72 | # CONFIG_IKCONFIG is not set | ||
73 | CONFIG_LOG_BUF_SHIFT=14 | ||
74 | # CONFIG_CGROUPS is not set | ||
75 | CONFIG_GROUP_SCHED=y | ||
76 | # CONFIG_FAIR_GROUP_SCHED is not set | ||
77 | # CONFIG_RT_GROUP_SCHED is not set | ||
78 | CONFIG_USER_SCHED=y | ||
79 | # CONFIG_CGROUP_SCHED is not set | ||
80 | CONFIG_SYSFS_DEPRECATED=y | ||
81 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
82 | # CONFIG_RELAY is not set | ||
83 | # CONFIG_NAMESPACES is not set | ||
84 | CONFIG_BLK_DEV_INITRD=y | ||
85 | CONFIG_INITRAMFS_SOURCE="" | ||
86 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
87 | CONFIG_SYSCTL=y | ||
88 | CONFIG_EMBEDDED=y | ||
89 | CONFIG_SYSCTL_SYSCALL=y | ||
90 | CONFIG_KALLSYMS=y | ||
91 | CONFIG_KALLSYMS_ALL=y | ||
92 | CONFIG_KALLSYMS_EXTRA_PASS=y | ||
93 | CONFIG_HOTPLUG=y | ||
94 | CONFIG_PRINTK=y | ||
95 | CONFIG_BUG=y | ||
96 | CONFIG_ELF_CORE=y | ||
97 | CONFIG_COMPAT_BRK=y | ||
98 | CONFIG_BASE_FULL=y | ||
99 | CONFIG_FUTEX=y | ||
100 | CONFIG_ANON_INODES=y | ||
101 | CONFIG_EPOLL=y | ||
102 | CONFIG_SIGNALFD=y | ||
103 | CONFIG_TIMERFD=y | ||
104 | CONFIG_EVENTFD=y | ||
105 | CONFIG_SHMEM=y | ||
106 | CONFIG_VM_EVENT_COUNTERS=y | ||
107 | CONFIG_SLUB_DEBUG=y | ||
108 | # CONFIG_SLAB is not set | ||
109 | CONFIG_SLUB=y | ||
110 | # CONFIG_SLOB is not set | ||
111 | # CONFIG_PROFILING is not set | ||
112 | # CONFIG_MARKERS is not set | ||
113 | CONFIG_HAVE_OPROFILE=y | ||
114 | # CONFIG_KPROBES is not set | ||
115 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | ||
116 | CONFIG_HAVE_IOREMAP_PROT=y | ||
117 | CONFIG_HAVE_KPROBES=y | ||
118 | CONFIG_HAVE_KRETPROBES=y | ||
119 | CONFIG_HAVE_ARCH_TRACEHOOK=y | ||
120 | # CONFIG_HAVE_DMA_ATTRS is not set | ||
121 | # CONFIG_USE_GENERIC_SMP_HELPERS is not set | ||
122 | # CONFIG_HAVE_CLK is not set | ||
123 | CONFIG_PROC_PAGE_MONITOR=y | ||
124 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | ||
125 | CONFIG_SLABINFO=y | ||
126 | CONFIG_RT_MUTEXES=y | ||
127 | # CONFIG_TINY_SHMEM is not set | ||
128 | CONFIG_BASE_SMALL=0 | ||
129 | CONFIG_MODULES=y | ||
130 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
131 | CONFIG_MODULE_UNLOAD=y | ||
132 | # CONFIG_MODULE_FORCE_UNLOAD is not set | ||
133 | # CONFIG_MODVERSIONS is not set | ||
134 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
135 | CONFIG_KMOD=y | ||
136 | CONFIG_BLOCK=y | ||
137 | CONFIG_LBD=y | ||
138 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
139 | # CONFIG_LSF is not set | ||
140 | # CONFIG_BLK_DEV_BSG is not set | ||
141 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
142 | |||
143 | # | ||
144 | # IO Schedulers | ||
145 | # | ||
146 | CONFIG_IOSCHED_NOOP=y | ||
147 | CONFIG_IOSCHED_AS=y | ||
148 | CONFIG_IOSCHED_DEADLINE=y | ||
149 | CONFIG_IOSCHED_CFQ=y | ||
150 | CONFIG_DEFAULT_AS=y | ||
151 | # CONFIG_DEFAULT_DEADLINE is not set | ||
152 | # CONFIG_DEFAULT_CFQ is not set | ||
153 | # CONFIG_DEFAULT_NOOP is not set | ||
154 | CONFIG_DEFAULT_IOSCHED="anticipatory" | ||
155 | CONFIG_CLASSIC_RCU=y | ||
156 | # CONFIG_PPC4xx_PCI_EXPRESS is not set | ||
157 | |||
158 | # | ||
159 | # Platform support | ||
160 | # | ||
161 | # CONFIG_PPC_CELL is not set | ||
162 | # CONFIG_PPC_CELL_NATIVE is not set | ||
163 | # CONFIG_PQ2ADS is not set | ||
164 | CONFIG_ACADIA=y | ||
165 | # CONFIG_EP405 is not set | ||
166 | # CONFIG_KILAUEA is not set | ||
167 | # CONFIG_MAKALU is not set | ||
168 | # CONFIG_WALNUT is not set | ||
169 | # CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set | ||
170 | CONFIG_PPC40x_SIMPLE=y | ||
171 | CONFIG_405EZ=y | ||
172 | # CONFIG_IPIC is not set | ||
173 | # CONFIG_MPIC is not set | ||
174 | # CONFIG_MPIC_WEIRD is not set | ||
175 | # CONFIG_PPC_I8259 is not set | ||
176 | # CONFIG_PPC_RTAS is not set | ||
177 | # CONFIG_MMIO_NVRAM is not set | ||
178 | # CONFIG_PPC_MPC106 is not set | ||
179 | # CONFIG_PPC_970_NAP is not set | ||
180 | # CONFIG_PPC_INDIRECT_IO is not set | ||
181 | # CONFIG_GENERIC_IOMAP is not set | ||
182 | # CONFIG_CPU_FREQ is not set | ||
183 | # CONFIG_FSL_ULI1575 is not set | ||
184 | |||
185 | # | ||
186 | # Kernel options | ||
187 | # | ||
188 | # CONFIG_HIGHMEM is not set | ||
189 | # CONFIG_TICK_ONESHOT is not set | ||
190 | # CONFIG_NO_HZ is not set | ||
191 | # CONFIG_HIGH_RES_TIMERS is not set | ||
192 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
193 | # CONFIG_HZ_100 is not set | ||
194 | CONFIG_HZ_250=y | ||
195 | # CONFIG_HZ_300 is not set | ||
196 | # CONFIG_HZ_1000 is not set | ||
197 | CONFIG_HZ=250 | ||
198 | # CONFIG_SCHED_HRTICK is not set | ||
199 | CONFIG_PREEMPT_NONE=y | ||
200 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
201 | # CONFIG_PREEMPT is not set | ||
202 | CONFIG_BINFMT_ELF=y | ||
203 | # CONFIG_BINFMT_MISC is not set | ||
204 | # CONFIG_MATH_EMULATION is not set | ||
205 | # CONFIG_IOMMU_HELPER is not set | ||
206 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | ||
207 | CONFIG_ARCH_HAS_WALK_MEMORY=y | ||
208 | CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y | ||
209 | CONFIG_ARCH_FLATMEM_ENABLE=y | ||
210 | CONFIG_ARCH_POPULATES_NODE_MAP=y | ||
211 | CONFIG_SELECT_MEMORY_MODEL=y | ||
212 | CONFIG_FLATMEM_MANUAL=y | ||
213 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
214 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
215 | CONFIG_FLATMEM=y | ||
216 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
217 | # CONFIG_SPARSEMEM_STATIC is not set | ||
218 | # CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set | ||
219 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
220 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
221 | CONFIG_MIGRATION=y | ||
222 | # CONFIG_RESOURCES_64BIT is not set | ||
223 | CONFIG_ZONE_DMA_FLAG=1 | ||
224 | CONFIG_BOUNCE=y | ||
225 | CONFIG_VIRT_TO_BUS=y | ||
226 | CONFIG_FORCE_MAX_ZONEORDER=11 | ||
227 | CONFIG_PROC_DEVICETREE=y | ||
228 | # CONFIG_CMDLINE_BOOL is not set | ||
229 | CONFIG_EXTRA_TARGETS="" | ||
230 | # CONFIG_PM is not set | ||
231 | CONFIG_SECCOMP=y | ||
232 | CONFIG_ISA_DMA_API=y | ||
233 | |||
234 | # | ||
235 | # Bus options | ||
236 | # | ||
237 | CONFIG_ZONE_DMA=y | ||
238 | CONFIG_PPC_INDIRECT_PCI=y | ||
239 | CONFIG_4xx_SOC=y | ||
240 | CONFIG_PPC_PCI_CHOICE=y | ||
241 | CONFIG_PCI=y | ||
242 | CONFIG_PCI_DOMAINS=y | ||
243 | CONFIG_PCI_SYSCALL=y | ||
244 | # CONFIG_PCIEPORTBUS is not set | ||
245 | CONFIG_ARCH_SUPPORTS_MSI=y | ||
246 | # CONFIG_PCI_MSI is not set | ||
247 | CONFIG_PCI_LEGACY=y | ||
248 | # CONFIG_PCI_DEBUG is not set | ||
249 | # CONFIG_PCCARD is not set | ||
250 | # CONFIG_HOTPLUG_PCI is not set | ||
251 | # CONFIG_HAS_RAPIDIO is not set | ||
252 | |||
253 | # | ||
254 | # Advanced setup | ||
255 | # | ||
256 | # CONFIG_ADVANCED_OPTIONS is not set | ||
257 | |||
258 | # | ||
259 | # Default settings for advanced configuration options are used | ||
260 | # | ||
261 | CONFIG_LOWMEM_SIZE=0x30000000 | ||
262 | CONFIG_PAGE_OFFSET=0xc0000000 | ||
263 | CONFIG_KERNEL_START=0xc0000000 | ||
264 | CONFIG_PHYSICAL_START=0x00000000 | ||
265 | CONFIG_TASK_SIZE=0xc0000000 | ||
266 | CONFIG_CONSISTENT_START=0xff100000 | ||
267 | CONFIG_CONSISTENT_SIZE=0x00200000 | ||
268 | CONFIG_NET=y | ||
269 | |||
270 | # | ||
271 | # Networking options | ||
272 | # | ||
273 | CONFIG_PACKET=y | ||
274 | # CONFIG_PACKET_MMAP is not set | ||
275 | CONFIG_UNIX=y | ||
276 | # CONFIG_NET_KEY is not set | ||
277 | CONFIG_INET=y | ||
278 | # CONFIG_IP_MULTICAST is not set | ||
279 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
280 | CONFIG_IP_FIB_HASH=y | ||
281 | CONFIG_IP_PNP=y | ||
282 | CONFIG_IP_PNP_DHCP=y | ||
283 | CONFIG_IP_PNP_BOOTP=y | ||
284 | # CONFIG_IP_PNP_RARP is not set | ||
285 | # CONFIG_NET_IPIP is not set | ||
286 | # CONFIG_NET_IPGRE is not set | ||
287 | # CONFIG_ARPD is not set | ||
288 | # CONFIG_SYN_COOKIES is not set | ||
289 | # CONFIG_INET_AH is not set | ||
290 | # CONFIG_INET_ESP is not set | ||
291 | # CONFIG_INET_IPCOMP is not set | ||
292 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
293 | # CONFIG_INET_TUNNEL is not set | ||
294 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
295 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
296 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
297 | # CONFIG_INET_LRO is not set | ||
298 | CONFIG_INET_DIAG=y | ||
299 | CONFIG_INET_TCP_DIAG=y | ||
300 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
301 | CONFIG_TCP_CONG_CUBIC=y | ||
302 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
303 | # CONFIG_TCP_MD5SIG is not set | ||
304 | # CONFIG_IPV6 is not set | ||
305 | # CONFIG_NETWORK_SECMARK is not set | ||
306 | # CONFIG_NETFILTER is not set | ||
307 | # CONFIG_IP_DCCP is not set | ||
308 | # CONFIG_IP_SCTP is not set | ||
309 | # CONFIG_TIPC is not set | ||
310 | # CONFIG_ATM is not set | ||
311 | # CONFIG_BRIDGE is not set | ||
312 | # CONFIG_VLAN_8021Q is not set | ||
313 | # CONFIG_DECNET is not set | ||
314 | # CONFIG_LLC2 is not set | ||
315 | # CONFIG_IPX is not set | ||
316 | # CONFIG_ATALK is not set | ||
317 | # CONFIG_X25 is not set | ||
318 | # CONFIG_LAPB is not set | ||
319 | # CONFIG_ECONET is not set | ||
320 | # CONFIG_WAN_ROUTER is not set | ||
321 | # CONFIG_NET_SCHED is not set | ||
322 | |||
323 | # | ||
324 | # Network testing | ||
325 | # | ||
326 | # CONFIG_NET_PKTGEN is not set | ||
327 | # CONFIG_HAMRADIO is not set | ||
328 | # CONFIG_CAN is not set | ||
329 | # CONFIG_IRDA is not set | ||
330 | # CONFIG_BT is not set | ||
331 | # CONFIG_AF_RXRPC is not set | ||
332 | |||
333 | # | ||
334 | # Wireless | ||
335 | # | ||
336 | # CONFIG_CFG80211 is not set | ||
337 | # CONFIG_WIRELESS_EXT is not set | ||
338 | # CONFIG_MAC80211 is not set | ||
339 | # CONFIG_IEEE80211 is not set | ||
340 | # CONFIG_RFKILL is not set | ||
341 | # CONFIG_NET_9P is not set | ||
342 | |||
343 | # | ||
344 | # Device Drivers | ||
345 | # | ||
346 | |||
347 | # | ||
348 | # Generic Driver Options | ||
349 | # | ||
350 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
351 | CONFIG_STANDALONE=y | ||
352 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
353 | CONFIG_FW_LOADER=y | ||
354 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
355 | CONFIG_EXTRA_FIRMWARE="" | ||
356 | # CONFIG_DEBUG_DRIVER is not set | ||
357 | # CONFIG_DEBUG_DEVRES is not set | ||
358 | # CONFIG_SYS_HYPERVISOR is not set | ||
359 | CONFIG_CONNECTOR=y | ||
360 | CONFIG_PROC_EVENTS=y | ||
361 | CONFIG_MTD=y | ||
362 | # CONFIG_MTD_DEBUG is not set | ||
363 | # CONFIG_MTD_CONCAT is not set | ||
364 | CONFIG_MTD_PARTITIONS=y | ||
365 | # CONFIG_MTD_REDBOOT_PARTS is not set | ||
366 | CONFIG_MTD_CMDLINE_PARTS=y | ||
367 | CONFIG_MTD_OF_PARTS=y | ||
368 | # CONFIG_MTD_AR7_PARTS is not set | ||
369 | |||
370 | # | ||
371 | # User Modules And Translation Layers | ||
372 | # | ||
373 | CONFIG_MTD_CHAR=y | ||
374 | CONFIG_MTD_BLKDEVS=m | ||
375 | CONFIG_MTD_BLOCK=m | ||
376 | # CONFIG_MTD_BLOCK_RO is not set | ||
377 | # CONFIG_FTL is not set | ||
378 | # CONFIG_NFTL is not set | ||
379 | # CONFIG_INFTL is not set | ||
380 | # CONFIG_RFD_FTL is not set | ||
381 | # CONFIG_SSFDC is not set | ||
382 | # CONFIG_MTD_OOPS is not set | ||
383 | |||
384 | # | ||
385 | # RAM/ROM/Flash chip drivers | ||
386 | # | ||
387 | CONFIG_MTD_CFI=y | ||
388 | CONFIG_MTD_JEDECPROBE=y | ||
389 | CONFIG_MTD_GEN_PROBE=y | ||
390 | # CONFIG_MTD_CFI_ADV_OPTIONS is not set | ||
391 | CONFIG_MTD_MAP_BANK_WIDTH_1=y | ||
392 | CONFIG_MTD_MAP_BANK_WIDTH_2=y | ||
393 | CONFIG_MTD_MAP_BANK_WIDTH_4=y | ||
394 | # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set | ||
395 | # CONFIG_MTD_MAP_BANK_WIDTH_16 is not set | ||
396 | # CONFIG_MTD_MAP_BANK_WIDTH_32 is not set | ||
397 | CONFIG_MTD_CFI_I1=y | ||
398 | CONFIG_MTD_CFI_I2=y | ||
399 | # CONFIG_MTD_CFI_I4 is not set | ||
400 | # CONFIG_MTD_CFI_I8 is not set | ||
401 | # CONFIG_MTD_CFI_INTELEXT is not set | ||
402 | CONFIG_MTD_CFI_AMDSTD=y | ||
403 | # CONFIG_MTD_CFI_STAA is not set | ||
404 | CONFIG_MTD_CFI_UTIL=y | ||
405 | # CONFIG_MTD_RAM is not set | ||
406 | # CONFIG_MTD_ROM is not set | ||
407 | # CONFIG_MTD_ABSENT is not set | ||
408 | |||
409 | # | ||
410 | # Mapping drivers for chip access | ||
411 | # | ||
412 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set | ||
413 | # CONFIG_MTD_PHYSMAP is not set | ||
414 | CONFIG_MTD_PHYSMAP_OF=y | ||
415 | # CONFIG_MTD_INTEL_VR_NOR is not set | ||
416 | # CONFIG_MTD_PLATRAM is not set | ||
417 | |||
418 | # | ||
419 | # Self-contained MTD device drivers | ||
420 | # | ||
421 | # CONFIG_MTD_PMC551 is not set | ||
422 | # CONFIG_MTD_SLRAM is not set | ||
423 | # CONFIG_MTD_PHRAM is not set | ||
424 | # CONFIG_MTD_MTDRAM is not set | ||
425 | # CONFIG_MTD_BLOCK2MTD is not set | ||
426 | |||
427 | # | ||
428 | # Disk-On-Chip Device Drivers | ||
429 | # | ||
430 | # CONFIG_MTD_DOC2000 is not set | ||
431 | # CONFIG_MTD_DOC2001 is not set | ||
432 | # CONFIG_MTD_DOC2001PLUS is not set | ||
433 | # CONFIG_MTD_NAND is not set | ||
434 | # CONFIG_MTD_ONENAND is not set | ||
435 | |||
436 | # | ||
437 | # UBI - Unsorted block images | ||
438 | # | ||
439 | # CONFIG_MTD_UBI is not set | ||
440 | CONFIG_OF_DEVICE=y | ||
441 | # CONFIG_PARPORT is not set | ||
442 | CONFIG_BLK_DEV=y | ||
443 | # CONFIG_BLK_DEV_FD is not set | ||
444 | # CONFIG_BLK_CPQ_DA is not set | ||
445 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
446 | # CONFIG_BLK_DEV_DAC960 is not set | ||
447 | # CONFIG_BLK_DEV_UMEM is not set | ||
448 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
449 | # CONFIG_BLK_DEV_LOOP is not set | ||
450 | # CONFIG_BLK_DEV_NBD is not set | ||
451 | # CONFIG_BLK_DEV_SX8 is not set | ||
452 | CONFIG_BLK_DEV_RAM=y | ||
453 | CONFIG_BLK_DEV_RAM_COUNT=16 | ||
454 | CONFIG_BLK_DEV_RAM_SIZE=35000 | ||
455 | # CONFIG_BLK_DEV_XIP is not set | ||
456 | # CONFIG_CDROM_PKTCDVD is not set | ||
457 | # CONFIG_ATA_OVER_ETH is not set | ||
458 | # CONFIG_XILINX_SYSACE is not set | ||
459 | # CONFIG_BLK_DEV_HD is not set | ||
460 | # CONFIG_MISC_DEVICES is not set | ||
461 | CONFIG_HAVE_IDE=y | ||
462 | # CONFIG_IDE is not set | ||
463 | |||
464 | # | ||
465 | # SCSI device support | ||
466 | # | ||
467 | # CONFIG_RAID_ATTRS is not set | ||
468 | # CONFIG_SCSI is not set | ||
469 | # CONFIG_SCSI_DMA is not set | ||
470 | # CONFIG_SCSI_NETLINK is not set | ||
471 | # CONFIG_ATA is not set | ||
472 | # CONFIG_MD is not set | ||
473 | # CONFIG_FUSION is not set | ||
474 | |||
475 | # | ||
476 | # IEEE 1394 (FireWire) support | ||
477 | # | ||
478 | |||
479 | # | ||
480 | # Enable only one of the two stacks, unless you know what you are doing | ||
481 | # | ||
482 | # CONFIG_FIREWIRE is not set | ||
483 | # CONFIG_IEEE1394 is not set | ||
484 | # CONFIG_I2O is not set | ||
485 | # CONFIG_MACINTOSH_DRIVERS is not set | ||
486 | CONFIG_NETDEVICES=y | ||
487 | # CONFIG_DUMMY is not set | ||
488 | # CONFIG_BONDING is not set | ||
489 | # CONFIG_MACVLAN is not set | ||
490 | # CONFIG_EQUALIZER is not set | ||
491 | # CONFIG_TUN is not set | ||
492 | # CONFIG_VETH is not set | ||
493 | # CONFIG_ARCNET is not set | ||
494 | # CONFIG_PHYLIB is not set | ||
495 | CONFIG_NET_ETHERNET=y | ||
496 | CONFIG_MII=y | ||
497 | # CONFIG_HAPPYMEAL is not set | ||
498 | # CONFIG_SUNGEM is not set | ||
499 | # CONFIG_CASSINI is not set | ||
500 | # CONFIG_NET_VENDOR_3COM is not set | ||
501 | # CONFIG_NET_TULIP is not set | ||
502 | # CONFIG_HP100 is not set | ||
503 | CONFIG_IBM_NEW_EMAC=y | ||
504 | CONFIG_IBM_NEW_EMAC_RXB=256 | ||
505 | CONFIG_IBM_NEW_EMAC_TXB=256 | ||
506 | CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32 | ||
507 | CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256 | ||
508 | CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0 | ||
509 | CONFIG_IBM_NEW_EMAC_DEBUG=y | ||
510 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
511 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
512 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
513 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
514 | CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL=y | ||
515 | CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT=y | ||
516 | CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR=y | ||
517 | # CONFIG_NET_PCI is not set | ||
518 | # CONFIG_B44 is not set | ||
519 | # CONFIG_NETDEV_1000 is not set | ||
520 | # CONFIG_NETDEV_10000 is not set | ||
521 | # CONFIG_TR is not set | ||
522 | |||
523 | # | ||
524 | # Wireless LAN | ||
525 | # | ||
526 | # CONFIG_WLAN_PRE80211 is not set | ||
527 | # CONFIG_WLAN_80211 is not set | ||
528 | # CONFIG_IWLWIFI_LEDS is not set | ||
529 | # CONFIG_WAN is not set | ||
530 | # CONFIG_FDDI is not set | ||
531 | # CONFIG_HIPPI is not set | ||
532 | # CONFIG_PPP is not set | ||
533 | # CONFIG_SLIP is not set | ||
534 | # CONFIG_NETCONSOLE is not set | ||
535 | # CONFIG_NETPOLL is not set | ||
536 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
537 | # CONFIG_ISDN is not set | ||
538 | # CONFIG_PHONE is not set | ||
539 | |||
540 | # | ||
541 | # Input device support | ||
542 | # | ||
543 | # CONFIG_INPUT is not set | ||
544 | |||
545 | # | ||
546 | # Hardware I/O ports | ||
547 | # | ||
548 | # CONFIG_SERIO is not set | ||
549 | # CONFIG_GAMEPORT is not set | ||
550 | |||
551 | # | ||
552 | # Character devices | ||
553 | # | ||
554 | # CONFIG_VT is not set | ||
555 | CONFIG_DEVKMEM=y | ||
556 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
557 | # CONFIG_NOZOMI is not set | ||
558 | |||
559 | # | ||
560 | # Serial drivers | ||
561 | # | ||
562 | CONFIG_SERIAL_8250=y | ||
563 | CONFIG_SERIAL_8250_CONSOLE=y | ||
564 | CONFIG_SERIAL_8250_PCI=y | ||
565 | CONFIG_SERIAL_8250_NR_UARTS=4 | ||
566 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | ||
567 | CONFIG_SERIAL_8250_EXTENDED=y | ||
568 | # CONFIG_SERIAL_8250_MANY_PORTS is not set | ||
569 | CONFIG_SERIAL_8250_SHARE_IRQ=y | ||
570 | # CONFIG_SERIAL_8250_DETECT_IRQ is not set | ||
571 | # CONFIG_SERIAL_8250_RSA is not set | ||
572 | |||
573 | # | ||
574 | # Non-8250 serial port support | ||
575 | # | ||
576 | # CONFIG_SERIAL_UARTLITE is not set | ||
577 | CONFIG_SERIAL_CORE=y | ||
578 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
579 | # CONFIG_SERIAL_JSM is not set | ||
580 | CONFIG_SERIAL_OF_PLATFORM=y | ||
581 | CONFIG_UNIX98_PTYS=y | ||
582 | CONFIG_LEGACY_PTYS=y | ||
583 | CONFIG_LEGACY_PTY_COUNT=256 | ||
584 | # CONFIG_IPMI_HANDLER is not set | ||
585 | # CONFIG_HW_RANDOM is not set | ||
586 | # CONFIG_NVRAM is not set | ||
587 | # CONFIG_GEN_RTC is not set | ||
588 | # CONFIG_R3964 is not set | ||
589 | # CONFIG_APPLICOM is not set | ||
590 | # CONFIG_RAW_DRIVER is not set | ||
591 | # CONFIG_TCG_TPM is not set | ||
592 | CONFIG_DEVPORT=y | ||
593 | # CONFIG_I2C is not set | ||
594 | # CONFIG_SPI is not set | ||
595 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | ||
596 | # CONFIG_GPIOLIB is not set | ||
597 | # CONFIG_W1 is not set | ||
598 | # CONFIG_POWER_SUPPLY is not set | ||
599 | # CONFIG_HWMON is not set | ||
600 | CONFIG_THERMAL=y | ||
601 | # CONFIG_WATCHDOG is not set | ||
602 | |||
603 | # | ||
604 | # Sonics Silicon Backplane | ||
605 | # | ||
606 | CONFIG_SSB_POSSIBLE=y | ||
607 | # CONFIG_SSB is not set | ||
608 | |||
609 | # | ||
610 | # Multifunction device drivers | ||
611 | # | ||
612 | # CONFIG_MFD_CORE is not set | ||
613 | # CONFIG_MFD_SM501 is not set | ||
614 | # CONFIG_HTC_PASIC3 is not set | ||
615 | # CONFIG_MFD_TMIO is not set | ||
616 | |||
617 | # | ||
618 | # Multimedia devices | ||
619 | # | ||
620 | |||
621 | # | ||
622 | # Multimedia core support | ||
623 | # | ||
624 | # CONFIG_VIDEO_DEV is not set | ||
625 | # CONFIG_DVB_CORE is not set | ||
626 | # CONFIG_VIDEO_MEDIA is not set | ||
627 | |||
628 | # | ||
629 | # Multimedia drivers | ||
630 | # | ||
631 | # CONFIG_DAB is not set | ||
632 | |||
633 | # | ||
634 | # Graphics support | ||
635 | # | ||
636 | # CONFIG_AGP is not set | ||
637 | # CONFIG_DRM is not set | ||
638 | # CONFIG_VGASTATE is not set | ||
639 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
640 | # CONFIG_FB is not set | ||
641 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
642 | |||
643 | # | ||
644 | # Display device support | ||
645 | # | ||
646 | # CONFIG_DISPLAY_SUPPORT is not set | ||
647 | # CONFIG_SOUND is not set | ||
648 | # CONFIG_USB_SUPPORT is not set | ||
649 | # CONFIG_MMC is not set | ||
650 | # CONFIG_MEMSTICK is not set | ||
651 | # CONFIG_NEW_LEDS is not set | ||
652 | # CONFIG_ACCESSIBILITY is not set | ||
653 | # CONFIG_INFINIBAND is not set | ||
654 | # CONFIG_EDAC is not set | ||
655 | # CONFIG_RTC_CLASS is not set | ||
656 | # CONFIG_DMADEVICES is not set | ||
657 | # CONFIG_UIO is not set | ||
658 | |||
659 | # | ||
660 | # File systems | ||
661 | # | ||
662 | CONFIG_EXT2_FS=y | ||
663 | # CONFIG_EXT2_FS_XATTR is not set | ||
664 | # CONFIG_EXT2_FS_XIP is not set | ||
665 | # CONFIG_EXT3_FS is not set | ||
666 | # CONFIG_EXT4DEV_FS is not set | ||
667 | # CONFIG_REISERFS_FS is not set | ||
668 | # CONFIG_JFS_FS is not set | ||
669 | # CONFIG_FS_POSIX_ACL is not set | ||
670 | # CONFIG_XFS_FS is not set | ||
671 | # CONFIG_OCFS2_FS is not set | ||
672 | CONFIG_DNOTIFY=y | ||
673 | CONFIG_INOTIFY=y | ||
674 | CONFIG_INOTIFY_USER=y | ||
675 | # CONFIG_QUOTA is not set | ||
676 | # CONFIG_AUTOFS_FS is not set | ||
677 | # CONFIG_AUTOFS4_FS is not set | ||
678 | # CONFIG_FUSE_FS is not set | ||
679 | |||
680 | # | ||
681 | # CD-ROM/DVD Filesystems | ||
682 | # | ||
683 | # CONFIG_ISO9660_FS is not set | ||
684 | # CONFIG_UDF_FS is not set | ||
685 | |||
686 | # | ||
687 | # DOS/FAT/NT Filesystems | ||
688 | # | ||
689 | # CONFIG_MSDOS_FS is not set | ||
690 | # CONFIG_VFAT_FS is not set | ||
691 | # CONFIG_NTFS_FS is not set | ||
692 | |||
693 | # | ||
694 | # Pseudo filesystems | ||
695 | # | ||
696 | CONFIG_PROC_FS=y | ||
697 | CONFIG_PROC_KCORE=y | ||
698 | CONFIG_PROC_SYSCTL=y | ||
699 | CONFIG_SYSFS=y | ||
700 | CONFIG_TMPFS=y | ||
701 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
702 | # CONFIG_HUGETLB_PAGE is not set | ||
703 | # CONFIG_CONFIGFS_FS is not set | ||
704 | |||
705 | # | ||
706 | # Miscellaneous filesystems | ||
707 | # | ||
708 | # CONFIG_ADFS_FS is not set | ||
709 | # CONFIG_AFFS_FS is not set | ||
710 | # CONFIG_HFS_FS is not set | ||
711 | # CONFIG_HFSPLUS_FS is not set | ||
712 | # CONFIG_BEFS_FS is not set | ||
713 | # CONFIG_BFS_FS is not set | ||
714 | # CONFIG_EFS_FS is not set | ||
715 | # CONFIG_JFFS2_FS is not set | ||
716 | CONFIG_CRAMFS=y | ||
717 | # CONFIG_VXFS_FS is not set | ||
718 | # CONFIG_MINIX_FS is not set | ||
719 | # CONFIG_OMFS_FS is not set | ||
720 | # CONFIG_HPFS_FS is not set | ||
721 | # CONFIG_QNX4FS_FS is not set | ||
722 | # CONFIG_ROMFS_FS is not set | ||
723 | # CONFIG_SYSV_FS is not set | ||
724 | # CONFIG_UFS_FS is not set | ||
725 | CONFIG_NETWORK_FILESYSTEMS=y | ||
726 | CONFIG_NFS_FS=y | ||
727 | CONFIG_NFS_V3=y | ||
728 | # CONFIG_NFS_V3_ACL is not set | ||
729 | # CONFIG_NFS_V4 is not set | ||
730 | CONFIG_ROOT_NFS=y | ||
731 | # CONFIG_NFSD is not set | ||
732 | CONFIG_LOCKD=y | ||
733 | CONFIG_LOCKD_V4=y | ||
734 | CONFIG_NFS_COMMON=y | ||
735 | CONFIG_SUNRPC=y | ||
736 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
737 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
738 | # CONFIG_SMB_FS is not set | ||
739 | # CONFIG_CIFS is not set | ||
740 | # CONFIG_NCP_FS is not set | ||
741 | # CONFIG_CODA_FS is not set | ||
742 | # CONFIG_AFS_FS is not set | ||
743 | |||
744 | # | ||
745 | # Partition Types | ||
746 | # | ||
747 | # CONFIG_PARTITION_ADVANCED is not set | ||
748 | CONFIG_MSDOS_PARTITION=y | ||
749 | # CONFIG_NLS is not set | ||
750 | # CONFIG_DLM is not set | ||
751 | |||
752 | # | ||
753 | # Library routines | ||
754 | # | ||
755 | CONFIG_BITREVERSE=y | ||
756 | # CONFIG_GENERIC_FIND_FIRST_BIT is not set | ||
757 | # CONFIG_CRC_CCITT is not set | ||
758 | # CONFIG_CRC16 is not set | ||
759 | # CONFIG_CRC_T10DIF is not set | ||
760 | # CONFIG_CRC_ITU_T is not set | ||
761 | CONFIG_CRC32=y | ||
762 | # CONFIG_CRC7 is not set | ||
763 | # CONFIG_LIBCRC32C is not set | ||
764 | CONFIG_ZLIB_INFLATE=y | ||
765 | CONFIG_PLIST=y | ||
766 | CONFIG_HAS_IOMEM=y | ||
767 | CONFIG_HAS_IOPORT=y | ||
768 | CONFIG_HAS_DMA=y | ||
769 | CONFIG_HAVE_LMB=y | ||
770 | |||
771 | # | ||
772 | # Kernel hacking | ||
773 | # | ||
774 | # CONFIG_PRINTK_TIME is not set | ||
775 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
776 | CONFIG_ENABLE_MUST_CHECK=y | ||
777 | CONFIG_FRAME_WARN=1024 | ||
778 | CONFIG_MAGIC_SYSRQ=y | ||
779 | # CONFIG_UNUSED_SYMBOLS is not set | ||
780 | CONFIG_DEBUG_FS=y | ||
781 | # CONFIG_HEADERS_CHECK is not set | ||
782 | CONFIG_DEBUG_KERNEL=y | ||
783 | # CONFIG_DEBUG_SHIRQ is not set | ||
784 | CONFIG_DETECT_SOFTLOCKUP=y | ||
785 | # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set | ||
786 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 | ||
787 | CONFIG_SCHED_DEBUG=y | ||
788 | # CONFIG_SCHEDSTATS is not set | ||
789 | # CONFIG_TIMER_STATS is not set | ||
790 | # CONFIG_DEBUG_OBJECTS is not set | ||
791 | # CONFIG_SLUB_DEBUG_ON is not set | ||
792 | # CONFIG_SLUB_STATS is not set | ||
793 | # CONFIG_DEBUG_RT_MUTEXES is not set | ||
794 | # CONFIG_RT_MUTEX_TESTER is not set | ||
795 | # CONFIG_DEBUG_SPINLOCK is not set | ||
796 | # CONFIG_DEBUG_MUTEXES is not set | ||
797 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | ||
798 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
799 | # CONFIG_DEBUG_KOBJECT is not set | ||
800 | CONFIG_DEBUG_BUGVERBOSE=y | ||
801 | # CONFIG_DEBUG_INFO is not set | ||
802 | # CONFIG_DEBUG_VM is not set | ||
803 | # CONFIG_DEBUG_WRITECOUNT is not set | ||
804 | # CONFIG_DEBUG_MEMORY_INIT is not set | ||
805 | # CONFIG_DEBUG_LIST is not set | ||
806 | # CONFIG_DEBUG_SG is not set | ||
807 | # CONFIG_BOOT_PRINTK_DELAY is not set | ||
808 | # CONFIG_RCU_TORTURE_TEST is not set | ||
809 | # CONFIG_BACKTRACE_SELF_TEST is not set | ||
810 | # CONFIG_FAULT_INJECTION is not set | ||
811 | # CONFIG_LATENCYTOP is not set | ||
812 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
813 | CONFIG_HAVE_FTRACE=y | ||
814 | CONFIG_HAVE_DYNAMIC_FTRACE=y | ||
815 | # CONFIG_FTRACE is not set | ||
816 | # CONFIG_SCHED_TRACER is not set | ||
817 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
818 | # CONFIG_SAMPLES is not set | ||
819 | CONFIG_HAVE_ARCH_KGDB=y | ||
820 | # CONFIG_KGDB is not set | ||
821 | # CONFIG_DEBUG_STACKOVERFLOW is not set | ||
822 | # CONFIG_DEBUG_STACK_USAGE is not set | ||
823 | # CONFIG_DEBUG_PAGEALLOC is not set | ||
824 | # CONFIG_CODE_PATCHING_SELFTEST is not set | ||
825 | # CONFIG_FTR_FIXUP_SELFTEST is not set | ||
826 | # CONFIG_MSI_BITMAP_SELFTEST is not set | ||
827 | # CONFIG_XMON is not set | ||
828 | # CONFIG_IRQSTACKS is not set | ||
829 | # CONFIG_VIRQ_DEBUG is not set | ||
830 | # CONFIG_BDI_SWITCH is not set | ||
831 | # CONFIG_PPC_EARLY_DEBUG is not set | ||
832 | |||
833 | # | ||
834 | # Security options | ||
835 | # | ||
836 | # CONFIG_KEYS is not set | ||
837 | # CONFIG_SECURITY is not set | ||
838 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
839 | CONFIG_CRYPTO=y | ||
840 | |||
841 | # | ||
842 | # Crypto core or helper | ||
843 | # | ||
844 | CONFIG_CRYPTO_ALGAPI=y | ||
845 | CONFIG_CRYPTO_BLKCIPHER=y | ||
846 | CONFIG_CRYPTO_MANAGER=y | ||
847 | # CONFIG_CRYPTO_GF128MUL is not set | ||
848 | # CONFIG_CRYPTO_NULL is not set | ||
849 | # CONFIG_CRYPTO_CRYPTD is not set | ||
850 | # CONFIG_CRYPTO_AUTHENC is not set | ||
851 | # CONFIG_CRYPTO_TEST is not set | ||
852 | |||
853 | # | ||
854 | # Authenticated Encryption with Associated Data | ||
855 | # | ||
856 | # CONFIG_CRYPTO_CCM is not set | ||
857 | # CONFIG_CRYPTO_GCM is not set | ||
858 | # CONFIG_CRYPTO_SEQIV is not set | ||
859 | |||
860 | # | ||
861 | # Block modes | ||
862 | # | ||
863 | CONFIG_CRYPTO_CBC=y | ||
864 | # CONFIG_CRYPTO_CTR is not set | ||
865 | # CONFIG_CRYPTO_CTS is not set | ||
866 | CONFIG_CRYPTO_ECB=y | ||
867 | # CONFIG_CRYPTO_LRW is not set | ||
868 | CONFIG_CRYPTO_PCBC=y | ||
869 | # CONFIG_CRYPTO_XTS is not set | ||
870 | |||
871 | # | ||
872 | # Hash modes | ||
873 | # | ||
874 | # CONFIG_CRYPTO_HMAC is not set | ||
875 | # CONFIG_CRYPTO_XCBC is not set | ||
876 | |||
877 | # | ||
878 | # Digest | ||
879 | # | ||
880 | # CONFIG_CRYPTO_CRC32C is not set | ||
881 | # CONFIG_CRYPTO_MD4 is not set | ||
882 | CONFIG_CRYPTO_MD5=y | ||
883 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
884 | # CONFIG_CRYPTO_RMD128 is not set | ||
885 | # CONFIG_CRYPTO_RMD160 is not set | ||
886 | # CONFIG_CRYPTO_RMD256 is not set | ||
887 | # CONFIG_CRYPTO_RMD320 is not set | ||
888 | # CONFIG_CRYPTO_SHA1 is not set | ||
889 | # CONFIG_CRYPTO_SHA256 is not set | ||
890 | # CONFIG_CRYPTO_SHA512 is not set | ||
891 | # CONFIG_CRYPTO_TGR192 is not set | ||
892 | # CONFIG_CRYPTO_WP512 is not set | ||
893 | |||
894 | # | ||
895 | # Ciphers | ||
896 | # | ||
897 | # CONFIG_CRYPTO_AES is not set | ||
898 | # CONFIG_CRYPTO_ANUBIS is not set | ||
899 | # CONFIG_CRYPTO_ARC4 is not set | ||
900 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
901 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
902 | # CONFIG_CRYPTO_CAST5 is not set | ||
903 | # CONFIG_CRYPTO_CAST6 is not set | ||
904 | CONFIG_CRYPTO_DES=y | ||
905 | # CONFIG_CRYPTO_FCRYPT is not set | ||
906 | # CONFIG_CRYPTO_KHAZAD is not set | ||
907 | # CONFIG_CRYPTO_SALSA20 is not set | ||
908 | # CONFIG_CRYPTO_SEED is not set | ||
909 | # CONFIG_CRYPTO_SERPENT is not set | ||
910 | # CONFIG_CRYPTO_TEA is not set | ||
911 | # CONFIG_CRYPTO_TWOFISH is not set | ||
912 | |||
913 | # | ||
914 | # Compression | ||
915 | # | ||
916 | # CONFIG_CRYPTO_DEFLATE is not set | ||
917 | # CONFIG_CRYPTO_LZO is not set | ||
918 | CONFIG_CRYPTO_HW=y | ||
919 | # CONFIG_CRYPTO_DEV_HIFN_795X is not set | ||
920 | # CONFIG_PPC_CLOCK is not set | ||
921 | # CONFIG_VIRTUALIZATION is not set | ||
diff --git a/arch/powerpc/configs/40x/hcu4_defconfig b/arch/powerpc/configs/40x/hcu4_defconfig new file mode 100644 index 000000000000..682fce02c73a --- /dev/null +++ b/arch/powerpc/configs/40x/hcu4_defconfig | |||
@@ -0,0 +1,929 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.26.5 | ||
4 | # Tue Sep 16 00:44:33 2008 | ||
5 | # | ||
6 | # CONFIG_PPC64 is not set | ||
7 | |||
8 | # | ||
9 | # Processor support | ||
10 | # | ||
11 | # CONFIG_6xx is not set | ||
12 | # CONFIG_PPC_85xx is not set | ||
13 | # CONFIG_PPC_8xx is not set | ||
14 | CONFIG_40x=y | ||
15 | # CONFIG_44x is not set | ||
16 | # CONFIG_E200 is not set | ||
17 | CONFIG_4xx=y | ||
18 | # CONFIG_PPC_MM_SLICES is not set | ||
19 | CONFIG_NOT_COHERENT_CACHE=y | ||
20 | CONFIG_PPC32=y | ||
21 | CONFIG_WORD_SIZE=32 | ||
22 | CONFIG_PPC_MERGE=y | ||
23 | CONFIG_MMU=y | ||
24 | CONFIG_GENERIC_CMOS_UPDATE=y | ||
25 | CONFIG_GENERIC_TIME=y | ||
26 | CONFIG_GENERIC_TIME_VSYSCALL=y | ||
27 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
28 | CONFIG_GENERIC_HARDIRQS=y | ||
29 | # CONFIG_HAVE_SETUP_PER_CPU_AREA is not set | ||
30 | CONFIG_IRQ_PER_CPU=y | ||
31 | CONFIG_STACKTRACE_SUPPORT=y | ||
32 | CONFIG_LOCKDEP_SUPPORT=y | ||
33 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | ||
34 | CONFIG_ARCH_HAS_ILOG2_U32=y | ||
35 | CONFIG_GENERIC_HWEIGHT=y | ||
36 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
37 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
38 | # CONFIG_ARCH_NO_VIRT_TO_BUS is not set | ||
39 | CONFIG_PPC=y | ||
40 | CONFIG_EARLY_PRINTK=y | ||
41 | CONFIG_GENERIC_NVRAM=y | ||
42 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | ||
43 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | ||
44 | CONFIG_PPC_OF=y | ||
45 | CONFIG_OF=y | ||
46 | CONFIG_PPC_UDBG_16550=y | ||
47 | # CONFIG_GENERIC_TBSYNC is not set | ||
48 | CONFIG_AUDIT_ARCH=y | ||
49 | CONFIG_GENERIC_BUG=y | ||
50 | # CONFIG_DEFAULT_UIMAGE is not set | ||
51 | CONFIG_PPC_DCR_NATIVE=y | ||
52 | # CONFIG_PPC_DCR_MMIO is not set | ||
53 | CONFIG_PPC_DCR=y | ||
54 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
55 | |||
56 | # | ||
57 | # General setup | ||
58 | # | ||
59 | CONFIG_EXPERIMENTAL=y | ||
60 | CONFIG_BROKEN_ON_SMP=y | ||
61 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
62 | CONFIG_LOCALVERSION="" | ||
63 | CONFIG_LOCALVERSION_AUTO=y | ||
64 | CONFIG_SWAP=y | ||
65 | CONFIG_SYSVIPC=y | ||
66 | CONFIG_SYSVIPC_SYSCTL=y | ||
67 | CONFIG_POSIX_MQUEUE=y | ||
68 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
69 | # CONFIG_TASKSTATS is not set | ||
70 | # CONFIG_AUDIT is not set | ||
71 | # CONFIG_IKCONFIG is not set | ||
72 | CONFIG_LOG_BUF_SHIFT=14 | ||
73 | # CONFIG_CGROUPS is not set | ||
74 | CONFIG_GROUP_SCHED=y | ||
75 | CONFIG_FAIR_GROUP_SCHED=y | ||
76 | # CONFIG_RT_GROUP_SCHED is not set | ||
77 | CONFIG_USER_SCHED=y | ||
78 | # CONFIG_CGROUP_SCHED is not set | ||
79 | CONFIG_SYSFS_DEPRECATED=y | ||
80 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
81 | # CONFIG_RELAY is not set | ||
82 | # CONFIG_NAMESPACES is not set | ||
83 | CONFIG_BLK_DEV_INITRD=y | ||
84 | CONFIG_INITRAMFS_SOURCE="" | ||
85 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
86 | CONFIG_SYSCTL=y | ||
87 | CONFIG_EMBEDDED=y | ||
88 | CONFIG_SYSCTL_SYSCALL=y | ||
89 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
90 | CONFIG_KALLSYMS=y | ||
91 | CONFIG_KALLSYMS_ALL=y | ||
92 | CONFIG_KALLSYMS_EXTRA_PASS=y | ||
93 | CONFIG_HOTPLUG=y | ||
94 | CONFIG_PRINTK=y | ||
95 | # CONFIG_LOGBUFFER is not set | ||
96 | CONFIG_BUG=y | ||
97 | CONFIG_ELF_CORE=y | ||
98 | CONFIG_COMPAT_BRK=y | ||
99 | CONFIG_BASE_FULL=y | ||
100 | CONFIG_FUTEX=y | ||
101 | CONFIG_ANON_INODES=y | ||
102 | CONFIG_EPOLL=y | ||
103 | CONFIG_SIGNALFD=y | ||
104 | CONFIG_TIMERFD=y | ||
105 | CONFIG_EVENTFD=y | ||
106 | CONFIG_SHMEM=y | ||
107 | CONFIG_VM_EVENT_COUNTERS=y | ||
108 | CONFIG_SLUB_DEBUG=y | ||
109 | # CONFIG_SLAB is not set | ||
110 | CONFIG_SLUB=y | ||
111 | # CONFIG_SLOB is not set | ||
112 | # CONFIG_PROFILING is not set | ||
113 | # CONFIG_MARKERS is not set | ||
114 | CONFIG_HAVE_OPROFILE=y | ||
115 | # CONFIG_KPROBES is not set | ||
116 | CONFIG_HAVE_KPROBES=y | ||
117 | CONFIG_HAVE_KRETPROBES=y | ||
118 | # CONFIG_HAVE_DMA_ATTRS is not set | ||
119 | CONFIG_PROC_PAGE_MONITOR=y | ||
120 | CONFIG_SLABINFO=y | ||
121 | CONFIG_RT_MUTEXES=y | ||
122 | # CONFIG_TINY_SHMEM is not set | ||
123 | CONFIG_BASE_SMALL=0 | ||
124 | CONFIG_MODULES=y | ||
125 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
126 | CONFIG_MODULE_UNLOAD=y | ||
127 | # CONFIG_MODULE_FORCE_UNLOAD is not set | ||
128 | # CONFIG_MODVERSIONS is not set | ||
129 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
130 | CONFIG_KMOD=y | ||
131 | CONFIG_BLOCK=y | ||
132 | CONFIG_LBD=y | ||
133 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
134 | # CONFIG_LSF is not set | ||
135 | # CONFIG_BLK_DEV_BSG is not set | ||
136 | |||
137 | # | ||
138 | # IO Schedulers | ||
139 | # | ||
140 | CONFIG_IOSCHED_NOOP=y | ||
141 | CONFIG_IOSCHED_AS=y | ||
142 | CONFIG_IOSCHED_DEADLINE=y | ||
143 | CONFIG_IOSCHED_CFQ=y | ||
144 | CONFIG_DEFAULT_AS=y | ||
145 | # CONFIG_DEFAULT_DEADLINE is not set | ||
146 | # CONFIG_DEFAULT_CFQ is not set | ||
147 | # CONFIG_DEFAULT_NOOP is not set | ||
148 | CONFIG_DEFAULT_IOSCHED="anticipatory" | ||
149 | CONFIG_CLASSIC_RCU=y | ||
150 | # CONFIG_PPC4xx_PCI_EXPRESS is not set | ||
151 | |||
152 | # | ||
153 | # Platform support | ||
154 | # | ||
155 | # CONFIG_PPC_MPC512x is not set | ||
156 | # CONFIG_PPC_MPC5121 is not set | ||
157 | # CONFIG_PPC_CELL is not set | ||
158 | # CONFIG_PPC_CELL_NATIVE is not set | ||
159 | # CONFIG_PQ2ADS is not set | ||
160 | # CONFIG_EP405 is not set | ||
161 | CONFIG_HCU4=y | ||
162 | # CONFIG_KILAUEA is not set | ||
163 | # CONFIG_MAKALU is not set | ||
164 | # CONFIG_WALNUT is not set | ||
165 | # CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set | ||
166 | # CONFIG_IPIC is not set | ||
167 | # CONFIG_MPIC is not set | ||
168 | # CONFIG_MPIC_WEIRD is not set | ||
169 | # CONFIG_PPC_I8259 is not set | ||
170 | # CONFIG_PPC_RTAS is not set | ||
171 | # CONFIG_MMIO_NVRAM is not set | ||
172 | # CONFIG_PPC_MPC106 is not set | ||
173 | # CONFIG_PPC_970_NAP is not set | ||
174 | # CONFIG_PPC_INDIRECT_IO is not set | ||
175 | # CONFIG_GENERIC_IOMAP is not set | ||
176 | # CONFIG_CPU_FREQ is not set | ||
177 | # CONFIG_FSL_ULI1575 is not set | ||
178 | |||
179 | # | ||
180 | # Kernel options | ||
181 | # | ||
182 | # CONFIG_HIGHMEM is not set | ||
183 | # CONFIG_TICK_ONESHOT is not set | ||
184 | # CONFIG_NO_HZ is not set | ||
185 | # CONFIG_HIGH_RES_TIMERS is not set | ||
186 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
187 | # CONFIG_HZ_100 is not set | ||
188 | CONFIG_HZ_250=y | ||
189 | # CONFIG_HZ_300 is not set | ||
190 | # CONFIG_HZ_1000 is not set | ||
191 | CONFIG_HZ=250 | ||
192 | # CONFIG_SCHED_HRTICK is not set | ||
193 | CONFIG_PREEMPT_NONE=y | ||
194 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
195 | # CONFIG_PREEMPT is not set | ||
196 | CONFIG_BINFMT_ELF=y | ||
197 | # CONFIG_BINFMT_MISC is not set | ||
198 | # CONFIG_MATH_EMULATION is not set | ||
199 | # CONFIG_IOMMU_HELPER is not set | ||
200 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | ||
201 | CONFIG_ARCH_HAS_WALK_MEMORY=y | ||
202 | CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y | ||
203 | CONFIG_ARCH_FLATMEM_ENABLE=y | ||
204 | CONFIG_ARCH_POPULATES_NODE_MAP=y | ||
205 | CONFIG_SELECT_MEMORY_MODEL=y | ||
206 | CONFIG_FLATMEM_MANUAL=y | ||
207 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
208 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
209 | CONFIG_FLATMEM=y | ||
210 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
211 | # CONFIG_SPARSEMEM_STATIC is not set | ||
212 | # CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set | ||
213 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
214 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
215 | CONFIG_RESOURCES_64BIT=y | ||
216 | CONFIG_ZONE_DMA_FLAG=1 | ||
217 | CONFIG_BOUNCE=y | ||
218 | CONFIG_VIRT_TO_BUS=y | ||
219 | CONFIG_FORCE_MAX_ZONEORDER=11 | ||
220 | CONFIG_PROC_DEVICETREE=y | ||
221 | # CONFIG_CMDLINE_BOOL is not set | ||
222 | # CONFIG_PM is not set | ||
223 | CONFIG_SECCOMP=y | ||
224 | CONFIG_ISA_DMA_API=y | ||
225 | |||
226 | # | ||
227 | # Bus options | ||
228 | # | ||
229 | CONFIG_ZONE_DMA=y | ||
230 | CONFIG_PPC_INDIRECT_PCI=y | ||
231 | CONFIG_4xx_SOC=y | ||
232 | CONFIG_PCI=y | ||
233 | CONFIG_PCI_DOMAINS=y | ||
234 | CONFIG_PCI_SYSCALL=y | ||
235 | # CONFIG_PCIEPORTBUS is not set | ||
236 | CONFIG_ARCH_SUPPORTS_MSI=y | ||
237 | # CONFIG_PCI_MSI is not set | ||
238 | # CONFIG_PCI_LEGACY is not set | ||
239 | # CONFIG_PCI_DEBUG is not set | ||
240 | # CONFIG_PCCARD is not set | ||
241 | # CONFIG_HOTPLUG_PCI is not set | ||
242 | # CONFIG_HAS_RAPIDIO is not set | ||
243 | |||
244 | # | ||
245 | # Advanced setup | ||
246 | # | ||
247 | # CONFIG_ADVANCED_OPTIONS is not set | ||
248 | |||
249 | # | ||
250 | # Default settings for advanced configuration options are used | ||
251 | # | ||
252 | CONFIG_LOWMEM_SIZE=0x30000000 | ||
253 | CONFIG_PAGE_OFFSET=0xc0000000 | ||
254 | CONFIG_KERNEL_START=0xc0000000 | ||
255 | CONFIG_PHYSICAL_START=0x00000000 | ||
256 | CONFIG_TASK_SIZE=0xc0000000 | ||
257 | CONFIG_CONSISTENT_START=0xff100000 | ||
258 | CONFIG_CONSISTENT_SIZE=0x00200000 | ||
259 | |||
260 | # | ||
261 | # Networking | ||
262 | # | ||
263 | CONFIG_NET=y | ||
264 | |||
265 | # | ||
266 | # Networking options | ||
267 | # | ||
268 | CONFIG_PACKET=y | ||
269 | # CONFIG_PACKET_MMAP is not set | ||
270 | CONFIG_UNIX=y | ||
271 | # CONFIG_NET_KEY is not set | ||
272 | CONFIG_INET=y | ||
273 | # CONFIG_IP_MULTICAST is not set | ||
274 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
275 | CONFIG_IP_FIB_HASH=y | ||
276 | CONFIG_IP_PNP=y | ||
277 | CONFIG_IP_PNP_DHCP=y | ||
278 | CONFIG_IP_PNP_BOOTP=y | ||
279 | # CONFIG_IP_PNP_RARP is not set | ||
280 | # CONFIG_NET_IPIP is not set | ||
281 | # CONFIG_NET_IPGRE is not set | ||
282 | # CONFIG_ARPD is not set | ||
283 | # CONFIG_SYN_COOKIES is not set | ||
284 | # CONFIG_INET_AH is not set | ||
285 | # CONFIG_INET_ESP is not set | ||
286 | # CONFIG_INET_IPCOMP is not set | ||
287 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
288 | # CONFIG_INET_TUNNEL is not set | ||
289 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
290 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
291 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
292 | # CONFIG_INET_LRO is not set | ||
293 | CONFIG_INET_DIAG=y | ||
294 | CONFIG_INET_TCP_DIAG=y | ||
295 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
296 | CONFIG_TCP_CONG_CUBIC=y | ||
297 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
298 | # CONFIG_TCP_MD5SIG is not set | ||
299 | # CONFIG_IPV6 is not set | ||
300 | # CONFIG_NETWORK_SECMARK is not set | ||
301 | # CONFIG_NETFILTER is not set | ||
302 | # CONFIG_IP_DCCP is not set | ||
303 | # CONFIG_IP_SCTP is not set | ||
304 | # CONFIG_TIPC is not set | ||
305 | # CONFIG_ATM is not set | ||
306 | # CONFIG_BRIDGE is not set | ||
307 | # CONFIG_VLAN_8021Q is not set | ||
308 | # CONFIG_DECNET is not set | ||
309 | # CONFIG_LLC2 is not set | ||
310 | # CONFIG_IPX is not set | ||
311 | # CONFIG_ATALK is not set | ||
312 | # CONFIG_X25 is not set | ||
313 | # CONFIG_LAPB is not set | ||
314 | # CONFIG_ECONET is not set | ||
315 | # CONFIG_WAN_ROUTER is not set | ||
316 | # CONFIG_NET_SCHED is not set | ||
317 | |||
318 | # | ||
319 | # Network testing | ||
320 | # | ||
321 | # CONFIG_NET_PKTGEN is not set | ||
322 | # CONFIG_HAMRADIO is not set | ||
323 | # CONFIG_CAN is not set | ||
324 | # CONFIG_IRDA is not set | ||
325 | # CONFIG_BT is not set | ||
326 | # CONFIG_AF_RXRPC is not set | ||
327 | |||
328 | # | ||
329 | # Wireless | ||
330 | # | ||
331 | # CONFIG_CFG80211 is not set | ||
332 | # CONFIG_WIRELESS_EXT is not set | ||
333 | # CONFIG_MAC80211 is not set | ||
334 | # CONFIG_IEEE80211 is not set | ||
335 | # CONFIG_RFKILL is not set | ||
336 | # CONFIG_NET_9P is not set | ||
337 | |||
338 | # | ||
339 | # Device Drivers | ||
340 | # | ||
341 | |||
342 | # | ||
343 | # Generic Driver Options | ||
344 | # | ||
345 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
346 | CONFIG_STANDALONE=y | ||
347 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
348 | CONFIG_FW_LOADER=y | ||
349 | # CONFIG_DEBUG_DRIVER is not set | ||
350 | # CONFIG_DEBUG_DEVRES is not set | ||
351 | # CONFIG_SYS_HYPERVISOR is not set | ||
352 | CONFIG_CONNECTOR=y | ||
353 | CONFIG_PROC_EVENTS=y | ||
354 | CONFIG_MTD=y | ||
355 | # CONFIG_MTD_DEBUG is not set | ||
356 | # CONFIG_MTD_CONCAT is not set | ||
357 | CONFIG_MTD_PARTITIONS=y | ||
358 | # CONFIG_MTD_REDBOOT_PARTS is not set | ||
359 | CONFIG_MTD_CMDLINE_PARTS=y | ||
360 | CONFIG_MTD_OF_PARTS=y | ||
361 | # CONFIG_MTD_AR7_PARTS is not set | ||
362 | |||
363 | # | ||
364 | # User Modules And Translation Layers | ||
365 | # | ||
366 | CONFIG_MTD_CHAR=y | ||
367 | CONFIG_MTD_BLKDEVS=m | ||
368 | CONFIG_MTD_BLOCK=m | ||
369 | # CONFIG_MTD_BLOCK_RO is not set | ||
370 | # CONFIG_FTL is not set | ||
371 | # CONFIG_NFTL is not set | ||
372 | # CONFIG_INFTL is not set | ||
373 | # CONFIG_RFD_FTL is not set | ||
374 | # CONFIG_SSFDC is not set | ||
375 | # CONFIG_MTD_OOPS is not set | ||
376 | |||
377 | # | ||
378 | # RAM/ROM/Flash chip drivers | ||
379 | # | ||
380 | CONFIG_MTD_CFI=y | ||
381 | CONFIG_MTD_JEDECPROBE=y | ||
382 | CONFIG_MTD_GEN_PROBE=y | ||
383 | # CONFIG_MTD_CFI_ADV_OPTIONS is not set | ||
384 | CONFIG_MTD_MAP_BANK_WIDTH_1=y | ||
385 | CONFIG_MTD_MAP_BANK_WIDTH_2=y | ||
386 | CONFIG_MTD_MAP_BANK_WIDTH_4=y | ||
387 | # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set | ||
388 | # CONFIG_MTD_MAP_BANK_WIDTH_16 is not set | ||
389 | # CONFIG_MTD_MAP_BANK_WIDTH_32 is not set | ||
390 | CONFIG_MTD_CFI_I1=y | ||
391 | CONFIG_MTD_CFI_I2=y | ||
392 | # CONFIG_MTD_CFI_I4 is not set | ||
393 | # CONFIG_MTD_CFI_I8 is not set | ||
394 | # CONFIG_MTD_CFI_INTELEXT is not set | ||
395 | CONFIG_MTD_CFI_AMDSTD=y | ||
396 | # CONFIG_MTD_CFI_STAA is not set | ||
397 | CONFIG_MTD_CFI_UTIL=y | ||
398 | # CONFIG_MTD_RAM is not set | ||
399 | # CONFIG_MTD_ROM is not set | ||
400 | # CONFIG_MTD_ABSENT is not set | ||
401 | |||
402 | # | ||
403 | # Mapping drivers for chip access | ||
404 | # | ||
405 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set | ||
406 | # CONFIG_MTD_PHYSMAP is not set | ||
407 | CONFIG_MTD_PHYSMAP_OF=y | ||
408 | # CONFIG_MTD_INTEL_VR_NOR is not set | ||
409 | # CONFIG_MTD_PLATRAM is not set | ||
410 | |||
411 | # | ||
412 | # Self-contained MTD device drivers | ||
413 | # | ||
414 | # CONFIG_MTD_PMC551 is not set | ||
415 | # CONFIG_MTD_SLRAM is not set | ||
416 | # CONFIG_MTD_PHRAM is not set | ||
417 | # CONFIG_MTD_MTDRAM is not set | ||
418 | # CONFIG_MTD_BLOCK2MTD is not set | ||
419 | |||
420 | # | ||
421 | # Disk-On-Chip Device Drivers | ||
422 | # | ||
423 | # CONFIG_MTD_DOC2000 is not set | ||
424 | # CONFIG_MTD_DOC2001 is not set | ||
425 | # CONFIG_MTD_DOC2001PLUS is not set | ||
426 | # CONFIG_MTD_NAND is not set | ||
427 | # CONFIG_MTD_ONENAND is not set | ||
428 | |||
429 | # | ||
430 | # UBI - Unsorted block images | ||
431 | # | ||
432 | # CONFIG_MTD_UBI is not set | ||
433 | CONFIG_OF_DEVICE=y | ||
434 | # CONFIG_PARPORT is not set | ||
435 | CONFIG_BLK_DEV=y | ||
436 | # CONFIG_BLK_DEV_FD is not set | ||
437 | # CONFIG_BLK_CPQ_DA is not set | ||
438 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
439 | # CONFIG_BLK_DEV_DAC960 is not set | ||
440 | # CONFIG_BLK_DEV_UMEM is not set | ||
441 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
442 | # CONFIG_BLK_DEV_LOOP is not set | ||
443 | # CONFIG_BLK_DEV_NBD is not set | ||
444 | # CONFIG_BLK_DEV_SX8 is not set | ||
445 | CONFIG_BLK_DEV_RAM=y | ||
446 | CONFIG_BLK_DEV_RAM_COUNT=16 | ||
447 | CONFIG_BLK_DEV_RAM_SIZE=35000 | ||
448 | # CONFIG_BLK_DEV_XIP is not set | ||
449 | # CONFIG_CDROM_PKTCDVD is not set | ||
450 | # CONFIG_ATA_OVER_ETH is not set | ||
451 | # CONFIG_XILINX_SYSACE is not set | ||
452 | CONFIG_MISC_DEVICES=y | ||
453 | # CONFIG_PHANTOM is not set | ||
454 | # CONFIG_EEPROM_93CX6 is not set | ||
455 | # CONFIG_SGI_IOC4 is not set | ||
456 | # CONFIG_TIFM_CORE is not set | ||
457 | # CONFIG_ENCLOSURE_SERVICES is not set | ||
458 | CONFIG_HAVE_IDE=y | ||
459 | # CONFIG_IDE is not set | ||
460 | |||
461 | # | ||
462 | # SCSI device support | ||
463 | # | ||
464 | # CONFIG_RAID_ATTRS is not set | ||
465 | # CONFIG_SCSI is not set | ||
466 | # CONFIG_SCSI_DMA is not set | ||
467 | # CONFIG_SCSI_NETLINK is not set | ||
468 | # CONFIG_ATA is not set | ||
469 | # CONFIG_MD is not set | ||
470 | # CONFIG_FUSION is not set | ||
471 | |||
472 | # | ||
473 | # IEEE 1394 (FireWire) support | ||
474 | # | ||
475 | |||
476 | # | ||
477 | # Enable only one of the two stacks, unless you know what you are doing | ||
478 | # | ||
479 | # CONFIG_FIREWIRE is not set | ||
480 | # CONFIG_IEEE1394 is not set | ||
481 | # CONFIG_I2O is not set | ||
482 | # CONFIG_MACINTOSH_DRIVERS is not set | ||
483 | CONFIG_NETDEVICES=y | ||
484 | # CONFIG_NETDEVICES_MULTIQUEUE is not set | ||
485 | # CONFIG_DUMMY is not set | ||
486 | # CONFIG_BONDING is not set | ||
487 | # CONFIG_MACVLAN is not set | ||
488 | # CONFIG_EQUALIZER is not set | ||
489 | # CONFIG_TUN is not set | ||
490 | # CONFIG_VETH is not set | ||
491 | # CONFIG_ARCNET is not set | ||
492 | # CONFIG_PHYLIB is not set | ||
493 | CONFIG_NET_ETHERNET=y | ||
494 | # CONFIG_MII is not set | ||
495 | # CONFIG_HAPPYMEAL is not set | ||
496 | # CONFIG_SUNGEM is not set | ||
497 | # CONFIG_CASSINI is not set | ||
498 | # CONFIG_NET_VENDOR_3COM is not set | ||
499 | # CONFIG_NET_TULIP is not set | ||
500 | # CONFIG_HP100 is not set | ||
501 | CONFIG_IBM_NEW_EMAC=y | ||
502 | CONFIG_IBM_NEW_EMAC_RXB=128 | ||
503 | CONFIG_IBM_NEW_EMAC_TXB=64 | ||
504 | CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32 | ||
505 | CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256 | ||
506 | CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0 | ||
507 | # CONFIG_IBM_NEW_EMAC_DEBUG is not set | ||
508 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
509 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
510 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
511 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
512 | # CONFIG_NET_PCI is not set | ||
513 | # CONFIG_B44 is not set | ||
514 | CONFIG_NETDEV_1000=y | ||
515 | # CONFIG_ACENIC is not set | ||
516 | # CONFIG_DL2K is not set | ||
517 | # CONFIG_E1000 is not set | ||
518 | # CONFIG_E1000E is not set | ||
519 | # CONFIG_E1000E_ENABLED is not set | ||
520 | # CONFIG_IP1000 is not set | ||
521 | # CONFIG_IGB is not set | ||
522 | # CONFIG_NS83820 is not set | ||
523 | # CONFIG_HAMACHI is not set | ||
524 | # CONFIG_YELLOWFIN is not set | ||
525 | # CONFIG_R8169 is not set | ||
526 | # CONFIG_SIS190 is not set | ||
527 | # CONFIG_SKGE is not set | ||
528 | # CONFIG_SKY2 is not set | ||
529 | # CONFIG_VIA_VELOCITY is not set | ||
530 | # CONFIG_TIGON3 is not set | ||
531 | # CONFIG_BNX2 is not set | ||
532 | # CONFIG_QLA3XXX is not set | ||
533 | # CONFIG_ATL1 is not set | ||
534 | CONFIG_NETDEV_10000=y | ||
535 | # CONFIG_CHELSIO_T1 is not set | ||
536 | # CONFIG_CHELSIO_T3 is not set | ||
537 | # CONFIG_IXGBE is not set | ||
538 | # CONFIG_IXGB is not set | ||
539 | # CONFIG_S2IO is not set | ||
540 | # CONFIG_MYRI10GE is not set | ||
541 | # CONFIG_NETXEN_NIC is not set | ||
542 | # CONFIG_NIU is not set | ||
543 | # CONFIG_MLX4_CORE is not set | ||
544 | # CONFIG_TEHUTI is not set | ||
545 | # CONFIG_BNX2X is not set | ||
546 | # CONFIG_SFC is not set | ||
547 | # CONFIG_TR is not set | ||
548 | |||
549 | # | ||
550 | # Wireless LAN | ||
551 | # | ||
552 | # CONFIG_WLAN_PRE80211 is not set | ||
553 | # CONFIG_WLAN_80211 is not set | ||
554 | # CONFIG_IWLWIFI_LEDS is not set | ||
555 | # CONFIG_WAN is not set | ||
556 | # CONFIG_FDDI is not set | ||
557 | # CONFIG_HIPPI is not set | ||
558 | # CONFIG_PPP is not set | ||
559 | # CONFIG_SLIP is not set | ||
560 | # CONFIG_NETCONSOLE is not set | ||
561 | # CONFIG_NETPOLL is not set | ||
562 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
563 | # CONFIG_ISDN is not set | ||
564 | # CONFIG_PHONE is not set | ||
565 | |||
566 | # | ||
567 | # Input device support | ||
568 | # | ||
569 | # CONFIG_INPUT is not set | ||
570 | |||
571 | # | ||
572 | # Hardware I/O ports | ||
573 | # | ||
574 | # CONFIG_SERIO is not set | ||
575 | # CONFIG_GAMEPORT is not set | ||
576 | |||
577 | # | ||
578 | # Character devices | ||
579 | # | ||
580 | # CONFIG_VT is not set | ||
581 | CONFIG_DEVKMEM=y | ||
582 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
583 | # CONFIG_NOZOMI is not set | ||
584 | |||
585 | # | ||
586 | # Serial drivers | ||
587 | # | ||
588 | CONFIG_SERIAL_8250=y | ||
589 | CONFIG_SERIAL_8250_CONSOLE=y | ||
590 | CONFIG_SERIAL_8250_PCI=y | ||
591 | CONFIG_SERIAL_8250_NR_UARTS=4 | ||
592 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | ||
593 | CONFIG_SERIAL_8250_EXTENDED=y | ||
594 | # CONFIG_SERIAL_8250_MANY_PORTS is not set | ||
595 | CONFIG_SERIAL_8250_SHARE_IRQ=y | ||
596 | # CONFIG_SERIAL_8250_DETECT_IRQ is not set | ||
597 | # CONFIG_SERIAL_8250_RSA is not set | ||
598 | |||
599 | # | ||
600 | # Non-8250 serial port support | ||
601 | # | ||
602 | # CONFIG_SERIAL_UARTLITE is not set | ||
603 | CONFIG_SERIAL_CORE=y | ||
604 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
605 | # CONFIG_SERIAL_JSM is not set | ||
606 | CONFIG_SERIAL_OF_PLATFORM=y | ||
607 | CONFIG_UNIX98_PTYS=y | ||
608 | CONFIG_LEGACY_PTYS=y | ||
609 | CONFIG_LEGACY_PTY_COUNT=256 | ||
610 | # CONFIG_IPMI_HANDLER is not set | ||
611 | # CONFIG_HW_RANDOM is not set | ||
612 | # CONFIG_NVRAM is not set | ||
613 | # CONFIG_GEN_RTC is not set | ||
614 | # CONFIG_R3964 is not set | ||
615 | # CONFIG_APPLICOM is not set | ||
616 | # CONFIG_RAW_DRIVER is not set | ||
617 | # CONFIG_TCG_TPM is not set | ||
618 | CONFIG_DEVPORT=y | ||
619 | # CONFIG_I2C is not set | ||
620 | # CONFIG_SPI is not set | ||
621 | # CONFIG_W1 is not set | ||
622 | # CONFIG_POWER_SUPPLY is not set | ||
623 | # CONFIG_HWMON is not set | ||
624 | # CONFIG_THERMAL is not set | ||
625 | # CONFIG_THERMAL_HWMON is not set | ||
626 | # CONFIG_WATCHDOG is not set | ||
627 | |||
628 | # | ||
629 | # Sonics Silicon Backplane | ||
630 | # | ||
631 | CONFIG_SSB_POSSIBLE=y | ||
632 | # CONFIG_SSB is not set | ||
633 | |||
634 | # | ||
635 | # Multifunction device drivers | ||
636 | # | ||
637 | # CONFIG_MFD_SM501 is not set | ||
638 | # CONFIG_HTC_PASIC3 is not set | ||
639 | |||
640 | # | ||
641 | # Multimedia devices | ||
642 | # | ||
643 | |||
644 | # | ||
645 | # Multimedia core support | ||
646 | # | ||
647 | # CONFIG_VIDEO_DEV is not set | ||
648 | # CONFIG_DVB_CORE is not set | ||
649 | # CONFIG_VIDEO_MEDIA is not set | ||
650 | |||
651 | # | ||
652 | # Multimedia drivers | ||
653 | # | ||
654 | # CONFIG_DAB is not set | ||
655 | |||
656 | # | ||
657 | # Graphics support | ||
658 | # | ||
659 | # CONFIG_AGP is not set | ||
660 | # CONFIG_DRM is not set | ||
661 | # CONFIG_VGASTATE is not set | ||
662 | CONFIG_VIDEO_OUTPUT_CONTROL=m | ||
663 | # CONFIG_FB is not set | ||
664 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
665 | |||
666 | # | ||
667 | # Display device support | ||
668 | # | ||
669 | # CONFIG_DISPLAY_SUPPORT is not set | ||
670 | |||
671 | # | ||
672 | # Sound | ||
673 | # | ||
674 | # CONFIG_SOUND is not set | ||
675 | # CONFIG_USB_SUPPORT is not set | ||
676 | # CONFIG_MMC is not set | ||
677 | # CONFIG_MEMSTICK is not set | ||
678 | # CONFIG_NEW_LEDS is not set | ||
679 | # CONFIG_ACCESSIBILITY is not set | ||
680 | # CONFIG_INFINIBAND is not set | ||
681 | # CONFIG_EDAC is not set | ||
682 | # CONFIG_RTC_CLASS is not set | ||
683 | # CONFIG_DMADEVICES is not set | ||
684 | # CONFIG_UIO is not set | ||
685 | |||
686 | # | ||
687 | # File systems | ||
688 | # | ||
689 | CONFIG_EXT2_FS=y | ||
690 | # CONFIG_EXT2_FS_XATTR is not set | ||
691 | # CONFIG_EXT2_FS_XIP is not set | ||
692 | # CONFIG_EXT3_FS is not set | ||
693 | # CONFIG_EXT4DEV_FS is not set | ||
694 | # CONFIG_REISERFS_FS is not set | ||
695 | # CONFIG_JFS_FS is not set | ||
696 | # CONFIG_FS_POSIX_ACL is not set | ||
697 | # CONFIG_XFS_FS is not set | ||
698 | # CONFIG_OCFS2_FS is not set | ||
699 | CONFIG_DNOTIFY=y | ||
700 | CONFIG_INOTIFY=y | ||
701 | CONFIG_INOTIFY_USER=y | ||
702 | # CONFIG_QUOTA is not set | ||
703 | # CONFIG_AUTOFS_FS is not set | ||
704 | # CONFIG_AUTOFS4_FS is not set | ||
705 | # CONFIG_FUSE_FS is not set | ||
706 | |||
707 | # | ||
708 | # CD-ROM/DVD Filesystems | ||
709 | # | ||
710 | # CONFIG_ISO9660_FS is not set | ||
711 | # CONFIG_UDF_FS is not set | ||
712 | |||
713 | # | ||
714 | # DOS/FAT/NT Filesystems | ||
715 | # | ||
716 | # CONFIG_MSDOS_FS is not set | ||
717 | # CONFIG_VFAT_FS is not set | ||
718 | # CONFIG_NTFS_FS is not set | ||
719 | |||
720 | # | ||
721 | # Pseudo filesystems | ||
722 | # | ||
723 | CONFIG_PROC_FS=y | ||
724 | CONFIG_PROC_KCORE=y | ||
725 | CONFIG_PROC_SYSCTL=y | ||
726 | CONFIG_SYSFS=y | ||
727 | CONFIG_TMPFS=y | ||
728 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
729 | # CONFIG_HUGETLB_PAGE is not set | ||
730 | # CONFIG_CONFIGFS_FS is not set | ||
731 | |||
732 | # | ||
733 | # Miscellaneous filesystems | ||
734 | # | ||
735 | # CONFIG_ADFS_FS is not set | ||
736 | # CONFIG_AFFS_FS is not set | ||
737 | # CONFIG_HFS_FS is not set | ||
738 | # CONFIG_HFSPLUS_FS is not set | ||
739 | # CONFIG_BEFS_FS is not set | ||
740 | # CONFIG_BFS_FS is not set | ||
741 | # CONFIG_EFS_FS is not set | ||
742 | # CONFIG_YAFFS_FS is not set | ||
743 | # CONFIG_JFFS2_FS is not set | ||
744 | CONFIG_CRAMFS=y | ||
745 | # CONFIG_VXFS_FS is not set | ||
746 | # CONFIG_MINIX_FS is not set | ||
747 | # CONFIG_HPFS_FS is not set | ||
748 | # CONFIG_QNX4FS_FS is not set | ||
749 | # CONFIG_ROMFS_FS is not set | ||
750 | # CONFIG_SYSV_FS is not set | ||
751 | # CONFIG_UFS_FS is not set | ||
752 | CONFIG_NETWORK_FILESYSTEMS=y | ||
753 | CONFIG_NFS_FS=y | ||
754 | CONFIG_NFS_V3=y | ||
755 | # CONFIG_NFS_V3_ACL is not set | ||
756 | # CONFIG_NFS_V4 is not set | ||
757 | # CONFIG_NFSD is not set | ||
758 | CONFIG_ROOT_NFS=y | ||
759 | CONFIG_LOCKD=y | ||
760 | CONFIG_LOCKD_V4=y | ||
761 | CONFIG_NFS_COMMON=y | ||
762 | CONFIG_SUNRPC=y | ||
763 | # CONFIG_SUNRPC_BIND34 is not set | ||
764 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
765 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
766 | # CONFIG_SMB_FS is not set | ||
767 | # CONFIG_CIFS is not set | ||
768 | # CONFIG_NCP_FS is not set | ||
769 | # CONFIG_CODA_FS is not set | ||
770 | # CONFIG_AFS_FS is not set | ||
771 | |||
772 | # | ||
773 | # Partition Types | ||
774 | # | ||
775 | # CONFIG_PARTITION_ADVANCED is not set | ||
776 | CONFIG_MSDOS_PARTITION=y | ||
777 | # CONFIG_NLS is not set | ||
778 | # CONFIG_DLM is not set | ||
779 | |||
780 | # | ||
781 | # Library routines | ||
782 | # | ||
783 | CONFIG_BITREVERSE=y | ||
784 | # CONFIG_GENERIC_FIND_FIRST_BIT is not set | ||
785 | # CONFIG_CRC_CCITT is not set | ||
786 | # CONFIG_CRC16 is not set | ||
787 | # CONFIG_CRC_ITU_T is not set | ||
788 | CONFIG_CRC32=y | ||
789 | # CONFIG_CRC7 is not set | ||
790 | # CONFIG_LIBCRC32C is not set | ||
791 | CONFIG_ZLIB_INFLATE=y | ||
792 | CONFIG_PLIST=y | ||
793 | CONFIG_HAS_IOMEM=y | ||
794 | CONFIG_HAS_IOPORT=y | ||
795 | CONFIG_HAS_DMA=y | ||
796 | CONFIG_HAVE_LMB=y | ||
797 | |||
798 | # | ||
799 | # Kernel hacking | ||
800 | # | ||
801 | # CONFIG_PRINTK_TIME is not set | ||
802 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
803 | CONFIG_ENABLE_MUST_CHECK=y | ||
804 | CONFIG_FRAME_WARN=1024 | ||
805 | CONFIG_MAGIC_SYSRQ=y | ||
806 | # CONFIG_UNUSED_SYMBOLS is not set | ||
807 | CONFIG_DEBUG_FS=y | ||
808 | # CONFIG_HEADERS_CHECK is not set | ||
809 | CONFIG_DEBUG_KERNEL=y | ||
810 | # CONFIG_DEBUG_SHIRQ is not set | ||
811 | CONFIG_DETECT_SOFTLOCKUP=y | ||
812 | CONFIG_SCHED_DEBUG=y | ||
813 | # CONFIG_SCHEDSTATS is not set | ||
814 | # CONFIG_TIMER_STATS is not set | ||
815 | # CONFIG_DEBUG_OBJECTS is not set | ||
816 | # CONFIG_SLUB_DEBUG_ON is not set | ||
817 | # CONFIG_SLUB_STATS is not set | ||
818 | # CONFIG_DEBUG_RT_MUTEXES is not set | ||
819 | # CONFIG_RT_MUTEX_TESTER is not set | ||
820 | # CONFIG_DEBUG_SPINLOCK is not set | ||
821 | # CONFIG_DEBUG_MUTEXES is not set | ||
822 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | ||
823 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
824 | # CONFIG_DEBUG_KOBJECT is not set | ||
825 | CONFIG_DEBUG_BUGVERBOSE=y | ||
826 | # CONFIG_DEBUG_INFO is not set | ||
827 | # CONFIG_DEBUG_VM is not set | ||
828 | # CONFIG_DEBUG_WRITECOUNT is not set | ||
829 | # CONFIG_DEBUG_LIST is not set | ||
830 | # CONFIG_DEBUG_SG is not set | ||
831 | # CONFIG_BOOT_PRINTK_DELAY is not set | ||
832 | # CONFIG_RCU_TORTURE_TEST is not set | ||
833 | # CONFIG_BACKTRACE_SELF_TEST is not set | ||
834 | # CONFIG_FAULT_INJECTION is not set | ||
835 | # CONFIG_SAMPLES is not set | ||
836 | # CONFIG_DEBUG_STACKOVERFLOW is not set | ||
837 | # CONFIG_DEBUG_STACK_USAGE is not set | ||
838 | # CONFIG_DEBUG_PAGEALLOC is not set | ||
839 | # CONFIG_DEBUGGER is not set | ||
840 | # CONFIG_IRQSTACKS is not set | ||
841 | # CONFIG_VIRQ_DEBUG is not set | ||
842 | # CONFIG_BDI_SWITCH is not set | ||
843 | # CONFIG_PPC_EARLY_DEBUG is not set | ||
844 | |||
845 | # | ||
846 | # Security options | ||
847 | # | ||
848 | # CONFIG_KEYS is not set | ||
849 | # CONFIG_SECURITY is not set | ||
850 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
851 | CONFIG_CRYPTO=y | ||
852 | |||
853 | # | ||
854 | # Crypto core or helper | ||
855 | # | ||
856 | CONFIG_CRYPTO_ALGAPI=y | ||
857 | CONFIG_CRYPTO_BLKCIPHER=y | ||
858 | CONFIG_CRYPTO_MANAGER=y | ||
859 | # CONFIG_CRYPTO_GF128MUL is not set | ||
860 | # CONFIG_CRYPTO_NULL is not set | ||
861 | # CONFIG_CRYPTO_CRYPTD is not set | ||
862 | # CONFIG_CRYPTO_AUTHENC is not set | ||
863 | # CONFIG_CRYPTO_TEST is not set | ||
864 | |||
865 | # | ||
866 | # Authenticated Encryption with Associated Data | ||
867 | # | ||
868 | # CONFIG_CRYPTO_CCM is not set | ||
869 | # CONFIG_CRYPTO_GCM is not set | ||
870 | # CONFIG_CRYPTO_SEQIV is not set | ||
871 | |||
872 | # | ||
873 | # Block modes | ||
874 | # | ||
875 | CONFIG_CRYPTO_CBC=y | ||
876 | # CONFIG_CRYPTO_CTR is not set | ||
877 | # CONFIG_CRYPTO_CTS is not set | ||
878 | CONFIG_CRYPTO_ECB=y | ||
879 | # CONFIG_CRYPTO_LRW is not set | ||
880 | CONFIG_CRYPTO_PCBC=y | ||
881 | # CONFIG_CRYPTO_XTS is not set | ||
882 | |||
883 | # | ||
884 | # Hash modes | ||
885 | # | ||
886 | # CONFIG_CRYPTO_HMAC is not set | ||
887 | # CONFIG_CRYPTO_XCBC is not set | ||
888 | |||
889 | # | ||
890 | # Digest | ||
891 | # | ||
892 | # CONFIG_CRYPTO_CRC32C is not set | ||
893 | # CONFIG_CRYPTO_MD4 is not set | ||
894 | CONFIG_CRYPTO_MD5=y | ||
895 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
896 | # CONFIG_CRYPTO_SHA1 is not set | ||
897 | # CONFIG_CRYPTO_SHA256 is not set | ||
898 | # CONFIG_CRYPTO_SHA512 is not set | ||
899 | # CONFIG_CRYPTO_TGR192 is not set | ||
900 | # CONFIG_CRYPTO_WP512 is not set | ||
901 | |||
902 | # | ||
903 | # Ciphers | ||
904 | # | ||
905 | # CONFIG_CRYPTO_AES is not set | ||
906 | # CONFIG_CRYPTO_ANUBIS is not set | ||
907 | # CONFIG_CRYPTO_ARC4 is not set | ||
908 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
909 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
910 | # CONFIG_CRYPTO_CAST5 is not set | ||
911 | # CONFIG_CRYPTO_CAST6 is not set | ||
912 | CONFIG_CRYPTO_DES=y | ||
913 | # CONFIG_CRYPTO_FCRYPT is not set | ||
914 | # CONFIG_CRYPTO_KHAZAD is not set | ||
915 | # CONFIG_CRYPTO_SALSA20 is not set | ||
916 | # CONFIG_CRYPTO_SEED is not set | ||
917 | # CONFIG_CRYPTO_SERPENT is not set | ||
918 | # CONFIG_CRYPTO_TEA is not set | ||
919 | # CONFIG_CRYPTO_TWOFISH is not set | ||
920 | |||
921 | # | ||
922 | # Compression | ||
923 | # | ||
924 | # CONFIG_CRYPTO_DEFLATE is not set | ||
925 | # CONFIG_CRYPTO_LZO is not set | ||
926 | CONFIG_CRYPTO_HW=y | ||
927 | # CONFIG_CRYPTO_DEV_HIFN_795X is not set | ||
928 | # CONFIG_PPC_CLOCK is not set | ||
929 | # CONFIG_VIRTUALIZATION is not set | ||
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h index f6c93c716898..a503da9d56f3 100644 --- a/arch/powerpc/include/asm/kdump.h +++ b/arch/powerpc/include/asm/kdump.h | |||
@@ -9,6 +9,12 @@ | |||
9 | * Reserve to the end of the FWNMI area, see head_64.S */ | 9 | * Reserve to the end of the FWNMI area, see head_64.S */ |
10 | #define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */ | 10 | #define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */ |
11 | 11 | ||
12 | /* | ||
13 | * Used to differentiate between relocatable kdump kernel and other | ||
14 | * kernels | ||
15 | */ | ||
16 | #define KDUMP_SIGNATURE 0xfeed1234 | ||
17 | |||
12 | #ifdef CONFIG_CRASH_DUMP | 18 | #ifdef CONFIG_CRASH_DUMP |
13 | 19 | ||
14 | #define KDUMP_TRAMPOLINE_START 0x0100 | 20 | #define KDUMP_TRAMPOLINE_START 0x0100 |
@@ -19,17 +25,18 @@ | |||
19 | #endif /* CONFIG_CRASH_DUMP */ | 25 | #endif /* CONFIG_CRASH_DUMP */ |
20 | 26 | ||
21 | #ifndef __ASSEMBLY__ | 27 | #ifndef __ASSEMBLY__ |
22 | #ifdef CONFIG_CRASH_DUMP | ||
23 | 28 | ||
29 | extern unsigned long __kdump_flag; | ||
30 | |||
31 | #if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_RELOCATABLE) | ||
24 | extern void reserve_kdump_trampoline(void); | 32 | extern void reserve_kdump_trampoline(void); |
25 | extern void setup_kdump_trampoline(void); | 33 | extern void setup_kdump_trampoline(void); |
26 | 34 | #else | |
27 | #else /* !CONFIG_CRASH_DUMP */ | 35 | /* !CRASH_DUMP || RELOCATABLE */ |
28 | |||
29 | static inline void reserve_kdump_trampoline(void) { ; } | 36 | static inline void reserve_kdump_trampoline(void) { ; } |
30 | static inline void setup_kdump_trampoline(void) { ; } | 37 | static inline void setup_kdump_trampoline(void) { ; } |
38 | #endif | ||
31 | 39 | ||
32 | #endif /* CONFIG_CRASH_DUMP */ | ||
33 | #endif /* __ASSEMBLY__ */ | 40 | #endif /* __ASSEMBLY__ */ |
34 | 41 | ||
35 | #endif /* __PPC64_KDUMP_H */ | 42 | #endif /* __PPC64_KDUMP_H */ |
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 5ac51e6efc1d..c0b8d4a29a91 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -77,6 +77,7 @@ | |||
77 | 77 | ||
78 | #if defined(CONFIG_RELOCATABLE) | 78 | #if defined(CONFIG_RELOCATABLE) |
79 | #ifndef __ASSEMBLY__ | 79 | #ifndef __ASSEMBLY__ |
80 | |||
80 | extern phys_addr_t memstart_addr; | 81 | extern phys_addr_t memstart_addr; |
81 | extern phys_addr_t kernstart_addr; | 82 | extern phys_addr_t kernstart_addr; |
82 | #endif | 83 | #endif |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index e70d0483fb4e..b1eb834bc0fc 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -1277,6 +1277,19 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1277 | .machine_check = machine_check_4xx, | 1277 | .machine_check = machine_check_4xx, |
1278 | .platform = "ppc405", | 1278 | .platform = "ppc405", |
1279 | }, | 1279 | }, |
1280 | { | ||
1281 | /* 405EZ */ | ||
1282 | .pvr_mask = 0xffff0000, | ||
1283 | .pvr_value = 0x41510000, | ||
1284 | .cpu_name = "405EZ", | ||
1285 | .cpu_features = CPU_FTRS_40X, | ||
1286 | .cpu_user_features = PPC_FEATURE_32 | | ||
1287 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
1288 | .icache_bsize = 32, | ||
1289 | .dcache_bsize = 32, | ||
1290 | .machine_check = machine_check_4xx, | ||
1291 | .platform = "ppc405", | ||
1292 | }, | ||
1280 | { /* default match */ | 1293 | { /* default match */ |
1281 | .pvr_mask = 0x00000000, | 1294 | .pvr_mask = 0x00000000, |
1282 | .pvr_value = 0x00000000, | 1295 | .pvr_value = 0x00000000, |
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 97e056379728..19671aca6591 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -30,6 +30,7 @@ | |||
30 | /* Stores the physical address of elf header of crash image. */ | 30 | /* Stores the physical address of elf header of crash image. */ |
31 | unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; | 31 | unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; |
32 | 32 | ||
33 | #ifndef CONFIG_RELOCATABLE | ||
33 | void __init reserve_kdump_trampoline(void) | 34 | void __init reserve_kdump_trampoline(void) |
34 | { | 35 | { |
35 | lmb_reserve(0, KDUMP_RESERVE_LIMIT); | 36 | lmb_reserve(0, KDUMP_RESERVE_LIMIT); |
@@ -68,6 +69,7 @@ void __init setup_kdump_trampoline(void) | |||
68 | 69 | ||
69 | DBG(" <- setup_kdump_trampoline()\n"); | 70 | DBG(" <- setup_kdump_trampoline()\n"); |
70 | } | 71 | } |
72 | #endif /* CONFIG_RELOCATABLE */ | ||
71 | 73 | ||
72 | /* | 74 | /* |
73 | * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by | 75 | * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 84856bee33a5..69489bd3210c 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -97,6 +97,12 @@ __secondary_hold_spinloop: | |||
97 | __secondary_hold_acknowledge: | 97 | __secondary_hold_acknowledge: |
98 | .llong 0x0 | 98 | .llong 0x0 |
99 | 99 | ||
100 | /* This flag is set by purgatory if we should be a kdump kernel. */ | ||
101 | /* Do not move this variable as purgatory knows about it. */ | ||
102 | .globl __kdump_flag | ||
103 | __kdump_flag: | ||
104 | .llong 0x0 | ||
105 | |||
100 | #ifdef CONFIG_PPC_ISERIES | 106 | #ifdef CONFIG_PPC_ISERIES |
101 | /* | 107 | /* |
102 | * At offset 0x20, there is a pointer to iSeries LPAR data. | 108 | * At offset 0x20, there is a pointer to iSeries LPAR data. |
@@ -1384,7 +1390,13 @@ _STATIC(__after_prom_start) | |||
1384 | /* process relocations for the final address of the kernel */ | 1390 | /* process relocations for the final address of the kernel */ |
1385 | lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ | 1391 | lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ |
1386 | sldi r25,r25,32 | 1392 | sldi r25,r25,32 |
1387 | mr r3,r25 | 1393 | #ifdef CONFIG_CRASH_DUMP |
1394 | ld r7,__kdump_flag-_stext(r26) | ||
1395 | cmpldi cr0,r7,1 /* kdump kernel ? - stay where we are */ | ||
1396 | bne 1f | ||
1397 | add r25,r25,r26 | ||
1398 | #endif | ||
1399 | 1: mr r3,r25 | ||
1388 | bl .relocate | 1400 | bl .relocate |
1389 | #endif | 1401 | #endif |
1390 | 1402 | ||
@@ -1398,11 +1410,26 @@ _STATIC(__after_prom_start) | |||
1398 | li r3,0 /* target addr */ | 1410 | li r3,0 /* target addr */ |
1399 | mr. r4,r26 /* In some cases the loader may */ | 1411 | mr. r4,r26 /* In some cases the loader may */ |
1400 | beq 9f /* have already put us at zero */ | 1412 | beq 9f /* have already put us at zero */ |
1401 | lis r5,(copy_to_here - _stext)@ha | ||
1402 | addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ | ||
1403 | li r6,0x100 /* Start offset, the first 0x100 */ | 1413 | li r6,0x100 /* Start offset, the first 0x100 */ |
1404 | /* bytes were copied earlier. */ | 1414 | /* bytes were copied earlier. */ |
1405 | 1415 | ||
1416 | #ifdef CONFIG_CRASH_DUMP | ||
1417 | /* | ||
1418 | * Check if the kernel has to be running as relocatable kernel based on the | ||
1419 | * variable __kdump_flag, if it is set the kernel is treated as relocatable | ||
1420 | * kernel, otherwise it will be moved to PHYSICAL_START | ||
1421 | */ | ||
1422 | ld r7,__kdump_flag-_stext(r26) | ||
1423 | cmpldi cr0,r7,1 | ||
1424 | bne 3f | ||
1425 | |||
1426 | li r5,__end_interrupts - _stext /* just copy interrupts */ | ||
1427 | b 5f | ||
1428 | 3: | ||
1429 | #endif | ||
1430 | lis r5,(copy_to_here - _stext)@ha | ||
1431 | addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ | ||
1432 | |||
1406 | bl .copy_and_flush /* copy the first n bytes */ | 1433 | bl .copy_and_flush /* copy the first n bytes */ |
1407 | /* this includes the code being */ | 1434 | /* this includes the code being */ |
1408 | /* executed here. */ | 1435 | /* executed here. */ |
@@ -1411,15 +1438,15 @@ _STATIC(__after_prom_start) | |||
1411 | mtctr r8 | 1438 | mtctr r8 |
1412 | bctr | 1439 | bctr |
1413 | 1440 | ||
1441 | p_end: .llong _end - _stext | ||
1442 | |||
1414 | 4: /* Now copy the rest of the kernel up to _end */ | 1443 | 4: /* Now copy the rest of the kernel up to _end */ |
1415 | addis r5,r26,(p_end - _stext)@ha | 1444 | addis r5,r26,(p_end - _stext)@ha |
1416 | ld r5,(p_end - _stext)@l(r5) /* get _end */ | 1445 | ld r5,(p_end - _stext)@l(r5) /* get _end */ |
1417 | bl .copy_and_flush /* copy the rest */ | 1446 | 5: bl .copy_and_flush /* copy the rest */ |
1418 | 1447 | ||
1419 | 9: b .start_here_multiplatform | 1448 | 9: b .start_here_multiplatform |
1420 | 1449 | ||
1421 | p_end: .llong _end - _stext | ||
1422 | |||
1423 | /* | 1450 | /* |
1424 | * Copy routine used to copy the kernel to start at physical address 0 | 1451 | * Copy routine used to copy the kernel to start at physical address 0 |
1425 | * and flush and invalidate the caches as needed. | 1452 | * and flush and invalidate the caches as needed. |
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index ea1ba89f9c90..3857d7e2af0c 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -458,6 +458,42 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
458 | spin_unlock_irqrestore(&(tbl->it_lock), flags); | 458 | spin_unlock_irqrestore(&(tbl->it_lock), flags); |
459 | } | 459 | } |
460 | 460 | ||
461 | static void iommu_table_clear(struct iommu_table *tbl) | ||
462 | { | ||
463 | if (!__kdump_flag) { | ||
464 | /* Clear the table in case firmware left allocations in it */ | ||
465 | ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size); | ||
466 | return; | ||
467 | } | ||
468 | |||
469 | #ifdef CONFIG_CRASH_DUMP | ||
470 | if (ppc_md.tce_get) { | ||
471 | unsigned long index, tceval, tcecount = 0; | ||
472 | |||
473 | /* Reserve the existing mappings left by the first kernel. */ | ||
474 | for (index = 0; index < tbl->it_size; index++) { | ||
475 | tceval = ppc_md.tce_get(tbl, index + tbl->it_offset); | ||
476 | /* | ||
477 | * Freed TCE entry contains 0x7fffffffffffffff on JS20 | ||
478 | */ | ||
479 | if (tceval && (tceval != 0x7fffffffffffffffUL)) { | ||
480 | __set_bit(index, tbl->it_map); | ||
481 | tcecount++; | ||
482 | } | ||
483 | } | ||
484 | |||
485 | if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { | ||
486 | printk(KERN_WARNING "TCE table is full; freeing "); | ||
487 | printk(KERN_WARNING "%d entries for the kdump boot\n", | ||
488 | KDUMP_MIN_TCE_ENTRIES); | ||
489 | for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; | ||
490 | index < tbl->it_size; index++) | ||
491 | __clear_bit(index, tbl->it_map); | ||
492 | } | ||
493 | } | ||
494 | #endif | ||
495 | } | ||
496 | |||
461 | /* | 497 | /* |
462 | * Build a iommu_table structure. This contains a bit map which | 498 | * Build a iommu_table structure. This contains a bit map which |
463 | * is used to manage allocation of the tce space. | 499 | * is used to manage allocation of the tce space. |
@@ -484,38 +520,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) | |||
484 | tbl->it_largehint = tbl->it_halfpoint; | 520 | tbl->it_largehint = tbl->it_halfpoint; |
485 | spin_lock_init(&tbl->it_lock); | 521 | spin_lock_init(&tbl->it_lock); |
486 | 522 | ||
487 | #ifdef CONFIG_CRASH_DUMP | 523 | iommu_table_clear(tbl); |
488 | if (ppc_md.tce_get) { | ||
489 | unsigned long index; | ||
490 | unsigned long tceval; | ||
491 | unsigned long tcecount = 0; | ||
492 | |||
493 | /* | ||
494 | * Reserve the existing mappings left by the first kernel. | ||
495 | */ | ||
496 | for (index = 0; index < tbl->it_size; index++) { | ||
497 | tceval = ppc_md.tce_get(tbl, index + tbl->it_offset); | ||
498 | /* | ||
499 | * Freed TCE entry contains 0x7fffffffffffffff on JS20 | ||
500 | */ | ||
501 | if (tceval && (tceval != 0x7fffffffffffffffUL)) { | ||
502 | __set_bit(index, tbl->it_map); | ||
503 | tcecount++; | ||
504 | } | ||
505 | } | ||
506 | if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { | ||
507 | printk(KERN_WARNING "TCE table is full; "); | ||
508 | printk(KERN_WARNING "freeing %d entries for the kdump boot\n", | ||
509 | KDUMP_MIN_TCE_ENTRIES); | ||
510 | for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; | ||
511 | index < tbl->it_size; index++) | ||
512 | __clear_bit(index, tbl->it_map); | ||
513 | } | ||
514 | } | ||
515 | #else | ||
516 | /* Clear the hardware table in case firmware left allocations in it */ | ||
517 | ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size); | ||
518 | #endif | ||
519 | 524 | ||
520 | if (!welcomed) { | 525 | if (!welcomed) { |
521 | printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n", | 526 | printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n", |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index aab76887a842..ac2a21f45c75 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -88,11 +88,13 @@ void __init reserve_crashkernel(void) | |||
88 | 88 | ||
89 | crash_size = crashk_res.end - crashk_res.start + 1; | 89 | crash_size = crashk_res.end - crashk_res.start + 1; |
90 | 90 | ||
91 | #ifndef CONFIG_RELOCATABLE | ||
91 | if (crashk_res.start != KDUMP_KERNELBASE) | 92 | if (crashk_res.start != KDUMP_KERNELBASE) |
92 | printk("Crash kernel location must be 0x%x\n", | 93 | printk("Crash kernel location must be 0x%x\n", |
93 | KDUMP_KERNELBASE); | 94 | KDUMP_KERNELBASE); |
94 | 95 | ||
95 | crashk_res.start = KDUMP_KERNELBASE; | 96 | crashk_res.start = KDUMP_KERNELBASE; |
97 | #endif | ||
96 | crash_size = PAGE_ALIGN(crash_size); | 98 | crash_size = PAGE_ALIGN(crash_size); |
97 | crashk_res.end = crashk_res.start + crash_size - 1; | 99 | crashk_res.end = crashk_res.start + crash_size - 1; |
98 | 100 | ||
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index a168514d8609..e6efec788c4d 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c | |||
@@ -255,11 +255,14 @@ static union thread_union kexec_stack | |||
255 | /* Our assembly helper, in kexec_stub.S */ | 255 | /* Our assembly helper, in kexec_stub.S */ |
256 | extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, | 256 | extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, |
257 | void *image, void *control, | 257 | void *image, void *control, |
258 | void (*clear_all)(void)) ATTRIB_NORET; | 258 | void (*clear_all)(void), |
259 | unsigned long kdump_flag) ATTRIB_NORET; | ||
259 | 260 | ||
260 | /* too late to fail here */ | 261 | /* too late to fail here */ |
261 | void default_machine_kexec(struct kimage *image) | 262 | void default_machine_kexec(struct kimage *image) |
262 | { | 263 | { |
264 | unsigned long kdump_flag = 0; | ||
265 | |||
263 | /* prepare control code if any */ | 266 | /* prepare control code if any */ |
264 | 267 | ||
265 | /* | 268 | /* |
@@ -270,8 +273,10 @@ void default_machine_kexec(struct kimage *image) | |||
270 | * using debugger IPI. | 273 | * using debugger IPI. |
271 | */ | 274 | */ |
272 | 275 | ||
273 | if (crashing_cpu == -1) | 276 | if (crashing_cpu == -1) |
274 | kexec_prepare_cpus(); | 277 | kexec_prepare_cpus(); |
278 | else | ||
279 | kdump_flag = KDUMP_SIGNATURE; | ||
275 | 280 | ||
276 | /* switch to a staticly allocated stack. Based on irq stack code. | 281 | /* switch to a staticly allocated stack. Based on irq stack code. |
277 | * XXX: the task struct will likely be invalid once we do the copy! | 282 | * XXX: the task struct will likely be invalid once we do the copy! |
@@ -284,7 +289,7 @@ void default_machine_kexec(struct kimage *image) | |||
284 | */ | 289 | */ |
285 | kexec_sequence(&kexec_stack, image->start, image, | 290 | kexec_sequence(&kexec_stack, image->start, image, |
286 | page_address(image->control_code_page), | 291 | page_address(image->control_code_page), |
287 | ppc_md.hpte_clear_all); | 292 | ppc_md.hpte_clear_all, kdump_flag); |
288 | /* NOTREACHED */ | 293 | /* NOTREACHED */ |
289 | } | 294 | } |
290 | 295 | ||
@@ -312,11 +317,24 @@ static struct property kernel_end_prop = { | |||
312 | static void __init export_htab_values(void) | 317 | static void __init export_htab_values(void) |
313 | { | 318 | { |
314 | struct device_node *node; | 319 | struct device_node *node; |
320 | struct property *prop; | ||
315 | 321 | ||
316 | node = of_find_node_by_path("/chosen"); | 322 | node = of_find_node_by_path("/chosen"); |
317 | if (!node) | 323 | if (!node) |
318 | return; | 324 | return; |
319 | 325 | ||
326 | /* remove any stale propertys so ours can be found */ | ||
327 | prop = of_find_property(node, kernel_end_prop.name, NULL); | ||
328 | if (prop) | ||
329 | prom_remove_property(node, prop); | ||
330 | prop = of_find_property(node, htab_base_prop.name, NULL); | ||
331 | if (prop) | ||
332 | prom_remove_property(node, prop); | ||
333 | prop = of_find_property(node, htab_size_prop.name, NULL); | ||
334 | if (prop) | ||
335 | prom_remove_property(node, prop); | ||
336 | |||
337 | /* information needed by userspace when using default_machine_kexec */ | ||
320 | kernel_end = __pa(_end); | 338 | kernel_end = __pa(_end); |
321 | prom_add_property(node, &kernel_end_prop); | 339 | prom_add_property(node, &kernel_end_prop); |
322 | 340 | ||
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 3053fe5c62f2..a243fd072a77 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -611,10 +611,12 @@ real_mode: /* assume normal blr return */ | |||
611 | 611 | ||
612 | 612 | ||
613 | /* | 613 | /* |
614 | * kexec_sequence(newstack, start, image, control, clear_all()) | 614 | * kexec_sequence(newstack, start, image, control, clear_all(), kdump_flag) |
615 | * | 615 | * |
616 | * does the grungy work with stack switching and real mode switches | 616 | * does the grungy work with stack switching and real mode switches |
617 | * also does simple calls to other code | 617 | * also does simple calls to other code |
618 | * | ||
619 | * kdump_flag says whether the next kernel should be a kdump kernel. | ||
618 | */ | 620 | */ |
619 | 621 | ||
620 | _GLOBAL(kexec_sequence) | 622 | _GLOBAL(kexec_sequence) |
@@ -647,7 +649,7 @@ _GLOBAL(kexec_sequence) | |||
647 | mr r29,r5 /* image (virt) */ | 649 | mr r29,r5 /* image (virt) */ |
648 | mr r28,r6 /* control, unused */ | 650 | mr r28,r6 /* control, unused */ |
649 | mr r27,r7 /* clear_all() fn desc */ | 651 | mr r27,r7 /* clear_all() fn desc */ |
650 | mr r26,r8 /* spare */ | 652 | mr r26,r8 /* kdump flag */ |
651 | lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ | 653 | lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ |
652 | 654 | ||
653 | /* disable interrupts, we are overwriting kernel data next */ | 655 | /* disable interrupts, we are overwriting kernel data next */ |
@@ -709,5 +711,6 @@ _GLOBAL(kexec_sequence) | |||
709 | mr r4,r30 # start, aka phys mem offset | 711 | mr r4,r30 # start, aka phys mem offset |
710 | mtlr 4 | 712 | mtlr 4 |
711 | li r5,0 | 713 | li r5,0 |
712 | blr /* image->start(physid, image->start, 0); */ | 714 | mr r6,r26 /* kdump_flag */ |
715 | blr /* image->start(physid, image->start, 0, kdump_flag); */ | ||
713 | #endif /* CONFIG_KEXEC */ | 716 | #endif /* CONFIG_KEXEC */ |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 3815d84a1ef4..1ec73938a00f 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -610,7 +610,8 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus, | |||
610 | pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); | 610 | pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); |
611 | 611 | ||
612 | vma->vm_pgoff = offset >> PAGE_SHIFT; | 612 | vma->vm_pgoff = offset >> PAGE_SHIFT; |
613 | vma->vm_page_prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; | 613 | vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
614 | | _PAGE_NO_CACHE | _PAGE_GUARDED); | ||
614 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | 615 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
615 | vma->vm_end - vma->vm_start, | 616 | vma->vm_end - vma->vm_start, |
616 | vma->vm_page_prot); | 617 | vma->vm_page_prot); |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 2fdbc18ae94a..23e0db203329 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -487,67 +487,6 @@ static int __init prom_setprop(phandle node, const char *nodename, | |||
487 | return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); | 487 | return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); |
488 | } | 488 | } |
489 | 489 | ||
490 | /* We can't use the standard versions because of RELOC headaches. */ | ||
491 | #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ | ||
492 | || ('a' <= (c) && (c) <= 'f') \ | ||
493 | || ('A' <= (c) && (c) <= 'F')) | ||
494 | |||
495 | #define isdigit(c) ('0' <= (c) && (c) <= '9') | ||
496 | #define islower(c) ('a' <= (c) && (c) <= 'z') | ||
497 | #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) | ||
498 | |||
499 | unsigned long prom_strtoul(const char *cp, const char **endp) | ||
500 | { | ||
501 | unsigned long result = 0, base = 10, value; | ||
502 | |||
503 | if (*cp == '0') { | ||
504 | base = 8; | ||
505 | cp++; | ||
506 | if (toupper(*cp) == 'X') { | ||
507 | cp++; | ||
508 | base = 16; | ||
509 | } | ||
510 | } | ||
511 | |||
512 | while (isxdigit(*cp) && | ||
513 | (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { | ||
514 | result = result * base + value; | ||
515 | cp++; | ||
516 | } | ||
517 | |||
518 | if (endp) | ||
519 | *endp = cp; | ||
520 | |||
521 | return result; | ||
522 | } | ||
523 | |||
524 | unsigned long prom_memparse(const char *ptr, const char **retptr) | ||
525 | { | ||
526 | unsigned long ret = prom_strtoul(ptr, retptr); | ||
527 | int shift = 0; | ||
528 | |||
529 | /* | ||
530 | * We can't use a switch here because GCC *may* generate a | ||
531 | * jump table which won't work, because we're not running at | ||
532 | * the address we're linked at. | ||
533 | */ | ||
534 | if ('G' == **retptr || 'g' == **retptr) | ||
535 | shift = 30; | ||
536 | |||
537 | if ('M' == **retptr || 'm' == **retptr) | ||
538 | shift = 20; | ||
539 | |||
540 | if ('K' == **retptr || 'k' == **retptr) | ||
541 | shift = 10; | ||
542 | |||
543 | if (shift) { | ||
544 | ret <<= shift; | ||
545 | (*retptr)++; | ||
546 | } | ||
547 | |||
548 | return ret; | ||
549 | } | ||
550 | |||
551 | /* | 490 | /* |
552 | * Early parsing of the command line passed to the kernel, used for | 491 | * Early parsing of the command line passed to the kernel, used for |
553 | * "mem=x" and the options that affect the iommu | 492 | * "mem=x" and the options that affect the iommu |
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 2c7e8e87f770..ea3a2ec03ffa 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh | |||
@@ -20,7 +20,7 @@ WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush | |||
20 | _end enter_prom memcpy memset reloc_offset __secondary_hold | 20 | _end enter_prom memcpy memset reloc_offset __secondary_hold |
21 | __secondary_hold_acknowledge __secondary_hold_spinloop __start | 21 | __secondary_hold_acknowledge __secondary_hold_spinloop __start |
22 | strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 | 22 | strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 |
23 | reloc_got2 kernstart_addr" | 23 | reloc_got2 kernstart_addr memstart_addr" |
24 | 24 | ||
25 | NM="$1" | 25 | NM="$1" |
26 | OBJ="$2" | 26 | OBJ="$2" |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 5ec56ff03e86..705fc4bf3800 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #include <asm/mmu.h> | 59 | #include <asm/mmu.h> |
60 | #include <asm/xmon.h> | 60 | #include <asm/xmon.h> |
61 | #include <asm/cputhreads.h> | 61 | #include <asm/cputhreads.h> |
62 | #include <mm/mmu_decl.h> | ||
62 | 63 | ||
63 | #include "setup.h" | 64 | #include "setup.h" |
64 | 65 | ||
@@ -190,6 +191,12 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
190 | if (ppc_md.show_cpuinfo != NULL) | 191 | if (ppc_md.show_cpuinfo != NULL) |
191 | ppc_md.show_cpuinfo(m); | 192 | ppc_md.show_cpuinfo(m); |
192 | 193 | ||
194 | #ifdef CONFIG_PPC32 | ||
195 | /* Display the amount of memory */ | ||
196 | seq_printf(m, "Memory\t\t: %d MB\n", | ||
197 | (unsigned int)(total_memory / (1024 * 1024))); | ||
198 | #endif | ||
199 | |||
193 | return 0; | 200 | return 0; |
194 | } | 201 | } |
195 | 202 | ||
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 65ad925c3a8f..c6a8f2326b6f 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -235,8 +235,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, | |||
235 | else | 235 | else |
236 | for (i = 0; i < 32 ; i++) | 236 | for (i = 0; i < 32 ; i++) |
237 | current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; | 237 | current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; |
238 | |||
239 | #else | ||
240 | #endif | 238 | #endif |
241 | return err; | 239 | return err; |
242 | } | 240 | } |
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index cb01ebc59387..7b7da8cfd5e8 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c | |||
@@ -142,7 +142,7 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock) | |||
142 | speed = (clock / prescaler) / (divisor * 16); | 142 | speed = (clock / prescaler) / (divisor * 16); |
143 | 143 | ||
144 | /* sanity check */ | 144 | /* sanity check */ |
145 | if (speed < 0 || speed > (clock / 16)) | 145 | if (speed > (clock / 16)) |
146 | speed = 9600; | 146 | speed = 9600; |
147 | 147 | ||
148 | return speed; | 148 | return speed; |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 5c64af174752..8d5b4758c13a 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -382,8 +382,10 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node, | |||
382 | printk(KERN_INFO "Huge page(16GB) memory: " | 382 | printk(KERN_INFO "Huge page(16GB) memory: " |
383 | "addr = 0x%lX size = 0x%lX pages = %d\n", | 383 | "addr = 0x%lX size = 0x%lX pages = %d\n", |
384 | phys_addr, block_size, expected_pages); | 384 | phys_addr, block_size, expected_pages); |
385 | lmb_reserve(phys_addr, block_size * expected_pages); | 385 | if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) { |
386 | add_gpage(phys_addr, block_size, expected_pages); | 386 | lmb_reserve(phys_addr, block_size * expected_pages); |
387 | add_gpage(phys_addr, block_size, expected_pages); | ||
388 | } | ||
387 | return 0; | 389 | return 0; |
388 | } | 390 | } |
389 | #endif /* CONFIG_HUGETLB_PAGE */ | 391 | #endif /* CONFIG_HUGETLB_PAGE */ |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 6cf5c71c431f..eb505ad34a85 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -116,6 +116,7 @@ static int __init get_active_region_work_fn(unsigned long start_pfn, | |||
116 | 116 | ||
117 | /* | 117 | /* |
118 | * get_node_active_region - Return active region containing start_pfn | 118 | * get_node_active_region - Return active region containing start_pfn |
119 | * Active range returned is empty if none found. | ||
119 | * @start_pfn: The page to return the region for. | 120 | * @start_pfn: The page to return the region for. |
120 | * @node_ar: Returned set to the active region containing start_pfn | 121 | * @node_ar: Returned set to the active region containing start_pfn |
121 | */ | 122 | */ |
@@ -126,6 +127,7 @@ static void __init get_node_active_region(unsigned long start_pfn, | |||
126 | 127 | ||
127 | node_ar->nid = nid; | 128 | node_ar->nid = nid; |
128 | node_ar->start_pfn = start_pfn; | 129 | node_ar->start_pfn = start_pfn; |
130 | node_ar->end_pfn = start_pfn; | ||
129 | work_with_active_regions(nid, get_active_region_work_fn, node_ar); | 131 | work_with_active_regions(nid, get_active_region_work_fn, node_ar); |
130 | } | 132 | } |
131 | 133 | ||
@@ -526,12 +528,10 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start, | |||
526 | /* | 528 | /* |
527 | * We use lmb_end_of_DRAM() in here instead of memory_limit because | 529 | * We use lmb_end_of_DRAM() in here instead of memory_limit because |
528 | * we've already adjusted it for the limit and it takes care of | 530 | * we've already adjusted it for the limit and it takes care of |
529 | * having memory holes below the limit. | 531 | * having memory holes below the limit. Also, in the case of |
532 | * iommu_is_off, memory_limit is not set but is implicitly enforced. | ||
530 | */ | 533 | */ |
531 | 534 | ||
532 | if (! memory_limit) | ||
533 | return size; | ||
534 | |||
535 | if (start + size <= lmb_end_of_DRAM()) | 535 | if (start + size <= lmb_end_of_DRAM()) |
536 | return size; | 536 | return size; |
537 | 537 | ||
@@ -933,18 +933,20 @@ void __init do_init_bootmem(void) | |||
933 | struct node_active_region node_ar; | 933 | struct node_active_region node_ar; |
934 | 934 | ||
935 | get_node_active_region(start_pfn, &node_ar); | 935 | get_node_active_region(start_pfn, &node_ar); |
936 | while (start_pfn < end_pfn) { | 936 | while (start_pfn < end_pfn && |
937 | node_ar.start_pfn < node_ar.end_pfn) { | ||
938 | unsigned long reserve_size = size; | ||
937 | /* | 939 | /* |
938 | * if reserved region extends past active region | 940 | * if reserved region extends past active region |
939 | * then trim size to active region | 941 | * then trim size to active region |
940 | */ | 942 | */ |
941 | if (end_pfn > node_ar.end_pfn) | 943 | if (end_pfn > node_ar.end_pfn) |
942 | size = (node_ar.end_pfn << PAGE_SHIFT) | 944 | reserve_size = (node_ar.end_pfn << PAGE_SHIFT) |
943 | - (start_pfn << PAGE_SHIFT); | 945 | - (start_pfn << PAGE_SHIFT); |
944 | dbg("reserve_bootmem %lx %lx nid=%d\n", physbase, size, | 946 | dbg("reserve_bootmem %lx %lx nid=%d\n", physbase, |
945 | node_ar.nid); | 947 | reserve_size, node_ar.nid); |
946 | reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase, | 948 | reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase, |
947 | size, BOOTMEM_DEFAULT); | 949 | reserve_size, BOOTMEM_DEFAULT); |
948 | /* | 950 | /* |
949 | * if reserved region is contained in the active region | 951 | * if reserved region is contained in the active region |
950 | * then done. | 952 | * then done. |
@@ -959,6 +961,7 @@ void __init do_init_bootmem(void) | |||
959 | */ | 961 | */ |
960 | start_pfn = node_ar.end_pfn; | 962 | start_pfn = node_ar.end_pfn; |
961 | physbase = start_pfn << PAGE_SHIFT; | 963 | physbase = start_pfn << PAGE_SHIFT; |
964 | size = size - reserve_size; | ||
962 | get_node_active_region(start_pfn, &node_ar); | 965 | get_node_active_region(start_pfn, &node_ar); |
963 | } | 966 | } |
964 | 967 | ||
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h index 22e4e8d4eb2c..628009c01958 100644 --- a/arch/powerpc/oprofile/cell/pr_util.h +++ b/arch/powerpc/oprofile/cell/pr_util.h | |||
@@ -24,6 +24,11 @@ | |||
24 | #define SKIP_GENERIC_SYNC 0 | 24 | #define SKIP_GENERIC_SYNC 0 |
25 | #define SYNC_START_ERROR -1 | 25 | #define SYNC_START_ERROR -1 |
26 | #define DO_GENERIC_SYNC 1 | 26 | #define DO_GENERIC_SYNC 1 |
27 | #define SPUS_PER_NODE 8 | ||
28 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) | ||
29 | |||
30 | extern struct delayed_work spu_work; | ||
31 | extern int spu_prof_running; | ||
27 | 32 | ||
28 | struct spu_overlay_info { /* map of sections within an SPU overlay */ | 33 | struct spu_overlay_info { /* map of sections within an SPU overlay */ |
29 | unsigned int vma; /* SPU virtual memory address from elf */ | 34 | unsigned int vma; /* SPU virtual memory address from elf */ |
@@ -62,6 +67,14 @@ struct vma_to_fileoffset_map { /* map of sections within an SPU program */ | |||
62 | 67 | ||
63 | }; | 68 | }; |
64 | 69 | ||
70 | struct spu_buffer { | ||
71 | int last_guard_val; | ||
72 | int ctx_sw_seen; | ||
73 | unsigned long *buff; | ||
74 | unsigned int head, tail; | ||
75 | }; | ||
76 | |||
77 | |||
65 | /* The three functions below are for maintaining and accessing | 78 | /* The three functions below are for maintaining and accessing |
66 | * the vma-to-fileoffset map. | 79 | * the vma-to-fileoffset map. |
67 | */ | 80 | */ |
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c index 380d7e217531..6edaebd5099a 100644 --- a/arch/powerpc/oprofile/cell/spu_profiler.c +++ b/arch/powerpc/oprofile/cell/spu_profiler.c | |||
@@ -23,12 +23,11 @@ | |||
23 | 23 | ||
24 | static u32 *samples; | 24 | static u32 *samples; |
25 | 25 | ||
26 | static int spu_prof_running; | 26 | int spu_prof_running; |
27 | static unsigned int profiling_interval; | 27 | static unsigned int profiling_interval; |
28 | 28 | ||
29 | #define NUM_SPU_BITS_TRBUF 16 | 29 | #define NUM_SPU_BITS_TRBUF 16 |
30 | #define SPUS_PER_TB_ENTRY 4 | 30 | #define SPUS_PER_TB_ENTRY 4 |
31 | #define SPUS_PER_NODE 8 | ||
32 | 31 | ||
33 | #define SPU_PC_MASK 0xFFFF | 32 | #define SPU_PC_MASK 0xFFFF |
34 | 33 | ||
@@ -208,6 +207,7 @@ int start_spu_profiling(unsigned int cycles_reset) | |||
208 | 207 | ||
209 | spu_prof_running = 1; | 208 | spu_prof_running = 1; |
210 | hrtimer_start(&timer, kt, HRTIMER_MODE_REL); | 209 | hrtimer_start(&timer, kt, HRTIMER_MODE_REL); |
210 | schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE); | ||
211 | 211 | ||
212 | return 0; | 212 | return 0; |
213 | } | 213 | } |
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c index 2a9b4a049329..2949126d28d1 100644 --- a/arch/powerpc/oprofile/cell/spu_task_sync.c +++ b/arch/powerpc/oprofile/cell/spu_task_sync.c | |||
@@ -35,7 +35,102 @@ static DEFINE_SPINLOCK(buffer_lock); | |||
35 | static DEFINE_SPINLOCK(cache_lock); | 35 | static DEFINE_SPINLOCK(cache_lock); |
36 | static int num_spu_nodes; | 36 | static int num_spu_nodes; |
37 | int spu_prof_num_nodes; | 37 | int spu_prof_num_nodes; |
38 | int last_guard_val[MAX_NUMNODES * 8]; | 38 | |
39 | struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE]; | ||
40 | struct delayed_work spu_work; | ||
41 | static unsigned max_spu_buff; | ||
42 | |||
43 | static void spu_buff_add(unsigned long int value, int spu) | ||
44 | { | ||
45 | /* spu buff is a circular buffer. Add entries to the | ||
46 | * head. Head is the index to store the next value. | ||
47 | * The buffer is full when there is one available entry | ||
48 | * in the queue, i.e. head and tail can't be equal. | ||
49 | * That way we can tell the difference between the | ||
50 | * buffer being full versus empty. | ||
51 | * | ||
52 | * ASSUPTION: the buffer_lock is held when this function | ||
53 | * is called to lock the buffer, head and tail. | ||
54 | */ | ||
55 | int full = 1; | ||
56 | |||
57 | if (spu_buff[spu].head >= spu_buff[spu].tail) { | ||
58 | if ((spu_buff[spu].head - spu_buff[spu].tail) | ||
59 | < (max_spu_buff - 1)) | ||
60 | full = 0; | ||
61 | |||
62 | } else if (spu_buff[spu].tail > spu_buff[spu].head) { | ||
63 | if ((spu_buff[spu].tail - spu_buff[spu].head) | ||
64 | > 1) | ||
65 | full = 0; | ||
66 | } | ||
67 | |||
68 | if (!full) { | ||
69 | spu_buff[spu].buff[spu_buff[spu].head] = value; | ||
70 | spu_buff[spu].head++; | ||
71 | |||
72 | if (spu_buff[spu].head >= max_spu_buff) | ||
73 | spu_buff[spu].head = 0; | ||
74 | } else { | ||
75 | /* From the user's perspective make the SPU buffer | ||
76 | * size management/overflow look like we are using | ||
77 | * per cpu buffers. The user uses the same | ||
78 | * per cpu parameter to adjust the SPU buffer size. | ||
79 | * Increment the sample_lost_overflow to inform | ||
80 | * the user the buffer size needs to be increased. | ||
81 | */ | ||
82 | oprofile_cpu_buffer_inc_smpl_lost(); | ||
83 | } | ||
84 | } | ||
85 | |||
86 | /* This function copies the per SPU buffers to the | ||
87 | * OProfile kernel buffer. | ||
88 | */ | ||
89 | void sync_spu_buff(void) | ||
90 | { | ||
91 | int spu; | ||
92 | unsigned long flags; | ||
93 | int curr_head; | ||
94 | |||
95 | for (spu = 0; spu < num_spu_nodes; spu++) { | ||
96 | /* In case there was an issue and the buffer didn't | ||
97 | * get created skip it. | ||
98 | */ | ||
99 | if (spu_buff[spu].buff == NULL) | ||
100 | continue; | ||
101 | |||
102 | /* Hold the lock to make sure the head/tail | ||
103 | * doesn't change while spu_buff_add() is | ||
104 | * deciding if the buffer is full or not. | ||
105 | * Being a little paranoid. | ||
106 | */ | ||
107 | spin_lock_irqsave(&buffer_lock, flags); | ||
108 | curr_head = spu_buff[spu].head; | ||
109 | spin_unlock_irqrestore(&buffer_lock, flags); | ||
110 | |||
111 | /* Transfer the current contents to the kernel buffer. | ||
112 | * data can still be added to the head of the buffer. | ||
113 | */ | ||
114 | oprofile_put_buff(spu_buff[spu].buff, | ||
115 | spu_buff[spu].tail, | ||
116 | curr_head, max_spu_buff); | ||
117 | |||
118 | spin_lock_irqsave(&buffer_lock, flags); | ||
119 | spu_buff[spu].tail = curr_head; | ||
120 | spin_unlock_irqrestore(&buffer_lock, flags); | ||
121 | } | ||
122 | |||
123 | } | ||
124 | |||
125 | static void wq_sync_spu_buff(struct work_struct *work) | ||
126 | { | ||
127 | /* move data from spu buffers to kernel buffer */ | ||
128 | sync_spu_buff(); | ||
129 | |||
130 | /* only reschedule if profiling is not done */ | ||
131 | if (spu_prof_running) | ||
132 | schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE); | ||
133 | } | ||
39 | 134 | ||
40 | /* Container for caching information about an active SPU task. */ | 135 | /* Container for caching information about an active SPU task. */ |
41 | struct cached_info { | 136 | struct cached_info { |
@@ -305,14 +400,21 @@ static int process_context_switch(struct spu *spu, unsigned long objectId) | |||
305 | 400 | ||
306 | /* Record context info in event buffer */ | 401 | /* Record context info in event buffer */ |
307 | spin_lock_irqsave(&buffer_lock, flags); | 402 | spin_lock_irqsave(&buffer_lock, flags); |
308 | add_event_entry(ESCAPE_CODE); | 403 | spu_buff_add(ESCAPE_CODE, spu->number); |
309 | add_event_entry(SPU_CTX_SWITCH_CODE); | 404 | spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number); |
310 | add_event_entry(spu->number); | 405 | spu_buff_add(spu->number, spu->number); |
311 | add_event_entry(spu->pid); | 406 | spu_buff_add(spu->pid, spu->number); |
312 | add_event_entry(spu->tgid); | 407 | spu_buff_add(spu->tgid, spu->number); |
313 | add_event_entry(app_dcookie); | 408 | spu_buff_add(app_dcookie, spu->number); |
314 | add_event_entry(spu_cookie); | 409 | spu_buff_add(spu_cookie, spu->number); |
315 | add_event_entry(offset); | 410 | spu_buff_add(offset, spu->number); |
411 | |||
412 | /* Set flag to indicate SPU PC data can now be written out. If | ||
413 | * the SPU program counter data is seen before an SPU context | ||
414 | * record is seen, the postprocessing will fail. | ||
415 | */ | ||
416 | spu_buff[spu->number].ctx_sw_seen = 1; | ||
417 | |||
316 | spin_unlock_irqrestore(&buffer_lock, flags); | 418 | spin_unlock_irqrestore(&buffer_lock, flags); |
317 | smp_wmb(); /* insure spu event buffer updates are written */ | 419 | smp_wmb(); /* insure spu event buffer updates are written */ |
318 | /* don't want entries intermingled... */ | 420 | /* don't want entries intermingled... */ |
@@ -360,6 +462,47 @@ static int number_of_online_nodes(void) | |||
360 | return nodes; | 462 | return nodes; |
361 | } | 463 | } |
362 | 464 | ||
465 | static int oprofile_spu_buff_create(void) | ||
466 | { | ||
467 | int spu; | ||
468 | |||
469 | max_spu_buff = oprofile_get_cpu_buffer_size(); | ||
470 | |||
471 | for (spu = 0; spu < num_spu_nodes; spu++) { | ||
472 | /* create circular buffers to store the data in. | ||
473 | * use locks to manage accessing the buffers | ||
474 | */ | ||
475 | spu_buff[spu].head = 0; | ||
476 | spu_buff[spu].tail = 0; | ||
477 | |||
478 | /* | ||
479 | * Create a buffer for each SPU. Can't reliably | ||
480 | * create a single buffer for all spus due to not | ||
481 | * enough contiguous kernel memory. | ||
482 | */ | ||
483 | |||
484 | spu_buff[spu].buff = kzalloc((max_spu_buff | ||
485 | * sizeof(unsigned long)), | ||
486 | GFP_KERNEL); | ||
487 | |||
488 | if (!spu_buff[spu].buff) { | ||
489 | printk(KERN_ERR "SPU_PROF: " | ||
490 | "%s, line %d: oprofile_spu_buff_create " | ||
491 | "failed to allocate spu buffer %d.\n", | ||
492 | __func__, __LINE__, spu); | ||
493 | |||
494 | /* release the spu buffers that have been allocated */ | ||
495 | while (spu >= 0) { | ||
496 | kfree(spu_buff[spu].buff); | ||
497 | spu_buff[spu].buff = 0; | ||
498 | spu--; | ||
499 | } | ||
500 | return -ENOMEM; | ||
501 | } | ||
502 | } | ||
503 | return 0; | ||
504 | } | ||
505 | |||
363 | /* The main purpose of this function is to synchronize | 506 | /* The main purpose of this function is to synchronize |
364 | * OProfile with SPUFS by registering to be notified of | 507 | * OProfile with SPUFS by registering to be notified of |
365 | * SPU task switches. | 508 | * SPU task switches. |
@@ -372,20 +515,35 @@ static int number_of_online_nodes(void) | |||
372 | */ | 515 | */ |
373 | int spu_sync_start(void) | 516 | int spu_sync_start(void) |
374 | { | 517 | { |
375 | int k; | 518 | int spu; |
376 | int ret = SKIP_GENERIC_SYNC; | 519 | int ret = SKIP_GENERIC_SYNC; |
377 | int register_ret; | 520 | int register_ret; |
378 | unsigned long flags = 0; | 521 | unsigned long flags = 0; |
379 | 522 | ||
380 | spu_prof_num_nodes = number_of_online_nodes(); | 523 | spu_prof_num_nodes = number_of_online_nodes(); |
381 | num_spu_nodes = spu_prof_num_nodes * 8; | 524 | num_spu_nodes = spu_prof_num_nodes * 8; |
525 | INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff); | ||
526 | |||
527 | /* create buffer for storing the SPU data to put in | ||
528 | * the kernel buffer. | ||
529 | */ | ||
530 | ret = oprofile_spu_buff_create(); | ||
531 | if (ret) | ||
532 | goto out; | ||
382 | 533 | ||
383 | spin_lock_irqsave(&buffer_lock, flags); | 534 | spin_lock_irqsave(&buffer_lock, flags); |
384 | add_event_entry(ESCAPE_CODE); | 535 | for (spu = 0; spu < num_spu_nodes; spu++) { |
385 | add_event_entry(SPU_PROFILING_CODE); | 536 | spu_buff_add(ESCAPE_CODE, spu); |
386 | add_event_entry(num_spu_nodes); | 537 | spu_buff_add(SPU_PROFILING_CODE, spu); |
538 | spu_buff_add(num_spu_nodes, spu); | ||
539 | } | ||
387 | spin_unlock_irqrestore(&buffer_lock, flags); | 540 | spin_unlock_irqrestore(&buffer_lock, flags); |
388 | 541 | ||
542 | for (spu = 0; spu < num_spu_nodes; spu++) { | ||
543 | spu_buff[spu].ctx_sw_seen = 0; | ||
544 | spu_buff[spu].last_guard_val = 0; | ||
545 | } | ||
546 | |||
389 | /* Register for SPU events */ | 547 | /* Register for SPU events */ |
390 | register_ret = spu_switch_event_register(&spu_active); | 548 | register_ret = spu_switch_event_register(&spu_active); |
391 | if (register_ret) { | 549 | if (register_ret) { |
@@ -393,8 +551,6 @@ int spu_sync_start(void) | |||
393 | goto out; | 551 | goto out; |
394 | } | 552 | } |
395 | 553 | ||
396 | for (k = 0; k < (MAX_NUMNODES * 8); k++) | ||
397 | last_guard_val[k] = 0; | ||
398 | pr_debug("spu_sync_start -- running.\n"); | 554 | pr_debug("spu_sync_start -- running.\n"); |
399 | out: | 555 | out: |
400 | return ret; | 556 | return ret; |
@@ -446,13 +602,20 @@ void spu_sync_buffer(int spu_num, unsigned int *samples, | |||
446 | * use. We need to discard samples taken during the time | 602 | * use. We need to discard samples taken during the time |
447 | * period which an overlay occurs (i.e., guard value changes). | 603 | * period which an overlay occurs (i.e., guard value changes). |
448 | */ | 604 | */ |
449 | if (grd_val && grd_val != last_guard_val[spu_num]) { | 605 | if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) { |
450 | last_guard_val[spu_num] = grd_val; | 606 | spu_buff[spu_num].last_guard_val = grd_val; |
451 | /* Drop the rest of the samples. */ | 607 | /* Drop the rest of the samples. */ |
452 | break; | 608 | break; |
453 | } | 609 | } |
454 | 610 | ||
455 | add_event_entry(file_offset | spu_num_shifted); | 611 | /* We must ensure that the SPU context switch has been written |
612 | * out before samples for the SPU. Otherwise, the SPU context | ||
613 | * information is not available and the postprocessing of the | ||
614 | * SPU PC will fail with no available anonymous map information. | ||
615 | */ | ||
616 | if (spu_buff[spu_num].ctx_sw_seen) | ||
617 | spu_buff_add((file_offset | spu_num_shifted), | ||
618 | spu_num); | ||
456 | } | 619 | } |
457 | spin_unlock(&buffer_lock); | 620 | spin_unlock(&buffer_lock); |
458 | out: | 621 | out: |
@@ -463,20 +626,41 @@ out: | |||
463 | int spu_sync_stop(void) | 626 | int spu_sync_stop(void) |
464 | { | 627 | { |
465 | unsigned long flags = 0; | 628 | unsigned long flags = 0; |
466 | int ret = spu_switch_event_unregister(&spu_active); | 629 | int ret; |
467 | if (ret) { | 630 | int k; |
631 | |||
632 | ret = spu_switch_event_unregister(&spu_active); | ||
633 | |||
634 | if (ret) | ||
468 | printk(KERN_ERR "SPU_PROF: " | 635 | printk(KERN_ERR "SPU_PROF: " |
469 | "%s, line %d: spu_switch_event_unregister returned %d\n", | 636 | "%s, line %d: spu_switch_event_unregister " \ |
470 | __func__, __LINE__, ret); | 637 | "returned %d\n", |
471 | goto out; | 638 | __func__, __LINE__, ret); |
472 | } | 639 | |
640 | /* flush any remaining data in the per SPU buffers */ | ||
641 | sync_spu_buff(); | ||
473 | 642 | ||
474 | spin_lock_irqsave(&cache_lock, flags); | 643 | spin_lock_irqsave(&cache_lock, flags); |
475 | ret = release_cached_info(RELEASE_ALL); | 644 | ret = release_cached_info(RELEASE_ALL); |
476 | spin_unlock_irqrestore(&cache_lock, flags); | 645 | spin_unlock_irqrestore(&cache_lock, flags); |
477 | out: | 646 | |
647 | /* remove scheduled work queue item rather than waiting | ||
648 | * for every queued entry to execute. Then flush pending | ||
649 | * system wide buffer to event buffer. | ||
650 | */ | ||
651 | cancel_delayed_work(&spu_work); | ||
652 | |||
653 | for (k = 0; k < num_spu_nodes; k++) { | ||
654 | spu_buff[k].ctx_sw_seen = 0; | ||
655 | |||
656 | /* | ||
657 | * spu_sys_buff will be null if there was a problem | ||
658 | * allocating the buffer. Only delete if it exists. | ||
659 | */ | ||
660 | kfree(spu_buff[k].buff); | ||
661 | spu_buff[k].buff = 0; | ||
662 | } | ||
478 | pr_debug("spu_sync_stop -- done.\n"); | 663 | pr_debug("spu_sync_stop -- done.\n"); |
479 | return ret; | 664 | return ret; |
480 | } | 665 | } |
481 | 666 | ||
482 | |||
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index a9260e21451e..65730275e012 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig | |||
@@ -14,6 +14,15 @@ | |||
14 | # help | 14 | # help |
15 | # This option enables support for the CPCI405 board. | 15 | # This option enables support for the CPCI405 board. |
16 | 16 | ||
17 | config ACADIA | ||
18 | bool "Acadia" | ||
19 | depends on 40x | ||
20 | default n | ||
21 | select PPC40x_SIMPLE | ||
22 | select 405EZ | ||
23 | help | ||
24 | This option enables support for the AMCC 405EZ Acadia evaluation board. | ||
25 | |||
17 | config EP405 | 26 | config EP405 |
18 | bool "EP405/EP405PC" | 27 | bool "EP405/EP405PC" |
19 | depends on 40x | 28 | depends on 40x |
@@ -23,6 +32,14 @@ config EP405 | |||
23 | help | 32 | help |
24 | This option enables support for the EP405/EP405PC boards. | 33 | This option enables support for the EP405/EP405PC boards. |
25 | 34 | ||
35 | config HCU4 | ||
36 | bool "Hcu4" | ||
37 | depends on 40x | ||
38 | default y | ||
39 | select 405GPR | ||
40 | help | ||
41 | This option enables support for the Netstal Maschinen HCU4 board. | ||
42 | |||
26 | config KILAUEA | 43 | config KILAUEA |
27 | bool "Kilauea" | 44 | bool "Kilauea" |
28 | depends on 40x | 45 | depends on 40x |
@@ -93,6 +110,13 @@ config XILINX_VIRTEX_GENERIC_BOARD | |||
93 | Most Virtex designs should use this unless it needs to do some | 110 | Most Virtex designs should use this unless it needs to do some |
94 | special configuration at board probe time. | 111 | special configuration at board probe time. |
95 | 112 | ||
113 | config PPC40x_SIMPLE | ||
114 | bool "Simple PowerPC 40x board support" | ||
115 | depends on 40x | ||
116 | default n | ||
117 | help | ||
118 | This option enables the simple PowerPC 40x platform support. | ||
119 | |||
96 | # 40x specific CPU modules, selected based on the board above. | 120 | # 40x specific CPU modules, selected based on the board above. |
97 | config NP405H | 121 | config NP405H |
98 | bool | 122 | bool |
@@ -118,6 +142,12 @@ config 405EX | |||
118 | select IBM_NEW_EMAC_EMAC4 | 142 | select IBM_NEW_EMAC_EMAC4 |
119 | select IBM_NEW_EMAC_RGMII | 143 | select IBM_NEW_EMAC_RGMII |
120 | 144 | ||
145 | config 405EZ | ||
146 | bool | ||
147 | select IBM_NEW_EMAC_NO_FLOW_CTRL | ||
148 | select IBM_NEW_EMAC_MAL_CLR_ICINTSTAT | ||
149 | select IBM_NEW_EMAC_MAL_COMMON_ERR | ||
150 | |||
121 | config 405GPR | 151 | config 405GPR |
122 | bool | 152 | bool |
123 | 153 | ||
@@ -139,6 +169,14 @@ config STB03xxx | |||
139 | select IBM405_ERR77 | 169 | select IBM405_ERR77 |
140 | select IBM405_ERR51 | 170 | select IBM405_ERR51 |
141 | 171 | ||
172 | config PPC4xx_GPIO | ||
173 | bool "PPC4xx GPIO support" | ||
174 | depends on 40x | ||
175 | select ARCH_REQUIRE_GPIOLIB | ||
176 | select GENERIC_GPIO | ||
177 | help | ||
178 | Enable gpiolib support for ppc40x based boards | ||
179 | |||
142 | # 40x errata/workaround config symbols, selected by the CPU models above | 180 | # 40x errata/workaround config symbols, selected by the CPU models above |
143 | 181 | ||
144 | # All 405-based cores up until the 405GPR and 405EP have this errata. | 182 | # All 405-based cores up until the 405GPR and 405EP have this errata. |
diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile index 5533a5c8ce4e..9bab76a652a6 100644 --- a/arch/powerpc/platforms/40x/Makefile +++ b/arch/powerpc/platforms/40x/Makefile | |||
@@ -1,5 +1,7 @@ | |||
1 | obj-$(CONFIG_KILAUEA) += kilauea.o | 1 | obj-$(CONFIG_KILAUEA) += kilauea.o |
2 | obj-$(CONFIG_HCU4) += hcu4.o | ||
2 | obj-$(CONFIG_MAKALU) += makalu.o | 3 | obj-$(CONFIG_MAKALU) += makalu.o |
3 | obj-$(CONFIG_WALNUT) += walnut.o | 4 | obj-$(CONFIG_WALNUT) += walnut.o |
4 | obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o | 5 | obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o |
5 | obj-$(CONFIG_EP405) += ep405.o | 6 | obj-$(CONFIG_EP405) += ep405.o |
7 | obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o | ||
diff --git a/arch/powerpc/platforms/40x/hcu4.c b/arch/powerpc/platforms/40x/hcu4.c new file mode 100644 index 000000000000..60b2afecab75 --- /dev/null +++ b/arch/powerpc/platforms/40x/hcu4.c | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * Architecture- / platform-specific boot-time initialization code for | ||
3 | * IBM PowerPC 4xx based boards. Adapted from original | ||
4 | * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek | ||
5 | * <dan@net4x.com>. | ||
6 | * | ||
7 | * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> | ||
8 | * | ||
9 | * Rewritten and ported to the merged powerpc tree: | ||
10 | * Copyright 2007 IBM Corporation | ||
11 | * Josh Boyer <jwboyer@linux.vnet.ibm.com> | ||
12 | * | ||
13 | * 2002 (c) MontaVista, Software, Inc. This file is licensed under | ||
14 | * the terms of the GNU General Public License version 2. This program | ||
15 | * is licensed "as is" without any warranty of any kind, whether express | ||
16 | * or implied. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/of_platform.h> | ||
21 | |||
22 | #include <asm/machdep.h> | ||
23 | #include <asm/prom.h> | ||
24 | #include <asm/udbg.h> | ||
25 | #include <asm/time.h> | ||
26 | #include <asm/uic.h> | ||
27 | #include <asm/ppc4xx.h> | ||
28 | |||
29 | static __initdata struct of_device_id hcu4_of_bus[] = { | ||
30 | { .compatible = "ibm,plb3", }, | ||
31 | { .compatible = "ibm,opb", }, | ||
32 | { .compatible = "ibm,ebc", }, | ||
33 | {}, | ||
34 | }; | ||
35 | |||
36 | static int __init hcu4_device_probe(void) | ||
37 | { | ||
38 | of_platform_bus_probe(NULL, hcu4_of_bus, NULL); | ||
39 | return 0; | ||
40 | } | ||
41 | machine_device_initcall(hcu4, hcu4_device_probe); | ||
42 | |||
43 | static int __init hcu4_probe(void) | ||
44 | { | ||
45 | unsigned long root = of_get_flat_dt_root(); | ||
46 | |||
47 | if (!of_flat_dt_is_compatible(root, "netstal,hcu4")) | ||
48 | return 0; | ||
49 | |||
50 | return 1; | ||
51 | } | ||
52 | |||
53 | define_machine(hcu4) { | ||
54 | .name = "HCU4", | ||
55 | .probe = hcu4_probe, | ||
56 | .progress = udbg_progress, | ||
57 | .init_IRQ = uic_init_tree, | ||
58 | .get_irq = uic_get_irq, | ||
59 | .restart = ppc4xx_reset_system, | ||
60 | .calibrate_decr = generic_calibrate_decr, | ||
61 | }; | ||
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c new file mode 100644 index 000000000000..4498a86b46c3 --- /dev/null +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c | |||
@@ -0,0 +1,80 @@ | |||
1 | /* | ||
2 | * Generic PowerPC 40x platform support | ||
3 | * | ||
4 | * Copyright 2008 IBM Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License. | ||
9 | * | ||
10 | * This implements simple platform support for PowerPC 40x chips. This is | ||
11 | * mostly used for eval boards or other simple and "generic" 40x boards. If | ||
12 | * your board has custom functions or hardware, then you will likely want to | ||
13 | * implement your own board.c file to accommodate it. | ||
14 | */ | ||
15 | |||
16 | #include <asm/machdep.h> | ||
17 | #include <asm/pci-bridge.h> | ||
18 | #include <asm/ppc4xx.h> | ||
19 | #include <asm/prom.h> | ||
20 | #include <asm/time.h> | ||
21 | #include <asm/udbg.h> | ||
22 | #include <asm/uic.h> | ||
23 | |||
24 | #include <linux/init.h> | ||
25 | #include <linux/of_platform.h> | ||
26 | |||
27 | static __initdata struct of_device_id ppc40x_of_bus[] = { | ||
28 | { .compatible = "ibm,plb3", }, | ||
29 | { .compatible = "ibm,plb4", }, | ||
30 | { .compatible = "ibm,opb", }, | ||
31 | { .compatible = "ibm,ebc", }, | ||
32 | { .compatible = "simple-bus", }, | ||
33 | {}, | ||
34 | }; | ||
35 | |||
36 | static int __init ppc40x_device_probe(void) | ||
37 | { | ||
38 | of_platform_bus_probe(NULL, ppc40x_of_bus, NULL); | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | machine_device_initcall(ppc40x_simple, ppc40x_device_probe); | ||
43 | |||
44 | /* This is the list of boards that can be supported by this simple | ||
45 | * platform code. This does _not_ mean the boards are compatible, | ||
46 | * as they most certainly are not from a device tree perspective. | ||
47 | * However, their differences are handled by the device tree and the | ||
48 | * drivers and therefore they don't need custom board support files. | ||
49 | * | ||
50 | * Again, if your board needs to do things differently then create a | ||
51 | * board.c file for it rather than adding it to this list. | ||
52 | */ | ||
53 | static char *board[] __initdata = { | ||
54 | "amcc,acadia" | ||
55 | }; | ||
56 | |||
57 | static int __init ppc40x_probe(void) | ||
58 | { | ||
59 | unsigned long root = of_get_flat_dt_root(); | ||
60 | int i = 0; | ||
61 | |||
62 | for (i = 0; i < ARRAY_SIZE(board); i++) { | ||
63 | if (of_flat_dt_is_compatible(root, board[i])) { | ||
64 | ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; | ||
65 | return 1; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | define_machine(ppc40x_simple) { | ||
73 | .name = "PowerPC 40x Platform", | ||
74 | .probe = ppc40x_probe, | ||
75 | .progress = udbg_progress, | ||
76 | .init_IRQ = uic_init_tree, | ||
77 | .get_irq = uic_get_irq, | ||
78 | .restart = ppc4xx_reset_system, | ||
79 | .calibrate_decr = generic_calibrate_decr, | ||
80 | }; | ||
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 79c1154f88d4..3496bc05058e 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig | |||
@@ -167,6 +167,14 @@ config PPC44x_SIMPLE | |||
167 | help | 167 | help |
168 | This option enables the simple PowerPC 44x platform support. | 168 | This option enables the simple PowerPC 44x platform support. |
169 | 169 | ||
170 | config PPC4xx_GPIO | ||
171 | bool "PPC4xx GPIO support" | ||
172 | depends on 44x | ||
173 | select ARCH_REQUIRE_GPIOLIB | ||
174 | select GENERIC_GPIO | ||
175 | help | ||
176 | Enable gpiolib support for ppc440 based boards | ||
177 | |||
170 | # 44x specific CPU modules, selected based on the board above. | 178 | # 44x specific CPU modules, selected based on the board above. |
171 | config 440EP | 179 | config 440EP |
172 | bool | 180 | bool |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index 044b4e6e8743..ae7c34f37e1c 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c | |||
@@ -99,11 +99,14 @@ mpc5200_setup_xlb_arbiter(void) | |||
99 | out_be32(&xlb->master_pri_enable, 0xff); | 99 | out_be32(&xlb->master_pri_enable, 0xff); |
100 | out_be32(&xlb->master_priority, 0x11111111); | 100 | out_be32(&xlb->master_priority, 0x11111111); |
101 | 101 | ||
102 | /* Disable XLB pipelining | 102 | /* |
103 | * Disable XLB pipelining | ||
103 | * (cfr errate 292. We could do this only just before ATA PIO | 104 | * (cfr errate 292. We could do this only just before ATA PIO |
104 | * transaction and re-enable it afterwards ...) | 105 | * transaction and re-enable it afterwards ...) |
106 | * Not needed on MPC5200B. | ||
105 | */ | 107 | */ |
106 | out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS); | 108 | if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) |
109 | out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS); | ||
107 | 110 | ||
108 | iounmap(xlb); | 111 | iounmap(xlb); |
109 | } | 112 | } |
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index 8a3b117b6ce2..81cee7bbf2d2 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c | |||
@@ -193,7 +193,6 @@ static void __init ksi8560_setup_arch(void) | |||
193 | static void ksi8560_show_cpuinfo(struct seq_file *m) | 193 | static void ksi8560_show_cpuinfo(struct seq_file *m) |
194 | { | 194 | { |
195 | uint pvid, svid, phid1; | 195 | uint pvid, svid, phid1; |
196 | uint memsize = total_memory; | ||
197 | 196 | ||
198 | pvid = mfspr(SPRN_PVR); | 197 | pvid = mfspr(SPRN_PVR); |
199 | svid = mfspr(SPRN_SVR); | 198 | svid = mfspr(SPRN_SVR); |
@@ -215,9 +214,6 @@ static void ksi8560_show_cpuinfo(struct seq_file *m) | |||
215 | /* Display cpu Pll setting */ | 214 | /* Display cpu Pll setting */ |
216 | phid1 = mfspr(SPRN_HID1); | 215 | phid1 = mfspr(SPRN_HID1); |
217 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); | 216 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); |
218 | |||
219 | /* Display the amount of memory */ | ||
220 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
221 | } | 217 | } |
222 | 218 | ||
223 | static struct of_device_id __initdata of_bus_ids[] = { | 219 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 0293e3d3580f..21f009023e26 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c | |||
@@ -207,7 +207,6 @@ static void __init mpc85xx_ads_setup_arch(void) | |||
207 | static void mpc85xx_ads_show_cpuinfo(struct seq_file *m) | 207 | static void mpc85xx_ads_show_cpuinfo(struct seq_file *m) |
208 | { | 208 | { |
209 | uint pvid, svid, phid1; | 209 | uint pvid, svid, phid1; |
210 | uint memsize = total_memory; | ||
211 | 210 | ||
212 | pvid = mfspr(SPRN_PVR); | 211 | pvid = mfspr(SPRN_PVR); |
213 | svid = mfspr(SPRN_SVR); | 212 | svid = mfspr(SPRN_SVR); |
@@ -219,9 +218,6 @@ static void mpc85xx_ads_show_cpuinfo(struct seq_file *m) | |||
219 | /* Display cpu Pll setting */ | 218 | /* Display cpu Pll setting */ |
220 | phid1 = mfspr(SPRN_HID1); | 219 | phid1 = mfspr(SPRN_HID1); |
221 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); | 220 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); |
222 | |||
223 | /* Display the amount of memory */ | ||
224 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
225 | } | 221 | } |
226 | 222 | ||
227 | static struct of_device_id __initdata of_bus_ids[] = { | 223 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 50d7ea8f922b..aeb6a5bc5522 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c | |||
@@ -307,7 +307,6 @@ static void __init mpc85xx_cds_setup_arch(void) | |||
307 | static void mpc85xx_cds_show_cpuinfo(struct seq_file *m) | 307 | static void mpc85xx_cds_show_cpuinfo(struct seq_file *m) |
308 | { | 308 | { |
309 | uint pvid, svid, phid1; | 309 | uint pvid, svid, phid1; |
310 | uint memsize = total_memory; | ||
311 | 310 | ||
312 | pvid = mfspr(SPRN_PVR); | 311 | pvid = mfspr(SPRN_PVR); |
313 | svid = mfspr(SPRN_SVR); | 312 | svid = mfspr(SPRN_SVR); |
@@ -320,9 +319,6 @@ static void mpc85xx_cds_show_cpuinfo(struct seq_file *m) | |||
320 | /* Display cpu Pll setting */ | 319 | /* Display cpu Pll setting */ |
321 | phid1 = mfspr(SPRN_HID1); | 320 | phid1 = mfspr(SPRN_HID1); |
322 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); | 321 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); |
323 | |||
324 | /* Display the amount of memory */ | ||
325 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
326 | } | 322 | } |
327 | 323 | ||
328 | 324 | ||
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c index b9246ea0928a..7ec77ce12dad 100644 --- a/arch/powerpc/platforms/85xx/sbc8548.c +++ b/arch/powerpc/platforms/85xx/sbc8548.c | |||
@@ -136,7 +136,6 @@ static void __init sbc8548_setup_arch(void) | |||
136 | static void sbc8548_show_cpuinfo(struct seq_file *m) | 136 | static void sbc8548_show_cpuinfo(struct seq_file *m) |
137 | { | 137 | { |
138 | uint pvid, svid, phid1; | 138 | uint pvid, svid, phid1; |
139 | uint memsize = total_memory; | ||
140 | 139 | ||
141 | pvid = mfspr(SPRN_PVR); | 140 | pvid = mfspr(SPRN_PVR); |
142 | svid = mfspr(SPRN_SVR); | 141 | svid = mfspr(SPRN_SVR); |
@@ -149,9 +148,6 @@ static void sbc8548_show_cpuinfo(struct seq_file *m) | |||
149 | /* Display cpu Pll setting */ | 148 | /* Display cpu Pll setting */ |
150 | phid1 = mfspr(SPRN_HID1); | 149 | phid1 = mfspr(SPRN_HID1); |
151 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); | 150 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); |
152 | |||
153 | /* Display the amount of memory */ | ||
154 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
155 | } | 151 | } |
156 | 152 | ||
157 | static struct of_device_id __initdata of_bus_ids[] = { | 153 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c index 0c9a856f66b6..472f254a19d2 100644 --- a/arch/powerpc/platforms/85xx/sbc8560.c +++ b/arch/powerpc/platforms/85xx/sbc8560.c | |||
@@ -194,7 +194,6 @@ static void __init sbc8560_setup_arch(void) | |||
194 | static void sbc8560_show_cpuinfo(struct seq_file *m) | 194 | static void sbc8560_show_cpuinfo(struct seq_file *m) |
195 | { | 195 | { |
196 | uint pvid, svid, phid1; | 196 | uint pvid, svid, phid1; |
197 | uint memsize = total_memory; | ||
198 | 197 | ||
199 | pvid = mfspr(SPRN_PVR); | 198 | pvid = mfspr(SPRN_PVR); |
200 | svid = mfspr(SPRN_SVR); | 199 | svid = mfspr(SPRN_SVR); |
@@ -206,9 +205,6 @@ static void sbc8560_show_cpuinfo(struct seq_file *m) | |||
206 | /* Display cpu Pll setting */ | 205 | /* Display cpu Pll setting */ |
207 | phid1 = mfspr(SPRN_HID1); | 206 | phid1 = mfspr(SPRN_HID1); |
208 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); | 207 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); |
209 | |||
210 | /* Display the amount of memory */ | ||
211 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
212 | } | 208 | } |
213 | 209 | ||
214 | static struct of_device_id __initdata of_bus_ids[] = { | 210 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c index 18499d7c9d9e..0cca8f5cb272 100644 --- a/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/arch/powerpc/platforms/85xx/stx_gp3.c | |||
@@ -130,7 +130,6 @@ static void __init stx_gp3_setup_arch(void) | |||
130 | static void stx_gp3_show_cpuinfo(struct seq_file *m) | 130 | static void stx_gp3_show_cpuinfo(struct seq_file *m) |
131 | { | 131 | { |
132 | uint pvid, svid, phid1; | 132 | uint pvid, svid, phid1; |
133 | uint memsize = total_memory; | ||
134 | 133 | ||
135 | pvid = mfspr(SPRN_PVR); | 134 | pvid = mfspr(SPRN_PVR); |
136 | svid = mfspr(SPRN_SVR); | 135 | svid = mfspr(SPRN_SVR); |
@@ -142,9 +141,6 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m) | |||
142 | /* Display cpu Pll setting */ | 141 | /* Display cpu Pll setting */ |
143 | phid1 = mfspr(SPRN_HID1); | 142 | phid1 = mfspr(SPRN_HID1); |
144 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); | 143 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); |
145 | |||
146 | /* Display the amount of memory */ | ||
147 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
148 | } | 144 | } |
149 | 145 | ||
150 | static struct of_device_id __initdata of_bus_ids[] = { | 146 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index d850880d6964..2933a8e827d9 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c | |||
@@ -138,7 +138,6 @@ static void __init tqm85xx_setup_arch(void) | |||
138 | static void tqm85xx_show_cpuinfo(struct seq_file *m) | 138 | static void tqm85xx_show_cpuinfo(struct seq_file *m) |
139 | { | 139 | { |
140 | uint pvid, svid, phid1; | 140 | uint pvid, svid, phid1; |
141 | uint memsize = total_memory; | ||
142 | 141 | ||
143 | pvid = mfspr(SPRN_PVR); | 142 | pvid = mfspr(SPRN_PVR); |
144 | svid = mfspr(SPRN_SVR); | 143 | svid = mfspr(SPRN_SVR); |
@@ -150,9 +149,6 @@ static void tqm85xx_show_cpuinfo(struct seq_file *m) | |||
150 | /* Display cpu Pll setting */ | 149 | /* Display cpu Pll setting */ |
151 | phid1 = mfspr(SPRN_HID1); | 150 | phid1 = mfspr(SPRN_HID1); |
152 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); | 151 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); |
153 | |||
154 | /* Display the amount of memory */ | ||
155 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
156 | } | 152 | } |
157 | 153 | ||
158 | static struct of_device_id __initdata of_bus_ids[] = { | 154 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c index 821c45fac18b..fb371f5ce132 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc610.c +++ b/arch/powerpc/platforms/86xx/gef_sbc610.c | |||
@@ -127,7 +127,6 @@ static unsigned int gef_sbc610_get_fpga_rev(void) | |||
127 | 127 | ||
128 | static void gef_sbc610_show_cpuinfo(struct seq_file *m) | 128 | static void gef_sbc610_show_cpuinfo(struct seq_file *m) |
129 | { | 129 | { |
130 | uint memsize = total_memory; | ||
131 | uint svid = mfspr(SPRN_SVR); | 130 | uint svid = mfspr(SPRN_SVR); |
132 | 131 | ||
133 | seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); | 132 | seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); |
@@ -137,7 +136,6 @@ static void gef_sbc610_show_cpuinfo(struct seq_file *m) | |||
137 | seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc610_get_fpga_rev()); | 136 | seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc610_get_fpga_rev()); |
138 | 137 | ||
139 | seq_printf(m, "SVR\t\t: 0x%x\n", svid); | 138 | seq_printf(m, "SVR\t\t: 0x%x\n", svid); |
140 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
141 | } | 139 | } |
142 | 140 | ||
143 | static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev) | 141 | static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev) |
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index 2672829a71dc..27e0e682d8e1 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | |||
@@ -101,13 +101,11 @@ mpc86xx_hpcn_setup_arch(void) | |||
101 | static void | 101 | static void |
102 | mpc86xx_hpcn_show_cpuinfo(struct seq_file *m) | 102 | mpc86xx_hpcn_show_cpuinfo(struct seq_file *m) |
103 | { | 103 | { |
104 | uint memsize = total_memory; | ||
105 | uint svid = mfspr(SPRN_SVR); | 104 | uint svid = mfspr(SPRN_SVR); |
106 | 105 | ||
107 | seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); | 106 | seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); |
108 | 107 | ||
109 | seq_printf(m, "SVR\t\t: 0x%x\n", svid); | 108 | seq_printf(m, "SVR\t\t: 0x%x\n", svid); |
110 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
111 | } | 109 | } |
112 | 110 | ||
113 | 111 | ||
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c index da677a74e2d1..5fd7ed40986f 100644 --- a/arch/powerpc/platforms/86xx/sbc8641d.c +++ b/arch/powerpc/platforms/86xx/sbc8641d.c | |||
@@ -63,13 +63,11 @@ sbc8641_setup_arch(void) | |||
63 | static void | 63 | static void |
64 | sbc8641_show_cpuinfo(struct seq_file *m) | 64 | sbc8641_show_cpuinfo(struct seq_file *m) |
65 | { | 65 | { |
66 | uint memsize = total_memory; | ||
67 | uint svid = mfspr(SPRN_SVR); | 66 | uint svid = mfspr(SPRN_SVR); |
68 | 67 | ||
69 | seq_printf(m, "Vendor\t\t: Wind River Systems\n"); | 68 | seq_printf(m, "Vendor\t\t: Wind River Systems\n"); |
70 | 69 | ||
71 | seq_printf(m, "SVR\t\t: 0x%x\n", svid); | 70 | seq_printf(m, "SVR\t\t: 0x%x\n", svid); |
72 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
73 | } | 71 | } |
74 | 72 | ||
75 | 73 | ||
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 2a14b052abcd..665af1c4195b 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
22 | #include <asm/rtas.h> | 22 | #include <asm/rtas.h> |
23 | #include <asm/cell-regs.h> | 23 | #include <asm/cell-regs.h> |
24 | #include <asm/kdump.h> | ||
24 | 25 | ||
25 | #include "ras.h" | 26 | #include "ras.h" |
26 | 27 | ||
@@ -111,9 +112,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) | |||
111 | int ret = -ENOMEM; | 112 | int ret = -ENOMEM; |
112 | unsigned long addr; | 113 | unsigned long addr; |
113 | 114 | ||
114 | #ifdef CONFIG_CRASH_DUMP | 115 | if (__kdump_flag) |
115 | rtas_call(ptcal_stop_tok, 1, 1, NULL, nid); | 116 | rtas_call(ptcal_stop_tok, 1, 1, NULL, nid); |
116 | #endif | ||
117 | 117 | ||
118 | area = kmalloc(sizeof(*area), GFP_KERNEL); | 118 | area = kmalloc(sizeof(*area), GFP_KERNEL); |
119 | if (!area) | 119 | if (!area) |
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index efb3964457b1..c0d86e1f56ea 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c | |||
@@ -54,8 +54,8 @@ | |||
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * The primary thread of each non-boot processor is recorded here before | 57 | * The Primary thread of each non-boot processor was started from the OF client |
58 | * smp init. | 58 | * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. |
59 | */ | 59 | */ |
60 | static cpumask_t of_spin_map; | 60 | static cpumask_t of_spin_map; |
61 | 61 | ||
@@ -208,11 +208,7 @@ void __init smp_init_cell(void) | |||
208 | /* Mark threads which are still spinning in hold loops. */ | 208 | /* Mark threads which are still spinning in hold loops. */ |
209 | if (cpu_has_feature(CPU_FTR_SMT)) { | 209 | if (cpu_has_feature(CPU_FTR_SMT)) { |
210 | for_each_present_cpu(i) { | 210 | for_each_present_cpu(i) { |
211 | if (i % 2 == 0) | 211 | if (cpu_thread_in_core(i) == 0) |
212 | /* | ||
213 | * Even-numbered logical cpus correspond to | ||
214 | * primary threads. | ||
215 | */ | ||
216 | cpu_set(i, of_spin_map); | 212 | cpu_set(i, of_spin_map); |
217 | } | 213 | } |
218 | } else { | 214 | } else { |
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 010a51f59796..b73c369cc6f1 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c | |||
@@ -548,6 +548,11 @@ spufs_regs_read(struct file *file, char __user *buffer, | |||
548 | int ret; | 548 | int ret; |
549 | struct spu_context *ctx = file->private_data; | 549 | struct spu_context *ctx = file->private_data; |
550 | 550 | ||
551 | /* pre-check for file position: if we'd return EOF, there's no point | ||
552 | * causing a deschedule */ | ||
553 | if (*pos >= sizeof(ctx->csa.lscsa->gprs)) | ||
554 | return 0; | ||
555 | |||
551 | ret = spu_acquire_saved(ctx); | 556 | ret = spu_acquire_saved(ctx); |
552 | if (ret) | 557 | if (ret) |
553 | return ret; | 558 | return ret; |
@@ -2426,38 +2431,49 @@ static inline int spufs_switch_log_avail(struct spu_context *ctx) | |||
2426 | static int spufs_switch_log_open(struct inode *inode, struct file *file) | 2431 | static int spufs_switch_log_open(struct inode *inode, struct file *file) |
2427 | { | 2432 | { |
2428 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; | 2433 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; |
2434 | int rc; | ||
2435 | |||
2436 | rc = spu_acquire(ctx); | ||
2437 | if (rc) | ||
2438 | return rc; | ||
2429 | 2439 | ||
2430 | /* | ||
2431 | * We (ab-)use the mapping_lock here because it serves the similar | ||
2432 | * purpose for synchronizing open/close elsewhere. Maybe it should | ||
2433 | * be renamed eventually. | ||
2434 | */ | ||
2435 | mutex_lock(&ctx->mapping_lock); | ||
2436 | if (ctx->switch_log) { | 2440 | if (ctx->switch_log) { |
2437 | spin_lock(&ctx->switch_log->lock); | 2441 | rc = -EBUSY; |
2438 | ctx->switch_log->head = 0; | 2442 | goto out; |
2439 | ctx->switch_log->tail = 0; | ||
2440 | spin_unlock(&ctx->switch_log->lock); | ||
2441 | } else { | ||
2442 | /* | ||
2443 | * We allocate the switch log data structures on first open. | ||
2444 | * They will never be free because we assume a context will | ||
2445 | * be traced until it goes away. | ||
2446 | */ | ||
2447 | ctx->switch_log = kzalloc(sizeof(struct switch_log) + | ||
2448 | SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry), | ||
2449 | GFP_KERNEL); | ||
2450 | if (!ctx->switch_log) | ||
2451 | goto out; | ||
2452 | spin_lock_init(&ctx->switch_log->lock); | ||
2453 | init_waitqueue_head(&ctx->switch_log->wait); | ||
2454 | } | 2443 | } |
2455 | mutex_unlock(&ctx->mapping_lock); | 2444 | |
2445 | ctx->switch_log = kmalloc(sizeof(struct switch_log) + | ||
2446 | SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry), | ||
2447 | GFP_KERNEL); | ||
2448 | |||
2449 | if (!ctx->switch_log) { | ||
2450 | rc = -ENOMEM; | ||
2451 | goto out; | ||
2452 | } | ||
2453 | |||
2454 | ctx->switch_log->head = ctx->switch_log->tail = 0; | ||
2455 | init_waitqueue_head(&ctx->switch_log->wait); | ||
2456 | rc = 0; | ||
2457 | |||
2458 | out: | ||
2459 | spu_release(ctx); | ||
2460 | return rc; | ||
2461 | } | ||
2462 | |||
2463 | static int spufs_switch_log_release(struct inode *inode, struct file *file) | ||
2464 | { | ||
2465 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; | ||
2466 | int rc; | ||
2467 | |||
2468 | rc = spu_acquire(ctx); | ||
2469 | if (rc) | ||
2470 | return rc; | ||
2471 | |||
2472 | kfree(ctx->switch_log); | ||
2473 | ctx->switch_log = NULL; | ||
2474 | spu_release(ctx); | ||
2456 | 2475 | ||
2457 | return 0; | 2476 | return 0; |
2458 | out: | ||
2459 | mutex_unlock(&ctx->mapping_lock); | ||
2460 | return -ENOMEM; | ||
2461 | } | 2477 | } |
2462 | 2478 | ||
2463 | static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) | 2479 | static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) |
@@ -2485,42 +2501,54 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, | |||
2485 | if (!buf || len < 0) | 2501 | if (!buf || len < 0) |
2486 | return -EINVAL; | 2502 | return -EINVAL; |
2487 | 2503 | ||
2504 | error = spu_acquire(ctx); | ||
2505 | if (error) | ||
2506 | return error; | ||
2507 | |||
2488 | while (cnt < len) { | 2508 | while (cnt < len) { |
2489 | char tbuf[128]; | 2509 | char tbuf[128]; |
2490 | int width; | 2510 | int width; |
2491 | 2511 | ||
2492 | if (file->f_flags & O_NONBLOCK) { | 2512 | if (spufs_switch_log_used(ctx) == 0) { |
2493 | if (spufs_switch_log_used(ctx) <= 0) | 2513 | if (cnt > 0) { |
2494 | return cnt ? cnt : -EAGAIN; | 2514 | /* If there's data ready to go, we can |
2495 | } else { | 2515 | * just return straight away */ |
2496 | /* Wait for data in buffer */ | 2516 | break; |
2497 | error = wait_event_interruptible(ctx->switch_log->wait, | 2517 | |
2498 | spufs_switch_log_used(ctx) > 0); | 2518 | } else if (file->f_flags & O_NONBLOCK) { |
2499 | if (error) | 2519 | error = -EAGAIN; |
2500 | break; | 2520 | break; |
2501 | } | ||
2502 | 2521 | ||
2503 | spin_lock(&ctx->switch_log->lock); | 2522 | } else { |
2504 | if (ctx->switch_log->head == ctx->switch_log->tail) { | 2523 | /* spufs_wait will drop the mutex and |
2505 | /* multiple readers race? */ | 2524 | * re-acquire, but since we're in read(), the |
2506 | spin_unlock(&ctx->switch_log->lock); | 2525 | * file cannot be _released (and so |
2507 | continue; | 2526 | * ctx->switch_log is stable). |
2527 | */ | ||
2528 | error = spufs_wait(ctx->switch_log->wait, | ||
2529 | spufs_switch_log_used(ctx) > 0); | ||
2530 | |||
2531 | /* On error, spufs_wait returns without the | ||
2532 | * state mutex held */ | ||
2533 | if (error) | ||
2534 | return error; | ||
2535 | |||
2536 | /* We may have had entries read from underneath | ||
2537 | * us while we dropped the mutex in spufs_wait, | ||
2538 | * so re-check */ | ||
2539 | if (spufs_switch_log_used(ctx) == 0) | ||
2540 | continue; | ||
2541 | } | ||
2508 | } | 2542 | } |
2509 | 2543 | ||
2510 | width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); | 2544 | width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); |
2511 | if (width < len) { | 2545 | if (width < len) |
2512 | ctx->switch_log->tail = | 2546 | ctx->switch_log->tail = |
2513 | (ctx->switch_log->tail + 1) % | 2547 | (ctx->switch_log->tail + 1) % |
2514 | SWITCH_LOG_BUFSIZE; | 2548 | SWITCH_LOG_BUFSIZE; |
2515 | } | 2549 | else |
2516 | 2550 | /* If the record is greater than space available return | |
2517 | spin_unlock(&ctx->switch_log->lock); | 2551 | * partial buffer (so far) */ |
2518 | |||
2519 | /* | ||
2520 | * If the record is greater than space available return | ||
2521 | * partial buffer (so far) | ||
2522 | */ | ||
2523 | if (width >= len) | ||
2524 | break; | 2552 | break; |
2525 | 2553 | ||
2526 | error = copy_to_user(buf + cnt, tbuf, width); | 2554 | error = copy_to_user(buf + cnt, tbuf, width); |
@@ -2529,6 +2557,8 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, | |||
2529 | cnt += width; | 2557 | cnt += width; |
2530 | } | 2558 | } |
2531 | 2559 | ||
2560 | spu_release(ctx); | ||
2561 | |||
2532 | return cnt == 0 ? error : cnt; | 2562 | return cnt == 0 ? error : cnt; |
2533 | } | 2563 | } |
2534 | 2564 | ||
@@ -2537,29 +2567,41 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait) | |||
2537 | struct inode *inode = file->f_path.dentry->d_inode; | 2567 | struct inode *inode = file->f_path.dentry->d_inode; |
2538 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; | 2568 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; |
2539 | unsigned int mask = 0; | 2569 | unsigned int mask = 0; |
2570 | int rc; | ||
2540 | 2571 | ||
2541 | poll_wait(file, &ctx->switch_log->wait, wait); | 2572 | poll_wait(file, &ctx->switch_log->wait, wait); |
2542 | 2573 | ||
2574 | rc = spu_acquire(ctx); | ||
2575 | if (rc) | ||
2576 | return rc; | ||
2577 | |||
2543 | if (spufs_switch_log_used(ctx) > 0) | 2578 | if (spufs_switch_log_used(ctx) > 0) |
2544 | mask |= POLLIN; | 2579 | mask |= POLLIN; |
2545 | 2580 | ||
2581 | spu_release(ctx); | ||
2582 | |||
2546 | return mask; | 2583 | return mask; |
2547 | } | 2584 | } |
2548 | 2585 | ||
2549 | static const struct file_operations spufs_switch_log_fops = { | 2586 | static const struct file_operations spufs_switch_log_fops = { |
2550 | .owner = THIS_MODULE, | 2587 | .owner = THIS_MODULE, |
2551 | .open = spufs_switch_log_open, | 2588 | .open = spufs_switch_log_open, |
2552 | .read = spufs_switch_log_read, | 2589 | .read = spufs_switch_log_read, |
2553 | .poll = spufs_switch_log_poll, | 2590 | .poll = spufs_switch_log_poll, |
2591 | .release = spufs_switch_log_release, | ||
2554 | }; | 2592 | }; |
2555 | 2593 | ||
2594 | /** | ||
2595 | * Log a context switch event to a switch log reader. | ||
2596 | * | ||
2597 | * Must be called with ctx->state_mutex held. | ||
2598 | */ | ||
2556 | void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, | 2599 | void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, |
2557 | u32 type, u32 val) | 2600 | u32 type, u32 val) |
2558 | { | 2601 | { |
2559 | if (!ctx->switch_log) | 2602 | if (!ctx->switch_log) |
2560 | return; | 2603 | return; |
2561 | 2604 | ||
2562 | spin_lock(&ctx->switch_log->lock); | ||
2563 | if (spufs_switch_log_avail(ctx) > 1) { | 2605 | if (spufs_switch_log_avail(ctx) > 1) { |
2564 | struct switch_log_entry *p; | 2606 | struct switch_log_entry *p; |
2565 | 2607 | ||
@@ -2573,7 +2615,6 @@ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, | |||
2573 | ctx->switch_log->head = | 2615 | ctx->switch_log->head = |
2574 | (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; | 2616 | (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; |
2575 | } | 2617 | } |
2576 | spin_unlock(&ctx->switch_log->lock); | ||
2577 | 2618 | ||
2578 | wake_up(&ctx->switch_log->wait); | 2619 | wake_up(&ctx->switch_log->wait); |
2579 | } | 2620 | } |
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index c9bb7cfd3dca..c58bd36b0c5b 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c | |||
@@ -249,6 +249,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc, | |||
249 | 249 | ||
250 | spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); | 250 | spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); |
251 | clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); | 251 | clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); |
252 | spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status); | ||
252 | spu_release(ctx); | 253 | spu_release(ctx); |
253 | 254 | ||
254 | if (signal_pending(current)) | 255 | if (signal_pending(current)) |
@@ -417,8 +418,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) | |||
417 | ret = spu_run_fini(ctx, npc, &status); | 418 | ret = spu_run_fini(ctx, npc, &status); |
418 | spu_yield(ctx); | 419 | spu_yield(ctx); |
419 | 420 | ||
420 | spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status); | ||
421 | |||
422 | if ((status & SPU_STATUS_STOPPED_BY_STOP) && | 421 | if ((status & SPU_STATUS_STOPPED_BY_STOP) && |
423 | (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100)) | 422 | (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100)) |
424 | ctx->stats.libassist++; | 423 | ctx->stats.libassist++; |
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 67595bc380dc..2ad914c47493 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -312,6 +312,15 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, | |||
312 | */ | 312 | */ |
313 | node = cpu_to_node(raw_smp_processor_id()); | 313 | node = cpu_to_node(raw_smp_processor_id()); |
314 | for (n = 0; n < MAX_NUMNODES; n++, node++) { | 314 | for (n = 0; n < MAX_NUMNODES; n++, node++) { |
315 | /* | ||
316 | * "available_spus" counts how many spus are not potentially | ||
317 | * going to be used by other affinity gangs whose reference | ||
318 | * context is already in place. Although this code seeks to | ||
319 | * avoid having affinity gangs with a summed amount of | ||
320 | * contexts bigger than the amount of spus in the node, | ||
321 | * this may happen sporadically. In this case, available_spus | ||
322 | * becomes negative, which is harmless. | ||
323 | */ | ||
315 | int available_spus; | 324 | int available_spus; |
316 | 325 | ||
317 | node = (node < MAX_NUMNODES) ? node : 0; | 326 | node = (node < MAX_NUMNODES) ? node : 0; |
@@ -321,12 +330,10 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, | |||
321 | available_spus = 0; | 330 | available_spus = 0; |
322 | mutex_lock(&cbe_spu_info[node].list_mutex); | 331 | mutex_lock(&cbe_spu_info[node].list_mutex); |
323 | list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { | 332 | list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { |
324 | if (spu->ctx && spu->ctx->gang | 333 | if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset |
325 | && spu->ctx->aff_offset == 0) | 334 | && spu->ctx->gang->aff_ref_spu) |
326 | available_spus -= | 335 | available_spus -= spu->ctx->gang->contexts; |
327 | (spu->ctx->gang->contexts - 1); | 336 | available_spus++; |
328 | else | ||
329 | available_spus++; | ||
330 | } | 337 | } |
331 | if (available_spus < ctx->gang->contexts) { | 338 | if (available_spus < ctx->gang->contexts) { |
332 | mutex_unlock(&cbe_spu_info[node].list_mutex); | 339 | mutex_unlock(&cbe_spu_info[node].list_mutex); |
@@ -437,6 +444,11 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) | |||
437 | atomic_dec(&cbe_spu_info[spu->node].reserved_spus); | 444 | atomic_dec(&cbe_spu_info[spu->node].reserved_spus); |
438 | 445 | ||
439 | if (ctx->gang) | 446 | if (ctx->gang) |
447 | /* | ||
448 | * If ctx->gang->aff_sched_count is positive, SPU affinity is | ||
449 | * being considered in this gang. Using atomic_dec_if_positive | ||
450 | * allow us to skip an explicit check for affinity in this gang | ||
451 | */ | ||
440 | atomic_dec_if_positive(&ctx->gang->aff_sched_count); | 452 | atomic_dec_if_positive(&ctx->gang->aff_sched_count); |
441 | 453 | ||
442 | spu_switch_notify(spu, NULL); | 454 | spu_switch_notify(spu, NULL); |
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 8ae8ef9dfc22..15c62d3ca129 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h | |||
@@ -65,7 +65,6 @@ enum { | |||
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct switch_log { | 67 | struct switch_log { |
68 | spinlock_t lock; | ||
69 | wait_queue_head_t wait; | 68 | wait_queue_head_t wait; |
70 | unsigned long head; | 69 | unsigned long head; |
71 | unsigned long tail; | 70 | unsigned long tail; |
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c index 2ece399f2862..d0b1f3f4d9c8 100644 --- a/arch/powerpc/platforms/cell/spufs/sputrace.c +++ b/arch/powerpc/platforms/cell/spufs/sputrace.c | |||
@@ -40,6 +40,7 @@ static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait); | |||
40 | static ktime_t sputrace_start; | 40 | static ktime_t sputrace_start; |
41 | static unsigned long sputrace_head, sputrace_tail; | 41 | static unsigned long sputrace_head, sputrace_tail; |
42 | static struct sputrace *sputrace_log; | 42 | static struct sputrace *sputrace_log; |
43 | static int sputrace_logging; | ||
43 | 44 | ||
44 | static int sputrace_used(void) | 45 | static int sputrace_used(void) |
45 | { | 46 | { |
@@ -79,6 +80,11 @@ static ssize_t sputrace_read(struct file *file, char __user *buf, | |||
79 | char tbuf[128]; | 80 | char tbuf[128]; |
80 | int width; | 81 | int width; |
81 | 82 | ||
83 | /* If we have data ready to return, don't block waiting | ||
84 | * for more */ | ||
85 | if (cnt > 0 && sputrace_used() == 0) | ||
86 | break; | ||
87 | |||
82 | error = wait_event_interruptible(sputrace_wait, | 88 | error = wait_event_interruptible(sputrace_wait, |
83 | sputrace_used() > 0); | 89 | sputrace_used() > 0); |
84 | if (error) | 90 | if (error) |
@@ -109,24 +115,49 @@ static ssize_t sputrace_read(struct file *file, char __user *buf, | |||
109 | 115 | ||
110 | static int sputrace_open(struct inode *inode, struct file *file) | 116 | static int sputrace_open(struct inode *inode, struct file *file) |
111 | { | 117 | { |
118 | int rc; | ||
119 | |||
112 | spin_lock(&sputrace_lock); | 120 | spin_lock(&sputrace_lock); |
121 | if (sputrace_logging) { | ||
122 | rc = -EBUSY; | ||
123 | goto out; | ||
124 | } | ||
125 | |||
126 | sputrace_logging = 1; | ||
113 | sputrace_head = sputrace_tail = 0; | 127 | sputrace_head = sputrace_tail = 0; |
114 | sputrace_start = ktime_get(); | 128 | sputrace_start = ktime_get(); |
129 | rc = 0; | ||
130 | |||
131 | out: | ||
115 | spin_unlock(&sputrace_lock); | 132 | spin_unlock(&sputrace_lock); |
133 | return rc; | ||
134 | } | ||
116 | 135 | ||
136 | static int sputrace_release(struct inode *inode, struct file *file) | ||
137 | { | ||
138 | spin_lock(&sputrace_lock); | ||
139 | sputrace_logging = 0; | ||
140 | spin_unlock(&sputrace_lock); | ||
117 | return 0; | 141 | return 0; |
118 | } | 142 | } |
119 | 143 | ||
120 | static const struct file_operations sputrace_fops = { | 144 | static const struct file_operations sputrace_fops = { |
121 | .owner = THIS_MODULE, | 145 | .owner = THIS_MODULE, |
122 | .open = sputrace_open, | 146 | .open = sputrace_open, |
123 | .read = sputrace_read, | 147 | .read = sputrace_read, |
148 | .release = sputrace_release, | ||
124 | }; | 149 | }; |
125 | 150 | ||
126 | static void sputrace_log_item(const char *name, struct spu_context *ctx, | 151 | static void sputrace_log_item(const char *name, struct spu_context *ctx, |
127 | struct spu *spu) | 152 | struct spu *spu) |
128 | { | 153 | { |
129 | spin_lock(&sputrace_lock); | 154 | spin_lock(&sputrace_lock); |
155 | |||
156 | if (!sputrace_logging) { | ||
157 | spin_unlock(&sputrace_lock); | ||
158 | return; | ||
159 | } | ||
160 | |||
130 | if (sputrace_avail() > 1) { | 161 | if (sputrace_avail() > 1) { |
131 | struct sputrace *t = sputrace_log + sputrace_head; | 162 | struct sputrace *t = sputrace_log + sputrace_head; |
132 | 163 | ||
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c index d0b25b8c39d1..32ba0fa0ad03 100644 --- a/arch/powerpc/platforms/embedded6xx/c2k.c +++ b/arch/powerpc/platforms/embedded6xx/c2k.c | |||
@@ -116,10 +116,7 @@ static void c2k_restart(char *cmd) | |||
116 | 116 | ||
117 | void c2k_show_cpuinfo(struct seq_file *m) | 117 | void c2k_show_cpuinfo(struct seq_file *m) |
118 | { | 118 | { |
119 | uint memsize = total_memory; | ||
120 | |||
121 | seq_printf(m, "Vendor\t\t: GEFanuc\n"); | 119 | seq_printf(m, "Vendor\t\t: GEFanuc\n"); |
122 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
123 | seq_printf(m, "coherency\t: %s\n", COHERENCY_SETTING); | 120 | seq_printf(m, "coherency\t: %s\n", COHERENCY_SETTING); |
124 | } | 121 | } |
125 | 122 | ||
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c index 5a19b9a1457c..4c485e984236 100644 --- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c +++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c | |||
@@ -119,10 +119,7 @@ static void prpmc2800_restart(char *cmd) | |||
119 | 119 | ||
120 | void prpmc2800_show_cpuinfo(struct seq_file *m) | 120 | void prpmc2800_show_cpuinfo(struct seq_file *m) |
121 | { | 121 | { |
122 | uint memsize = total_memory; | ||
123 | |||
124 | seq_printf(m, "Vendor\t\t: Motorola\n"); | 122 | seq_printf(m, "Vendor\t\t: Motorola\n"); |
125 | seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); | ||
126 | seq_printf(m, "coherency\t: %s\n", PPRPM2800_COHERENCY_SETTING); | 123 | seq_printf(m, "coherency\t: %s\n", PPRPM2800_COHERENCY_SETTING); |
127 | } | 124 | } |
128 | 125 | ||
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 140d02a5232a..a623ad256e9e 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
@@ -22,6 +22,12 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size) | |||
22 | int ret; | 22 | int ret; |
23 | 23 | ||
24 | start_pfn = base >> PAGE_SHIFT; | 24 | start_pfn = base >> PAGE_SHIFT; |
25 | |||
26 | if (!pfn_valid(start_pfn)) { | ||
27 | lmb_remove(base, lmb_size); | ||
28 | return 0; | ||
29 | } | ||
30 | |||
25 | zone = page_zone(pfn_to_page(start_pfn)); | 31 | zone = page_zone(pfn_to_page(start_pfn)); |
26 | 32 | ||
27 | /* | 33 | /* |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index a8c446697f9e..d56491d182d3 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/tce.h> | 44 | #include <asm/tce.h> |
45 | #include <asm/ppc-pci.h> | 45 | #include <asm/ppc-pci.h> |
46 | #include <asm/udbg.h> | 46 | #include <asm/udbg.h> |
47 | #include <asm/kdump.h> | ||
47 | 48 | ||
48 | #include "plpar_wrappers.h" | 49 | #include "plpar_wrappers.h" |
49 | 50 | ||
@@ -291,9 +292,8 @@ static void iommu_table_setparms(struct pci_controller *phb, | |||
291 | 292 | ||
292 | tbl->it_base = (unsigned long)__va(*basep); | 293 | tbl->it_base = (unsigned long)__va(*basep); |
293 | 294 | ||
294 | #ifndef CONFIG_CRASH_DUMP | 295 | if (!__kdump_flag) |
295 | memset((void *)tbl->it_base, 0, *sizep); | 296 | memset((void *)tbl->it_base, 0, *sizep); |
296 | #endif | ||
297 | 297 | ||
298 | tbl->it_busno = phb->bus->number; | 298 | tbl->it_busno = phb->bus->number; |
299 | 299 | ||
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index e00f96baa381..1a231c389ba0 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
@@ -52,8 +52,8 @@ | |||
52 | 52 | ||
53 | 53 | ||
54 | /* | 54 | /* |
55 | * The primary thread of each non-boot processor is recorded here before | 55 | * The Primary thread of each non-boot processor was started from the OF client |
56 | * smp init. | 56 | * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. |
57 | */ | 57 | */ |
58 | static cpumask_t of_spin_map; | 58 | static cpumask_t of_spin_map; |
59 | 59 | ||
@@ -161,8 +161,7 @@ static void __devinit smp_pSeries_kick_cpu(int nr) | |||
161 | static int smp_pSeries_cpu_bootable(unsigned int nr) | 161 | static int smp_pSeries_cpu_bootable(unsigned int nr) |
162 | { | 162 | { |
163 | /* Special case - we inhibit secondary thread startup | 163 | /* Special case - we inhibit secondary thread startup |
164 | * during boot if the user requests it. Odd-numbered | 164 | * during boot if the user requests it. |
165 | * cpus are assumed to be secondary threads. | ||
166 | */ | 165 | */ |
167 | if (system_state < SYSTEM_RUNNING && | 166 | if (system_state < SYSTEM_RUNNING && |
168 | cpu_has_feature(CPU_FTR_SMT) && | 167 | cpu_has_feature(CPU_FTR_SMT) && |
@@ -199,11 +198,7 @@ static void __init smp_init_pseries(void) | |||
199 | /* Mark threads which are still spinning in hold loops. */ | 198 | /* Mark threads which are still spinning in hold loops. */ |
200 | if (cpu_has_feature(CPU_FTR_SMT)) { | 199 | if (cpu_has_feature(CPU_FTR_SMT)) { |
201 | for_each_present_cpu(i) { | 200 | for_each_present_cpu(i) { |
202 | if (i % 2 == 0) | 201 | if (cpu_thread_in_core(i) == 0) |
203 | /* | ||
204 | * Even-numbered logical cpus correspond to | ||
205 | * primary threads. | ||
206 | */ | ||
207 | cpu_set(i, of_spin_map); | 202 | cpu_set(i, of_spin_map); |
208 | } | 203 | } |
209 | } else { | 204 | } else { |
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index a44709a94f97..5afce115ab1f 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile | |||
@@ -37,6 +37,7 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o | |||
37 | ifeq ($(CONFIG_PCI),y) | 37 | ifeq ($(CONFIG_PCI),y) |
38 | obj-$(CONFIG_4xx) += ppc4xx_pci.o | 38 | obj-$(CONFIG_4xx) += ppc4xx_pci.o |
39 | endif | 39 | endif |
40 | obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o | ||
40 | 41 | ||
41 | obj-$(CONFIG_CPM) += cpm_common.o | 42 | obj-$(CONFIG_CPM) += cpm_common.o |
42 | obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o | 43 | obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o |
diff --git a/arch/powerpc/sysdev/ppc4xx_gpio.c b/arch/powerpc/sysdev/ppc4xx_gpio.c new file mode 100644 index 000000000000..110efe2a54fc --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_gpio.c | |||
@@ -0,0 +1,217 @@ | |||
1 | /* | ||
2 | * PPC4xx gpio driver | ||
3 | * | ||
4 | * Copyright (c) 2008 Harris Corporation | ||
5 | * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix | ||
6 | * Copyright (c) MontaVista Software, Inc. 2008. | ||
7 | * | ||
8 | * Author: Steve Falco <sfalco@harris.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 | ||
12 | * as published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/of.h> | ||
29 | #include <linux/of_gpio.h> | ||
30 | #include <linux/gpio.h> | ||
31 | #include <linux/types.h> | ||
32 | |||
33 | #define GPIO_MASK(gpio) (0x80000000 >> (gpio)) | ||
34 | #define GPIO_MASK2(gpio) (0xc0000000 >> ((gpio) * 2)) | ||
35 | |||
36 | /* Physical GPIO register layout */ | ||
37 | struct ppc4xx_gpio { | ||
38 | __be32 or; | ||
39 | __be32 tcr; | ||
40 | __be32 osrl; | ||
41 | __be32 osrh; | ||
42 | __be32 tsrl; | ||
43 | __be32 tsrh; | ||
44 | __be32 odr; | ||
45 | __be32 ir; | ||
46 | __be32 rr1; | ||
47 | __be32 rr2; | ||
48 | __be32 rr3; | ||
49 | __be32 reserved1; | ||
50 | __be32 isr1l; | ||
51 | __be32 isr1h; | ||
52 | __be32 isr2l; | ||
53 | __be32 isr2h; | ||
54 | __be32 isr3l; | ||
55 | __be32 isr3h; | ||
56 | }; | ||
57 | |||
58 | struct ppc4xx_gpio_chip { | ||
59 | struct of_mm_gpio_chip mm_gc; | ||
60 | spinlock_t lock; | ||
61 | }; | ||
62 | |||
63 | /* | ||
64 | * GPIO LIB API implementation for GPIOs | ||
65 | * | ||
66 | * There are a maximum of 32 gpios in each gpio controller. | ||
67 | */ | ||
68 | |||
69 | static inline struct ppc4xx_gpio_chip * | ||
70 | to_ppc4xx_gpiochip(struct of_mm_gpio_chip *mm_gc) | ||
71 | { | ||
72 | return container_of(mm_gc, struct ppc4xx_gpio_chip, mm_gc); | ||
73 | } | ||
74 | |||
75 | static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio) | ||
76 | { | ||
77 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
78 | struct ppc4xx_gpio __iomem *regs = mm_gc->regs; | ||
79 | |||
80 | return in_be32(®s->ir) & GPIO_MASK(gpio); | ||
81 | } | ||
82 | |||
83 | static inline void | ||
84 | __ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) | ||
85 | { | ||
86 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
87 | struct ppc4xx_gpio __iomem *regs = mm_gc->regs; | ||
88 | |||
89 | if (val) | ||
90 | setbits32(®s->or, GPIO_MASK(gpio)); | ||
91 | else | ||
92 | clrbits32(®s->or, GPIO_MASK(gpio)); | ||
93 | } | ||
94 | |||
95 | static void | ||
96 | ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) | ||
97 | { | ||
98 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
99 | struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc); | ||
100 | unsigned long flags; | ||
101 | |||
102 | spin_lock_irqsave(&chip->lock, flags); | ||
103 | |||
104 | __ppc4xx_gpio_set(gc, gpio, val); | ||
105 | |||
106 | spin_unlock_irqrestore(&chip->lock, flags); | ||
107 | |||
108 | pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); | ||
109 | } | ||
110 | |||
111 | static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) | ||
112 | { | ||
113 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
114 | struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc); | ||
115 | struct ppc4xx_gpio __iomem *regs = mm_gc->regs; | ||
116 | unsigned long flags; | ||
117 | |||
118 | spin_lock_irqsave(&chip->lock, flags); | ||
119 | |||
120 | /* Disable open-drain function */ | ||
121 | clrbits32(®s->odr, GPIO_MASK(gpio)); | ||
122 | |||
123 | /* Float the pin */ | ||
124 | clrbits32(®s->tcr, GPIO_MASK(gpio)); | ||
125 | |||
126 | /* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */ | ||
127 | if (gpio < 16) { | ||
128 | clrbits32(®s->osrl, GPIO_MASK2(gpio)); | ||
129 | clrbits32(®s->tsrl, GPIO_MASK2(gpio)); | ||
130 | } else { | ||
131 | clrbits32(®s->osrh, GPIO_MASK2(gpio)); | ||
132 | clrbits32(®s->tsrh, GPIO_MASK2(gpio)); | ||
133 | } | ||
134 | |||
135 | spin_unlock_irqrestore(&chip->lock, flags); | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static int | ||
141 | ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | ||
142 | { | ||
143 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
144 | struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc); | ||
145 | struct ppc4xx_gpio __iomem *regs = mm_gc->regs; | ||
146 | unsigned long flags; | ||
147 | |||
148 | spin_lock_irqsave(&chip->lock, flags); | ||
149 | |||
150 | /* First set initial value */ | ||
151 | __ppc4xx_gpio_set(gc, gpio, val); | ||
152 | |||
153 | /* Disable open-drain function */ | ||
154 | clrbits32(®s->odr, GPIO_MASK(gpio)); | ||
155 | |||
156 | /* Drive the pin */ | ||
157 | setbits32(®s->tcr, GPIO_MASK(gpio)); | ||
158 | |||
159 | /* Bits 0-15 use TSRL, bits 16-31 use TSRH */ | ||
160 | if (gpio < 16) { | ||
161 | clrbits32(®s->osrl, GPIO_MASK2(gpio)); | ||
162 | clrbits32(®s->tsrl, GPIO_MASK2(gpio)); | ||
163 | } else { | ||
164 | clrbits32(®s->osrh, GPIO_MASK2(gpio)); | ||
165 | clrbits32(®s->tsrh, GPIO_MASK2(gpio)); | ||
166 | } | ||
167 | |||
168 | spin_unlock_irqrestore(&chip->lock, flags); | ||
169 | |||
170 | pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static int __init ppc4xx_add_gpiochips(void) | ||
176 | { | ||
177 | struct device_node *np; | ||
178 | |||
179 | for_each_compatible_node(np, NULL, "ibm,ppc4xx-gpio") { | ||
180 | int ret; | ||
181 | struct ppc4xx_gpio_chip *ppc4xx_gc; | ||
182 | struct of_mm_gpio_chip *mm_gc; | ||
183 | struct of_gpio_chip *of_gc; | ||
184 | struct gpio_chip *gc; | ||
185 | |||
186 | ppc4xx_gc = kzalloc(sizeof(*ppc4xx_gc), GFP_KERNEL); | ||
187 | if (!ppc4xx_gc) { | ||
188 | ret = -ENOMEM; | ||
189 | goto err; | ||
190 | } | ||
191 | |||
192 | spin_lock_init(&ppc4xx_gc->lock); | ||
193 | |||
194 | mm_gc = &ppc4xx_gc->mm_gc; | ||
195 | of_gc = &mm_gc->of_gc; | ||
196 | gc = &of_gc->gc; | ||
197 | |||
198 | of_gc->gpio_cells = 2; | ||
199 | gc->ngpio = 32; | ||
200 | gc->direction_input = ppc4xx_gpio_dir_in; | ||
201 | gc->direction_output = ppc4xx_gpio_dir_out; | ||
202 | gc->get = ppc4xx_gpio_get; | ||
203 | gc->set = ppc4xx_gpio_set; | ||
204 | |||
205 | ret = of_mm_gpiochip_add(np, mm_gc); | ||
206 | if (ret) | ||
207 | goto err; | ||
208 | continue; | ||
209 | err: | ||
210 | pr_err("%s: registration failed with status %d\n", | ||
211 | np->full_name, ret); | ||
212 | kfree(ppc4xx_gc); | ||
213 | /* try others anyway */ | ||
214 | } | ||
215 | return 0; | ||
216 | } | ||
217 | arch_initcall(ppc4xx_add_gpiochips); | ||
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 4cd8083c58be..0cdcda35a05f 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -212,7 +212,7 @@ static void __init iommu_set_exclusion_range(struct amd_iommu *iommu) | |||
212 | /* Programs the physical address of the device table into the IOMMU hardware */ | 212 | /* Programs the physical address of the device table into the IOMMU hardware */ |
213 | static void __init iommu_set_device_table(struct amd_iommu *iommu) | 213 | static void __init iommu_set_device_table(struct amd_iommu *iommu) |
214 | { | 214 | { |
215 | u32 entry; | 215 | u64 entry; |
216 | 216 | ||
217 | BUG_ON(iommu->mmio_base == NULL); | 217 | BUG_ON(iommu->mmio_base == NULL); |
218 | 218 | ||
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index ccf6c503fc3b..d1d4dc52f649 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -36,7 +36,7 @@ void ack_bad_irq(unsigned int irq) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | #ifdef CONFIG_X86_32 | 38 | #ifdef CONFIG_X86_32 |
39 | # define irq_stats(x) (&per_cpu(irq_stat,x)) | 39 | # define irq_stats(x) (&per_cpu(irq_stat, x)) |
40 | #else | 40 | #else |
41 | # define irq_stats(x) cpu_pda(x) | 41 | # define irq_stats(x) cpu_pda(x) |
42 | #endif | 42 | #endif |
@@ -113,7 +113,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
113 | if (i == 0) { | 113 | if (i == 0) { |
114 | seq_printf(p, " "); | 114 | seq_printf(p, " "); |
115 | for_each_online_cpu(j) | 115 | for_each_online_cpu(j) |
116 | seq_printf(p, "CPU%-8d",j); | 116 | seq_printf(p, "CPU%-8d", j); |
117 | seq_putc(p, '\n'); | 117 | seq_putc(p, '\n'); |
118 | } | 118 | } |
119 | 119 | ||
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 192624820217..1972266e8ba5 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -9,8 +9,6 @@ | |||
9 | #include <asm/calgary.h> | 9 | #include <asm/calgary.h> |
10 | #include <asm/amd_iommu.h> | 10 | #include <asm/amd_iommu.h> |
11 | 11 | ||
12 | static int forbid_dac __read_mostly; | ||
13 | |||
14 | struct dma_mapping_ops *dma_ops; | 12 | struct dma_mapping_ops *dma_ops; |
15 | EXPORT_SYMBOL(dma_ops); | 13 | EXPORT_SYMBOL(dma_ops); |
16 | 14 | ||
@@ -293,17 +291,3 @@ void pci_iommu_shutdown(void) | |||
293 | } | 291 | } |
294 | /* Must execute after PCI subsystem */ | 292 | /* Must execute after PCI subsystem */ |
295 | fs_initcall(pci_iommu_init); | 293 | fs_initcall(pci_iommu_init); |
296 | |||
297 | #ifdef CONFIG_PCI | ||
298 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ | ||
299 | |||
300 | static __devinit void via_no_dac(struct pci_dev *dev) | ||
301 | { | ||
302 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { | ||
303 | printk(KERN_INFO "PCI: VIA PCI bridge detected." | ||
304 | "Disabling DAC.\n"); | ||
305 | forbid_dac = 1; | ||
306 | } | ||
307 | } | ||
308 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); | ||
309 | #endif | ||
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index a213260b51e5..6c873dceb177 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -64,7 +64,12 @@ choice | |||
64 | default XTENSA_VARIANT_FSF | 64 | default XTENSA_VARIANT_FSF |
65 | 65 | ||
66 | config XTENSA_VARIANT_FSF | 66 | config XTENSA_VARIANT_FSF |
67 | bool "fsf" | 67 | bool "fsf - default (not generic) configuration" |
68 | |||
69 | config XTENSA_VARIANT_DC232B | ||
70 | bool "dc232b - Diamond 232L Standard Core Rev.B (LE)" | ||
71 | help | ||
72 | This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE). | ||
68 | endchoice | 73 | endchoice |
69 | 74 | ||
70 | config MMU | 75 | config MMU |
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 4bd1e14c6b90..015b6b2a26b9 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile | |||
@@ -14,6 +14,7 @@ | |||
14 | # (Use VAR=<xtensa_config> to use another default compiler.) | 14 | # (Use VAR=<xtensa_config> to use another default compiler.) |
15 | 15 | ||
16 | variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf | 16 | variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf |
17 | variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b | ||
17 | variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom | 18 | variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom |
18 | 19 | ||
19 | VARIANT = $(variant-y) | 20 | VARIANT = $(variant-y) |
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index c9ea73b7031b..5fbcde59a92d 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c | |||
@@ -48,7 +48,7 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs) | |||
48 | 48 | ||
49 | if (irq >= NR_IRQS) { | 49 | if (irq >= NR_IRQS) { |
50 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | 50 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", |
51 | __FUNCTION__, irq); | 51 | __func__, irq); |
52 | } | 52 | } |
53 | 53 | ||
54 | irq_enter(); | 54 | irq_enter(); |
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index a2e252217428..11a20adc1409 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c | |||
@@ -640,7 +640,7 @@ static int iss_net_configure(int index, char *init) | |||
640 | *lp = ((struct iss_net_private) { | 640 | *lp = ((struct iss_net_private) { |
641 | .device_list = LIST_HEAD_INIT(lp->device_list), | 641 | .device_list = LIST_HEAD_INIT(lp->device_list), |
642 | .opened_list = LIST_HEAD_INIT(lp->opened_list), | 642 | .opened_list = LIST_HEAD_INIT(lp->opened_list), |
643 | .lock = SPIN_LOCK_UNLOCKED, | 643 | .lock = __SPIN_LOCK_UNLOCKED(lp.lock), |
644 | .dev = dev, | 644 | .dev = dev, |
645 | .index = index, | 645 | .index = index, |
646 | //.fd = -1, | 646 | //.fd = -1, |
diff --git a/drivers/Kconfig b/drivers/Kconfig index d19b6f5a1106..d38f43f593d4 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig | |||
@@ -78,6 +78,8 @@ source "drivers/hid/Kconfig" | |||
78 | 78 | ||
79 | source "drivers/usb/Kconfig" | 79 | source "drivers/usb/Kconfig" |
80 | 80 | ||
81 | source "drivers/uwb/Kconfig" | ||
82 | |||
81 | source "drivers/mmc/Kconfig" | 83 | source "drivers/mmc/Kconfig" |
82 | 84 | ||
83 | source "drivers/memstick/Kconfig" | 85 | source "drivers/memstick/Kconfig" |
diff --git a/drivers/Makefile b/drivers/Makefile index 46c8681a07f4..cadc64fe8f68 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -100,3 +100,4 @@ obj-$(CONFIG_SSB) += ssb/ | |||
100 | obj-$(CONFIG_VIRTIO) += virtio/ | 100 | obj-$(CONFIG_VIRTIO) += virtio/ |
101 | obj-$(CONFIG_REGULATOR) += regulator/ | 101 | obj-$(CONFIG_REGULATOR) += regulator/ |
102 | obj-$(CONFIG_STAGING) += staging/ | 102 | obj-$(CONFIG_STAGING) += staging/ |
103 | obj-$(CONFIG_UWB) += uwb/ | ||
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1ee9499bd343..bbb3cae57492 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -5373,6 +5373,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host) | |||
5373 | 5373 | ||
5374 | #ifdef CONFIG_ATA_SFF | 5374 | #ifdef CONFIG_ATA_SFF |
5375 | INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); | 5375 | INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); |
5376 | #else | ||
5377 | INIT_DELAYED_WORK(&ap->port_task, NULL); | ||
5376 | #endif | 5378 | #endif |
5377 | INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); | 5379 | INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); |
5378 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); | 5380 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index a93247cc395a..5d687d7cffae 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -1206,7 +1206,10 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, | |||
1206 | 1206 | ||
1207 | ata_eh_clear_action(link, dev, ehi, action); | 1207 | ata_eh_clear_action(link, dev, ehi, action); |
1208 | 1208 | ||
1209 | if (!(ehc->i.flags & ATA_EHI_QUIET)) | 1209 | /* About to take EH action, set RECOVERED. Ignore actions on |
1210 | * slave links as master will do them again. | ||
1211 | */ | ||
1212 | if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) | ||
1210 | ap->pflags |= ATA_PFLAG_RECOVERED; | 1213 | ap->pflags |= ATA_PFLAG_RECOVERED; |
1211 | 1214 | ||
1212 | spin_unlock_irqrestore(ap->lock, flags); | 1215 | spin_unlock_irqrestore(ap->lock, flags); |
@@ -2010,8 +2013,13 @@ void ata_eh_autopsy(struct ata_port *ap) | |||
2010 | struct ata_eh_context *mehc = &ap->link.eh_context; | 2013 | struct ata_eh_context *mehc = &ap->link.eh_context; |
2011 | struct ata_eh_context *sehc = &ap->slave_link->eh_context; | 2014 | struct ata_eh_context *sehc = &ap->slave_link->eh_context; |
2012 | 2015 | ||
2016 | /* transfer control flags from master to slave */ | ||
2017 | sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; | ||
2018 | |||
2019 | /* perform autopsy on the slave link */ | ||
2013 | ata_eh_link_autopsy(ap->slave_link); | 2020 | ata_eh_link_autopsy(ap->slave_link); |
2014 | 2021 | ||
2022 | /* transfer actions from slave to master and clear slave */ | ||
2015 | ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); | 2023 | ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); |
2016 | mehc->i.action |= sehc->i.action; | 2024 | mehc->i.action |= sehc->i.action; |
2017 | mehc->i.dev_action[1] |= sehc->i.dev_action[1]; | 2025 | mehc->i.dev_action[1] |= sehc->i.dev_action[1]; |
@@ -2447,14 +2455,14 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2447 | dev->pio_mode = XFER_PIO_0; | 2455 | dev->pio_mode = XFER_PIO_0; |
2448 | dev->flags &= ~ATA_DFLAG_SLEEPING; | 2456 | dev->flags &= ~ATA_DFLAG_SLEEPING; |
2449 | 2457 | ||
2450 | if (ata_phys_link_offline(ata_dev_phys_link(dev))) | 2458 | if (!ata_phys_link_offline(ata_dev_phys_link(dev))) { |
2451 | continue; | 2459 | /* apply class override */ |
2452 | 2460 | if (lflags & ATA_LFLAG_ASSUME_ATA) | |
2453 | /* apply class override */ | 2461 | classes[dev->devno] = ATA_DEV_ATA; |
2454 | if (lflags & ATA_LFLAG_ASSUME_ATA) | 2462 | else if (lflags & ATA_LFLAG_ASSUME_SEMB) |
2455 | classes[dev->devno] = ATA_DEV_ATA; | 2463 | classes[dev->devno] = ATA_DEV_SEMB_UNSUP; |
2456 | else if (lflags & ATA_LFLAG_ASSUME_SEMB) | 2464 | } else |
2457 | classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */ | 2465 | classes[dev->devno] = ATA_DEV_NONE; |
2458 | } | 2466 | } |
2459 | 2467 | ||
2460 | /* record current link speed */ | 2468 | /* record current link speed */ |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 2a4c516894f0..4b4739486327 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -2153,8 +2153,17 @@ void ata_sff_error_handler(struct ata_port *ap) | |||
2153 | */ | 2153 | */ |
2154 | void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) | 2154 | void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) |
2155 | { | 2155 | { |
2156 | if (qc->ap->ioaddr.bmdma_addr) | 2156 | struct ata_port *ap = qc->ap; |
2157 | unsigned long flags; | ||
2158 | |||
2159 | spin_lock_irqsave(ap->lock, flags); | ||
2160 | |||
2161 | ap->hsm_task_state = HSM_ST_IDLE; | ||
2162 | |||
2163 | if (ap->ioaddr.bmdma_addr) | ||
2157 | ata_bmdma_stop(qc); | 2164 | ata_bmdma_stop(qc); |
2165 | |||
2166 | spin_unlock_irqrestore(ap->lock, flags); | ||
2158 | } | 2167 | } |
2159 | 2168 | ||
2160 | /** | 2169 | /** |
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 1cfa74535d91..5b72e734300a 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
@@ -70,6 +70,7 @@ enum { | |||
70 | static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 70 | static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
71 | static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); | 71 | static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); |
72 | static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); | 72 | static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); |
73 | static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); | ||
73 | static void svia_noop_freeze(struct ata_port *ap); | 74 | static void svia_noop_freeze(struct ata_port *ap); |
74 | static int vt6420_prereset(struct ata_link *link, unsigned long deadline); | 75 | static int vt6420_prereset(struct ata_link *link, unsigned long deadline); |
75 | static int vt6421_pata_cable_detect(struct ata_port *ap); | 76 | static int vt6421_pata_cable_detect(struct ata_port *ap); |
@@ -103,21 +104,26 @@ static struct scsi_host_template svia_sht = { | |||
103 | ATA_BMDMA_SHT(DRV_NAME), | 104 | ATA_BMDMA_SHT(DRV_NAME), |
104 | }; | 105 | }; |
105 | 106 | ||
106 | static struct ata_port_operations vt6420_sata_ops = { | 107 | static struct ata_port_operations svia_base_ops = { |
107 | .inherits = &ata_bmdma_port_ops, | 108 | .inherits = &ata_bmdma_port_ops, |
109 | .sff_tf_load = svia_tf_load, | ||
110 | }; | ||
111 | |||
112 | static struct ata_port_operations vt6420_sata_ops = { | ||
113 | .inherits = &svia_base_ops, | ||
108 | .freeze = svia_noop_freeze, | 114 | .freeze = svia_noop_freeze, |
109 | .prereset = vt6420_prereset, | 115 | .prereset = vt6420_prereset, |
110 | }; | 116 | }; |
111 | 117 | ||
112 | static struct ata_port_operations vt6421_pata_ops = { | 118 | static struct ata_port_operations vt6421_pata_ops = { |
113 | .inherits = &ata_bmdma_port_ops, | 119 | .inherits = &svia_base_ops, |
114 | .cable_detect = vt6421_pata_cable_detect, | 120 | .cable_detect = vt6421_pata_cable_detect, |
115 | .set_piomode = vt6421_set_pio_mode, | 121 | .set_piomode = vt6421_set_pio_mode, |
116 | .set_dmamode = vt6421_set_dma_mode, | 122 | .set_dmamode = vt6421_set_dma_mode, |
117 | }; | 123 | }; |
118 | 124 | ||
119 | static struct ata_port_operations vt6421_sata_ops = { | 125 | static struct ata_port_operations vt6421_sata_ops = { |
120 | .inherits = &ata_bmdma_port_ops, | 126 | .inherits = &svia_base_ops, |
121 | .scr_read = svia_scr_read, | 127 | .scr_read = svia_scr_read, |
122 | .scr_write = svia_scr_write, | 128 | .scr_write = svia_scr_write, |
123 | }; | 129 | }; |
@@ -168,6 +174,29 @@ static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) | |||
168 | return 0; | 174 | return 0; |
169 | } | 175 | } |
170 | 176 | ||
177 | /** | ||
178 | * svia_tf_load - send taskfile registers to host controller | ||
179 | * @ap: Port to which output is sent | ||
180 | * @tf: ATA taskfile register set | ||
181 | * | ||
182 | * Outputs ATA taskfile to standard ATA host controller. | ||
183 | * | ||
184 | * This is to fix the internal bug of via chipsets, which will | ||
185 | * reset the device register after changing the IEN bit on ctl | ||
186 | * register. | ||
187 | */ | ||
188 | static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | ||
189 | { | ||
190 | struct ata_taskfile ttf; | ||
191 | |||
192 | if (tf->ctl != ap->last_ctl) { | ||
193 | ttf = *tf; | ||
194 | ttf.flags |= ATA_TFLAG_DEVICE; | ||
195 | tf = &ttf; | ||
196 | } | ||
197 | ata_sff_tf_load(ap, tf); | ||
198 | } | ||
199 | |||
171 | static void svia_noop_freeze(struct ata_port *ap) | 200 | static void svia_noop_freeze(struct ata_port *ap) |
172 | { | 201 | { |
173 | /* Some VIA controllers choke if ATA_NIEN is manipulated in | 202 | /* Some VIA controllers choke if ATA_NIEN is manipulated in |
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index bf70450a49cc..5b819b12675a 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c | |||
@@ -161,7 +161,7 @@ static void hvc_console_print(struct console *co, const char *b, | |||
161 | } | 161 | } |
162 | } else { | 162 | } else { |
163 | r = cons_ops[index]->put_chars(vtermnos[index], c, i); | 163 | r = cons_ops[index]->put_chars(vtermnos[index], c, i); |
164 | if (r < 0) { | 164 | if (r <= 0) { |
165 | /* throw away chars on error */ | 165 | /* throw away chars on error */ |
166 | i = 0; | 166 | i = 0; |
167 | } else if (r > 0) { | 167 | } else if (r > 0) { |
@@ -374,6 +374,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) | |||
374 | if (hp->ops->notifier_del) | 374 | if (hp->ops->notifier_del) |
375 | hp->ops->notifier_del(hp, hp->data); | 375 | hp->ops->notifier_del(hp, hp->data); |
376 | 376 | ||
377 | /* cancel pending tty resize work */ | ||
378 | cancel_work_sync(&hp->tty_resize); | ||
379 | |||
377 | /* | 380 | /* |
378 | * Chain calls chars_in_buffer() and returns immediately if | 381 | * Chain calls chars_in_buffer() and returns immediately if |
379 | * there is no buffered data otherwise sleeps on a wait queue | 382 | * there is no buffered data otherwise sleeps on a wait queue |
@@ -399,6 +402,9 @@ static void hvc_hangup(struct tty_struct *tty) | |||
399 | if (!hp) | 402 | if (!hp) |
400 | return; | 403 | return; |
401 | 404 | ||
405 | /* cancel pending tty resize work */ | ||
406 | cancel_work_sync(&hp->tty_resize); | ||
407 | |||
402 | spin_lock_irqsave(&hp->lock, flags); | 408 | spin_lock_irqsave(&hp->lock, flags); |
403 | 409 | ||
404 | /* | 410 | /* |
@@ -418,8 +424,8 @@ static void hvc_hangup(struct tty_struct *tty) | |||
418 | 424 | ||
419 | spin_unlock_irqrestore(&hp->lock, flags); | 425 | spin_unlock_irqrestore(&hp->lock, flags); |
420 | 426 | ||
421 | if (hp->ops->notifier_del) | 427 | if (hp->ops->notifier_hangup) |
422 | hp->ops->notifier_del(hp, hp->data); | 428 | hp->ops->notifier_hangup(hp, hp->data); |
423 | 429 | ||
424 | while(temp_open_count) { | 430 | while(temp_open_count) { |
425 | --temp_open_count; | 431 | --temp_open_count; |
@@ -431,7 +437,7 @@ static void hvc_hangup(struct tty_struct *tty) | |||
431 | * Push buffered characters whether they were just recently buffered or waiting | 437 | * Push buffered characters whether they were just recently buffered or waiting |
432 | * on a blocked hypervisor. Call this function with hp->lock held. | 438 | * on a blocked hypervisor. Call this function with hp->lock held. |
433 | */ | 439 | */ |
434 | static void hvc_push(struct hvc_struct *hp) | 440 | static int hvc_push(struct hvc_struct *hp) |
435 | { | 441 | { |
436 | int n; | 442 | int n; |
437 | 443 | ||
@@ -439,7 +445,7 @@ static void hvc_push(struct hvc_struct *hp) | |||
439 | if (n <= 0) { | 445 | if (n <= 0) { |
440 | if (n == 0) { | 446 | if (n == 0) { |
441 | hp->do_wakeup = 1; | 447 | hp->do_wakeup = 1; |
442 | return; | 448 | return 0; |
443 | } | 449 | } |
444 | /* throw away output on error; this happens when | 450 | /* throw away output on error; this happens when |
445 | there is no session connected to the vterm. */ | 451 | there is no session connected to the vterm. */ |
@@ -450,6 +456,8 @@ static void hvc_push(struct hvc_struct *hp) | |||
450 | memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); | 456 | memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); |
451 | else | 457 | else |
452 | hp->do_wakeup = 1; | 458 | hp->do_wakeup = 1; |
459 | |||
460 | return n; | ||
453 | } | 461 | } |
454 | 462 | ||
455 | static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count) | 463 | static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count) |
@@ -492,6 +500,39 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count | |||
492 | return written; | 500 | return written; |
493 | } | 501 | } |
494 | 502 | ||
503 | /** | ||
504 | * hvc_set_winsz() - Resize the hvc tty terminal window. | ||
505 | * @work: work structure. | ||
506 | * | ||
507 | * The routine shall not be called within an atomic context because it | ||
508 | * might sleep. | ||
509 | * | ||
510 | * Locking: hp->lock | ||
511 | */ | ||
512 | static void hvc_set_winsz(struct work_struct *work) | ||
513 | { | ||
514 | struct hvc_struct *hp; | ||
515 | unsigned long hvc_flags; | ||
516 | struct tty_struct *tty; | ||
517 | struct winsize ws; | ||
518 | |||
519 | hp = container_of(work, struct hvc_struct, tty_resize); | ||
520 | if (!hp) | ||
521 | return; | ||
522 | |||
523 | spin_lock_irqsave(&hp->lock, hvc_flags); | ||
524 | if (!hp->tty) { | ||
525 | spin_unlock_irqrestore(&hp->lock, hvc_flags); | ||
526 | return; | ||
527 | } | ||
528 | ws = hp->ws; | ||
529 | tty = tty_kref_get(hp->tty); | ||
530 | spin_unlock_irqrestore(&hp->lock, hvc_flags); | ||
531 | |||
532 | tty_do_resize(tty, tty, &ws); | ||
533 | tty_kref_put(tty); | ||
534 | } | ||
535 | |||
495 | /* | 536 | /* |
496 | * This is actually a contract between the driver and the tty layer outlining | 537 | * This is actually a contract between the driver and the tty layer outlining |
497 | * how much write room the driver can guarantee will be sent OR BUFFERED. This | 538 | * how much write room the driver can guarantee will be sent OR BUFFERED. This |
@@ -538,16 +579,20 @@ int hvc_poll(struct hvc_struct *hp) | |||
538 | char buf[N_INBUF] __ALIGNED__; | 579 | char buf[N_INBUF] __ALIGNED__; |
539 | unsigned long flags; | 580 | unsigned long flags; |
540 | int read_total = 0; | 581 | int read_total = 0; |
582 | int written_total = 0; | ||
541 | 583 | ||
542 | spin_lock_irqsave(&hp->lock, flags); | 584 | spin_lock_irqsave(&hp->lock, flags); |
543 | 585 | ||
544 | /* Push pending writes */ | 586 | /* Push pending writes */ |
545 | if (hp->n_outbuf > 0) | 587 | if (hp->n_outbuf > 0) |
546 | hvc_push(hp); | 588 | written_total = hvc_push(hp); |
547 | 589 | ||
548 | /* Reschedule us if still some write pending */ | 590 | /* Reschedule us if still some write pending */ |
549 | if (hp->n_outbuf > 0) | 591 | if (hp->n_outbuf > 0) { |
550 | poll_mask |= HVC_POLL_WRITE; | 592 | poll_mask |= HVC_POLL_WRITE; |
593 | /* If hvc_push() was not able to write, sleep a few msecs */ | ||
594 | timeout = (written_total) ? 0 : MIN_TIMEOUT; | ||
595 | } | ||
551 | 596 | ||
552 | /* No tty attached, just skip */ | 597 | /* No tty attached, just skip */ |
553 | tty = hp->tty; | 598 | tty = hp->tty; |
@@ -632,6 +677,24 @@ int hvc_poll(struct hvc_struct *hp) | |||
632 | } | 677 | } |
633 | EXPORT_SYMBOL_GPL(hvc_poll); | 678 | EXPORT_SYMBOL_GPL(hvc_poll); |
634 | 679 | ||
680 | /** | ||
681 | * hvc_resize() - Update terminal window size information. | ||
682 | * @hp: HVC console pointer | ||
683 | * @ws: Terminal window size structure | ||
684 | * | ||
685 | * Stores the specified window size information in the hvc structure of @hp. | ||
686 | * The function schedule the tty resize update. | ||
687 | * | ||
688 | * Locking: Locking free; the function MUST be called holding hp->lock | ||
689 | */ | ||
690 | void hvc_resize(struct hvc_struct *hp, struct winsize ws) | ||
691 | { | ||
692 | if ((hp->ws.ws_row != ws.ws_row) || (hp->ws.ws_col != ws.ws_col)) { | ||
693 | hp->ws = ws; | ||
694 | schedule_work(&hp->tty_resize); | ||
695 | } | ||
696 | } | ||
697 | |||
635 | /* | 698 | /* |
636 | * This kthread is either polling or interrupt driven. This is determined by | 699 | * This kthread is either polling or interrupt driven. This is determined by |
637 | * calling hvc_poll() who determines whether a console adapter support | 700 | * calling hvc_poll() who determines whether a console adapter support |
@@ -659,10 +722,6 @@ static int khvcd(void *unused) | |||
659 | poll_mask |= HVC_POLL_READ; | 722 | poll_mask |= HVC_POLL_READ; |
660 | if (hvc_kicked) | 723 | if (hvc_kicked) |
661 | continue; | 724 | continue; |
662 | if (poll_mask & HVC_POLL_WRITE) { | ||
663 | yield(); | ||
664 | continue; | ||
665 | } | ||
666 | set_current_state(TASK_INTERRUPTIBLE); | 725 | set_current_state(TASK_INTERRUPTIBLE); |
667 | if (!hvc_kicked) { | 726 | if (!hvc_kicked) { |
668 | if (poll_mask == 0) | 727 | if (poll_mask == 0) |
@@ -718,6 +777,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data, | |||
718 | 777 | ||
719 | kref_init(&hp->kref); | 778 | kref_init(&hp->kref); |
720 | 779 | ||
780 | INIT_WORK(&hp->tty_resize, hvc_set_winsz); | ||
721 | spin_lock_init(&hp->lock); | 781 | spin_lock_init(&hp->lock); |
722 | spin_lock(&hvc_structs_lock); | 782 | spin_lock(&hvc_structs_lock); |
723 | 783 | ||
@@ -743,7 +803,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data, | |||
743 | } | 803 | } |
744 | EXPORT_SYMBOL_GPL(hvc_alloc); | 804 | EXPORT_SYMBOL_GPL(hvc_alloc); |
745 | 805 | ||
746 | int __devexit hvc_remove(struct hvc_struct *hp) | 806 | int hvc_remove(struct hvc_struct *hp) |
747 | { | 807 | { |
748 | unsigned long flags; | 808 | unsigned long flags; |
749 | struct tty_struct *tty; | 809 | struct tty_struct *tty; |
@@ -796,7 +856,7 @@ static int hvc_init(void) | |||
796 | drv->minor_start = HVC_MINOR; | 856 | drv->minor_start = HVC_MINOR; |
797 | drv->type = TTY_DRIVER_TYPE_SYSTEM; | 857 | drv->type = TTY_DRIVER_TYPE_SYSTEM; |
798 | drv->init_termios = tty_std_termios; | 858 | drv->init_termios = tty_std_termios; |
799 | drv->flags = TTY_DRIVER_REAL_RAW; | 859 | drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS; |
800 | tty_set_operations(drv, &hvc_ops); | 860 | tty_set_operations(drv, &hvc_ops); |
801 | 861 | ||
802 | /* Always start the kthread because there can be hotplug vty adapters | 862 | /* Always start the kthread because there can be hotplug vty adapters |
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h index 9790201718ae..8297dbc2e6ec 100644 --- a/drivers/char/hvc_console.h +++ b/drivers/char/hvc_console.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #ifndef HVC_CONSOLE_H | 27 | #ifndef HVC_CONSOLE_H |
28 | #define HVC_CONSOLE_H | 28 | #define HVC_CONSOLE_H |
29 | #include <linux/kref.h> | 29 | #include <linux/kref.h> |
30 | #include <linux/tty.h> | ||
30 | 31 | ||
31 | /* | 32 | /* |
32 | * This is the max number of console adapters that can/will be found as | 33 | * This is the max number of console adapters that can/will be found as |
@@ -56,6 +57,8 @@ struct hvc_struct { | |||
56 | struct hv_ops *ops; | 57 | struct hv_ops *ops; |
57 | int irq_requested; | 58 | int irq_requested; |
58 | int data; | 59 | int data; |
60 | struct winsize ws; | ||
61 | struct work_struct tty_resize; | ||
59 | struct list_head next; | 62 | struct list_head next; |
60 | struct kref kref; /* ref count & hvc_struct lifetime */ | 63 | struct kref kref; /* ref count & hvc_struct lifetime */ |
61 | }; | 64 | }; |
@@ -65,9 +68,10 @@ struct hv_ops { | |||
65 | int (*get_chars)(uint32_t vtermno, char *buf, int count); | 68 | int (*get_chars)(uint32_t vtermno, char *buf, int count); |
66 | int (*put_chars)(uint32_t vtermno, const char *buf, int count); | 69 | int (*put_chars)(uint32_t vtermno, const char *buf, int count); |
67 | 70 | ||
68 | /* Callbacks for notification. Called in open and close */ | 71 | /* Callbacks for notification. Called in open, close and hangup */ |
69 | int (*notifier_add)(struct hvc_struct *hp, int irq); | 72 | int (*notifier_add)(struct hvc_struct *hp, int irq); |
70 | void (*notifier_del)(struct hvc_struct *hp, int irq); | 73 | void (*notifier_del)(struct hvc_struct *hp, int irq); |
74 | void (*notifier_hangup)(struct hvc_struct *hp, int irq); | ||
71 | }; | 75 | }; |
72 | 76 | ||
73 | /* Register a vterm and a slot index for use as a console (console_init) */ | 77 | /* Register a vterm and a slot index for use as a console (console_init) */ |
@@ -77,15 +81,19 @@ extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); | |||
77 | extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, | 81 | extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, |
78 | struct hv_ops *ops, int outbuf_size); | 82 | struct hv_ops *ops, int outbuf_size); |
79 | /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ | 83 | /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ |
80 | extern int __devexit hvc_remove(struct hvc_struct *hp); | 84 | extern int hvc_remove(struct hvc_struct *hp); |
81 | 85 | ||
82 | /* data available */ | 86 | /* data available */ |
83 | int hvc_poll(struct hvc_struct *hp); | 87 | int hvc_poll(struct hvc_struct *hp); |
84 | void hvc_kick(void); | 88 | void hvc_kick(void); |
85 | 89 | ||
90 | /* Resize hvc tty terminal window */ | ||
91 | extern void hvc_resize(struct hvc_struct *hp, struct winsize ws); | ||
92 | |||
86 | /* default notifier for irq based notification */ | 93 | /* default notifier for irq based notification */ |
87 | extern int notifier_add_irq(struct hvc_struct *hp, int data); | 94 | extern int notifier_add_irq(struct hvc_struct *hp, int data); |
88 | extern void notifier_del_irq(struct hvc_struct *hp, int data); | 95 | extern void notifier_del_irq(struct hvc_struct *hp, int data); |
96 | extern void notifier_hangup_irq(struct hvc_struct *hp, int data); | ||
89 | 97 | ||
90 | 98 | ||
91 | #if defined(CONFIG_XMON) && defined(CONFIG_SMP) | 99 | #if defined(CONFIG_XMON) && defined(CONFIG_SMP) |
diff --git a/drivers/char/hvc_irq.c b/drivers/char/hvc_irq.c index 73a59cdb8947..d09e5688d449 100644 --- a/drivers/char/hvc_irq.c +++ b/drivers/char/hvc_irq.c | |||
@@ -42,3 +42,8 @@ void notifier_del_irq(struct hvc_struct *hp, int irq) | |||
42 | free_irq(irq, hp); | 42 | free_irq(irq, hp); |
43 | hp->irq_requested = 0; | 43 | hp->irq_requested = 0; |
44 | } | 44 | } |
45 | |||
46 | void notifier_hangup_irq(struct hvc_struct *hp, int irq) | ||
47 | { | ||
48 | notifier_del_irq(hp, irq); | ||
49 | } | ||
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c index b71c610fe5ae..b74a2f8ab908 100644 --- a/drivers/char/hvc_iseries.c +++ b/drivers/char/hvc_iseries.c | |||
@@ -202,6 +202,7 @@ static struct hv_ops hvc_get_put_ops = { | |||
202 | .put_chars = put_chars, | 202 | .put_chars = put_chars, |
203 | .notifier_add = notifier_add_irq, | 203 | .notifier_add = notifier_add_irq, |
204 | .notifier_del = notifier_del_irq, | 204 | .notifier_del = notifier_del_irq, |
205 | .notifier_hangup = notifier_hangup_irq, | ||
205 | }; | 206 | }; |
206 | 207 | ||
207 | static int __devinit hvc_vio_probe(struct vio_dev *vdev, | 208 | static int __devinit hvc_vio_probe(struct vio_dev *vdev, |
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c index 93f3840c1682..019e0b58593d 100644 --- a/drivers/char/hvc_vio.c +++ b/drivers/char/hvc_vio.c | |||
@@ -82,6 +82,7 @@ static struct hv_ops hvc_get_put_ops = { | |||
82 | .put_chars = hvc_put_chars, | 82 | .put_chars = hvc_put_chars, |
83 | .notifier_add = notifier_add_irq, | 83 | .notifier_add = notifier_add_irq, |
84 | .notifier_del = notifier_del_irq, | 84 | .notifier_del = notifier_del_irq, |
85 | .notifier_hangup = notifier_hangup_irq, | ||
85 | }; | 86 | }; |
86 | 87 | ||
87 | static int __devinit hvc_vio_probe(struct vio_dev *vdev, | 88 | static int __devinit hvc_vio_probe(struct vio_dev *vdev, |
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c index 538ceea5e7df..eba999f8598d 100644 --- a/drivers/char/hvc_xen.c +++ b/drivers/char/hvc_xen.c | |||
@@ -102,6 +102,7 @@ static struct hv_ops hvc_ops = { | |||
102 | .put_chars = write_console, | 102 | .put_chars = write_console, |
103 | .notifier_add = notifier_add_irq, | 103 | .notifier_add = notifier_add_irq, |
104 | .notifier_del = notifier_del_irq, | 104 | .notifier_del = notifier_del_irq, |
105 | .notifier_hangup = notifier_hangup_irq, | ||
105 | }; | 106 | }; |
106 | 107 | ||
107 | static int __init xen_init(void) | 108 | static int __init xen_init(void) |
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index 553b0e9d8d17..c8f8024cb40e 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c | |||
@@ -90,7 +90,7 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty) | |||
90 | spin_lock_irqsave(&port->lock, flags); | 90 | spin_lock_irqsave(&port->lock, flags); |
91 | if (port->tty) | 91 | if (port->tty) |
92 | tty_kref_put(port->tty); | 92 | tty_kref_put(port->tty); |
93 | port->tty = tty; | 93 | port->tty = tty_kref_get(tty); |
94 | spin_unlock_irqrestore(&port->lock, flags); | 94 | spin_unlock_irqrestore(&port->lock, flags); |
95 | } | 95 | } |
96 | EXPORT_SYMBOL(tty_port_tty_set); | 96 | EXPORT_SYMBOL(tty_port_tty_set); |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index d0f4eb6fdb7f..3fb0d2c88ba5 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -198,6 +198,7 @@ static int __devinit virtcons_probe(struct virtio_device *dev) | |||
198 | virtio_cons.put_chars = put_chars; | 198 | virtio_cons.put_chars = put_chars; |
199 | virtio_cons.notifier_add = notifier_add_vio; | 199 | virtio_cons.notifier_add = notifier_add_vio; |
200 | virtio_cons.notifier_del = notifier_del_vio; | 200 | virtio_cons.notifier_del = notifier_del_vio; |
201 | virtio_cons.notifier_hangup = notifier_del_vio; | ||
201 | 202 | ||
202 | /* The first argument of hvc_alloc() is the virtual console number, so | 203 | /* The first argument of hvc_alloc() is the virtual console number, so |
203 | * we use zero. The second argument is the parameter for the | 204 | * we use zero. The second argument is the parameter for the |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index dbd42d6c93a7..7f2ee27fe76b 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -127,6 +127,13 @@ config GPIO_PCF857X | |||
127 | This driver provides an in-kernel interface to those GPIOs using | 127 | This driver provides an in-kernel interface to those GPIOs using |
128 | platform-neutral GPIO calls. | 128 | platform-neutral GPIO calls. |
129 | 129 | ||
130 | config GPIO_TWL4030 | ||
131 | tristate "TWL4030, TWL5030, and TPS659x0 GPIOs" | ||
132 | depends on TWL4030_CORE | ||
133 | help | ||
134 | Say yes here to access the GPIO signals of various multi-function | ||
135 | power management chips from Texas Instruments. | ||
136 | |||
130 | comment "PCI GPIO expanders:" | 137 | comment "PCI GPIO expanders:" |
131 | 138 | ||
132 | config GPIO_BT8XX | 139 | config GPIO_BT8XX |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 01b4bbde1956..6aafdeb9ad03 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -9,4 +9,5 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o | |||
9 | obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o | 9 | obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o |
10 | obj-$(CONFIG_GPIO_PCA953X) += pca953x.o | 10 | obj-$(CONFIG_GPIO_PCA953X) += pca953x.o |
11 | obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o | 11 | obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o |
12 | obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o | ||
12 | obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o | 13 | obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o |
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c new file mode 100644 index 000000000000..37d3eec8730a --- /dev/null +++ b/drivers/gpio/twl4030-gpio.c | |||
@@ -0,0 +1,521 @@ | |||
1 | /* | ||
2 | * twl4030_gpio.c -- access to GPIOs on TWL4030/TPS659x0 chips | ||
3 | * | ||
4 | * Copyright (C) 2006-2007 Texas Instruments, Inc. | ||
5 | * Copyright (C) 2006 MontaVista Software, Inc. | ||
6 | * | ||
7 | * Code re-arranged and cleaned up by: | ||
8 | * Syed Mohammed Khasim <x0khasim@ti.com> | ||
9 | * | ||
10 | * Initial Code: | ||
11 | * Andy Lowe / Nishanth Menon | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
26 | */ | ||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/kthread.h> | ||
32 | #include <linux/irq.h> | ||
33 | #include <linux/gpio.h> | ||
34 | #include <linux/platform_device.h> | ||
35 | #include <linux/slab.h> | ||
36 | |||
37 | #include <linux/i2c/twl4030.h> | ||
38 | |||
39 | |||
40 | /* | ||
41 | * The GPIO "subchip" supports 18 GPIOs which can be configured as | ||
42 | * inputs or outputs, with pullups or pulldowns on each pin. Each | ||
43 | * GPIO can trigger interrupts on either or both edges. | ||
44 | * | ||
45 | * GPIO interrupts can be fed to either of two IRQ lines; this is | ||
46 | * intended to support multiple hosts. | ||
47 | * | ||
48 | * There are also two LED pins used sometimes as output-only GPIOs. | ||
49 | */ | ||
50 | |||
51 | |||
52 | static struct gpio_chip twl_gpiochip; | ||
53 | static int twl4030_gpio_irq_base; | ||
54 | |||
55 | /* genirq interfaces are not available to modules */ | ||
56 | #ifdef MODULE | ||
57 | #define is_module() true | ||
58 | #else | ||
59 | #define is_module() false | ||
60 | #endif | ||
61 | |||
62 | /* GPIO_CTRL Fields */ | ||
63 | #define MASK_GPIO_CTRL_GPIO0CD1 BIT(0) | ||
64 | #define MASK_GPIO_CTRL_GPIO1CD2 BIT(1) | ||
65 | #define MASK_GPIO_CTRL_GPIO_ON BIT(2) | ||
66 | |||
67 | /* Mask for GPIO registers when aggregated into a 32-bit integer */ | ||
68 | #define GPIO_32_MASK 0x0003ffff | ||
69 | |||
70 | /* Data structures */ | ||
71 | static DEFINE_MUTEX(gpio_lock); | ||
72 | |||
73 | /* store usage of each GPIO. - each bit represents one GPIO */ | ||
74 | static unsigned int gpio_usage_count; | ||
75 | |||
76 | /*----------------------------------------------------------------------*/ | ||
77 | |||
78 | /* | ||
79 | * To configure TWL4030 GPIO module registers | ||
80 | */ | ||
81 | static inline int gpio_twl4030_write(u8 address, u8 data) | ||
82 | { | ||
83 | return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address); | ||
84 | } | ||
85 | |||
86 | /*----------------------------------------------------------------------*/ | ||
87 | |||
88 | /* | ||
89 | * LED register offsets (use TWL4030_MODULE_{LED,PWMA,PWMB})) | ||
90 | * PWMs A and B are dedicated to LEDs A and B, respectively. | ||
91 | */ | ||
92 | |||
93 | #define TWL4030_LED_LEDEN 0x0 | ||
94 | |||
95 | /* LEDEN bits */ | ||
96 | #define LEDEN_LEDAON BIT(0) | ||
97 | #define LEDEN_LEDBON BIT(1) | ||
98 | #define LEDEN_LEDAEXT BIT(2) | ||
99 | #define LEDEN_LEDBEXT BIT(3) | ||
100 | #define LEDEN_LEDAPWM BIT(4) | ||
101 | #define LEDEN_LEDBPWM BIT(5) | ||
102 | #define LEDEN_PWM_LENGTHA BIT(6) | ||
103 | #define LEDEN_PWM_LENGTHB BIT(7) | ||
104 | |||
105 | #define TWL4030_PWMx_PWMxON 0x0 | ||
106 | #define TWL4030_PWMx_PWMxOFF 0x1 | ||
107 | |||
108 | #define PWMxON_LENGTH BIT(7) | ||
109 | |||
110 | /*----------------------------------------------------------------------*/ | ||
111 | |||
112 | /* | ||
113 | * To read a TWL4030 GPIO module register | ||
114 | */ | ||
115 | static inline int gpio_twl4030_read(u8 address) | ||
116 | { | ||
117 | u8 data; | ||
118 | int ret = 0; | ||
119 | |||
120 | ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address); | ||
121 | return (ret < 0) ? ret : data; | ||
122 | } | ||
123 | |||
124 | /*----------------------------------------------------------------------*/ | ||
125 | |||
126 | static u8 cached_leden; /* protected by gpio_lock */ | ||
127 | |||
128 | /* The LED lines are open drain outputs ... a FET pulls to GND, so an | ||
129 | * external pullup is needed. We could also expose the integrated PWM | ||
130 | * as a LED brightness control; we initialize it as "always on". | ||
131 | */ | ||
132 | static void twl4030_led_set_value(int led, int value) | ||
133 | { | ||
134 | u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM; | ||
135 | int status; | ||
136 | |||
137 | if (led) | ||
138 | mask <<= 1; | ||
139 | |||
140 | mutex_lock(&gpio_lock); | ||
141 | if (value) | ||
142 | cached_leden &= ~mask; | ||
143 | else | ||
144 | cached_leden |= mask; | ||
145 | status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden, | ||
146 | TWL4030_LED_LEDEN); | ||
147 | mutex_unlock(&gpio_lock); | ||
148 | } | ||
149 | |||
150 | static int twl4030_set_gpio_direction(int gpio, int is_input) | ||
151 | { | ||
152 | u8 d_bnk = gpio >> 3; | ||
153 | u8 d_msk = BIT(gpio & 0x7); | ||
154 | u8 reg = 0; | ||
155 | u8 base = REG_GPIODATADIR1 + d_bnk; | ||
156 | int ret = 0; | ||
157 | |||
158 | mutex_lock(&gpio_lock); | ||
159 | ret = gpio_twl4030_read(base); | ||
160 | if (ret >= 0) { | ||
161 | if (is_input) | ||
162 | reg = ret & ~d_msk; | ||
163 | else | ||
164 | reg = ret | d_msk; | ||
165 | |||
166 | ret = gpio_twl4030_write(base, reg); | ||
167 | } | ||
168 | mutex_unlock(&gpio_lock); | ||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | static int twl4030_set_gpio_dataout(int gpio, int enable) | ||
173 | { | ||
174 | u8 d_bnk = gpio >> 3; | ||
175 | u8 d_msk = BIT(gpio & 0x7); | ||
176 | u8 base = 0; | ||
177 | |||
178 | if (enable) | ||
179 | base = REG_SETGPIODATAOUT1 + d_bnk; | ||
180 | else | ||
181 | base = REG_CLEARGPIODATAOUT1 + d_bnk; | ||
182 | |||
183 | return gpio_twl4030_write(base, d_msk); | ||
184 | } | ||
185 | |||
186 | static int twl4030_get_gpio_datain(int gpio) | ||
187 | { | ||
188 | u8 d_bnk = gpio >> 3; | ||
189 | u8 d_off = gpio & 0x7; | ||
190 | u8 base = 0; | ||
191 | int ret = 0; | ||
192 | |||
193 | if (unlikely((gpio >= TWL4030_GPIO_MAX) | ||
194 | || !(gpio_usage_count & BIT(gpio)))) | ||
195 | return -EPERM; | ||
196 | |||
197 | base = REG_GPIODATAIN1 + d_bnk; | ||
198 | ret = gpio_twl4030_read(base); | ||
199 | if (ret > 0) | ||
200 | ret = (ret >> d_off) & 0x1; | ||
201 | |||
202 | return ret; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * Configure debounce timing value for a GPIO pin on TWL4030 | ||
207 | */ | ||
208 | int twl4030_set_gpio_debounce(int gpio, int enable) | ||
209 | { | ||
210 | u8 d_bnk = gpio >> 3; | ||
211 | u8 d_msk = BIT(gpio & 0x7); | ||
212 | u8 reg = 0; | ||
213 | u8 base = 0; | ||
214 | int ret = 0; | ||
215 | |||
216 | if (unlikely((gpio >= TWL4030_GPIO_MAX) | ||
217 | || !(gpio_usage_count & BIT(gpio)))) | ||
218 | return -EPERM; | ||
219 | |||
220 | base = REG_GPIO_DEBEN1 + d_bnk; | ||
221 | mutex_lock(&gpio_lock); | ||
222 | ret = gpio_twl4030_read(base); | ||
223 | if (ret >= 0) { | ||
224 | if (enable) | ||
225 | reg = ret | d_msk; | ||
226 | else | ||
227 | reg = ret & ~d_msk; | ||
228 | |||
229 | ret = gpio_twl4030_write(base, reg); | ||
230 | } | ||
231 | mutex_unlock(&gpio_lock); | ||
232 | return ret; | ||
233 | } | ||
234 | EXPORT_SYMBOL(twl4030_set_gpio_debounce); | ||
235 | |||
236 | /*----------------------------------------------------------------------*/ | ||
237 | |||
238 | static int twl_request(struct gpio_chip *chip, unsigned offset) | ||
239 | { | ||
240 | int status = 0; | ||
241 | |||
242 | mutex_lock(&gpio_lock); | ||
243 | |||
244 | /* Support the two LED outputs as output-only GPIOs. */ | ||
245 | if (offset >= TWL4030_GPIO_MAX) { | ||
246 | u8 ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT | ||
247 | | LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA; | ||
248 | u8 module = TWL4030_MODULE_PWMA; | ||
249 | |||
250 | offset -= TWL4030_GPIO_MAX; | ||
251 | if (offset) { | ||
252 | ledclr_mask <<= 1; | ||
253 | module = TWL4030_MODULE_PWMB; | ||
254 | } | ||
255 | |||
256 | /* initialize PWM to always-drive */ | ||
257 | status = twl4030_i2c_write_u8(module, 0x7f, | ||
258 | TWL4030_PWMx_PWMxOFF); | ||
259 | if (status < 0) | ||
260 | goto done; | ||
261 | status = twl4030_i2c_write_u8(module, 0x7f, | ||
262 | TWL4030_PWMx_PWMxON); | ||
263 | if (status < 0) | ||
264 | goto done; | ||
265 | |||
266 | /* init LED to not-driven (high) */ | ||
267 | module = TWL4030_MODULE_LED; | ||
268 | status = twl4030_i2c_read_u8(module, &cached_leden, | ||
269 | TWL4030_LED_LEDEN); | ||
270 | if (status < 0) | ||
271 | goto done; | ||
272 | cached_leden &= ~ledclr_mask; | ||
273 | status = twl4030_i2c_write_u8(module, cached_leden, | ||
274 | TWL4030_LED_LEDEN); | ||
275 | if (status < 0) | ||
276 | goto done; | ||
277 | |||
278 | status = 0; | ||
279 | goto done; | ||
280 | } | ||
281 | |||
282 | /* on first use, turn GPIO module "on" */ | ||
283 | if (!gpio_usage_count) { | ||
284 | struct twl4030_gpio_platform_data *pdata; | ||
285 | u8 value = MASK_GPIO_CTRL_GPIO_ON; | ||
286 | |||
287 | /* optionally have the first two GPIOs switch vMMC1 | ||
288 | * and vMMC2 power supplies based on card presence. | ||
289 | */ | ||
290 | pdata = chip->dev->platform_data; | ||
291 | value |= pdata->mmc_cd & 0x03; | ||
292 | |||
293 | status = gpio_twl4030_write(REG_GPIO_CTRL, value); | ||
294 | } | ||
295 | |||
296 | if (!status) | ||
297 | gpio_usage_count |= (0x1 << offset); | ||
298 | |||
299 | done: | ||
300 | mutex_unlock(&gpio_lock); | ||
301 | return status; | ||
302 | } | ||
303 | |||
304 | static void twl_free(struct gpio_chip *chip, unsigned offset) | ||
305 | { | ||
306 | if (offset >= TWL4030_GPIO_MAX) { | ||
307 | twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1); | ||
308 | return; | ||
309 | } | ||
310 | |||
311 | mutex_lock(&gpio_lock); | ||
312 | |||
313 | gpio_usage_count &= ~BIT(offset); | ||
314 | |||
315 | /* on last use, switch off GPIO module */ | ||
316 | if (!gpio_usage_count) | ||
317 | gpio_twl4030_write(REG_GPIO_CTRL, 0x0); | ||
318 | |||
319 | mutex_unlock(&gpio_lock); | ||
320 | } | ||
321 | |||
322 | static int twl_direction_in(struct gpio_chip *chip, unsigned offset) | ||
323 | { | ||
324 | return (offset < TWL4030_GPIO_MAX) | ||
325 | ? twl4030_set_gpio_direction(offset, 1) | ||
326 | : -EINVAL; | ||
327 | } | ||
328 | |||
329 | static int twl_get(struct gpio_chip *chip, unsigned offset) | ||
330 | { | ||
331 | int status = 0; | ||
332 | |||
333 | if (offset < TWL4030_GPIO_MAX) | ||
334 | status = twl4030_get_gpio_datain(offset); | ||
335 | else if (offset == TWL4030_GPIO_MAX) | ||
336 | status = cached_leden & LEDEN_LEDAON; | ||
337 | else | ||
338 | status = cached_leden & LEDEN_LEDBON; | ||
339 | return (status < 0) ? 0 : status; | ||
340 | } | ||
341 | |||
342 | static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) | ||
343 | { | ||
344 | if (offset < TWL4030_GPIO_MAX) { | ||
345 | twl4030_set_gpio_dataout(offset, value); | ||
346 | return twl4030_set_gpio_direction(offset, 0); | ||
347 | } else { | ||
348 | twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value); | ||
349 | return 0; | ||
350 | } | ||
351 | } | ||
352 | |||
353 | static void twl_set(struct gpio_chip *chip, unsigned offset, int value) | ||
354 | { | ||
355 | if (offset < TWL4030_GPIO_MAX) | ||
356 | twl4030_set_gpio_dataout(offset, value); | ||
357 | else | ||
358 | twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value); | ||
359 | } | ||
360 | |||
361 | static int twl_to_irq(struct gpio_chip *chip, unsigned offset) | ||
362 | { | ||
363 | return (twl4030_gpio_irq_base && (offset < TWL4030_GPIO_MAX)) | ||
364 | ? (twl4030_gpio_irq_base + offset) | ||
365 | : -EINVAL; | ||
366 | } | ||
367 | |||
368 | static struct gpio_chip twl_gpiochip = { | ||
369 | .label = "twl4030", | ||
370 | .owner = THIS_MODULE, | ||
371 | .request = twl_request, | ||
372 | .free = twl_free, | ||
373 | .direction_input = twl_direction_in, | ||
374 | .get = twl_get, | ||
375 | .direction_output = twl_direction_out, | ||
376 | .set = twl_set, | ||
377 | .to_irq = twl_to_irq, | ||
378 | .can_sleep = 1, | ||
379 | }; | ||
380 | |||
381 | /*----------------------------------------------------------------------*/ | ||
382 | |||
383 | static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs) | ||
384 | { | ||
385 | u8 message[6]; | ||
386 | unsigned i, gpio_bit; | ||
387 | |||
388 | /* For most pins, a pulldown was enabled by default. | ||
389 | * We should have data that's specific to this board. | ||
390 | */ | ||
391 | for (gpio_bit = 1, i = 1; i < 6; i++) { | ||
392 | u8 bit_mask; | ||
393 | unsigned j; | ||
394 | |||
395 | for (bit_mask = 0, j = 0; j < 8; j += 2, gpio_bit <<= 1) { | ||
396 | if (ups & gpio_bit) | ||
397 | bit_mask |= 1 << (j + 1); | ||
398 | else if (downs & gpio_bit) | ||
399 | bit_mask |= 1 << (j + 0); | ||
400 | } | ||
401 | message[i] = bit_mask; | ||
402 | } | ||
403 | |||
404 | return twl4030_i2c_write(TWL4030_MODULE_GPIO, message, | ||
405 | REG_GPIOPUPDCTR1, 5); | ||
406 | } | ||
407 | |||
408 | static int gpio_twl4030_remove(struct platform_device *pdev); | ||
409 | |||
410 | static int __devinit gpio_twl4030_probe(struct platform_device *pdev) | ||
411 | { | ||
412 | struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data; | ||
413 | int ret; | ||
414 | |||
415 | /* maybe setup IRQs */ | ||
416 | if (pdata->irq_base) { | ||
417 | if (is_module()) { | ||
418 | dev_err(&pdev->dev, | ||
419 | "can't dispatch IRQs from modules\n"); | ||
420 | goto no_irqs; | ||
421 | } | ||
422 | ret = twl4030_sih_setup(TWL4030_MODULE_GPIO); | ||
423 | if (ret < 0) | ||
424 | return ret; | ||
425 | WARN_ON(ret != pdata->irq_base); | ||
426 | twl4030_gpio_irq_base = ret; | ||
427 | } | ||
428 | |||
429 | no_irqs: | ||
430 | /* | ||
431 | * NOTE: boards may waste power if they don't set pullups | ||
432 | * and pulldowns correctly ... default for non-ULPI pins is | ||
433 | * pulldown, and some other pins may have external pullups | ||
434 | * or pulldowns. Careful! | ||
435 | */ | ||
436 | ret = gpio_twl4030_pulls(pdata->pullups, pdata->pulldowns); | ||
437 | if (ret) | ||
438 | dev_dbg(&pdev->dev, "pullups %.05x %.05x --> %d\n", | ||
439 | pdata->pullups, pdata->pulldowns, | ||
440 | ret); | ||
441 | |||
442 | twl_gpiochip.base = pdata->gpio_base; | ||
443 | twl_gpiochip.ngpio = TWL4030_GPIO_MAX; | ||
444 | twl_gpiochip.dev = &pdev->dev; | ||
445 | |||
446 | /* NOTE: we assume VIBRA_CTL.VIBRA_EN, in MODULE_AUDIO_VOICE, | ||
447 | * is (still) clear if use_leds is set. | ||
448 | */ | ||
449 | if (pdata->use_leds) | ||
450 | twl_gpiochip.ngpio += 2; | ||
451 | |||
452 | ret = gpiochip_add(&twl_gpiochip); | ||
453 | if (ret < 0) { | ||
454 | dev_err(&pdev->dev, | ||
455 | "could not register gpiochip, %d\n", | ||
456 | ret); | ||
457 | twl_gpiochip.ngpio = 0; | ||
458 | gpio_twl4030_remove(pdev); | ||
459 | } else if (pdata->setup) { | ||
460 | int status; | ||
461 | |||
462 | status = pdata->setup(&pdev->dev, | ||
463 | pdata->gpio_base, TWL4030_GPIO_MAX); | ||
464 | if (status) | ||
465 | dev_dbg(&pdev->dev, "setup --> %d\n", status); | ||
466 | } | ||
467 | |||
468 | return ret; | ||
469 | } | ||
470 | |||
471 | static int __devexit gpio_twl4030_remove(struct platform_device *pdev) | ||
472 | { | ||
473 | struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data; | ||
474 | int status; | ||
475 | |||
476 | if (pdata->teardown) { | ||
477 | status = pdata->teardown(&pdev->dev, | ||
478 | pdata->gpio_base, TWL4030_GPIO_MAX); | ||
479 | if (status) { | ||
480 | dev_dbg(&pdev->dev, "teardown --> %d\n", status); | ||
481 | return status; | ||
482 | } | ||
483 | } | ||
484 | |||
485 | status = gpiochip_remove(&twl_gpiochip); | ||
486 | if (status < 0) | ||
487 | return status; | ||
488 | |||
489 | if (is_module()) | ||
490 | return 0; | ||
491 | |||
492 | /* REVISIT no support yet for deregistering all the IRQs */ | ||
493 | WARN_ON(1); | ||
494 | return -EIO; | ||
495 | } | ||
496 | |||
497 | /* Note: this hardware lives inside an I2C-based multi-function device. */ | ||
498 | MODULE_ALIAS("platform:twl4030_gpio"); | ||
499 | |||
500 | static struct platform_driver gpio_twl4030_driver = { | ||
501 | .driver.name = "twl4030_gpio", | ||
502 | .driver.owner = THIS_MODULE, | ||
503 | .probe = gpio_twl4030_probe, | ||
504 | .remove = __devexit_p(gpio_twl4030_remove), | ||
505 | }; | ||
506 | |||
507 | static int __init gpio_twl4030_init(void) | ||
508 | { | ||
509 | return platform_driver_register(&gpio_twl4030_driver); | ||
510 | } | ||
511 | subsys_initcall(gpio_twl4030_init); | ||
512 | |||
513 | static void __exit gpio_twl4030_exit(void) | ||
514 | { | ||
515 | platform_driver_unregister(&gpio_twl4030_driver); | ||
516 | } | ||
517 | module_exit(gpio_twl4030_exit); | ||
518 | |||
519 | MODULE_AUTHOR("Texas Instruments, Inc."); | ||
520 | MODULE_DESCRIPTION("GPIO interface for TWL4030"); | ||
521 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 8164de1f4d72..228f75723063 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c | |||
@@ -423,7 +423,6 @@ static const struct i2c_adapter cpm_ops = { | |||
423 | .owner = THIS_MODULE, | 423 | .owner = THIS_MODULE, |
424 | .name = "i2c-cpm", | 424 | .name = "i2c-cpm", |
425 | .algo = &cpm_i2c_algo, | 425 | .algo = &cpm_i2c_algo, |
426 | .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, | ||
427 | }; | 426 | }; |
428 | 427 | ||
429 | static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) | 428 | static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) |
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index 093d3248ca89..9cf92ac939d2 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile | |||
@@ -18,22 +18,66 @@ ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o | |||
18 | 18 | ||
19 | obj-$(CONFIG_IDE) += ide-core.o | 19 | obj-$(CONFIG_IDE) += ide-core.o |
20 | 20 | ||
21 | ifeq ($(CONFIG_IDE_ARM), y) | 21 | obj-$(CONFIG_IDE_ARM) += ide_arm.o |
22 | ide-arm-core-y += arm/ide_arm.o | 22 | |
23 | obj-y += ide-arm-core.o | 23 | obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o |
24 | endif | 24 | obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o |
25 | 25 | obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o | |
26 | obj-$(CONFIG_IDE) += legacy/ pci/ | 26 | obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o |
27 | obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o | ||
28 | obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o | ||
29 | |||
30 | obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o | ||
31 | obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o | ||
32 | obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o | ||
33 | obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o | ||
34 | obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o | ||
35 | |||
36 | obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o | ||
37 | obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o | ||
38 | obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o | ||
39 | obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o | ||
40 | obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o | ||
41 | obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o | ||
42 | obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o | ||
43 | obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o | ||
44 | obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o | ||
45 | obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o | ||
46 | obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o | ||
47 | obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o | ||
48 | obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o | ||
49 | obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o | ||
50 | obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o | ||
51 | obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o | ||
52 | obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o | ||
53 | obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o | ||
54 | obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o | ||
55 | obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o | ||
56 | obj-$(CONFIG_BLK_DEV_PIIX) += piix.o | ||
57 | obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o | ||
58 | obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o | ||
59 | obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o | ||
60 | obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o | ||
61 | obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o | ||
62 | obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o | ||
63 | obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o | ||
64 | obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o | ||
65 | obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o | ||
66 | obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o | ||
67 | obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o | ||
68 | |||
69 | # Must appear at the end of the block | ||
70 | obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o | ||
71 | ide-pci-generic-y += generic.o | ||
27 | 72 | ||
28 | obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o | 73 | obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o |
29 | 74 | ||
30 | ifeq ($(CONFIG_BLK_DEV_CMD640), y) | 75 | obj-$(CONFIG_BLK_DEV_CMD640) += cmd640.o |
31 | cmd640-core-y += pci/cmd640.o | 76 | |
32 | obj-y += cmd640-core.o | 77 | obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o |
33 | endif | 78 | |
79 | obj-$(CONFIG_IDE_H8300) += ide-h8300.o | ||
34 | 80 | ||
35 | obj-$(CONFIG_IDE) += ppc/ | ||
36 | obj-$(CONFIG_IDE_H8300) += h8300/ | ||
37 | obj-$(CONFIG_IDE_GENERIC) += ide-generic.o | 81 | obj-$(CONFIG_IDE_GENERIC) += ide-generic.o |
38 | obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o | 82 | obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o |
39 | 83 | ||
@@ -58,14 +102,12 @@ obj-$(CONFIG_IDE_GD) += ide-gd_mod.o | |||
58 | obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o | 102 | obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o |
59 | obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o | 103 | obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o |
60 | 104 | ||
61 | ifeq ($(CONFIG_BLK_DEV_IDECS), y) | 105 | obj-$(CONFIG_BLK_DEV_IDECS) += ide-cs.o |
62 | ide-cs-core-y += legacy/ide-cs.o | ||
63 | obj-y += ide-cs-core.o | ||
64 | endif | ||
65 | 106 | ||
66 | ifeq ($(CONFIG_BLK_DEV_PLATFORM), y) | 107 | obj-$(CONFIG_BLK_DEV_PLATFORM) += ide_platform.o |
67 | ide-platform-core-y += legacy/ide_platform.o | 108 | |
68 | obj-y += ide-platform-core.o | 109 | obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o |
69 | endif | 110 | obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o |
111 | obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o | ||
70 | 112 | ||
71 | obj-$(CONFIG_IDE) += arm/ mips/ | 113 | obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o |
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/aec62xx.c index 4142c698e0d3..4142c698e0d3 100644 --- a/drivers/ide/pci/aec62xx.c +++ b/drivers/ide/aec62xx.c | |||
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/ali14xx.c index 90da1f953ed0..90da1f953ed0 100644 --- a/drivers/ide/legacy/ali14xx.c +++ b/drivers/ide/ali14xx.c | |||
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/alim15x3.c index daf9dce39e52..daf9dce39e52 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/alim15x3.c | |||
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/amd74xx.c index 81ec73134eda..81ec73134eda 100644 --- a/drivers/ide/pci/amd74xx.c +++ b/drivers/ide/amd74xx.c | |||
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile deleted file mode 100644 index 5bc26053afa6..000000000000 --- a/drivers/ide/arm/Makefile +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | |||
2 | obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o | ||
3 | obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o | ||
4 | obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o | ||
5 | |||
6 | ifeq ($(CONFIG_IDE_ARM), m) | ||
7 | obj-m += ide_arm.o | ||
8 | endif | ||
9 | |||
10 | EXTRA_CFLAGS := -Idrivers/ide | ||
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/atiixp.c index b2735d28f5cc..b2735d28f5cc 100644 --- a/drivers/ide/pci/atiixp.c +++ b/drivers/ide/atiixp.c | |||
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c index 0ec8fd1e4dcb..0ec8fd1e4dcb 100644 --- a/drivers/ide/mips/au1xxx-ide.c +++ b/drivers/ide/au1xxx-ide.c | |||
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/buddha.c index c5a3c9ef6a5d..c5a3c9ef6a5d 100644 --- a/drivers/ide/legacy/buddha.c +++ b/drivers/ide/buddha.c | |||
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/cmd640.c index e4306647d00d..e4306647d00d 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/cmd640.c | |||
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/cmd64x.c index 935385c77e06..935385c77e06 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/cmd64x.c | |||
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/cs5520.c index 5efb467f8fa0..5efb467f8fa0 100644 --- a/drivers/ide/pci/cs5520.c +++ b/drivers/ide/cs5520.c | |||
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/cs5530.c index 53f079cc00af..53f079cc00af 100644 --- a/drivers/ide/pci/cs5530.c +++ b/drivers/ide/cs5530.c | |||
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/cs5535.c index 983d957a0189..983d957a0189 100644 --- a/drivers/ide/pci/cs5535.c +++ b/drivers/ide/cs5535.c | |||
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/cy82c693.c index 5297f07d2933..5297f07d2933 100644 --- a/drivers/ide/pci/cy82c693.c +++ b/drivers/ide/cy82c693.c | |||
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/delkin_cb.c index 8f1b2d9f0513..8f1b2d9f0513 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/delkin_cb.c | |||
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/dtc2278.c index 689b2e493413..689b2e493413 100644 --- a/drivers/ide/legacy/dtc2278.c +++ b/drivers/ide/dtc2278.c | |||
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/falconide.c index 39d500d84b07..39d500d84b07 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/falconide.c | |||
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/gayle.c index 691506886561..691506886561 100644 --- a/drivers/ide/legacy/gayle.c +++ b/drivers/ide/gayle.c | |||
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/generic.c index 474f96a7c076..474f96a7c076 100644 --- a/drivers/ide/pci/generic.c +++ b/drivers/ide/generic.c | |||
diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile deleted file mode 100644 index 5eba16f423f4..000000000000 --- a/drivers/ide/h8300/Makefile +++ /dev/null | |||
@@ -1,2 +0,0 @@ | |||
1 | |||
2 | obj-$(CONFIG_IDE_H8300) += ide-h8300.o | ||
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/hpt366.c index a7909e9c720e..a7909e9c720e 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/hpt366.c | |||
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/ht6560b.c index c7e5c2246b79..c7e5c2246b79 100644 --- a/drivers/ide/legacy/ht6560b.c +++ b/drivers/ide/ht6560b.c | |||
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/icside.c index 76bdc9a27f6f..76bdc9a27f6f 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/icside.c | |||
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/ide-4drives.c index 9e85b1ec9607..9e85b1ec9607 100644 --- a/drivers/ide/legacy/ide-4drives.c +++ b/drivers/ide/ide-4drives.c | |||
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/ide-cs.c index cb199c815b53..cb199c815b53 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/ide-h8300.c index e2cdd2e9cdec..e2cdd2e9cdec 100644 --- a/drivers/ide/h8300/ide-h8300.c +++ b/drivers/ide/ide-h8300.c | |||
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/ide_arm.c index f728f2927b5a..f728f2927b5a 100644 --- a/drivers/ide/arm/ide_arm.c +++ b/drivers/ide/ide_arm.c | |||
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/ide_platform.c index 051b4ab0f359..051b4ab0f359 100644 --- a/drivers/ide/legacy/ide_platform.c +++ b/drivers/ide/ide_platform.c | |||
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/it8213.c index 7c2feeb3c5ec..7c2feeb3c5ec 100644 --- a/drivers/ide/pci/it8213.c +++ b/drivers/ide/it8213.c | |||
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/it821x.c index 995e18bb3139..995e18bb3139 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/it821x.c | |||
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/jmicron.c index 9a68433cf46d..9a68433cf46d 100644 --- a/drivers/ide/pci/jmicron.c +++ b/drivers/ide/jmicron.c | |||
diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile deleted file mode 100644 index 6939329f89e8..000000000000 --- a/drivers/ide/legacy/Makefile +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | |||
2 | # link order is important here | ||
3 | |||
4 | obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o | ||
5 | obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o | ||
6 | obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o | ||
7 | obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o | ||
8 | obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o | ||
9 | obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o | ||
10 | |||
11 | obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o | ||
12 | obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o | ||
13 | obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o | ||
14 | obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o | ||
15 | obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o | ||
16 | |||
17 | ifeq ($(CONFIG_BLK_DEV_IDECS), m) | ||
18 | obj-m += ide-cs.o | ||
19 | endif | ||
20 | |||
21 | ifeq ($(CONFIG_BLK_DEV_PLATFORM), m) | ||
22 | obj-m += ide_platform.o | ||
23 | endif | ||
24 | |||
25 | EXTRA_CFLAGS := -Idrivers/ide | ||
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/macide.c index 43f97cc1d30e..43f97cc1d30e 100644 --- a/drivers/ide/legacy/macide.c +++ b/drivers/ide/macide.c | |||
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile deleted file mode 100644 index 5873fa0b8769..000000000000 --- a/drivers/ide/mips/Makefile +++ /dev/null | |||
@@ -1,3 +0,0 @@ | |||
1 | obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o | ||
2 | |||
3 | EXTRA_CFLAGS := -Idrivers/ide | ||
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/ns87415.c index 13789060f407..13789060f407 100644 --- a/drivers/ide/pci/ns87415.c +++ b/drivers/ide/ns87415.c | |||
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/opti621.c index 6048eda3cd61..6048eda3cd61 100644 --- a/drivers/ide/pci/opti621.c +++ b/drivers/ide/opti621.c | |||
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/palm_bk3710.c index 122ed3c072fd..122ed3c072fd 100644 --- a/drivers/ide/arm/palm_bk3710.c +++ b/drivers/ide/palm_bk3710.c | |||
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile deleted file mode 100644 index ab44a1f5f5a9..000000000000 --- a/drivers/ide/pci/Makefile +++ /dev/null | |||
@@ -1,43 +0,0 @@ | |||
1 | |||
2 | obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o | ||
3 | obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o | ||
4 | obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o | ||
5 | obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o | ||
6 | obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o | ||
7 | obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o | ||
8 | obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o | ||
9 | obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o | ||
10 | obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o | ||
11 | obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o | ||
12 | obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o | ||
13 | obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o | ||
14 | obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o | ||
15 | obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o | ||
16 | obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o | ||
17 | obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o | ||
18 | obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o | ||
19 | obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o | ||
20 | obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o | ||
21 | obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o | ||
22 | obj-$(CONFIG_BLK_DEV_PIIX) += piix.o | ||
23 | obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o | ||
24 | obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o | ||
25 | obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o | ||
26 | obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o | ||
27 | obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o | ||
28 | obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o | ||
29 | obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o | ||
30 | obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o | ||
31 | obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o | ||
32 | obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o | ||
33 | obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o | ||
34 | |||
35 | # Must appear at the end of the block | ||
36 | obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o | ||
37 | ide-pci-generic-y += generic.o | ||
38 | |||
39 | ifeq ($(CONFIG_BLK_DEV_CMD640), m) | ||
40 | obj-m += cmd640.o | ||
41 | endif | ||
42 | |||
43 | EXTRA_CFLAGS := -Idrivers/ide | ||
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c index 211ae46e3e0c..211ae46e3e0c 100644 --- a/drivers/ide/pci/pdc202xx_new.c +++ b/drivers/ide/pdc202xx_new.c | |||
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c index 799557c25eef..799557c25eef 100644 --- a/drivers/ide/pci/pdc202xx_old.c +++ b/drivers/ide/pdc202xx_old.c | |||
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/piix.c index d63f9fdca76b..d63f9fdca76b 100644 --- a/drivers/ide/pci/piix.c +++ b/drivers/ide/piix.c | |||
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/pmac.c index 2e19d6298536..2e19d6298536 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/pmac.c | |||
diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile deleted file mode 100644 index 74e52adcdf4b..000000000000 --- a/drivers/ide/ppc/Makefile +++ /dev/null | |||
@@ -1,2 +0,0 @@ | |||
1 | |||
2 | obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o | ||
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/q40ide.c index 4af4a8ce4cdf..4af4a8ce4cdf 100644 --- a/drivers/ide/legacy/q40ide.c +++ b/drivers/ide/q40ide.c | |||
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/qd65xx.c index bc27c7aba936..bc27c7aba936 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/qd65xx.c | |||
diff --git a/drivers/ide/legacy/qd65xx.h b/drivers/ide/qd65xx.h index c83dea85e621..c83dea85e621 100644 --- a/drivers/ide/legacy/qd65xx.h +++ b/drivers/ide/qd65xx.h | |||
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/rapide.c index 78d27d9ae430..78d27d9ae430 100644 --- a/drivers/ide/arm/rapide.c +++ b/drivers/ide/rapide.c | |||
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/rz1000.c index 7daf0135cbac..7daf0135cbac 100644 --- a/drivers/ide/pci/rz1000.c +++ b/drivers/ide/rz1000.c | |||
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/sc1200.c index f1a8758e3a99..f1a8758e3a99 100644 --- a/drivers/ide/pci/sc1200.c +++ b/drivers/ide/sc1200.c | |||
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/scc_pata.c index 49f163aa51e3..49f163aa51e3 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/scc_pata.c | |||
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/serverworks.c index 437bc919dafd..437bc919dafd 100644 --- a/drivers/ide/pci/serverworks.c +++ b/drivers/ide/serverworks.c | |||
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/sgiioc4.c index 8af9b23499fd..8af9b23499fd 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/sgiioc4.c | |||
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/siimage.c index eb4faf92c571..eb4faf92c571 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/siimage.c | |||
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/sis5513.c index ad32e18c5ba3..ad32e18c5ba3 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/sis5513.c | |||
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/sl82c105.c index 84dc33602ff8..84dc33602ff8 100644 --- a/drivers/ide/pci/sl82c105.c +++ b/drivers/ide/sl82c105.c | |||
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/slc90e66.c index 0f759e4ed779..0f759e4ed779 100644 --- a/drivers/ide/pci/slc90e66.c +++ b/drivers/ide/slc90e66.c | |||
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/tc86c001.c index 93e2cce4b296..93e2cce4b296 100644 --- a/drivers/ide/pci/tc86c001.c +++ b/drivers/ide/tc86c001.c | |||
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/triflex.c index b6ff40336aa9..b6ff40336aa9 100644 --- a/drivers/ide/pci/triflex.c +++ b/drivers/ide/triflex.c | |||
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/trm290.c index 75ea61526566..75ea61526566 100644 --- a/drivers/ide/pci/trm290.c +++ b/drivers/ide/trm290.c | |||
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/umc8672.c index 1da076e0c917..1da076e0c917 100644 --- a/drivers/ide/legacy/umc8672.c +++ b/drivers/ide/umc8672.c | |||
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/via82cxxx.c index 2a812d3207e9..2a812d3207e9 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/via82cxxx.c | |||
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 49c45feccd5b..5c54fc2350be 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -406,19 +406,15 @@ static int register_snoop_agent(struct ib_mad_qp_info *qp_info, | |||
406 | 406 | ||
407 | if (i == qp_info->snoop_table_size) { | 407 | if (i == qp_info->snoop_table_size) { |
408 | /* Grow table. */ | 408 | /* Grow table. */ |
409 | new_snoop_table = kmalloc(sizeof mad_snoop_priv * | 409 | new_snoop_table = krealloc(qp_info->snoop_table, |
410 | qp_info->snoop_table_size + 1, | 410 | sizeof mad_snoop_priv * |
411 | GFP_ATOMIC); | 411 | (qp_info->snoop_table_size + 1), |
412 | GFP_ATOMIC); | ||
412 | if (!new_snoop_table) { | 413 | if (!new_snoop_table) { |
413 | i = -ENOMEM; | 414 | i = -ENOMEM; |
414 | goto out; | 415 | goto out; |
415 | } | 416 | } |
416 | if (qp_info->snoop_table) { | 417 | |
417 | memcpy(new_snoop_table, qp_info->snoop_table, | ||
418 | sizeof mad_snoop_priv * | ||
419 | qp_info->snoop_table_size); | ||
420 | kfree(qp_info->snoop_table); | ||
421 | } | ||
422 | qp_info->snoop_table = new_snoop_table; | 418 | qp_info->snoop_table = new_snoop_table; |
423 | qp_info->snoop_table_size++; | 419 | qp_info->snoop_table_size++; |
424 | } | 420 | } |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 3ddacf39b7ba..4346a24568fb 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -904,8 +904,8 @@ static ssize_t ucma_join_multicast(struct ucma_file *file, | |||
904 | 904 | ||
905 | mutex_lock(&file->mut); | 905 | mutex_lock(&file->mut); |
906 | mc = ucma_alloc_multicast(ctx); | 906 | mc = ucma_alloc_multicast(ctx); |
907 | if (IS_ERR(mc)) { | 907 | if (!mc) { |
908 | ret = PTR_ERR(mc); | 908 | ret = -ENOMEM; |
909 | goto err1; | 909 | goto err1; |
910 | } | 910 | } |
911 | 911 | ||
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index c325c44807e8..44e936e48a31 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -1942,6 +1942,7 @@ fail4: | |||
1942 | fail3: | 1942 | fail3: |
1943 | cxgb3_free_atid(ep->com.tdev, ep->atid); | 1943 | cxgb3_free_atid(ep->com.tdev, ep->atid); |
1944 | fail2: | 1944 | fail2: |
1945 | cm_id->rem_ref(cm_id); | ||
1945 | put_ep(&ep->com); | 1946 | put_ep(&ep->com); |
1946 | out: | 1947 | out: |
1947 | return err; | 1948 | return err; |
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index 5d7b7855afb9..4df887af66a5 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
@@ -128,6 +128,8 @@ struct ehca_shca { | |||
128 | /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */ | 128 | /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */ |
129 | u32 hca_cap_mr_pgsize; | 129 | u32 hca_cap_mr_pgsize; |
130 | int max_mtu; | 130 | int max_mtu; |
131 | int max_num_qps; | ||
132 | int max_num_cqs; | ||
131 | atomic_t num_cqs; | 133 | atomic_t num_cqs; |
132 | atomic_t num_qps; | 134 | atomic_t num_qps; |
133 | }; | 135 | }; |
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c index 33647a95eb9a..2f4c28a30271 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/infiniband/hw/ehca/ehca_cq.c | |||
@@ -132,9 +132,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, | |||
132 | if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) | 132 | if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) |
133 | return ERR_PTR(-EINVAL); | 133 | return ERR_PTR(-EINVAL); |
134 | 134 | ||
135 | if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) { | 135 | if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) { |
136 | ehca_err(device, "Unable to create CQ, max number of %i " | 136 | ehca_err(device, "Unable to create CQ, max number of %i " |
137 | "CQs reached.", ehca_max_cq); | 137 | "CQs reached.", shca->max_num_cqs); |
138 | ehca_err(device, "To increase the maximum number of CQs " | 138 | ehca_err(device, "To increase the maximum number of CQs " |
139 | "use the number_of_cqs module parameter.\n"); | 139 | "use the number_of_cqs module parameter.\n"); |
140 | return ERR_PTR(-ENOSPC); | 140 | return ERR_PTR(-ENOSPC); |
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 598844d2edc9..bb02a86aa526 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -44,6 +44,8 @@ | |||
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #include <linux/notifier.h> | ||
48 | #include <linux/memory.h> | ||
47 | #include "ehca_classes.h" | 49 | #include "ehca_classes.h" |
48 | #include "ehca_iverbs.h" | 50 | #include "ehca_iverbs.h" |
49 | #include "ehca_mrmw.h" | 51 | #include "ehca_mrmw.h" |
@@ -366,22 +368,23 @@ static int ehca_sense_attributes(struct ehca_shca *shca) | |||
366 | shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; | 368 | shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; |
367 | 369 | ||
368 | /* Set maximum number of CQs and QPs to calculate EQ size */ | 370 | /* Set maximum number of CQs and QPs to calculate EQ size */ |
369 | if (ehca_max_qp == -1) | 371 | if (shca->max_num_qps == -1) |
370 | ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES); | 372 | shca->max_num_qps = min_t(int, rblock->max_qp, |
371 | else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) { | 373 | EHCA_MAX_NUM_QUEUES); |
372 | ehca_gen_err("Requested number of QPs is out of range (1 - %i) " | 374 | else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) { |
373 | "specified by HW", rblock->max_qp); | 375 | ehca_gen_warn("The requested number of QPs is out of range " |
374 | ret = -EINVAL; | 376 | "(1 - %i) specified by HW. Value is set to %i", |
375 | goto sense_attributes1; | 377 | rblock->max_qp, rblock->max_qp); |
378 | shca->max_num_qps = rblock->max_qp; | ||
376 | } | 379 | } |
377 | 380 | ||
378 | if (ehca_max_cq == -1) | 381 | if (shca->max_num_cqs == -1) |
379 | ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES); | 382 | shca->max_num_cqs = min_t(int, rblock->max_cq, |
380 | else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) { | 383 | EHCA_MAX_NUM_QUEUES); |
381 | ehca_gen_err("Requested number of CQs is out of range (1 - %i) " | 384 | else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) { |
382 | "specified by HW", rblock->max_cq); | 385 | ehca_gen_warn("The requested number of CQs is out of range " |
383 | ret = -EINVAL; | 386 | "(1 - %i) specified by HW. Value is set to %i", |
384 | goto sense_attributes1; | 387 | rblock->max_cq, rblock->max_cq); |
385 | } | 388 | } |
386 | 389 | ||
387 | /* query max MTU from first port -- it's the same for all ports */ | 390 | /* query max MTU from first port -- it's the same for all ports */ |
@@ -733,9 +736,13 @@ static int __devinit ehca_probe(struct of_device *dev, | |||
733 | ehca_gen_err("Cannot allocate shca memory."); | 736 | ehca_gen_err("Cannot allocate shca memory."); |
734 | return -ENOMEM; | 737 | return -ENOMEM; |
735 | } | 738 | } |
739 | |||
736 | mutex_init(&shca->modify_mutex); | 740 | mutex_init(&shca->modify_mutex); |
737 | atomic_set(&shca->num_cqs, 0); | 741 | atomic_set(&shca->num_cqs, 0); |
738 | atomic_set(&shca->num_qps, 0); | 742 | atomic_set(&shca->num_qps, 0); |
743 | shca->max_num_qps = ehca_max_qp; | ||
744 | shca->max_num_cqs = ehca_max_cq; | ||
745 | |||
739 | for (i = 0; i < ARRAY_SIZE(shca->sport); i++) | 746 | for (i = 0; i < ARRAY_SIZE(shca->sport); i++) |
740 | spin_lock_init(&shca->sport[i].mod_sqp_lock); | 747 | spin_lock_init(&shca->sport[i].mod_sqp_lock); |
741 | 748 | ||
@@ -755,7 +762,7 @@ static int __devinit ehca_probe(struct of_device *dev, | |||
755 | goto probe1; | 762 | goto probe1; |
756 | } | 763 | } |
757 | 764 | ||
758 | eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp; | 765 | eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps; |
759 | /* create event queues */ | 766 | /* create event queues */ |
760 | ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size); | 767 | ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size); |
761 | if (ret) { | 768 | if (ret) { |
@@ -964,6 +971,41 @@ void ehca_poll_eqs(unsigned long data) | |||
964 | spin_unlock(&shca_list_lock); | 971 | spin_unlock(&shca_list_lock); |
965 | } | 972 | } |
966 | 973 | ||
974 | static int ehca_mem_notifier(struct notifier_block *nb, | ||
975 | unsigned long action, void *data) | ||
976 | { | ||
977 | static unsigned long ehca_dmem_warn_time; | ||
978 | |||
979 | switch (action) { | ||
980 | case MEM_CANCEL_OFFLINE: | ||
981 | case MEM_CANCEL_ONLINE: | ||
982 | case MEM_ONLINE: | ||
983 | case MEM_OFFLINE: | ||
984 | return NOTIFY_OK; | ||
985 | case MEM_GOING_ONLINE: | ||
986 | case MEM_GOING_OFFLINE: | ||
987 | /* only ok if no hca is attached to the lpar */ | ||
988 | spin_lock(&shca_list_lock); | ||
989 | if (list_empty(&shca_list)) { | ||
990 | spin_unlock(&shca_list_lock); | ||
991 | return NOTIFY_OK; | ||
992 | } else { | ||
993 | spin_unlock(&shca_list_lock); | ||
994 | if (printk_timed_ratelimit(&ehca_dmem_warn_time, | ||
995 | 30 * 1000)) | ||
996 | ehca_gen_err("DMEM operations are not allowed" | ||
997 | "as long as an ehca adapter is" | ||
998 | "attached to the LPAR"); | ||
999 | return NOTIFY_BAD; | ||
1000 | } | ||
1001 | } | ||
1002 | return NOTIFY_OK; | ||
1003 | } | ||
1004 | |||
1005 | static struct notifier_block ehca_mem_nb = { | ||
1006 | .notifier_call = ehca_mem_notifier, | ||
1007 | }; | ||
1008 | |||
967 | static int __init ehca_module_init(void) | 1009 | static int __init ehca_module_init(void) |
968 | { | 1010 | { |
969 | int ret; | 1011 | int ret; |
@@ -991,6 +1033,12 @@ static int __init ehca_module_init(void) | |||
991 | goto module_init2; | 1033 | goto module_init2; |
992 | } | 1034 | } |
993 | 1035 | ||
1036 | ret = register_memory_notifier(&ehca_mem_nb); | ||
1037 | if (ret) { | ||
1038 | ehca_gen_err("Failed registering memory add/remove notifier"); | ||
1039 | goto module_init3; | ||
1040 | } | ||
1041 | |||
994 | if (ehca_poll_all_eqs != 1) { | 1042 | if (ehca_poll_all_eqs != 1) { |
995 | ehca_gen_err("WARNING!!!"); | 1043 | ehca_gen_err("WARNING!!!"); |
996 | ehca_gen_err("It is possible to lose interrupts."); | 1044 | ehca_gen_err("It is possible to lose interrupts."); |
@@ -1003,6 +1051,9 @@ static int __init ehca_module_init(void) | |||
1003 | 1051 | ||
1004 | return 0; | 1052 | return 0; |
1005 | 1053 | ||
1054 | module_init3: | ||
1055 | ibmebus_unregister_driver(&ehca_driver); | ||
1056 | |||
1006 | module_init2: | 1057 | module_init2: |
1007 | ehca_destroy_slab_caches(); | 1058 | ehca_destroy_slab_caches(); |
1008 | 1059 | ||
@@ -1018,6 +1069,8 @@ static void __exit ehca_module_exit(void) | |||
1018 | 1069 | ||
1019 | ibmebus_unregister_driver(&ehca_driver); | 1070 | ibmebus_unregister_driver(&ehca_driver); |
1020 | 1071 | ||
1072 | unregister_memory_notifier(&ehca_mem_nb); | ||
1073 | |||
1021 | ehca_destroy_slab_caches(); | 1074 | ehca_destroy_slab_caches(); |
1022 | 1075 | ||
1023 | ehca_destroy_comp_pool(); | 1076 | ehca_destroy_comp_pool(); |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 4dbe2870e014..4d54b9f64567 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -465,9 +465,9 @@ static struct ehca_qp *internal_create_qp( | |||
465 | u32 swqe_size = 0, rwqe_size = 0, ib_qp_num; | 465 | u32 swqe_size = 0, rwqe_size = 0, ib_qp_num; |
466 | unsigned long flags; | 466 | unsigned long flags; |
467 | 467 | ||
468 | if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) { | 468 | if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) { |
469 | ehca_err(pd->device, "Unable to create QP, max number of %i " | 469 | ehca_err(pd->device, "Unable to create QP, max number of %i " |
470 | "QPs reached.", ehca_max_qp); | 470 | "QPs reached.", shca->max_num_qps); |
471 | ehca_err(pd->device, "To increase the maximum number of QPs " | 471 | ehca_err(pd->device, "To increase the maximum number of QPs " |
472 | "use the number_of_qps module parameter.\n"); | 472 | "use the number_of_qps module parameter.\n"); |
473 | return ERR_PTR(-ENOSPC); | 473 | return ERR_PTR(-ENOSPC); |
@@ -502,6 +502,12 @@ static struct ehca_qp *internal_create_qp( | |||
502 | if (init_attr->srq) { | 502 | if (init_attr->srq) { |
503 | my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq); | 503 | my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq); |
504 | 504 | ||
505 | if (qp_type == IB_QPT_UC) { | ||
506 | ehca_err(pd->device, "UC with SRQ not supported"); | ||
507 | atomic_dec(&shca->num_qps); | ||
508 | return ERR_PTR(-EINVAL); | ||
509 | } | ||
510 | |||
505 | has_srq = 1; | 511 | has_srq = 1; |
506 | parms.ext_type = EQPT_SRQBASE; | 512 | parms.ext_type = EQPT_SRQBASE; |
507 | parms.srq_qpn = my_srq->real_qp_num; | 513 | parms.srq_qpn = my_srq->real_qp_num; |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index cdca3a511e1c..606f1e2ef284 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -298,7 +298,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) | |||
298 | int p, q; | 298 | int p, q; |
299 | int ret; | 299 | int ret; |
300 | 300 | ||
301 | for (p = 0; p < dev->dev->caps.num_ports; ++p) | 301 | for (p = 0; p < dev->num_ports; ++p) |
302 | for (q = 0; q <= 1; ++q) { | 302 | for (q = 0; q <= 1; ++q) { |
303 | agent = ib_register_mad_agent(&dev->ib_dev, p + 1, | 303 | agent = ib_register_mad_agent(&dev->ib_dev, p + 1, |
304 | q ? IB_QPT_GSI : IB_QPT_SMI, | 304 | q ? IB_QPT_GSI : IB_QPT_SMI, |
@@ -314,7 +314,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) | |||
314 | return 0; | 314 | return 0; |
315 | 315 | ||
316 | err: | 316 | err: |
317 | for (p = 0; p < dev->dev->caps.num_ports; ++p) | 317 | for (p = 0; p < dev->num_ports; ++p) |
318 | for (q = 0; q <= 1; ++q) | 318 | for (q = 0; q <= 1; ++q) |
319 | if (dev->send_agent[p][q]) | 319 | if (dev->send_agent[p][q]) |
320 | ib_unregister_mad_agent(dev->send_agent[p][q]); | 320 | ib_unregister_mad_agent(dev->send_agent[p][q]); |
@@ -327,7 +327,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) | |||
327 | struct ib_mad_agent *agent; | 327 | struct ib_mad_agent *agent; |
328 | int p, q; | 328 | int p, q; |
329 | 329 | ||
330 | for (p = 0; p < dev->dev->caps.num_ports; ++p) { | 330 | for (p = 0; p < dev->num_ports; ++p) { |
331 | for (q = 0; q <= 1; ++q) { | 331 | for (q = 0; q <= 1; ++q) { |
332 | agent = dev->send_agent[p][q]; | 332 | agent = dev->send_agent[p][q]; |
333 | dev->send_agent[p][q] = NULL; | 333 | dev->send_agent[p][q] = NULL; |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index a3c2851c0545..2e80f8f47b02 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -574,7 +574,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
574 | ibdev->ib_dev.owner = THIS_MODULE; | 574 | ibdev->ib_dev.owner = THIS_MODULE; |
575 | ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; | 575 | ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; |
576 | ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; | 576 | ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; |
577 | ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports; | 577 | ibdev->num_ports = 0; |
578 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | ||
579 | ibdev->num_ports++; | ||
580 | ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; | ||
578 | ibdev->ib_dev.num_comp_vectors = 1; | 581 | ibdev->ib_dev.num_comp_vectors = 1; |
579 | ibdev->ib_dev.dma_device = &dev->pdev->dev; | 582 | ibdev->ib_dev.dma_device = &dev->pdev->dev; |
580 | 583 | ||
@@ -691,7 +694,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) | |||
691 | struct mlx4_ib_dev *ibdev = ibdev_ptr; | 694 | struct mlx4_ib_dev *ibdev = ibdev_ptr; |
692 | int p; | 695 | int p; |
693 | 696 | ||
694 | for (p = 1; p <= dev->caps.num_ports; ++p) | 697 | for (p = 1; p <= ibdev->num_ports; ++p) |
695 | mlx4_CLOSE_PORT(dev, p); | 698 | mlx4_CLOSE_PORT(dev, p); |
696 | 699 | ||
697 | mlx4_ib_mad_cleanup(ibdev); | 700 | mlx4_ib_mad_cleanup(ibdev); |
@@ -706,6 +709,10 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, | |||
706 | enum mlx4_dev_event event, int port) | 709 | enum mlx4_dev_event event, int port) |
707 | { | 710 | { |
708 | struct ib_event ibev; | 711 | struct ib_event ibev; |
712 | struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); | ||
713 | |||
714 | if (port > ibdev->num_ports) | ||
715 | return; | ||
709 | 716 | ||
710 | switch (event) { | 717 | switch (event) { |
711 | case MLX4_DEV_EVENT_PORT_UP: | 718 | case MLX4_DEV_EVENT_PORT_UP: |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 6e2b0dc21b61..9974e886b8de 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -162,6 +162,7 @@ struct mlx4_ib_ah { | |||
162 | struct mlx4_ib_dev { | 162 | struct mlx4_ib_dev { |
163 | struct ib_device ib_dev; | 163 | struct ib_device ib_dev; |
164 | struct mlx4_dev *dev; | 164 | struct mlx4_dev *dev; |
165 | int num_ports; | ||
165 | void __iomem *uar_map; | 166 | void __iomem *uar_map; |
166 | 167 | ||
167 | struct mlx4_uar priv_uar; | 168 | struct mlx4_uar priv_uar; |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index baa01deb2436..39167a797f99 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -451,6 +451,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
451 | struct ib_qp_init_attr *init_attr, | 451 | struct ib_qp_init_attr *init_attr, |
452 | struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) | 452 | struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) |
453 | { | 453 | { |
454 | int qpn; | ||
454 | int err; | 455 | int err; |
455 | 456 | ||
456 | mutex_init(&qp->mutex); | 457 | mutex_init(&qp->mutex); |
@@ -545,9 +546,17 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
545 | } | 546 | } |
546 | } | 547 | } |
547 | 548 | ||
548 | err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp); | 549 | if (sqpn) { |
550 | qpn = sqpn; | ||
551 | } else { | ||
552 | err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn); | ||
553 | if (err) | ||
554 | goto err_wrid; | ||
555 | } | ||
556 | |||
557 | err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); | ||
549 | if (err) | 558 | if (err) |
550 | goto err_wrid; | 559 | goto err_qpn; |
551 | 560 | ||
552 | /* | 561 | /* |
553 | * Hardware wants QPN written in big-endian order (after | 562 | * Hardware wants QPN written in big-endian order (after |
@@ -560,6 +569,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
560 | 569 | ||
561 | return 0; | 570 | return 0; |
562 | 571 | ||
572 | err_qpn: | ||
573 | if (!sqpn) | ||
574 | mlx4_qp_release_range(dev->dev, qpn, 1); | ||
575 | |||
563 | err_wrid: | 576 | err_wrid: |
564 | if (pd->uobject) { | 577 | if (pd->uobject) { |
565 | if (!init_attr->srq) | 578 | if (!init_attr->srq) |
@@ -655,6 +668,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
655 | mlx4_ib_unlock_cqs(send_cq, recv_cq); | 668 | mlx4_ib_unlock_cqs(send_cq, recv_cq); |
656 | 669 | ||
657 | mlx4_qp_free(dev->dev, &qp->mqp); | 670 | mlx4_qp_free(dev->dev, &qp->mqp); |
671 | |||
672 | if (!is_sqp(dev, qp)) | ||
673 | mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); | ||
674 | |||
658 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); | 675 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); |
659 | 676 | ||
660 | if (is_user) { | 677 | if (is_user) { |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 68ba5c3482e4..e0c7dfabf2b4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -507,6 +507,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev); | |||
507 | void ipoib_drain_cq(struct net_device *dev); | 507 | void ipoib_drain_cq(struct net_device *dev); |
508 | 508 | ||
509 | void ipoib_set_ethtool_ops(struct net_device *dev); | 509 | void ipoib_set_ethtool_ops(struct net_device *dev); |
510 | int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca); | ||
510 | 511 | ||
511 | #ifdef CONFIG_INFINIBAND_IPOIB_CM | 512 | #ifdef CONFIG_INFINIBAND_IPOIB_CM |
512 | 513 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 66af5c1a76e5..e9795f60e5d6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | |||
@@ -42,6 +42,13 @@ static void ipoib_get_drvinfo(struct net_device *netdev, | |||
42 | strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1); | 42 | strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1); |
43 | } | 43 | } |
44 | 44 | ||
45 | static u32 ipoib_get_rx_csum(struct net_device *dev) | ||
46 | { | ||
47 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
48 | return test_bit(IPOIB_FLAG_CSUM, &priv->flags) && | ||
49 | !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); | ||
50 | } | ||
51 | |||
45 | static int ipoib_get_coalesce(struct net_device *dev, | 52 | static int ipoib_get_coalesce(struct net_device *dev, |
46 | struct ethtool_coalesce *coal) | 53 | struct ethtool_coalesce *coal) |
47 | { | 54 | { |
@@ -129,7 +136,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev, | |||
129 | 136 | ||
130 | static const struct ethtool_ops ipoib_ethtool_ops = { | 137 | static const struct ethtool_ops ipoib_ethtool_ops = { |
131 | .get_drvinfo = ipoib_get_drvinfo, | 138 | .get_drvinfo = ipoib_get_drvinfo, |
132 | .get_tso = ethtool_op_get_tso, | 139 | .get_rx_csum = ipoib_get_rx_csum, |
133 | .get_coalesce = ipoib_get_coalesce, | 140 | .get_coalesce = ipoib_get_coalesce, |
134 | .set_coalesce = ipoib_set_coalesce, | 141 | .set_coalesce = ipoib_set_coalesce, |
135 | .get_flags = ethtool_op_get_flags, | 142 | .get_flags = ethtool_op_get_flags, |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0e748aeeae99..28eb6f03c588 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -685,10 +685,6 @@ int ipoib_ib_dev_open(struct net_device *dev) | |||
685 | queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, | 685 | queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, |
686 | round_jiffies_relative(HZ)); | 686 | round_jiffies_relative(HZ)); |
687 | 687 | ||
688 | init_timer(&priv->poll_timer); | ||
689 | priv->poll_timer.function = ipoib_ib_tx_timer_func; | ||
690 | priv->poll_timer.data = (unsigned long)dev; | ||
691 | |||
692 | set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); | 688 | set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); |
693 | 689 | ||
694 | return 0; | 690 | return 0; |
@@ -906,6 +902,9 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
906 | return -ENODEV; | 902 | return -ENODEV; |
907 | } | 903 | } |
908 | 904 | ||
905 | setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func, | ||
906 | (unsigned long) dev); | ||
907 | |||
909 | if (dev->flags & IFF_UP) { | 908 | if (dev->flags & IFF_UP) { |
910 | if (ipoib_ib_dev_open(dev)) { | 909 | if (ipoib_ib_dev_open(dev)) { |
911 | ipoib_transport_dev_cleanup(dev); | 910 | ipoib_transport_dev_cleanup(dev); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index c0ee514396df..fddded7900d1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -1173,11 +1173,48 @@ int ipoib_add_pkey_attr(struct net_device *dev) | |||
1173 | return device_create_file(&dev->dev, &dev_attr_pkey); | 1173 | return device_create_file(&dev->dev, &dev_attr_pkey); |
1174 | } | 1174 | } |
1175 | 1175 | ||
1176 | int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca) | ||
1177 | { | ||
1178 | struct ib_device_attr *device_attr; | ||
1179 | int result = -ENOMEM; | ||
1180 | |||
1181 | device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL); | ||
1182 | if (!device_attr) { | ||
1183 | printk(KERN_WARNING "%s: allocation of %zu bytes failed\n", | ||
1184 | hca->name, sizeof *device_attr); | ||
1185 | return result; | ||
1186 | } | ||
1187 | |||
1188 | result = ib_query_device(hca, device_attr); | ||
1189 | if (result) { | ||
1190 | printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n", | ||
1191 | hca->name, result); | ||
1192 | kfree(device_attr); | ||
1193 | return result; | ||
1194 | } | ||
1195 | priv->hca_caps = device_attr->device_cap_flags; | ||
1196 | |||
1197 | kfree(device_attr); | ||
1198 | |||
1199 | if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { | ||
1200 | set_bit(IPOIB_FLAG_CSUM, &priv->flags); | ||
1201 | priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
1202 | } | ||
1203 | |||
1204 | if (lro) | ||
1205 | priv->dev->features |= NETIF_F_LRO; | ||
1206 | |||
1207 | if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) | ||
1208 | priv->dev->features |= NETIF_F_TSO; | ||
1209 | |||
1210 | return 0; | ||
1211 | } | ||
1212 | |||
1213 | |||
1176 | static struct net_device *ipoib_add_port(const char *format, | 1214 | static struct net_device *ipoib_add_port(const char *format, |
1177 | struct ib_device *hca, u8 port) | 1215 | struct ib_device *hca, u8 port) |
1178 | { | 1216 | { |
1179 | struct ipoib_dev_priv *priv; | 1217 | struct ipoib_dev_priv *priv; |
1180 | struct ib_device_attr *device_attr; | ||
1181 | struct ib_port_attr attr; | 1218 | struct ib_port_attr attr; |
1182 | int result = -ENOMEM; | 1219 | int result = -ENOMEM; |
1183 | 1220 | ||
@@ -1206,31 +1243,8 @@ static struct net_device *ipoib_add_port(const char *format, | |||
1206 | goto device_init_failed; | 1243 | goto device_init_failed; |
1207 | } | 1244 | } |
1208 | 1245 | ||
1209 | device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL); | 1246 | if (ipoib_set_dev_features(priv, hca)) |
1210 | if (!device_attr) { | ||
1211 | printk(KERN_WARNING "%s: allocation of %zu bytes failed\n", | ||
1212 | hca->name, sizeof *device_attr); | ||
1213 | goto device_init_failed; | 1247 | goto device_init_failed; |
1214 | } | ||
1215 | |||
1216 | result = ib_query_device(hca, device_attr); | ||
1217 | if (result) { | ||
1218 | printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n", | ||
1219 | hca->name, result); | ||
1220 | kfree(device_attr); | ||
1221 | goto device_init_failed; | ||
1222 | } | ||
1223 | priv->hca_caps = device_attr->device_cap_flags; | ||
1224 | |||
1225 | kfree(device_attr); | ||
1226 | |||
1227 | if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { | ||
1228 | set_bit(IPOIB_FLAG_CSUM, &priv->flags); | ||
1229 | priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
1230 | } | ||
1231 | |||
1232 | if (lro) | ||
1233 | priv->dev->features |= NETIF_F_LRO; | ||
1234 | 1248 | ||
1235 | /* | 1249 | /* |
1236 | * Set the full membership bit, so that we join the right | 1250 | * Set the full membership bit, so that we join the right |
@@ -1266,9 +1280,6 @@ static struct net_device *ipoib_add_port(const char *format, | |||
1266 | goto event_failed; | 1280 | goto event_failed; |
1267 | } | 1281 | } |
1268 | 1282 | ||
1269 | if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) | ||
1270 | priv->dev->features |= NETIF_F_TSO; | ||
1271 | |||
1272 | result = register_netdev(priv->dev); | 1283 | result = register_netdev(priv->dev); |
1273 | if (result) { | 1284 | if (result) { |
1274 | printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", | 1285 | printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index b08eb56196d3..2cf1a4088718 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
@@ -93,6 +93,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
93 | priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; | 93 | priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; |
94 | set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); | 94 | set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); |
95 | 95 | ||
96 | result = ipoib_set_dev_features(priv, ppriv->ca); | ||
97 | if (result) | ||
98 | goto device_init_failed; | ||
99 | |||
96 | priv->pkey = pkey; | 100 | priv->pkey = pkey; |
97 | 101 | ||
98 | memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); | 102 | memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); |
diff --git a/drivers/md/Makefile b/drivers/md/Makefile index f1ef33dfd8cf..1c615804ea76 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile | |||
@@ -34,7 +34,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o | |||
34 | obj-$(CONFIG_DM_DELAY) += dm-delay.o | 34 | obj-$(CONFIG_DM_DELAY) += dm-delay.o |
35 | obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o | 35 | obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o |
36 | obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o | 36 | obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o |
37 | obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o | 37 | obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o |
38 | obj-$(CONFIG_DM_ZERO) += dm-zero.o | 38 | obj-$(CONFIG_DM_ZERO) += dm-zero.o |
39 | 39 | ||
40 | quiet_cmd_unroll = UNROLL $@ | 40 | quiet_cmd_unroll = UNROLL $@ |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 682ef9e6acd3..ce26c84af064 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <asm/page.h> | 23 | #include <asm/page.h> |
24 | #include <asm/unaligned.h> | 24 | #include <asm/unaligned.h> |
25 | 25 | ||
26 | #include "dm.h" | 26 | #include <linux/device-mapper.h> |
27 | 27 | ||
28 | #define DM_MSG_PREFIX "crypt" | 28 | #define DM_MSG_PREFIX "crypt" |
29 | #define MESG_STR(x) x, sizeof(x) | 29 | #define MESG_STR(x) x, sizeof(x) |
@@ -56,6 +56,7 @@ struct dm_crypt_io { | |||
56 | atomic_t pending; | 56 | atomic_t pending; |
57 | int error; | 57 | int error; |
58 | sector_t sector; | 58 | sector_t sector; |
59 | struct dm_crypt_io *base_io; | ||
59 | }; | 60 | }; |
60 | 61 | ||
61 | struct dm_crypt_request { | 62 | struct dm_crypt_request { |
@@ -93,7 +94,6 @@ struct crypt_config { | |||
93 | 94 | ||
94 | struct workqueue_struct *io_queue; | 95 | struct workqueue_struct *io_queue; |
95 | struct workqueue_struct *crypt_queue; | 96 | struct workqueue_struct *crypt_queue; |
96 | wait_queue_head_t writeq; | ||
97 | 97 | ||
98 | /* | 98 | /* |
99 | * crypto related data | 99 | * crypto related data |
@@ -534,6 +534,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, | |||
534 | io->base_bio = bio; | 534 | io->base_bio = bio; |
535 | io->sector = sector; | 535 | io->sector = sector; |
536 | io->error = 0; | 536 | io->error = 0; |
537 | io->base_io = NULL; | ||
537 | atomic_set(&io->pending, 0); | 538 | atomic_set(&io->pending, 0); |
538 | 539 | ||
539 | return io; | 540 | return io; |
@@ -547,6 +548,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io) | |||
547 | /* | 548 | /* |
548 | * One of the bios was finished. Check for completion of | 549 | * One of the bios was finished. Check for completion of |
549 | * the whole request and correctly clean up the buffer. | 550 | * the whole request and correctly clean up the buffer. |
551 | * If base_io is set, wait for the last fragment to complete. | ||
550 | */ | 552 | */ |
551 | static void crypt_dec_pending(struct dm_crypt_io *io) | 553 | static void crypt_dec_pending(struct dm_crypt_io *io) |
552 | { | 554 | { |
@@ -555,7 +557,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io) | |||
555 | if (!atomic_dec_and_test(&io->pending)) | 557 | if (!atomic_dec_and_test(&io->pending)) |
556 | return; | 558 | return; |
557 | 559 | ||
558 | bio_endio(io->base_bio, io->error); | 560 | if (likely(!io->base_io)) |
561 | bio_endio(io->base_bio, io->error); | ||
562 | else { | ||
563 | if (io->error && !io->base_io->error) | ||
564 | io->base_io->error = io->error; | ||
565 | crypt_dec_pending(io->base_io); | ||
566 | } | ||
567 | |||
559 | mempool_free(io, cc->io_pool); | 568 | mempool_free(io, cc->io_pool); |
560 | } | 569 | } |
561 | 570 | ||
@@ -646,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io) | |||
646 | static void kcryptd_io_write(struct dm_crypt_io *io) | 655 | static void kcryptd_io_write(struct dm_crypt_io *io) |
647 | { | 656 | { |
648 | struct bio *clone = io->ctx.bio_out; | 657 | struct bio *clone = io->ctx.bio_out; |
649 | struct crypt_config *cc = io->target->private; | ||
650 | |||
651 | generic_make_request(clone); | 658 | generic_make_request(clone); |
652 | wake_up(&cc->writeq); | ||
653 | } | 659 | } |
654 | 660 | ||
655 | static void kcryptd_io(struct work_struct *work) | 661 | static void kcryptd_io(struct work_struct *work) |
@@ -688,7 +694,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, | |||
688 | BUG_ON(io->ctx.idx_out < clone->bi_vcnt); | 694 | BUG_ON(io->ctx.idx_out < clone->bi_vcnt); |
689 | 695 | ||
690 | clone->bi_sector = cc->start + io->sector; | 696 | clone->bi_sector = cc->start + io->sector; |
691 | io->sector += bio_sectors(clone); | ||
692 | 697 | ||
693 | if (async) | 698 | if (async) |
694 | kcryptd_queue_io(io); | 699 | kcryptd_queue_io(io); |
@@ -700,16 +705,18 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
700 | { | 705 | { |
701 | struct crypt_config *cc = io->target->private; | 706 | struct crypt_config *cc = io->target->private; |
702 | struct bio *clone; | 707 | struct bio *clone; |
708 | struct dm_crypt_io *new_io; | ||
703 | int crypt_finished; | 709 | int crypt_finished; |
704 | unsigned out_of_pages = 0; | 710 | unsigned out_of_pages = 0; |
705 | unsigned remaining = io->base_bio->bi_size; | 711 | unsigned remaining = io->base_bio->bi_size; |
712 | sector_t sector = io->sector; | ||
706 | int r; | 713 | int r; |
707 | 714 | ||
708 | /* | 715 | /* |
709 | * Prevent io from disappearing until this function completes. | 716 | * Prevent io from disappearing until this function completes. |
710 | */ | 717 | */ |
711 | crypt_inc_pending(io); | 718 | crypt_inc_pending(io); |
712 | crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); | 719 | crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); |
713 | 720 | ||
714 | /* | 721 | /* |
715 | * The allocated buffers can be smaller than the whole bio, | 722 | * The allocated buffers can be smaller than the whole bio, |
@@ -726,6 +733,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
726 | io->ctx.idx_out = 0; | 733 | io->ctx.idx_out = 0; |
727 | 734 | ||
728 | remaining -= clone->bi_size; | 735 | remaining -= clone->bi_size; |
736 | sector += bio_sectors(clone); | ||
729 | 737 | ||
730 | crypt_inc_pending(io); | 738 | crypt_inc_pending(io); |
731 | r = crypt_convert(cc, &io->ctx); | 739 | r = crypt_convert(cc, &io->ctx); |
@@ -741,6 +749,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
741 | */ | 749 | */ |
742 | if (unlikely(r < 0)) | 750 | if (unlikely(r < 0)) |
743 | break; | 751 | break; |
752 | |||
753 | io->sector = sector; | ||
744 | } | 754 | } |
745 | 755 | ||
746 | /* | 756 | /* |
@@ -750,8 +760,33 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
750 | if (unlikely(out_of_pages)) | 760 | if (unlikely(out_of_pages)) |
751 | congestion_wait(WRITE, HZ/100); | 761 | congestion_wait(WRITE, HZ/100); |
752 | 762 | ||
753 | if (unlikely(remaining)) | 763 | /* |
754 | wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); | 764 | * With async crypto it is unsafe to share the crypto context |
765 | * between fragments, so switch to a new dm_crypt_io structure. | ||
766 | */ | ||
767 | if (unlikely(!crypt_finished && remaining)) { | ||
768 | new_io = crypt_io_alloc(io->target, io->base_bio, | ||
769 | sector); | ||
770 | crypt_inc_pending(new_io); | ||
771 | crypt_convert_init(cc, &new_io->ctx, NULL, | ||
772 | io->base_bio, sector); | ||
773 | new_io->ctx.idx_in = io->ctx.idx_in; | ||
774 | new_io->ctx.offset_in = io->ctx.offset_in; | ||
775 | |||
776 | /* | ||
777 | * Fragments after the first use the base_io | ||
778 | * pending count. | ||
779 | */ | ||
780 | if (!io->base_io) | ||
781 | new_io->base_io = io; | ||
782 | else { | ||
783 | new_io->base_io = io->base_io; | ||
784 | crypt_inc_pending(io->base_io); | ||
785 | crypt_dec_pending(io); | ||
786 | } | ||
787 | |||
788 | io = new_io; | ||
789 | } | ||
755 | } | 790 | } |
756 | 791 | ||
757 | crypt_dec_pending(io); | 792 | crypt_dec_pending(io); |
@@ -1078,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1078 | goto bad_crypt_queue; | 1113 | goto bad_crypt_queue; |
1079 | } | 1114 | } |
1080 | 1115 | ||
1081 | init_waitqueue_head(&cc->writeq); | ||
1082 | ti->private = cc; | 1116 | ti->private = cc; |
1083 | return 0; | 1117 | return 0; |
1084 | 1118 | ||
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index bdd37f881c42..848b381f1173 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c | |||
@@ -13,7 +13,8 @@ | |||
13 | #include <linux/bio.h> | 13 | #include <linux/bio.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | 15 | ||
16 | #include "dm.h" | 16 | #include <linux/device-mapper.h> |
17 | |||
17 | #include "dm-bio-list.h" | 18 | #include "dm-bio-list.h" |
18 | 19 | ||
19 | #define DM_MSG_PREFIX "delay" | 20 | #define DM_MSG_PREFIX "delay" |
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 769ab677f8e0..01590f3e0009 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c | |||
@@ -7,7 +7,6 @@ | |||
7 | * This file is released under the GPL. | 7 | * This file is released under the GPL. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include "dm.h" | ||
11 | #include "dm-snap.h" | 10 | #include "dm-snap.h" |
12 | 11 | ||
13 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
@@ -105,6 +104,11 @@ struct pstore { | |||
105 | void *area; | 104 | void *area; |
106 | 105 | ||
107 | /* | 106 | /* |
107 | * An area of zeros used to clear the next area. | ||
108 | */ | ||
109 | void *zero_area; | ||
110 | |||
111 | /* | ||
108 | * Used to keep track of which metadata area the data in | 112 | * Used to keep track of which metadata area the data in |
109 | * 'chunk' refers to. | 113 | * 'chunk' refers to. |
110 | */ | 114 | */ |
@@ -149,6 +153,13 @@ static int alloc_area(struct pstore *ps) | |||
149 | if (!ps->area) | 153 | if (!ps->area) |
150 | return r; | 154 | return r; |
151 | 155 | ||
156 | ps->zero_area = vmalloc(len); | ||
157 | if (!ps->zero_area) { | ||
158 | vfree(ps->area); | ||
159 | return r; | ||
160 | } | ||
161 | memset(ps->zero_area, 0, len); | ||
162 | |||
152 | return 0; | 163 | return 0; |
153 | } | 164 | } |
154 | 165 | ||
@@ -156,6 +167,8 @@ static void free_area(struct pstore *ps) | |||
156 | { | 167 | { |
157 | vfree(ps->area); | 168 | vfree(ps->area); |
158 | ps->area = NULL; | 169 | ps->area = NULL; |
170 | vfree(ps->zero_area); | ||
171 | ps->zero_area = NULL; | ||
159 | } | 172 | } |
160 | 173 | ||
161 | struct mdata_req { | 174 | struct mdata_req { |
@@ -220,25 +233,41 @@ static chunk_t area_location(struct pstore *ps, chunk_t area) | |||
220 | * Read or write a metadata area. Remembering to skip the first | 233 | * Read or write a metadata area. Remembering to skip the first |
221 | * chunk which holds the header. | 234 | * chunk which holds the header. |
222 | */ | 235 | */ |
223 | static int area_io(struct pstore *ps, chunk_t area, int rw) | 236 | static int area_io(struct pstore *ps, int rw) |
224 | { | 237 | { |
225 | int r; | 238 | int r; |
226 | chunk_t chunk; | 239 | chunk_t chunk; |
227 | 240 | ||
228 | chunk = area_location(ps, area); | 241 | chunk = area_location(ps, ps->current_area); |
229 | 242 | ||
230 | r = chunk_io(ps, chunk, rw, 0); | 243 | r = chunk_io(ps, chunk, rw, 0); |
231 | if (r) | 244 | if (r) |
232 | return r; | 245 | return r; |
233 | 246 | ||
234 | ps->current_area = area; | ||
235 | return 0; | 247 | return 0; |
236 | } | 248 | } |
237 | 249 | ||
238 | static int zero_area(struct pstore *ps, chunk_t area) | 250 | static void zero_memory_area(struct pstore *ps) |
239 | { | 251 | { |
240 | memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); | 252 | memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); |
241 | return area_io(ps, area, WRITE); | 253 | } |
254 | |||
255 | static int zero_disk_area(struct pstore *ps, chunk_t area) | ||
256 | { | ||
257 | struct dm_io_region where = { | ||
258 | .bdev = ps->snap->cow->bdev, | ||
259 | .sector = ps->snap->chunk_size * area_location(ps, area), | ||
260 | .count = ps->snap->chunk_size, | ||
261 | }; | ||
262 | struct dm_io_request io_req = { | ||
263 | .bi_rw = WRITE, | ||
264 | .mem.type = DM_IO_VMA, | ||
265 | .mem.ptr.vma = ps->zero_area, | ||
266 | .client = ps->io_client, | ||
267 | .notify.fn = NULL, | ||
268 | }; | ||
269 | |||
270 | return dm_io(&io_req, 1, &where, NULL); | ||
242 | } | 271 | } |
243 | 272 | ||
244 | static int read_header(struct pstore *ps, int *new_snapshot) | 273 | static int read_header(struct pstore *ps, int *new_snapshot) |
@@ -411,15 +440,14 @@ static int insert_exceptions(struct pstore *ps, int *full) | |||
411 | 440 | ||
412 | static int read_exceptions(struct pstore *ps) | 441 | static int read_exceptions(struct pstore *ps) |
413 | { | 442 | { |
414 | chunk_t area; | ||
415 | int r, full = 1; | 443 | int r, full = 1; |
416 | 444 | ||
417 | /* | 445 | /* |
418 | * Keeping reading chunks and inserting exceptions until | 446 | * Keeping reading chunks and inserting exceptions until |
419 | * we find a partially full area. | 447 | * we find a partially full area. |
420 | */ | 448 | */ |
421 | for (area = 0; full; area++) { | 449 | for (ps->current_area = 0; full; ps->current_area++) { |
422 | r = area_io(ps, area, READ); | 450 | r = area_io(ps, READ); |
423 | if (r) | 451 | if (r) |
424 | return r; | 452 | return r; |
425 | 453 | ||
@@ -428,6 +456,8 @@ static int read_exceptions(struct pstore *ps) | |||
428 | return r; | 456 | return r; |
429 | } | 457 | } |
430 | 458 | ||
459 | ps->current_area--; | ||
460 | |||
431 | return 0; | 461 | return 0; |
432 | } | 462 | } |
433 | 463 | ||
@@ -486,12 +516,13 @@ static int persistent_read_metadata(struct exception_store *store) | |||
486 | return r; | 516 | return r; |
487 | } | 517 | } |
488 | 518 | ||
489 | r = zero_area(ps, 0); | 519 | ps->current_area = 0; |
520 | zero_memory_area(ps); | ||
521 | r = zero_disk_area(ps, 0); | ||
490 | if (r) { | 522 | if (r) { |
491 | DMWARN("zero_area(0) failed"); | 523 | DMWARN("zero_disk_area(0) failed"); |
492 | return r; | 524 | return r; |
493 | } | 525 | } |
494 | |||
495 | } else { | 526 | } else { |
496 | /* | 527 | /* |
497 | * Sanity checks. | 528 | * Sanity checks. |
@@ -551,7 +582,6 @@ static void persistent_commit(struct exception_store *store, | |||
551 | void (*callback) (void *, int success), | 582 | void (*callback) (void *, int success), |
552 | void *callback_context) | 583 | void *callback_context) |
553 | { | 584 | { |
554 | int r; | ||
555 | unsigned int i; | 585 | unsigned int i; |
556 | struct pstore *ps = get_info(store); | 586 | struct pstore *ps = get_info(store); |
557 | struct disk_exception de; | 587 | struct disk_exception de; |
@@ -572,33 +602,41 @@ static void persistent_commit(struct exception_store *store, | |||
572 | cb->context = callback_context; | 602 | cb->context = callback_context; |
573 | 603 | ||
574 | /* | 604 | /* |
575 | * If there are no more exceptions in flight, or we have | 605 | * If there are exceptions in flight and we have not yet |
576 | * filled this metadata area we commit the exceptions to | 606 | * filled this metadata area there's nothing more to do. |
577 | * disk. | ||
578 | */ | 607 | */ |
579 | if (atomic_dec_and_test(&ps->pending_count) || | 608 | if (!atomic_dec_and_test(&ps->pending_count) && |
580 | (ps->current_committed == ps->exceptions_per_area)) { | 609 | (ps->current_committed != ps->exceptions_per_area)) |
581 | r = area_io(ps, ps->current_area, WRITE); | 610 | return; |
582 | if (r) | ||
583 | ps->valid = 0; | ||
584 | 611 | ||
585 | /* | 612 | /* |
586 | * Have we completely filled the current area ? | 613 | * If we completely filled the current area, then wipe the next one. |
587 | */ | 614 | */ |
588 | if (ps->current_committed == ps->exceptions_per_area) { | 615 | if ((ps->current_committed == ps->exceptions_per_area) && |
589 | ps->current_committed = 0; | 616 | zero_disk_area(ps, ps->current_area + 1)) |
590 | r = zero_area(ps, ps->current_area + 1); | 617 | ps->valid = 0; |
591 | if (r) | ||
592 | ps->valid = 0; | ||
593 | } | ||
594 | 618 | ||
595 | for (i = 0; i < ps->callback_count; i++) { | 619 | /* |
596 | cb = ps->callbacks + i; | 620 | * Commit exceptions to disk. |
597 | cb->callback(cb->context, r == 0 ? 1 : 0); | 621 | */ |
598 | } | 622 | if (ps->valid && area_io(ps, WRITE)) |
623 | ps->valid = 0; | ||
599 | 624 | ||
600 | ps->callback_count = 0; | 625 | /* |
626 | * Advance to the next area if this one is full. | ||
627 | */ | ||
628 | if (ps->current_committed == ps->exceptions_per_area) { | ||
629 | ps->current_committed = 0; | ||
630 | ps->current_area++; | ||
631 | zero_memory_area(ps); | ||
601 | } | 632 | } |
633 | |||
634 | for (i = 0; i < ps->callback_count; i++) { | ||
635 | cb = ps->callbacks + i; | ||
636 | cb->callback(cb->context, ps->valid); | ||
637 | } | ||
638 | |||
639 | ps->callback_count = 0; | ||
602 | } | 640 | } |
603 | 641 | ||
604 | static void persistent_drop(struct exception_store *store) | 642 | static void persistent_drop(struct exception_store *store) |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 4789c42d9a3a..2fd6d4450637 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "dm.h" | 8 | #include <linux/device-mapper.h> |
9 | 9 | ||
10 | #include <linux/bio.h> | 10 | #include <linux/bio.h> |
11 | #include <linux/mempool.h> | 11 | #include <linux/mempool.h> |
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 996802b8a452..3073618269ea 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
23 | #include <linux/workqueue.h> | 23 | #include <linux/workqueue.h> |
24 | #include <linux/mutex.h> | 24 | #include <linux/mutex.h> |
25 | #include <linux/device-mapper.h> | ||
25 | #include <linux/dm-kcopyd.h> | 26 | #include <linux/dm-kcopyd.h> |
26 | 27 | ||
27 | #include "dm.h" | 28 | #include "dm.h" |
@@ -268,6 +269,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job) | |||
268 | spin_unlock_irqrestore(&kc->job_lock, flags); | 269 | spin_unlock_irqrestore(&kc->job_lock, flags); |
269 | } | 270 | } |
270 | 271 | ||
272 | |||
273 | static void push_head(struct list_head *jobs, struct kcopyd_job *job) | ||
274 | { | ||
275 | unsigned long flags; | ||
276 | struct dm_kcopyd_client *kc = job->kc; | ||
277 | |||
278 | spin_lock_irqsave(&kc->job_lock, flags); | ||
279 | list_add(&job->list, jobs); | ||
280 | spin_unlock_irqrestore(&kc->job_lock, flags); | ||
281 | } | ||
282 | |||
271 | /* | 283 | /* |
272 | * These three functions process 1 item from the corresponding | 284 | * These three functions process 1 item from the corresponding |
273 | * job list. | 285 | * job list. |
@@ -398,7 +410,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, | |||
398 | * We couldn't service this job ATM, so | 410 | * We couldn't service this job ATM, so |
399 | * push this job back onto the list. | 411 | * push this job back onto the list. |
400 | */ | 412 | */ |
401 | push(jobs, job); | 413 | push_head(jobs, job); |
402 | break; | 414 | break; |
403 | } | 415 | } |
404 | 416 | ||
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 6449bcdf84ca..1b29e9136758 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c | |||
@@ -5,12 +5,12 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "dm.h" | 7 | #include "dm.h" |
8 | |||
9 | #include <linux/module.h> | 8 | #include <linux/module.h> |
10 | #include <linux/init.h> | 9 | #include <linux/init.h> |
11 | #include <linux/blkdev.h> | 10 | #include <linux/blkdev.h> |
12 | #include <linux/bio.h> | 11 | #include <linux/bio.h> |
13 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/device-mapper.h> | ||
14 | 14 | ||
15 | #define DM_MSG_PREFIX "linear" | 15 | #define DM_MSG_PREFIX "linear" |
16 | 16 | ||
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 5b48478c79f5..a8c0fc79ca78 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/dm-io.h> | 12 | #include <linux/dm-io.h> |
13 | #include <linux/dm-dirty-log.h> | 13 | #include <linux/dm-dirty-log.h> |
14 | 14 | ||
15 | #include "dm.h" | 15 | #include <linux/device-mapper.h> |
16 | 16 | ||
17 | #define DM_MSG_PREFIX "dirty region log" | 17 | #define DM_MSG_PREFIX "dirty region log" |
18 | 18 | ||
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 9bf3460c5540..abf6e8cfaedb 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "dm.h" | 8 | #include <linux/device-mapper.h> |
9 | |||
9 | #include "dm-path-selector.h" | 10 | #include "dm-path-selector.h" |
10 | #include "dm-bio-list.h" | 11 | #include "dm-bio-list.h" |
11 | #include "dm-bio-record.h" | 12 | #include "dm-bio-record.h" |
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c index ca1bb636a3e4..96ea226155b1 100644 --- a/drivers/md/dm-path-selector.c +++ b/drivers/md/dm-path-selector.c | |||
@@ -9,7 +9,8 @@ | |||
9 | * Path selector registration. | 9 | * Path selector registration. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include "dm.h" | 12 | #include <linux/device-mapper.h> |
13 | |||
13 | #include "dm-path-selector.h" | 14 | #include "dm-path-selector.h" |
14 | 15 | ||
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 29913e42c4ab..92dcc06832a4 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -1,30 +1,30 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2003 Sistina Software Limited. | 2 | * Copyright (C) 2003 Sistina Software Limited. |
3 | * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. | ||
3 | * | 4 | * |
4 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
5 | */ | 6 | */ |
6 | 7 | ||
7 | #include "dm.h" | ||
8 | #include "dm-bio-list.h" | 8 | #include "dm-bio-list.h" |
9 | #include "dm-bio-record.h" | 9 | #include "dm-bio-record.h" |
10 | 10 | ||
11 | #include <linux/ctype.h> | ||
12 | #include <linux/init.h> | 11 | #include <linux/init.h> |
13 | #include <linux/mempool.h> | 12 | #include <linux/mempool.h> |
14 | #include <linux/module.h> | 13 | #include <linux/module.h> |
15 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
16 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
17 | #include <linux/time.h> | ||
18 | #include <linux/vmalloc.h> | ||
19 | #include <linux/workqueue.h> | 16 | #include <linux/workqueue.h> |
20 | #include <linux/log2.h> | 17 | #include <linux/device-mapper.h> |
21 | #include <linux/hardirq.h> | ||
22 | #include <linux/dm-io.h> | 18 | #include <linux/dm-io.h> |
23 | #include <linux/dm-dirty-log.h> | 19 | #include <linux/dm-dirty-log.h> |
24 | #include <linux/dm-kcopyd.h> | 20 | #include <linux/dm-kcopyd.h> |
21 | #include <linux/dm-region-hash.h> | ||
25 | 22 | ||
26 | #define DM_MSG_PREFIX "raid1" | 23 | #define DM_MSG_PREFIX "raid1" |
24 | |||
25 | #define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ | ||
27 | #define DM_IO_PAGES 64 | 26 | #define DM_IO_PAGES 64 |
27 | #define DM_KCOPYD_PAGES 64 | ||
28 | 28 | ||
29 | #define DM_RAID1_HANDLE_ERRORS 0x01 | 29 | #define DM_RAID1_HANDLE_ERRORS 0x01 |
30 | #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) | 30 | #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) |
@@ -32,87 +32,6 @@ | |||
32 | static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); | 32 | static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); |
33 | 33 | ||
34 | /*----------------------------------------------------------------- | 34 | /*----------------------------------------------------------------- |
35 | * Region hash | ||
36 | * | ||
37 | * The mirror splits itself up into discrete regions. Each | ||
38 | * region can be in one of three states: clean, dirty, | ||
39 | * nosync. There is no need to put clean regions in the hash. | ||
40 | * | ||
41 | * In addition to being present in the hash table a region _may_ | ||
42 | * be present on one of three lists. | ||
43 | * | ||
44 | * clean_regions: Regions on this list have no io pending to | ||
45 | * them, they are in sync, we are no longer interested in them, | ||
46 | * they are dull. rh_update_states() will remove them from the | ||
47 | * hash table. | ||
48 | * | ||
49 | * quiesced_regions: These regions have been spun down, ready | ||
50 | * for recovery. rh_recovery_start() will remove regions from | ||
51 | * this list and hand them to kmirrord, which will schedule the | ||
52 | * recovery io with kcopyd. | ||
53 | * | ||
54 | * recovered_regions: Regions that kcopyd has successfully | ||
55 | * recovered. rh_update_states() will now schedule any delayed | ||
56 | * io, up the recovery_count, and remove the region from the | ||
57 | * hash. | ||
58 | * | ||
59 | * There are 2 locks: | ||
60 | * A rw spin lock 'hash_lock' protects just the hash table, | ||
61 | * this is never held in write mode from interrupt context, | ||
62 | * which I believe means that we only have to disable irqs when | ||
63 | * doing a write lock. | ||
64 | * | ||
65 | * An ordinary spin lock 'region_lock' that protects the three | ||
66 | * lists in the region_hash, with the 'state', 'list' and | ||
67 | * 'bhs_delayed' fields of the regions. This is used from irq | ||
68 | * context, so all other uses will have to suspend local irqs. | ||
69 | *---------------------------------------------------------------*/ | ||
70 | struct mirror_set; | ||
71 | struct region_hash { | ||
72 | struct mirror_set *ms; | ||
73 | uint32_t region_size; | ||
74 | unsigned region_shift; | ||
75 | |||
76 | /* holds persistent region state */ | ||
77 | struct dm_dirty_log *log; | ||
78 | |||
79 | /* hash table */ | ||
80 | rwlock_t hash_lock; | ||
81 | mempool_t *region_pool; | ||
82 | unsigned int mask; | ||
83 | unsigned int nr_buckets; | ||
84 | struct list_head *buckets; | ||
85 | |||
86 | spinlock_t region_lock; | ||
87 | atomic_t recovery_in_flight; | ||
88 | struct semaphore recovery_count; | ||
89 | struct list_head clean_regions; | ||
90 | struct list_head quiesced_regions; | ||
91 | struct list_head recovered_regions; | ||
92 | struct list_head failed_recovered_regions; | ||
93 | }; | ||
94 | |||
95 | enum { | ||
96 | RH_CLEAN, | ||
97 | RH_DIRTY, | ||
98 | RH_NOSYNC, | ||
99 | RH_RECOVERING | ||
100 | }; | ||
101 | |||
102 | struct region { | ||
103 | struct region_hash *rh; /* FIXME: can we get rid of this ? */ | ||
104 | region_t key; | ||
105 | int state; | ||
106 | |||
107 | struct list_head hash_list; | ||
108 | struct list_head list; | ||
109 | |||
110 | atomic_t pending; | ||
111 | struct bio_list delayed_bios; | ||
112 | }; | ||
113 | |||
114 | |||
115 | /*----------------------------------------------------------------- | ||
116 | * Mirror set structures. | 35 | * Mirror set structures. |
117 | *---------------------------------------------------------------*/ | 36 | *---------------------------------------------------------------*/ |
118 | enum dm_raid1_error { | 37 | enum dm_raid1_error { |
@@ -132,8 +51,7 @@ struct mirror { | |||
132 | struct mirror_set { | 51 | struct mirror_set { |
133 | struct dm_target *ti; | 52 | struct dm_target *ti; |
134 | struct list_head list; | 53 | struct list_head list; |
135 | struct region_hash rh; | 54 | |
136 | struct dm_kcopyd_client *kcopyd_client; | ||
137 | uint64_t features; | 55 | uint64_t features; |
138 | 56 | ||
139 | spinlock_t lock; /* protects the lists */ | 57 | spinlock_t lock; /* protects the lists */ |
@@ -141,6 +59,8 @@ struct mirror_set { | |||
141 | struct bio_list writes; | 59 | struct bio_list writes; |
142 | struct bio_list failures; | 60 | struct bio_list failures; |
143 | 61 | ||
62 | struct dm_region_hash *rh; | ||
63 | struct dm_kcopyd_client *kcopyd_client; | ||
144 | struct dm_io_client *io_client; | 64 | struct dm_io_client *io_client; |
145 | mempool_t *read_record_pool; | 65 | mempool_t *read_record_pool; |
146 | 66 | ||
@@ -159,25 +79,14 @@ struct mirror_set { | |||
159 | 79 | ||
160 | struct work_struct trigger_event; | 80 | struct work_struct trigger_event; |
161 | 81 | ||
162 | unsigned int nr_mirrors; | 82 | unsigned nr_mirrors; |
163 | struct mirror mirror[0]; | 83 | struct mirror mirror[0]; |
164 | }; | 84 | }; |
165 | 85 | ||
166 | /* | 86 | static void wakeup_mirrord(void *context) |
167 | * Conversion fns | ||
168 | */ | ||
169 | static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio) | ||
170 | { | ||
171 | return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift; | ||
172 | } | ||
173 | |||
174 | static inline sector_t region_to_sector(struct region_hash *rh, region_t region) | ||
175 | { | 87 | { |
176 | return region << rh->region_shift; | 88 | struct mirror_set *ms = context; |
177 | } | ||
178 | 89 | ||
179 | static void wake(struct mirror_set *ms) | ||
180 | { | ||
181 | queue_work(ms->kmirrord_wq, &ms->kmirrord_work); | 90 | queue_work(ms->kmirrord_wq, &ms->kmirrord_work); |
182 | } | 91 | } |
183 | 92 | ||
@@ -186,7 +95,7 @@ static void delayed_wake_fn(unsigned long data) | |||
186 | struct mirror_set *ms = (struct mirror_set *) data; | 95 | struct mirror_set *ms = (struct mirror_set *) data; |
187 | 96 | ||
188 | clear_bit(0, &ms->timer_pending); | 97 | clear_bit(0, &ms->timer_pending); |
189 | wake(ms); | 98 | wakeup_mirrord(ms); |
190 | } | 99 | } |
191 | 100 | ||
192 | static void delayed_wake(struct mirror_set *ms) | 101 | static void delayed_wake(struct mirror_set *ms) |
@@ -200,473 +109,34 @@ static void delayed_wake(struct mirror_set *ms) | |||
200 | add_timer(&ms->timer); | 109 | add_timer(&ms->timer); |
201 | } | 110 | } |
202 | 111 | ||
203 | /* FIXME move this */ | 112 | static void wakeup_all_recovery_waiters(void *context) |
204 | static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); | ||
205 | |||
206 | #define MIN_REGIONS 64 | ||
207 | #define MAX_RECOVERY 1 | ||
208 | static int rh_init(struct region_hash *rh, struct mirror_set *ms, | ||
209 | struct dm_dirty_log *log, uint32_t region_size, | ||
210 | region_t nr_regions) | ||
211 | { | 113 | { |
212 | unsigned int nr_buckets, max_buckets; | 114 | wake_up_all(&_kmirrord_recovery_stopped); |
213 | size_t i; | ||
214 | |||
215 | /* | ||
216 | * Calculate a suitable number of buckets for our hash | ||
217 | * table. | ||
218 | */ | ||
219 | max_buckets = nr_regions >> 6; | ||
220 | for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) | ||
221 | ; | ||
222 | nr_buckets >>= 1; | ||
223 | |||
224 | rh->ms = ms; | ||
225 | rh->log = log; | ||
226 | rh->region_size = region_size; | ||
227 | rh->region_shift = ffs(region_size) - 1; | ||
228 | rwlock_init(&rh->hash_lock); | ||
229 | rh->mask = nr_buckets - 1; | ||
230 | rh->nr_buckets = nr_buckets; | ||
231 | |||
232 | rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); | ||
233 | if (!rh->buckets) { | ||
234 | DMERR("unable to allocate region hash memory"); | ||
235 | return -ENOMEM; | ||
236 | } | ||
237 | |||
238 | for (i = 0; i < nr_buckets; i++) | ||
239 | INIT_LIST_HEAD(rh->buckets + i); | ||
240 | |||
241 | spin_lock_init(&rh->region_lock); | ||
242 | sema_init(&rh->recovery_count, 0); | ||
243 | atomic_set(&rh->recovery_in_flight, 0); | ||
244 | INIT_LIST_HEAD(&rh->clean_regions); | ||
245 | INIT_LIST_HEAD(&rh->quiesced_regions); | ||
246 | INIT_LIST_HEAD(&rh->recovered_regions); | ||
247 | INIT_LIST_HEAD(&rh->failed_recovered_regions); | ||
248 | |||
249 | rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, | ||
250 | sizeof(struct region)); | ||
251 | if (!rh->region_pool) { | ||
252 | vfree(rh->buckets); | ||
253 | rh->buckets = NULL; | ||
254 | return -ENOMEM; | ||
255 | } | ||
256 | |||
257 | return 0; | ||
258 | } | 115 | } |
259 | 116 | ||
260 | static void rh_exit(struct region_hash *rh) | 117 | static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) |
261 | { | ||
262 | unsigned int h; | ||
263 | struct region *reg, *nreg; | ||
264 | |||
265 | BUG_ON(!list_empty(&rh->quiesced_regions)); | ||
266 | for (h = 0; h < rh->nr_buckets; h++) { | ||
267 | list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) { | ||
268 | BUG_ON(atomic_read(®->pending)); | ||
269 | mempool_free(reg, rh->region_pool); | ||
270 | } | ||
271 | } | ||
272 | |||
273 | if (rh->log) | ||
274 | dm_dirty_log_destroy(rh->log); | ||
275 | if (rh->region_pool) | ||
276 | mempool_destroy(rh->region_pool); | ||
277 | vfree(rh->buckets); | ||
278 | } | ||
279 | |||
280 | #define RH_HASH_MULT 2654435387U | ||
281 | |||
282 | static inline unsigned int rh_hash(struct region_hash *rh, region_t region) | ||
283 | { | ||
284 | return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask; | ||
285 | } | ||
286 | |||
287 | static struct region *__rh_lookup(struct region_hash *rh, region_t region) | ||
288 | { | ||
289 | struct region *reg; | ||
290 | |||
291 | list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list) | ||
292 | if (reg->key == region) | ||
293 | return reg; | ||
294 | |||
295 | return NULL; | ||
296 | } | ||
297 | |||
298 | static void __rh_insert(struct region_hash *rh, struct region *reg) | ||
299 | { | ||
300 | unsigned int h = rh_hash(rh, reg->key); | ||
301 | list_add(®->hash_list, rh->buckets + h); | ||
302 | } | ||
303 | |||
304 | static struct region *__rh_alloc(struct region_hash *rh, region_t region) | ||
305 | { | ||
306 | struct region *reg, *nreg; | ||
307 | |||
308 | read_unlock(&rh->hash_lock); | ||
309 | nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); | ||
310 | if (unlikely(!nreg)) | ||
311 | nreg = kmalloc(sizeof(struct region), GFP_NOIO); | ||
312 | nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? | ||
313 | RH_CLEAN : RH_NOSYNC; | ||
314 | nreg->rh = rh; | ||
315 | nreg->key = region; | ||
316 | |||
317 | INIT_LIST_HEAD(&nreg->list); | ||
318 | |||
319 | atomic_set(&nreg->pending, 0); | ||
320 | bio_list_init(&nreg->delayed_bios); | ||
321 | write_lock_irq(&rh->hash_lock); | ||
322 | |||
323 | reg = __rh_lookup(rh, region); | ||
324 | if (reg) | ||
325 | /* we lost the race */ | ||
326 | mempool_free(nreg, rh->region_pool); | ||
327 | |||
328 | else { | ||
329 | __rh_insert(rh, nreg); | ||
330 | if (nreg->state == RH_CLEAN) { | ||
331 | spin_lock(&rh->region_lock); | ||
332 | list_add(&nreg->list, &rh->clean_regions); | ||
333 | spin_unlock(&rh->region_lock); | ||
334 | } | ||
335 | reg = nreg; | ||
336 | } | ||
337 | write_unlock_irq(&rh->hash_lock); | ||
338 | read_lock(&rh->hash_lock); | ||
339 | |||
340 | return reg; | ||
341 | } | ||
342 | |||
343 | static inline struct region *__rh_find(struct region_hash *rh, region_t region) | ||
344 | { | ||
345 | struct region *reg; | ||
346 | |||
347 | reg = __rh_lookup(rh, region); | ||
348 | if (!reg) | ||
349 | reg = __rh_alloc(rh, region); | ||
350 | |||
351 | return reg; | ||
352 | } | ||
353 | |||
354 | static int rh_state(struct region_hash *rh, region_t region, int may_block) | ||
355 | { | ||
356 | int r; | ||
357 | struct region *reg; | ||
358 | |||
359 | read_lock(&rh->hash_lock); | ||
360 | reg = __rh_lookup(rh, region); | ||
361 | read_unlock(&rh->hash_lock); | ||
362 | |||
363 | if (reg) | ||
364 | return reg->state; | ||
365 | |||
366 | /* | ||
367 | * The region wasn't in the hash, so we fall back to the | ||
368 | * dirty log. | ||
369 | */ | ||
370 | r = rh->log->type->in_sync(rh->log, region, may_block); | ||
371 | |||
372 | /* | ||
373 | * Any error from the dirty log (eg. -EWOULDBLOCK) gets | ||
374 | * taken as a RH_NOSYNC | ||
375 | */ | ||
376 | return r == 1 ? RH_CLEAN : RH_NOSYNC; | ||
377 | } | ||
378 | |||
379 | static inline int rh_in_sync(struct region_hash *rh, | ||
380 | region_t region, int may_block) | ||
381 | { | ||
382 | int state = rh_state(rh, region, may_block); | ||
383 | return state == RH_CLEAN || state == RH_DIRTY; | ||
384 | } | ||
385 | |||
386 | static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list) | ||
387 | { | ||
388 | struct bio *bio; | ||
389 | |||
390 | while ((bio = bio_list_pop(bio_list))) { | ||
391 | queue_bio(ms, bio, WRITE); | ||
392 | } | ||
393 | } | ||
394 | |||
395 | static void complete_resync_work(struct region *reg, int success) | ||
396 | { | ||
397 | struct region_hash *rh = reg->rh; | ||
398 | |||
399 | rh->log->type->set_region_sync(rh->log, reg->key, success); | ||
400 | |||
401 | /* | ||
402 | * Dispatch the bios before we call 'wake_up_all'. | ||
403 | * This is important because if we are suspending, | ||
404 | * we want to know that recovery is complete and | ||
405 | * the work queue is flushed. If we wake_up_all | ||
406 | * before we dispatch_bios (queue bios and call wake()), | ||
407 | * then we risk suspending before the work queue | ||
408 | * has been properly flushed. | ||
409 | */ | ||
410 | dispatch_bios(rh->ms, ®->delayed_bios); | ||
411 | if (atomic_dec_and_test(&rh->recovery_in_flight)) | ||
412 | wake_up_all(&_kmirrord_recovery_stopped); | ||
413 | up(&rh->recovery_count); | ||
414 | } | ||
415 | |||
416 | static void rh_update_states(struct region_hash *rh) | ||
417 | { | ||
418 | struct region *reg, *next; | ||
419 | |||
420 | LIST_HEAD(clean); | ||
421 | LIST_HEAD(recovered); | ||
422 | LIST_HEAD(failed_recovered); | ||
423 | |||
424 | /* | ||
425 | * Quickly grab the lists. | ||
426 | */ | ||
427 | write_lock_irq(&rh->hash_lock); | ||
428 | spin_lock(&rh->region_lock); | ||
429 | if (!list_empty(&rh->clean_regions)) { | ||
430 | list_splice_init(&rh->clean_regions, &clean); | ||
431 | |||
432 | list_for_each_entry(reg, &clean, list) | ||
433 | list_del(®->hash_list); | ||
434 | } | ||
435 | |||
436 | if (!list_empty(&rh->recovered_regions)) { | ||
437 | list_splice_init(&rh->recovered_regions, &recovered); | ||
438 | |||
439 | list_for_each_entry (reg, &recovered, list) | ||
440 | list_del(®->hash_list); | ||
441 | } | ||
442 | |||
443 | if (!list_empty(&rh->failed_recovered_regions)) { | ||
444 | list_splice_init(&rh->failed_recovered_regions, | ||
445 | &failed_recovered); | ||
446 | |||
447 | list_for_each_entry(reg, &failed_recovered, list) | ||
448 | list_del(®->hash_list); | ||
449 | } | ||
450 | |||
451 | spin_unlock(&rh->region_lock); | ||
452 | write_unlock_irq(&rh->hash_lock); | ||
453 | |||
454 | /* | ||
455 | * All the regions on the recovered and clean lists have | ||
456 | * now been pulled out of the system, so no need to do | ||
457 | * any more locking. | ||
458 | */ | ||
459 | list_for_each_entry_safe (reg, next, &recovered, list) { | ||
460 | rh->log->type->clear_region(rh->log, reg->key); | ||
461 | complete_resync_work(reg, 1); | ||
462 | mempool_free(reg, rh->region_pool); | ||
463 | } | ||
464 | |||
465 | list_for_each_entry_safe(reg, next, &failed_recovered, list) { | ||
466 | complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1); | ||
467 | mempool_free(reg, rh->region_pool); | ||
468 | } | ||
469 | |||
470 | list_for_each_entry_safe(reg, next, &clean, list) { | ||
471 | rh->log->type->clear_region(rh->log, reg->key); | ||
472 | mempool_free(reg, rh->region_pool); | ||
473 | } | ||
474 | |||
475 | rh->log->type->flush(rh->log); | ||
476 | } | ||
477 | |||
478 | static void rh_inc(struct region_hash *rh, region_t region) | ||
479 | { | ||
480 | struct region *reg; | ||
481 | |||
482 | read_lock(&rh->hash_lock); | ||
483 | reg = __rh_find(rh, region); | ||
484 | |||
485 | spin_lock_irq(&rh->region_lock); | ||
486 | atomic_inc(®->pending); | ||
487 | |||
488 | if (reg->state == RH_CLEAN) { | ||
489 | reg->state = RH_DIRTY; | ||
490 | list_del_init(®->list); /* take off the clean list */ | ||
491 | spin_unlock_irq(&rh->region_lock); | ||
492 | |||
493 | rh->log->type->mark_region(rh->log, reg->key); | ||
494 | } else | ||
495 | spin_unlock_irq(&rh->region_lock); | ||
496 | |||
497 | |||
498 | read_unlock(&rh->hash_lock); | ||
499 | } | ||
500 | |||
501 | static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios) | ||
502 | { | ||
503 | struct bio *bio; | ||
504 | |||
505 | for (bio = bios->head; bio; bio = bio->bi_next) | ||
506 | rh_inc(rh, bio_to_region(rh, bio)); | ||
507 | } | ||
508 | |||
509 | static void rh_dec(struct region_hash *rh, region_t region) | ||
510 | { | 118 | { |
511 | unsigned long flags; | 119 | unsigned long flags; |
512 | struct region *reg; | ||
513 | int should_wake = 0; | 120 | int should_wake = 0; |
121 | struct bio_list *bl; | ||
514 | 122 | ||
515 | read_lock(&rh->hash_lock); | 123 | bl = (rw == WRITE) ? &ms->writes : &ms->reads; |
516 | reg = __rh_lookup(rh, region); | 124 | spin_lock_irqsave(&ms->lock, flags); |
517 | read_unlock(&rh->hash_lock); | 125 | should_wake = !(bl->head); |
518 | 126 | bio_list_add(bl, bio); | |
519 | spin_lock_irqsave(&rh->region_lock, flags); | 127 | spin_unlock_irqrestore(&ms->lock, flags); |
520 | if (atomic_dec_and_test(®->pending)) { | ||
521 | /* | ||
522 | * There is no pending I/O for this region. | ||
523 | * We can move the region to corresponding list for next action. | ||
524 | * At this point, the region is not yet connected to any list. | ||
525 | * | ||
526 | * If the state is RH_NOSYNC, the region should be kept off | ||
527 | * from clean list. | ||
528 | * The hash entry for RH_NOSYNC will remain in memory | ||
529 | * until the region is recovered or the map is reloaded. | ||
530 | */ | ||
531 | |||
532 | /* do nothing for RH_NOSYNC */ | ||
533 | if (reg->state == RH_RECOVERING) { | ||
534 | list_add_tail(®->list, &rh->quiesced_regions); | ||
535 | } else if (reg->state == RH_DIRTY) { | ||
536 | reg->state = RH_CLEAN; | ||
537 | list_add(®->list, &rh->clean_regions); | ||
538 | } | ||
539 | should_wake = 1; | ||
540 | } | ||
541 | spin_unlock_irqrestore(&rh->region_lock, flags); | ||
542 | 128 | ||
543 | if (should_wake) | 129 | if (should_wake) |
544 | wake(rh->ms); | 130 | wakeup_mirrord(ms); |
545 | } | ||
546 | |||
547 | /* | ||
548 | * Starts quiescing a region in preparation for recovery. | ||
549 | */ | ||
550 | static int __rh_recovery_prepare(struct region_hash *rh) | ||
551 | { | ||
552 | int r; | ||
553 | struct region *reg; | ||
554 | region_t region; | ||
555 | |||
556 | /* | ||
557 | * Ask the dirty log what's next. | ||
558 | */ | ||
559 | r = rh->log->type->get_resync_work(rh->log, ®ion); | ||
560 | if (r <= 0) | ||
561 | return r; | ||
562 | |||
563 | /* | ||
564 | * Get this region, and start it quiescing by setting the | ||
565 | * recovering flag. | ||
566 | */ | ||
567 | read_lock(&rh->hash_lock); | ||
568 | reg = __rh_find(rh, region); | ||
569 | read_unlock(&rh->hash_lock); | ||
570 | |||
571 | spin_lock_irq(&rh->region_lock); | ||
572 | reg->state = RH_RECOVERING; | ||
573 | |||
574 | /* Already quiesced ? */ | ||
575 | if (atomic_read(®->pending)) | ||
576 | list_del_init(®->list); | ||
577 | else | ||
578 | list_move(®->list, &rh->quiesced_regions); | ||
579 | |||
580 | spin_unlock_irq(&rh->region_lock); | ||
581 | |||
582 | return 1; | ||
583 | } | ||
584 | |||
585 | static void rh_recovery_prepare(struct region_hash *rh) | ||
586 | { | ||
587 | /* Extra reference to avoid race with rh_stop_recovery */ | ||
588 | atomic_inc(&rh->recovery_in_flight); | ||
589 | |||
590 | while (!down_trylock(&rh->recovery_count)) { | ||
591 | atomic_inc(&rh->recovery_in_flight); | ||
592 | if (__rh_recovery_prepare(rh) <= 0) { | ||
593 | atomic_dec(&rh->recovery_in_flight); | ||
594 | up(&rh->recovery_count); | ||
595 | break; | ||
596 | } | ||
597 | } | ||
598 | |||
599 | /* Drop the extra reference */ | ||
600 | if (atomic_dec_and_test(&rh->recovery_in_flight)) | ||
601 | wake_up_all(&_kmirrord_recovery_stopped); | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Returns any quiesced regions. | ||
606 | */ | ||
607 | static struct region *rh_recovery_start(struct region_hash *rh) | ||
608 | { | ||
609 | struct region *reg = NULL; | ||
610 | |||
611 | spin_lock_irq(&rh->region_lock); | ||
612 | if (!list_empty(&rh->quiesced_regions)) { | ||
613 | reg = list_entry(rh->quiesced_regions.next, | ||
614 | struct region, list); | ||
615 | list_del_init(®->list); /* remove from the quiesced list */ | ||
616 | } | ||
617 | spin_unlock_irq(&rh->region_lock); | ||
618 | |||
619 | return reg; | ||
620 | } | ||
621 | |||
622 | static void rh_recovery_end(struct region *reg, int success) | ||
623 | { | ||
624 | struct region_hash *rh = reg->rh; | ||
625 | |||
626 | spin_lock_irq(&rh->region_lock); | ||
627 | if (success) | ||
628 | list_add(®->list, ®->rh->recovered_regions); | ||
629 | else { | ||
630 | reg->state = RH_NOSYNC; | ||
631 | list_add(®->list, ®->rh->failed_recovered_regions); | ||
632 | } | ||
633 | spin_unlock_irq(&rh->region_lock); | ||
634 | |||
635 | wake(rh->ms); | ||
636 | } | 131 | } |
637 | 132 | ||
638 | static int rh_flush(struct region_hash *rh) | 133 | static void dispatch_bios(void *context, struct bio_list *bio_list) |
639 | { | 134 | { |
640 | return rh->log->type->flush(rh->log); | 135 | struct mirror_set *ms = context; |
641 | } | 136 | struct bio *bio; |
642 | |||
643 | static void rh_delay(struct region_hash *rh, struct bio *bio) | ||
644 | { | ||
645 | struct region *reg; | ||
646 | |||
647 | read_lock(&rh->hash_lock); | ||
648 | reg = __rh_find(rh, bio_to_region(rh, bio)); | ||
649 | bio_list_add(®->delayed_bios, bio); | ||
650 | read_unlock(&rh->hash_lock); | ||
651 | } | ||
652 | |||
653 | static void rh_stop_recovery(struct region_hash *rh) | ||
654 | { | ||
655 | int i; | ||
656 | |||
657 | /* wait for any recovering regions */ | ||
658 | for (i = 0; i < MAX_RECOVERY; i++) | ||
659 | down(&rh->recovery_count); | ||
660 | } | ||
661 | |||
662 | static void rh_start_recovery(struct region_hash *rh) | ||
663 | { | ||
664 | int i; | ||
665 | |||
666 | for (i = 0; i < MAX_RECOVERY; i++) | ||
667 | up(&rh->recovery_count); | ||
668 | 137 | ||
669 | wake(rh->ms); | 138 | while ((bio = bio_list_pop(bio_list))) |
139 | queue_bio(ms, bio, WRITE); | ||
670 | } | 140 | } |
671 | 141 | ||
672 | #define MIN_READ_RECORDS 20 | 142 | #define MIN_READ_RECORDS 20 |
@@ -776,8 +246,8 @@ out: | |||
776 | static void recovery_complete(int read_err, unsigned long write_err, | 246 | static void recovery_complete(int read_err, unsigned long write_err, |
777 | void *context) | 247 | void *context) |
778 | { | 248 | { |
779 | struct region *reg = (struct region *)context; | 249 | struct dm_region *reg = context; |
780 | struct mirror_set *ms = reg->rh->ms; | 250 | struct mirror_set *ms = dm_rh_region_context(reg); |
781 | int m, bit = 0; | 251 | int m, bit = 0; |
782 | 252 | ||
783 | if (read_err) { | 253 | if (read_err) { |
@@ -803,31 +273,33 @@ static void recovery_complete(int read_err, unsigned long write_err, | |||
803 | } | 273 | } |
804 | } | 274 | } |
805 | 275 | ||
806 | rh_recovery_end(reg, !(read_err || write_err)); | 276 | dm_rh_recovery_end(reg, !(read_err || write_err)); |
807 | } | 277 | } |
808 | 278 | ||
809 | static int recover(struct mirror_set *ms, struct region *reg) | 279 | static int recover(struct mirror_set *ms, struct dm_region *reg) |
810 | { | 280 | { |
811 | int r; | 281 | int r; |
812 | unsigned int i; | 282 | unsigned i; |
813 | struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; | 283 | struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; |
814 | struct mirror *m; | 284 | struct mirror *m; |
815 | unsigned long flags = 0; | 285 | unsigned long flags = 0; |
286 | region_t key = dm_rh_get_region_key(reg); | ||
287 | sector_t region_size = dm_rh_get_region_size(ms->rh); | ||
816 | 288 | ||
817 | /* fill in the source */ | 289 | /* fill in the source */ |
818 | m = get_default_mirror(ms); | 290 | m = get_default_mirror(ms); |
819 | from.bdev = m->dev->bdev; | 291 | from.bdev = m->dev->bdev; |
820 | from.sector = m->offset + region_to_sector(reg->rh, reg->key); | 292 | from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); |
821 | if (reg->key == (ms->nr_regions - 1)) { | 293 | if (key == (ms->nr_regions - 1)) { |
822 | /* | 294 | /* |
823 | * The final region may be smaller than | 295 | * The final region may be smaller than |
824 | * region_size. | 296 | * region_size. |
825 | */ | 297 | */ |
826 | from.count = ms->ti->len & (reg->rh->region_size - 1); | 298 | from.count = ms->ti->len & (region_size - 1); |
827 | if (!from.count) | 299 | if (!from.count) |
828 | from.count = reg->rh->region_size; | 300 | from.count = region_size; |
829 | } else | 301 | } else |
830 | from.count = reg->rh->region_size; | 302 | from.count = region_size; |
831 | 303 | ||
832 | /* fill in the destinations */ | 304 | /* fill in the destinations */ |
833 | for (i = 0, dest = to; i < ms->nr_mirrors; i++) { | 305 | for (i = 0, dest = to; i < ms->nr_mirrors; i++) { |
@@ -836,7 +308,7 @@ static int recover(struct mirror_set *ms, struct region *reg) | |||
836 | 308 | ||
837 | m = ms->mirror + i; | 309 | m = ms->mirror + i; |
838 | dest->bdev = m->dev->bdev; | 310 | dest->bdev = m->dev->bdev; |
839 | dest->sector = m->offset + region_to_sector(reg->rh, reg->key); | 311 | dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); |
840 | dest->count = from.count; | 312 | dest->count = from.count; |
841 | dest++; | 313 | dest++; |
842 | } | 314 | } |
@@ -853,22 +325,22 @@ static int recover(struct mirror_set *ms, struct region *reg) | |||
853 | 325 | ||
854 | static void do_recovery(struct mirror_set *ms) | 326 | static void do_recovery(struct mirror_set *ms) |
855 | { | 327 | { |
328 | struct dm_region *reg; | ||
329 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); | ||
856 | int r; | 330 | int r; |
857 | struct region *reg; | ||
858 | struct dm_dirty_log *log = ms->rh.log; | ||
859 | 331 | ||
860 | /* | 332 | /* |
861 | * Start quiescing some regions. | 333 | * Start quiescing some regions. |
862 | */ | 334 | */ |
863 | rh_recovery_prepare(&ms->rh); | 335 | dm_rh_recovery_prepare(ms->rh); |
864 | 336 | ||
865 | /* | 337 | /* |
866 | * Copy any already quiesced regions. | 338 | * Copy any already quiesced regions. |
867 | */ | 339 | */ |
868 | while ((reg = rh_recovery_start(&ms->rh))) { | 340 | while ((reg = dm_rh_recovery_start(ms->rh))) { |
869 | r = recover(ms, reg); | 341 | r = recover(ms, reg); |
870 | if (r) | 342 | if (r) |
871 | rh_recovery_end(reg, 0); | 343 | dm_rh_recovery_end(reg, 0); |
872 | } | 344 | } |
873 | 345 | ||
874 | /* | 346 | /* |
@@ -909,9 +381,10 @@ static int default_ok(struct mirror *m) | |||
909 | 381 | ||
910 | static int mirror_available(struct mirror_set *ms, struct bio *bio) | 382 | static int mirror_available(struct mirror_set *ms, struct bio *bio) |
911 | { | 383 | { |
912 | region_t region = bio_to_region(&ms->rh, bio); | 384 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); |
385 | region_t region = dm_rh_bio_to_region(ms->rh, bio); | ||
913 | 386 | ||
914 | if (ms->rh.log->type->in_sync(ms->rh.log, region, 0)) | 387 | if (log->type->in_sync(log, region, 0)) |
915 | return choose_mirror(ms, bio->bi_sector) ? 1 : 0; | 388 | return choose_mirror(ms, bio->bi_sector) ? 1 : 0; |
916 | 389 | ||
917 | return 0; | 390 | return 0; |
@@ -985,7 +458,14 @@ static void read_async_bio(struct mirror *m, struct bio *bio) | |||
985 | 458 | ||
986 | map_region(&io, m, bio); | 459 | map_region(&io, m, bio); |
987 | bio_set_m(bio, m); | 460 | bio_set_m(bio, m); |
988 | (void) dm_io(&io_req, 1, &io, NULL); | 461 | BUG_ON(dm_io(&io_req, 1, &io, NULL)); |
462 | } | ||
463 | |||
464 | static inline int region_in_sync(struct mirror_set *ms, region_t region, | ||
465 | int may_block) | ||
466 | { | ||
467 | int state = dm_rh_get_state(ms->rh, region, may_block); | ||
468 | return state == DM_RH_CLEAN || state == DM_RH_DIRTY; | ||
989 | } | 469 | } |
990 | 470 | ||
991 | static void do_reads(struct mirror_set *ms, struct bio_list *reads) | 471 | static void do_reads(struct mirror_set *ms, struct bio_list *reads) |
@@ -995,13 +475,13 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) | |||
995 | struct mirror *m; | 475 | struct mirror *m; |
996 | 476 | ||
997 | while ((bio = bio_list_pop(reads))) { | 477 | while ((bio = bio_list_pop(reads))) { |
998 | region = bio_to_region(&ms->rh, bio); | 478 | region = dm_rh_bio_to_region(ms->rh, bio); |
999 | m = get_default_mirror(ms); | 479 | m = get_default_mirror(ms); |
1000 | 480 | ||
1001 | /* | 481 | /* |
1002 | * We can only read balance if the region is in sync. | 482 | * We can only read balance if the region is in sync. |
1003 | */ | 483 | */ |
1004 | if (likely(rh_in_sync(&ms->rh, region, 1))) | 484 | if (likely(region_in_sync(ms, region, 1))) |
1005 | m = choose_mirror(ms, bio->bi_sector); | 485 | m = choose_mirror(ms, bio->bi_sector); |
1006 | else if (m && atomic_read(&m->error_count)) | 486 | else if (m && atomic_read(&m->error_count)) |
1007 | m = NULL; | 487 | m = NULL; |
@@ -1024,57 +504,6 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) | |||
1024 | * NOSYNC: increment pending, just write to the default mirror | 504 | * NOSYNC: increment pending, just write to the default mirror |
1025 | *---------------------------------------------------------------*/ | 505 | *---------------------------------------------------------------*/ |
1026 | 506 | ||
1027 | /* __bio_mark_nosync | ||
1028 | * @ms | ||
1029 | * @bio | ||
1030 | * @done | ||
1031 | * @error | ||
1032 | * | ||
1033 | * The bio was written on some mirror(s) but failed on other mirror(s). | ||
1034 | * We can successfully endio the bio but should avoid the region being | ||
1035 | * marked clean by setting the state RH_NOSYNC. | ||
1036 | * | ||
1037 | * This function is _not_ safe in interrupt context! | ||
1038 | */ | ||
1039 | static void __bio_mark_nosync(struct mirror_set *ms, | ||
1040 | struct bio *bio, unsigned done, int error) | ||
1041 | { | ||
1042 | unsigned long flags; | ||
1043 | struct region_hash *rh = &ms->rh; | ||
1044 | struct dm_dirty_log *log = ms->rh.log; | ||
1045 | struct region *reg; | ||
1046 | region_t region = bio_to_region(rh, bio); | ||
1047 | int recovering = 0; | ||
1048 | |||
1049 | /* We must inform the log that the sync count has changed. */ | ||
1050 | log->type->set_region_sync(log, region, 0); | ||
1051 | ms->in_sync = 0; | ||
1052 | |||
1053 | read_lock(&rh->hash_lock); | ||
1054 | reg = __rh_find(rh, region); | ||
1055 | read_unlock(&rh->hash_lock); | ||
1056 | |||
1057 | /* region hash entry should exist because write was in-flight */ | ||
1058 | BUG_ON(!reg); | ||
1059 | BUG_ON(!list_empty(®->list)); | ||
1060 | |||
1061 | spin_lock_irqsave(&rh->region_lock, flags); | ||
1062 | /* | ||
1063 | * Possible cases: | ||
1064 | * 1) RH_DIRTY | ||
1065 | * 2) RH_NOSYNC: was dirty, other preceeding writes failed | ||
1066 | * 3) RH_RECOVERING: flushing pending writes | ||
1067 | * Either case, the region should have not been connected to list. | ||
1068 | */ | ||
1069 | recovering = (reg->state == RH_RECOVERING); | ||
1070 | reg->state = RH_NOSYNC; | ||
1071 | BUG_ON(!list_empty(®->list)); | ||
1072 | spin_unlock_irqrestore(&rh->region_lock, flags); | ||
1073 | |||
1074 | bio_endio(bio, error); | ||
1075 | if (recovering) | ||
1076 | complete_resync_work(reg, 0); | ||
1077 | } | ||
1078 | 507 | ||
1079 | static void write_callback(unsigned long error, void *context) | 508 | static void write_callback(unsigned long error, void *context) |
1080 | { | 509 | { |
@@ -1119,7 +548,7 @@ static void write_callback(unsigned long error, void *context) | |||
1119 | bio_list_add(&ms->failures, bio); | 548 | bio_list_add(&ms->failures, bio); |
1120 | spin_unlock_irqrestore(&ms->lock, flags); | 549 | spin_unlock_irqrestore(&ms->lock, flags); |
1121 | if (should_wake) | 550 | if (should_wake) |
1122 | wake(ms); | 551 | wakeup_mirrord(ms); |
1123 | return; | 552 | return; |
1124 | } | 553 | } |
1125 | out: | 554 | out: |
@@ -1149,7 +578,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) | |||
1149 | */ | 578 | */ |
1150 | bio_set_m(bio, get_default_mirror(ms)); | 579 | bio_set_m(bio, get_default_mirror(ms)); |
1151 | 580 | ||
1152 | (void) dm_io(&io_req, ms->nr_mirrors, io, NULL); | 581 | BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL)); |
1153 | } | 582 | } |
1154 | 583 | ||
1155 | static void do_writes(struct mirror_set *ms, struct bio_list *writes) | 584 | static void do_writes(struct mirror_set *ms, struct bio_list *writes) |
@@ -1169,18 +598,19 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
1169 | bio_list_init(&recover); | 598 | bio_list_init(&recover); |
1170 | 599 | ||
1171 | while ((bio = bio_list_pop(writes))) { | 600 | while ((bio = bio_list_pop(writes))) { |
1172 | state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); | 601 | state = dm_rh_get_state(ms->rh, |
602 | dm_rh_bio_to_region(ms->rh, bio), 1); | ||
1173 | switch (state) { | 603 | switch (state) { |
1174 | case RH_CLEAN: | 604 | case DM_RH_CLEAN: |
1175 | case RH_DIRTY: | 605 | case DM_RH_DIRTY: |
1176 | this_list = &sync; | 606 | this_list = &sync; |
1177 | break; | 607 | break; |
1178 | 608 | ||
1179 | case RH_NOSYNC: | 609 | case DM_RH_NOSYNC: |
1180 | this_list = &nosync; | 610 | this_list = &nosync; |
1181 | break; | 611 | break; |
1182 | 612 | ||
1183 | case RH_RECOVERING: | 613 | case DM_RH_RECOVERING: |
1184 | this_list = &recover; | 614 | this_list = &recover; |
1185 | break; | 615 | break; |
1186 | } | 616 | } |
@@ -1193,9 +623,9 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
1193 | * be written to (writes to recover regions are going to | 623 | * be written to (writes to recover regions are going to |
1194 | * be delayed). | 624 | * be delayed). |
1195 | */ | 625 | */ |
1196 | rh_inc_pending(&ms->rh, &sync); | 626 | dm_rh_inc_pending(ms->rh, &sync); |
1197 | rh_inc_pending(&ms->rh, &nosync); | 627 | dm_rh_inc_pending(ms->rh, &nosync); |
1198 | ms->log_failure = rh_flush(&ms->rh) ? 1 : 0; | 628 | ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; |
1199 | 629 | ||
1200 | /* | 630 | /* |
1201 | * Dispatch io. | 631 | * Dispatch io. |
@@ -1204,13 +634,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
1204 | spin_lock_irq(&ms->lock); | 634 | spin_lock_irq(&ms->lock); |
1205 | bio_list_merge(&ms->failures, &sync); | 635 | bio_list_merge(&ms->failures, &sync); |
1206 | spin_unlock_irq(&ms->lock); | 636 | spin_unlock_irq(&ms->lock); |
1207 | wake(ms); | 637 | wakeup_mirrord(ms); |
1208 | } else | 638 | } else |
1209 | while ((bio = bio_list_pop(&sync))) | 639 | while ((bio = bio_list_pop(&sync))) |
1210 | do_write(ms, bio); | 640 | do_write(ms, bio); |
1211 | 641 | ||
1212 | while ((bio = bio_list_pop(&recover))) | 642 | while ((bio = bio_list_pop(&recover))) |
1213 | rh_delay(&ms->rh, bio); | 643 | dm_rh_delay(ms->rh, bio); |
1214 | 644 | ||
1215 | while ((bio = bio_list_pop(&nosync))) { | 645 | while ((bio = bio_list_pop(&nosync))) { |
1216 | map_bio(get_default_mirror(ms), bio); | 646 | map_bio(get_default_mirror(ms), bio); |
@@ -1227,7 +657,8 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) | |||
1227 | 657 | ||
1228 | if (!ms->log_failure) { | 658 | if (!ms->log_failure) { |
1229 | while ((bio = bio_list_pop(failures))) | 659 | while ((bio = bio_list_pop(failures))) |
1230 | __bio_mark_nosync(ms, bio, bio->bi_size, 0); | 660 | ms->in_sync = 0; |
661 | dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0); | ||
1231 | return; | 662 | return; |
1232 | } | 663 | } |
1233 | 664 | ||
@@ -1280,8 +711,8 @@ static void trigger_event(struct work_struct *work) | |||
1280 | *---------------------------------------------------------------*/ | 711 | *---------------------------------------------------------------*/ |
1281 | static void do_mirror(struct work_struct *work) | 712 | static void do_mirror(struct work_struct *work) |
1282 | { | 713 | { |
1283 | struct mirror_set *ms =container_of(work, struct mirror_set, | 714 | struct mirror_set *ms = container_of(work, struct mirror_set, |
1284 | kmirrord_work); | 715 | kmirrord_work); |
1285 | struct bio_list reads, writes, failures; | 716 | struct bio_list reads, writes, failures; |
1286 | unsigned long flags; | 717 | unsigned long flags; |
1287 | 718 | ||
@@ -1294,7 +725,7 @@ static void do_mirror(struct work_struct *work) | |||
1294 | bio_list_init(&ms->failures); | 725 | bio_list_init(&ms->failures); |
1295 | spin_unlock_irqrestore(&ms->lock, flags); | 726 | spin_unlock_irqrestore(&ms->lock, flags); |
1296 | 727 | ||
1297 | rh_update_states(&ms->rh); | 728 | dm_rh_update_states(ms->rh, errors_handled(ms)); |
1298 | do_recovery(ms); | 729 | do_recovery(ms); |
1299 | do_reads(ms, &reads); | 730 | do_reads(ms, &reads); |
1300 | do_writes(ms, &writes); | 731 | do_writes(ms, &writes); |
@@ -1303,7 +734,6 @@ static void do_mirror(struct work_struct *work) | |||
1303 | dm_table_unplug_all(ms->ti->table); | 734 | dm_table_unplug_all(ms->ti->table); |
1304 | } | 735 | } |
1305 | 736 | ||
1306 | |||
1307 | /*----------------------------------------------------------------- | 737 | /*----------------------------------------------------------------- |
1308 | * Target functions | 738 | * Target functions |
1309 | *---------------------------------------------------------------*/ | 739 | *---------------------------------------------------------------*/ |
@@ -1315,9 +745,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, | |||
1315 | size_t len; | 745 | size_t len; |
1316 | struct mirror_set *ms = NULL; | 746 | struct mirror_set *ms = NULL; |
1317 | 747 | ||
1318 | if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors)) | ||
1319 | return NULL; | ||
1320 | |||
1321 | len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); | 748 | len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); |
1322 | 749 | ||
1323 | ms = kzalloc(len, GFP_KERNEL); | 750 | ms = kzalloc(len, GFP_KERNEL); |
@@ -1353,7 +780,11 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, | |||
1353 | return NULL; | 780 | return NULL; |
1354 | } | 781 | } |
1355 | 782 | ||
1356 | if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { | 783 | ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord, |
784 | wakeup_all_recovery_waiters, | ||
785 | ms->ti->begin, MAX_RECOVERY, | ||
786 | dl, region_size, ms->nr_regions); | ||
787 | if (IS_ERR(ms->rh)) { | ||
1357 | ti->error = "Error creating dirty region hash"; | 788 | ti->error = "Error creating dirty region hash"; |
1358 | dm_io_client_destroy(ms->io_client); | 789 | dm_io_client_destroy(ms->io_client); |
1359 | mempool_destroy(ms->read_record_pool); | 790 | mempool_destroy(ms->read_record_pool); |
@@ -1371,7 +802,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti, | |||
1371 | dm_put_device(ti, ms->mirror[m].dev); | 802 | dm_put_device(ti, ms->mirror[m].dev); |
1372 | 803 | ||
1373 | dm_io_client_destroy(ms->io_client); | 804 | dm_io_client_destroy(ms->io_client); |
1374 | rh_exit(&ms->rh); | 805 | dm_region_hash_destroy(ms->rh); |
1375 | mempool_destroy(ms->read_record_pool); | 806 | mempool_destroy(ms->read_record_pool); |
1376 | kfree(ms); | 807 | kfree(ms); |
1377 | } | 808 | } |
@@ -1411,10 +842,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, | |||
1411 | * Create dirty log: log_type #log_params <log_params> | 842 | * Create dirty log: log_type #log_params <log_params> |
1412 | */ | 843 | */ |
1413 | static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, | 844 | static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, |
1414 | unsigned int argc, char **argv, | 845 | unsigned argc, char **argv, |
1415 | unsigned int *args_used) | 846 | unsigned *args_used) |
1416 | { | 847 | { |
1417 | unsigned int param_count; | 848 | unsigned param_count; |
1418 | struct dm_dirty_log *dl; | 849 | struct dm_dirty_log *dl; |
1419 | 850 | ||
1420 | if (argc < 2) { | 851 | if (argc < 2) { |
@@ -1545,7 +976,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1545 | } | 976 | } |
1546 | 977 | ||
1547 | ti->private = ms; | 978 | ti->private = ms; |
1548 | ti->split_io = ms->rh.region_size; | 979 | ti->split_io = dm_rh_get_region_size(ms->rh); |
1549 | 980 | ||
1550 | ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); | 981 | ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); |
1551 | if (!ms->kmirrord_wq) { | 982 | if (!ms->kmirrord_wq) { |
@@ -1580,11 +1011,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1580 | goto err_destroy_wq; | 1011 | goto err_destroy_wq; |
1581 | } | 1012 | } |
1582 | 1013 | ||
1583 | r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); | 1014 | r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client); |
1584 | if (r) | 1015 | if (r) |
1585 | goto err_destroy_wq; | 1016 | goto err_destroy_wq; |
1586 | 1017 | ||
1587 | wake(ms); | 1018 | wakeup_mirrord(ms); |
1588 | return 0; | 1019 | return 0; |
1589 | 1020 | ||
1590 | err_destroy_wq: | 1021 | err_destroy_wq: |
@@ -1605,22 +1036,6 @@ static void mirror_dtr(struct dm_target *ti) | |||
1605 | free_context(ms, ti, ms->nr_mirrors); | 1036 | free_context(ms, ti, ms->nr_mirrors); |
1606 | } | 1037 | } |
1607 | 1038 | ||
1608 | static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) | ||
1609 | { | ||
1610 | unsigned long flags; | ||
1611 | int should_wake = 0; | ||
1612 | struct bio_list *bl; | ||
1613 | |||
1614 | bl = (rw == WRITE) ? &ms->writes : &ms->reads; | ||
1615 | spin_lock_irqsave(&ms->lock, flags); | ||
1616 | should_wake = !(bl->head); | ||
1617 | bio_list_add(bl, bio); | ||
1618 | spin_unlock_irqrestore(&ms->lock, flags); | ||
1619 | |||
1620 | if (should_wake) | ||
1621 | wake(ms); | ||
1622 | } | ||
1623 | |||
1624 | /* | 1039 | /* |
1625 | * Mirror mapping function | 1040 | * Mirror mapping function |
1626 | */ | 1041 | */ |
@@ -1631,16 +1046,16 @@ static int mirror_map(struct dm_target *ti, struct bio *bio, | |||
1631 | struct mirror *m; | 1046 | struct mirror *m; |
1632 | struct mirror_set *ms = ti->private; | 1047 | struct mirror_set *ms = ti->private; |
1633 | struct dm_raid1_read_record *read_record = NULL; | 1048 | struct dm_raid1_read_record *read_record = NULL; |
1049 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); | ||
1634 | 1050 | ||
1635 | if (rw == WRITE) { | 1051 | if (rw == WRITE) { |
1636 | /* Save region for mirror_end_io() handler */ | 1052 | /* Save region for mirror_end_io() handler */ |
1637 | map_context->ll = bio_to_region(&ms->rh, bio); | 1053 | map_context->ll = dm_rh_bio_to_region(ms->rh, bio); |
1638 | queue_bio(ms, bio, rw); | 1054 | queue_bio(ms, bio, rw); |
1639 | return DM_MAPIO_SUBMITTED; | 1055 | return DM_MAPIO_SUBMITTED; |
1640 | } | 1056 | } |
1641 | 1057 | ||
1642 | r = ms->rh.log->type->in_sync(ms->rh.log, | 1058 | r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); |
1643 | bio_to_region(&ms->rh, bio), 0); | ||
1644 | if (r < 0 && r != -EWOULDBLOCK) | 1059 | if (r < 0 && r != -EWOULDBLOCK) |
1645 | return r; | 1060 | return r; |
1646 | 1061 | ||
@@ -1688,7 +1103,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, | |||
1688 | * We need to dec pending if this was a write. | 1103 | * We need to dec pending if this was a write. |
1689 | */ | 1104 | */ |
1690 | if (rw == WRITE) { | 1105 | if (rw == WRITE) { |
1691 | rh_dec(&ms->rh, map_context->ll); | 1106 | dm_rh_dec(ms->rh, map_context->ll); |
1692 | return error; | 1107 | return error; |
1693 | } | 1108 | } |
1694 | 1109 | ||
@@ -1744,7 +1159,7 @@ out: | |||
1744 | static void mirror_presuspend(struct dm_target *ti) | 1159 | static void mirror_presuspend(struct dm_target *ti) |
1745 | { | 1160 | { |
1746 | struct mirror_set *ms = (struct mirror_set *) ti->private; | 1161 | struct mirror_set *ms = (struct mirror_set *) ti->private; |
1747 | struct dm_dirty_log *log = ms->rh.log; | 1162 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); |
1748 | 1163 | ||
1749 | atomic_set(&ms->suspend, 1); | 1164 | atomic_set(&ms->suspend, 1); |
1750 | 1165 | ||
@@ -1752,10 +1167,10 @@ static void mirror_presuspend(struct dm_target *ti) | |||
1752 | * We must finish up all the work that we've | 1167 | * We must finish up all the work that we've |
1753 | * generated (i.e. recovery work). | 1168 | * generated (i.e. recovery work). |
1754 | */ | 1169 | */ |
1755 | rh_stop_recovery(&ms->rh); | 1170 | dm_rh_stop_recovery(ms->rh); |
1756 | 1171 | ||
1757 | wait_event(_kmirrord_recovery_stopped, | 1172 | wait_event(_kmirrord_recovery_stopped, |
1758 | !atomic_read(&ms->rh.recovery_in_flight)); | 1173 | !dm_rh_recovery_in_flight(ms->rh)); |
1759 | 1174 | ||
1760 | if (log->type->presuspend && log->type->presuspend(log)) | 1175 | if (log->type->presuspend && log->type->presuspend(log)) |
1761 | /* FIXME: need better error handling */ | 1176 | /* FIXME: need better error handling */ |
@@ -1773,7 +1188,7 @@ static void mirror_presuspend(struct dm_target *ti) | |||
1773 | static void mirror_postsuspend(struct dm_target *ti) | 1188 | static void mirror_postsuspend(struct dm_target *ti) |
1774 | { | 1189 | { |
1775 | struct mirror_set *ms = ti->private; | 1190 | struct mirror_set *ms = ti->private; |
1776 | struct dm_dirty_log *log = ms->rh.log; | 1191 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); |
1777 | 1192 | ||
1778 | if (log->type->postsuspend && log->type->postsuspend(log)) | 1193 | if (log->type->postsuspend && log->type->postsuspend(log)) |
1779 | /* FIXME: need better error handling */ | 1194 | /* FIXME: need better error handling */ |
@@ -1783,13 +1198,13 @@ static void mirror_postsuspend(struct dm_target *ti) | |||
1783 | static void mirror_resume(struct dm_target *ti) | 1198 | static void mirror_resume(struct dm_target *ti) |
1784 | { | 1199 | { |
1785 | struct mirror_set *ms = ti->private; | 1200 | struct mirror_set *ms = ti->private; |
1786 | struct dm_dirty_log *log = ms->rh.log; | 1201 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); |
1787 | 1202 | ||
1788 | atomic_set(&ms->suspend, 0); | 1203 | atomic_set(&ms->suspend, 0); |
1789 | if (log->type->resume && log->type->resume(log)) | 1204 | if (log->type->resume && log->type->resume(log)) |
1790 | /* FIXME: need better error handling */ | 1205 | /* FIXME: need better error handling */ |
1791 | DMWARN("log resume failed"); | 1206 | DMWARN("log resume failed"); |
1792 | rh_start_recovery(&ms->rh); | 1207 | dm_rh_start_recovery(ms->rh); |
1793 | } | 1208 | } |
1794 | 1209 | ||
1795 | /* | 1210 | /* |
@@ -1821,7 +1236,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type, | |||
1821 | { | 1236 | { |
1822 | unsigned int m, sz = 0; | 1237 | unsigned int m, sz = 0; |
1823 | struct mirror_set *ms = (struct mirror_set *) ti->private; | 1238 | struct mirror_set *ms = (struct mirror_set *) ti->private; |
1824 | struct dm_dirty_log *log = ms->rh.log; | 1239 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); |
1825 | char buffer[ms->nr_mirrors + 1]; | 1240 | char buffer[ms->nr_mirrors + 1]; |
1826 | 1241 | ||
1827 | switch (type) { | 1242 | switch (type) { |
@@ -1834,15 +1249,15 @@ static int mirror_status(struct dm_target *ti, status_type_t type, | |||
1834 | buffer[m] = '\0'; | 1249 | buffer[m] = '\0'; |
1835 | 1250 | ||
1836 | DMEMIT("%llu/%llu 1 %s ", | 1251 | DMEMIT("%llu/%llu 1 %s ", |
1837 | (unsigned long long)log->type->get_sync_count(ms->rh.log), | 1252 | (unsigned long long)log->type->get_sync_count(log), |
1838 | (unsigned long long)ms->nr_regions, buffer); | 1253 | (unsigned long long)ms->nr_regions, buffer); |
1839 | 1254 | ||
1840 | sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz); | 1255 | sz += log->type->status(log, type, result+sz, maxlen-sz); |
1841 | 1256 | ||
1842 | break; | 1257 | break; |
1843 | 1258 | ||
1844 | case STATUSTYPE_TABLE: | 1259 | case STATUSTYPE_TABLE: |
1845 | sz = log->type->status(ms->rh.log, type, result, maxlen); | 1260 | sz = log->type->status(log, type, result, maxlen); |
1846 | 1261 | ||
1847 | DMEMIT("%d", ms->nr_mirrors); | 1262 | DMEMIT("%d", ms->nr_mirrors); |
1848 | for (m = 0; m < ms->nr_mirrors; m++) | 1263 | for (m = 0; m < ms->nr_mirrors; m++) |
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c new file mode 100644 index 000000000000..59f8d9df9e1a --- /dev/null +++ b/drivers/md/dm-region-hash.c | |||
@@ -0,0 +1,704 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 Sistina Software Limited. | ||
3 | * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. | ||
4 | * | ||
5 | * This file is released under the GPL. | ||
6 | */ | ||
7 | |||
8 | #include <linux/dm-dirty-log.h> | ||
9 | #include <linux/dm-region-hash.h> | ||
10 | |||
11 | #include <linux/ctype.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/vmalloc.h> | ||
15 | |||
16 | #include "dm.h" | ||
17 | #include "dm-bio-list.h" | ||
18 | |||
19 | #define DM_MSG_PREFIX "region hash" | ||
20 | |||
21 | /*----------------------------------------------------------------- | ||
22 | * Region hash | ||
23 | * | ||
24 | * The mirror splits itself up into discrete regions. Each | ||
25 | * region can be in one of three states: clean, dirty, | ||
26 | * nosync. There is no need to put clean regions in the hash. | ||
27 | * | ||
28 | * In addition to being present in the hash table a region _may_ | ||
29 | * be present on one of three lists. | ||
30 | * | ||
31 | * clean_regions: Regions on this list have no io pending to | ||
32 | * them, they are in sync, we are no longer interested in them, | ||
33 | * they are dull. dm_rh_update_states() will remove them from the | ||
34 | * hash table. | ||
35 | * | ||
36 | * quiesced_regions: These regions have been spun down, ready | ||
37 | * for recovery. rh_recovery_start() will remove regions from | ||
38 | * this list and hand them to kmirrord, which will schedule the | ||
39 | * recovery io with kcopyd. | ||
40 | * | ||
41 | * recovered_regions: Regions that kcopyd has successfully | ||
42 | * recovered. dm_rh_update_states() will now schedule any delayed | ||
43 | * io, up the recovery_count, and remove the region from the | ||
44 | * hash. | ||
45 | * | ||
46 | * There are 2 locks: | ||
47 | * A rw spin lock 'hash_lock' protects just the hash table, | ||
48 | * this is never held in write mode from interrupt context, | ||
49 | * which I believe means that we only have to disable irqs when | ||
50 | * doing a write lock. | ||
51 | * | ||
52 | * An ordinary spin lock 'region_lock' that protects the three | ||
53 | * lists in the region_hash, with the 'state', 'list' and | ||
54 | * 'delayed_bios' fields of the regions. This is used from irq | ||
55 | * context, so all other uses will have to suspend local irqs. | ||
56 | *---------------------------------------------------------------*/ | ||
57 | struct dm_region_hash { | ||
58 | uint32_t region_size; | ||
59 | unsigned region_shift; | ||
60 | |||
61 | /* holds persistent region state */ | ||
62 | struct dm_dirty_log *log; | ||
63 | |||
64 | /* hash table */ | ||
65 | rwlock_t hash_lock; | ||
66 | mempool_t *region_pool; | ||
67 | unsigned mask; | ||
68 | unsigned nr_buckets; | ||
69 | unsigned prime; | ||
70 | unsigned shift; | ||
71 | struct list_head *buckets; | ||
72 | |||
73 | unsigned max_recovery; /* Max # of regions to recover in parallel */ | ||
74 | |||
75 | spinlock_t region_lock; | ||
76 | atomic_t recovery_in_flight; | ||
77 | struct semaphore recovery_count; | ||
78 | struct list_head clean_regions; | ||
79 | struct list_head quiesced_regions; | ||
80 | struct list_head recovered_regions; | ||
81 | struct list_head failed_recovered_regions; | ||
82 | |||
83 | void *context; | ||
84 | sector_t target_begin; | ||
85 | |||
86 | /* Callback function to schedule bios writes */ | ||
87 | void (*dispatch_bios)(void *context, struct bio_list *bios); | ||
88 | |||
89 | /* Callback function to wakeup callers worker thread. */ | ||
90 | void (*wakeup_workers)(void *context); | ||
91 | |||
92 | /* Callback function to wakeup callers recovery waiters. */ | ||
93 | void (*wakeup_all_recovery_waiters)(void *context); | ||
94 | }; | ||
95 | |||
96 | struct dm_region { | ||
97 | struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */ | ||
98 | region_t key; | ||
99 | int state; | ||
100 | |||
101 | struct list_head hash_list; | ||
102 | struct list_head list; | ||
103 | |||
104 | atomic_t pending; | ||
105 | struct bio_list delayed_bios; | ||
106 | }; | ||
107 | |||
108 | /* | ||
109 | * Conversion fns | ||
110 | */ | ||
111 | static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) | ||
112 | { | ||
113 | return sector >> rh->region_shift; | ||
114 | } | ||
115 | |||
116 | sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) | ||
117 | { | ||
118 | return region << rh->region_shift; | ||
119 | } | ||
120 | EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); | ||
121 | |||
122 | region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) | ||
123 | { | ||
124 | return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); | ||
125 | } | ||
126 | EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); | ||
127 | |||
128 | void *dm_rh_region_context(struct dm_region *reg) | ||
129 | { | ||
130 | return reg->rh->context; | ||
131 | } | ||
132 | EXPORT_SYMBOL_GPL(dm_rh_region_context); | ||
133 | |||
134 | region_t dm_rh_get_region_key(struct dm_region *reg) | ||
135 | { | ||
136 | return reg->key; | ||
137 | } | ||
138 | EXPORT_SYMBOL_GPL(dm_rh_get_region_key); | ||
139 | |||
140 | sector_t dm_rh_get_region_size(struct dm_region_hash *rh) | ||
141 | { | ||
142 | return rh->region_size; | ||
143 | } | ||
144 | EXPORT_SYMBOL_GPL(dm_rh_get_region_size); | ||
145 | |||
146 | /* | ||
147 | * FIXME: shall we pass in a structure instead of all these args to | ||
148 | * dm_region_hash_create()???? | ||
149 | */ | ||
150 | #define RH_HASH_MULT 2654435387U | ||
151 | #define RH_HASH_SHIFT 12 | ||
152 | |||
153 | #define MIN_REGIONS 64 | ||
154 | struct dm_region_hash *dm_region_hash_create( | ||
155 | void *context, void (*dispatch_bios)(void *context, | ||
156 | struct bio_list *bios), | ||
157 | void (*wakeup_workers)(void *context), | ||
158 | void (*wakeup_all_recovery_waiters)(void *context), | ||
159 | sector_t target_begin, unsigned max_recovery, | ||
160 | struct dm_dirty_log *log, uint32_t region_size, | ||
161 | region_t nr_regions) | ||
162 | { | ||
163 | struct dm_region_hash *rh; | ||
164 | unsigned nr_buckets, max_buckets; | ||
165 | size_t i; | ||
166 | |||
167 | /* | ||
168 | * Calculate a suitable number of buckets for our hash | ||
169 | * table. | ||
170 | */ | ||
171 | max_buckets = nr_regions >> 6; | ||
172 | for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) | ||
173 | ; | ||
174 | nr_buckets >>= 1; | ||
175 | |||
176 | rh = kmalloc(sizeof(*rh), GFP_KERNEL); | ||
177 | if (!rh) { | ||
178 | DMERR("unable to allocate region hash memory"); | ||
179 | return ERR_PTR(-ENOMEM); | ||
180 | } | ||
181 | |||
182 | rh->context = context; | ||
183 | rh->dispatch_bios = dispatch_bios; | ||
184 | rh->wakeup_workers = wakeup_workers; | ||
185 | rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters; | ||
186 | rh->target_begin = target_begin; | ||
187 | rh->max_recovery = max_recovery; | ||
188 | rh->log = log; | ||
189 | rh->region_size = region_size; | ||
190 | rh->region_shift = ffs(region_size) - 1; | ||
191 | rwlock_init(&rh->hash_lock); | ||
192 | rh->mask = nr_buckets - 1; | ||
193 | rh->nr_buckets = nr_buckets; | ||
194 | |||
195 | rh->shift = RH_HASH_SHIFT; | ||
196 | rh->prime = RH_HASH_MULT; | ||
197 | |||
198 | rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); | ||
199 | if (!rh->buckets) { | ||
200 | DMERR("unable to allocate region hash bucket memory"); | ||
201 | kfree(rh); | ||
202 | return ERR_PTR(-ENOMEM); | ||
203 | } | ||
204 | |||
205 | for (i = 0; i < nr_buckets; i++) | ||
206 | INIT_LIST_HEAD(rh->buckets + i); | ||
207 | |||
208 | spin_lock_init(&rh->region_lock); | ||
209 | sema_init(&rh->recovery_count, 0); | ||
210 | atomic_set(&rh->recovery_in_flight, 0); | ||
211 | INIT_LIST_HEAD(&rh->clean_regions); | ||
212 | INIT_LIST_HEAD(&rh->quiesced_regions); | ||
213 | INIT_LIST_HEAD(&rh->recovered_regions); | ||
214 | INIT_LIST_HEAD(&rh->failed_recovered_regions); | ||
215 | |||
216 | rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, | ||
217 | sizeof(struct dm_region)); | ||
218 | if (!rh->region_pool) { | ||
219 | vfree(rh->buckets); | ||
220 | kfree(rh); | ||
221 | rh = ERR_PTR(-ENOMEM); | ||
222 | } | ||
223 | |||
224 | return rh; | ||
225 | } | ||
226 | EXPORT_SYMBOL_GPL(dm_region_hash_create); | ||
227 | |||
228 | void dm_region_hash_destroy(struct dm_region_hash *rh) | ||
229 | { | ||
230 | unsigned h; | ||
231 | struct dm_region *reg, *nreg; | ||
232 | |||
233 | BUG_ON(!list_empty(&rh->quiesced_regions)); | ||
234 | for (h = 0; h < rh->nr_buckets; h++) { | ||
235 | list_for_each_entry_safe(reg, nreg, rh->buckets + h, | ||
236 | hash_list) { | ||
237 | BUG_ON(atomic_read(®->pending)); | ||
238 | mempool_free(reg, rh->region_pool); | ||
239 | } | ||
240 | } | ||
241 | |||
242 | if (rh->log) | ||
243 | dm_dirty_log_destroy(rh->log); | ||
244 | |||
245 | if (rh->region_pool) | ||
246 | mempool_destroy(rh->region_pool); | ||
247 | |||
248 | vfree(rh->buckets); | ||
249 | kfree(rh); | ||
250 | } | ||
251 | EXPORT_SYMBOL_GPL(dm_region_hash_destroy); | ||
252 | |||
253 | struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh) | ||
254 | { | ||
255 | return rh->log; | ||
256 | } | ||
257 | EXPORT_SYMBOL_GPL(dm_rh_dirty_log); | ||
258 | |||
259 | static unsigned rh_hash(struct dm_region_hash *rh, region_t region) | ||
260 | { | ||
261 | return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask; | ||
262 | } | ||
263 | |||
264 | static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region) | ||
265 | { | ||
266 | struct dm_region *reg; | ||
267 | struct list_head *bucket = rh->buckets + rh_hash(rh, region); | ||
268 | |||
269 | list_for_each_entry(reg, bucket, hash_list) | ||
270 | if (reg->key == region) | ||
271 | return reg; | ||
272 | |||
273 | return NULL; | ||
274 | } | ||
275 | |||
276 | static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg) | ||
277 | { | ||
278 | list_add(®->hash_list, rh->buckets + rh_hash(rh, reg->key)); | ||
279 | } | ||
280 | |||
281 | static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region) | ||
282 | { | ||
283 | struct dm_region *reg, *nreg; | ||
284 | |||
285 | nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); | ||
286 | if (unlikely(!nreg)) | ||
287 | nreg = kmalloc(sizeof(*nreg), GFP_NOIO); | ||
288 | |||
289 | nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? | ||
290 | DM_RH_CLEAN : DM_RH_NOSYNC; | ||
291 | nreg->rh = rh; | ||
292 | nreg->key = region; | ||
293 | INIT_LIST_HEAD(&nreg->list); | ||
294 | atomic_set(&nreg->pending, 0); | ||
295 | bio_list_init(&nreg->delayed_bios); | ||
296 | |||
297 | write_lock_irq(&rh->hash_lock); | ||
298 | reg = __rh_lookup(rh, region); | ||
299 | if (reg) | ||
300 | /* We lost the race. */ | ||
301 | mempool_free(nreg, rh->region_pool); | ||
302 | else { | ||
303 | __rh_insert(rh, nreg); | ||
304 | if (nreg->state == DM_RH_CLEAN) { | ||
305 | spin_lock(&rh->region_lock); | ||
306 | list_add(&nreg->list, &rh->clean_regions); | ||
307 | spin_unlock(&rh->region_lock); | ||
308 | } | ||
309 | |||
310 | reg = nreg; | ||
311 | } | ||
312 | write_unlock_irq(&rh->hash_lock); | ||
313 | |||
314 | return reg; | ||
315 | } | ||
316 | |||
317 | static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region) | ||
318 | { | ||
319 | struct dm_region *reg; | ||
320 | |||
321 | reg = __rh_lookup(rh, region); | ||
322 | if (!reg) { | ||
323 | read_unlock(&rh->hash_lock); | ||
324 | reg = __rh_alloc(rh, region); | ||
325 | read_lock(&rh->hash_lock); | ||
326 | } | ||
327 | |||
328 | return reg; | ||
329 | } | ||
330 | |||
331 | int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block) | ||
332 | { | ||
333 | int r; | ||
334 | struct dm_region *reg; | ||
335 | |||
336 | read_lock(&rh->hash_lock); | ||
337 | reg = __rh_lookup(rh, region); | ||
338 | read_unlock(&rh->hash_lock); | ||
339 | |||
340 | if (reg) | ||
341 | return reg->state; | ||
342 | |||
343 | /* | ||
344 | * The region wasn't in the hash, so we fall back to the | ||
345 | * dirty log. | ||
346 | */ | ||
347 | r = rh->log->type->in_sync(rh->log, region, may_block); | ||
348 | |||
349 | /* | ||
350 | * Any error from the dirty log (eg. -EWOULDBLOCK) gets | ||
351 | * taken as a DM_RH_NOSYNC | ||
352 | */ | ||
353 | return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC; | ||
354 | } | ||
355 | EXPORT_SYMBOL_GPL(dm_rh_get_state); | ||
356 | |||
357 | static void complete_resync_work(struct dm_region *reg, int success) | ||
358 | { | ||
359 | struct dm_region_hash *rh = reg->rh; | ||
360 | |||
361 | rh->log->type->set_region_sync(rh->log, reg->key, success); | ||
362 | |||
363 | /* | ||
364 | * Dispatch the bios before we call 'wake_up_all'. | ||
365 | * This is important because if we are suspending, | ||
366 | * we want to know that recovery is complete and | ||
367 | * the work queue is flushed. If we wake_up_all | ||
368 | * before we dispatch_bios (queue bios and call wake()), | ||
369 | * then we risk suspending before the work queue | ||
370 | * has been properly flushed. | ||
371 | */ | ||
372 | rh->dispatch_bios(rh->context, ®->delayed_bios); | ||
373 | if (atomic_dec_and_test(&rh->recovery_in_flight)) | ||
374 | rh->wakeup_all_recovery_waiters(rh->context); | ||
375 | up(&rh->recovery_count); | ||
376 | } | ||
377 | |||
378 | /* dm_rh_mark_nosync | ||
379 | * @ms | ||
380 | * @bio | ||
381 | * @done | ||
382 | * @error | ||
383 | * | ||
384 | * The bio was written on some mirror(s) but failed on other mirror(s). | ||
385 | * We can successfully endio the bio but should avoid the region being | ||
386 | * marked clean by setting the state DM_RH_NOSYNC. | ||
387 | * | ||
388 | * This function is _not_ safe in interrupt context! | ||
389 | */ | ||
390 | void dm_rh_mark_nosync(struct dm_region_hash *rh, | ||
391 | struct bio *bio, unsigned done, int error) | ||
392 | { | ||
393 | unsigned long flags; | ||
394 | struct dm_dirty_log *log = rh->log; | ||
395 | struct dm_region *reg; | ||
396 | region_t region = dm_rh_bio_to_region(rh, bio); | ||
397 | int recovering = 0; | ||
398 | |||
399 | /* We must inform the log that the sync count has changed. */ | ||
400 | log->type->set_region_sync(log, region, 0); | ||
401 | |||
402 | read_lock(&rh->hash_lock); | ||
403 | reg = __rh_find(rh, region); | ||
404 | read_unlock(&rh->hash_lock); | ||
405 | |||
406 | /* region hash entry should exist because write was in-flight */ | ||
407 | BUG_ON(!reg); | ||
408 | BUG_ON(!list_empty(®->list)); | ||
409 | |||
410 | spin_lock_irqsave(&rh->region_lock, flags); | ||
411 | /* | ||
412 | * Possible cases: | ||
413 | * 1) DM_RH_DIRTY | ||
414 | * 2) DM_RH_NOSYNC: was dirty, other preceeding writes failed | ||
415 | * 3) DM_RH_RECOVERING: flushing pending writes | ||
416 | * Either case, the region should have not been connected to list. | ||
417 | */ | ||
418 | recovering = (reg->state == DM_RH_RECOVERING); | ||
419 | reg->state = DM_RH_NOSYNC; | ||
420 | BUG_ON(!list_empty(®->list)); | ||
421 | spin_unlock_irqrestore(&rh->region_lock, flags); | ||
422 | |||
423 | bio_endio(bio, error); | ||
424 | if (recovering) | ||
425 | complete_resync_work(reg, 0); | ||
426 | } | ||
427 | EXPORT_SYMBOL_GPL(dm_rh_mark_nosync); | ||
428 | |||
429 | void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled) | ||
430 | { | ||
431 | struct dm_region *reg, *next; | ||
432 | |||
433 | LIST_HEAD(clean); | ||
434 | LIST_HEAD(recovered); | ||
435 | LIST_HEAD(failed_recovered); | ||
436 | |||
437 | /* | ||
438 | * Quickly grab the lists. | ||
439 | */ | ||
440 | write_lock_irq(&rh->hash_lock); | ||
441 | spin_lock(&rh->region_lock); | ||
442 | if (!list_empty(&rh->clean_regions)) { | ||
443 | list_splice_init(&rh->clean_regions, &clean); | ||
444 | |||
445 | list_for_each_entry(reg, &clean, list) | ||
446 | list_del(®->hash_list); | ||
447 | } | ||
448 | |||
449 | if (!list_empty(&rh->recovered_regions)) { | ||
450 | list_splice_init(&rh->recovered_regions, &recovered); | ||
451 | |||
452 | list_for_each_entry(reg, &recovered, list) | ||
453 | list_del(®->hash_list); | ||
454 | } | ||
455 | |||
456 | if (!list_empty(&rh->failed_recovered_regions)) { | ||
457 | list_splice_init(&rh->failed_recovered_regions, | ||
458 | &failed_recovered); | ||
459 | |||
460 | list_for_each_entry(reg, &failed_recovered, list) | ||
461 | list_del(®->hash_list); | ||
462 | } | ||
463 | |||
464 | spin_unlock(&rh->region_lock); | ||
465 | write_unlock_irq(&rh->hash_lock); | ||
466 | |||
467 | /* | ||
468 | * All the regions on the recovered and clean lists have | ||
469 | * now been pulled out of the system, so no need to do | ||
470 | * any more locking. | ||
471 | */ | ||
472 | list_for_each_entry_safe(reg, next, &recovered, list) { | ||
473 | rh->log->type->clear_region(rh->log, reg->key); | ||
474 | complete_resync_work(reg, 1); | ||
475 | mempool_free(reg, rh->region_pool); | ||
476 | } | ||
477 | |||
478 | list_for_each_entry_safe(reg, next, &failed_recovered, list) { | ||
479 | complete_resync_work(reg, errors_handled ? 0 : 1); | ||
480 | mempool_free(reg, rh->region_pool); | ||
481 | } | ||
482 | |||
483 | list_for_each_entry_safe(reg, next, &clean, list) { | ||
484 | rh->log->type->clear_region(rh->log, reg->key); | ||
485 | mempool_free(reg, rh->region_pool); | ||
486 | } | ||
487 | |||
488 | rh->log->type->flush(rh->log); | ||
489 | } | ||
490 | EXPORT_SYMBOL_GPL(dm_rh_update_states); | ||
491 | |||
492 | static void rh_inc(struct dm_region_hash *rh, region_t region) | ||
493 | { | ||
494 | struct dm_region *reg; | ||
495 | |||
496 | read_lock(&rh->hash_lock); | ||
497 | reg = __rh_find(rh, region); | ||
498 | |||
499 | spin_lock_irq(&rh->region_lock); | ||
500 | atomic_inc(®->pending); | ||
501 | |||
502 | if (reg->state == DM_RH_CLEAN) { | ||
503 | reg->state = DM_RH_DIRTY; | ||
504 | list_del_init(®->list); /* take off the clean list */ | ||
505 | spin_unlock_irq(&rh->region_lock); | ||
506 | |||
507 | rh->log->type->mark_region(rh->log, reg->key); | ||
508 | } else | ||
509 | spin_unlock_irq(&rh->region_lock); | ||
510 | |||
511 | |||
512 | read_unlock(&rh->hash_lock); | ||
513 | } | ||
514 | |||
515 | void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) | ||
516 | { | ||
517 | struct bio *bio; | ||
518 | |||
519 | for (bio = bios->head; bio; bio = bio->bi_next) | ||
520 | rh_inc(rh, dm_rh_bio_to_region(rh, bio)); | ||
521 | } | ||
522 | EXPORT_SYMBOL_GPL(dm_rh_inc_pending); | ||
523 | |||
524 | void dm_rh_dec(struct dm_region_hash *rh, region_t region) | ||
525 | { | ||
526 | unsigned long flags; | ||
527 | struct dm_region *reg; | ||
528 | int should_wake = 0; | ||
529 | |||
530 | read_lock(&rh->hash_lock); | ||
531 | reg = __rh_lookup(rh, region); | ||
532 | read_unlock(&rh->hash_lock); | ||
533 | |||
534 | spin_lock_irqsave(&rh->region_lock, flags); | ||
535 | if (atomic_dec_and_test(®->pending)) { | ||
536 | /* | ||
537 | * There is no pending I/O for this region. | ||
538 | * We can move the region to corresponding list for next action. | ||
539 | * At this point, the region is not yet connected to any list. | ||
540 | * | ||
541 | * If the state is DM_RH_NOSYNC, the region should be kept off | ||
542 | * from clean list. | ||
543 | * The hash entry for DM_RH_NOSYNC will remain in memory | ||
544 | * until the region is recovered or the map is reloaded. | ||
545 | */ | ||
546 | |||
547 | /* do nothing for DM_RH_NOSYNC */ | ||
548 | if (reg->state == DM_RH_RECOVERING) { | ||
549 | list_add_tail(®->list, &rh->quiesced_regions); | ||
550 | } else if (reg->state == DM_RH_DIRTY) { | ||
551 | reg->state = DM_RH_CLEAN; | ||
552 | list_add(®->list, &rh->clean_regions); | ||
553 | } | ||
554 | should_wake = 1; | ||
555 | } | ||
556 | spin_unlock_irqrestore(&rh->region_lock, flags); | ||
557 | |||
558 | if (should_wake) | ||
559 | rh->wakeup_workers(rh->context); | ||
560 | } | ||
561 | EXPORT_SYMBOL_GPL(dm_rh_dec); | ||
562 | |||
563 | /* | ||
564 | * Starts quiescing a region in preparation for recovery. | ||
565 | */ | ||
566 | static int __rh_recovery_prepare(struct dm_region_hash *rh) | ||
567 | { | ||
568 | int r; | ||
569 | region_t region; | ||
570 | struct dm_region *reg; | ||
571 | |||
572 | /* | ||
573 | * Ask the dirty log what's next. | ||
574 | */ | ||
575 | r = rh->log->type->get_resync_work(rh->log, ®ion); | ||
576 | if (r <= 0) | ||
577 | return r; | ||
578 | |||
579 | /* | ||
580 | * Get this region, and start it quiescing by setting the | ||
581 | * recovering flag. | ||
582 | */ | ||
583 | read_lock(&rh->hash_lock); | ||
584 | reg = __rh_find(rh, region); | ||
585 | read_unlock(&rh->hash_lock); | ||
586 | |||
587 | spin_lock_irq(&rh->region_lock); | ||
588 | reg->state = DM_RH_RECOVERING; | ||
589 | |||
590 | /* Already quiesced ? */ | ||
591 | if (atomic_read(®->pending)) | ||
592 | list_del_init(®->list); | ||
593 | else | ||
594 | list_move(®->list, &rh->quiesced_regions); | ||
595 | |||
596 | spin_unlock_irq(&rh->region_lock); | ||
597 | |||
598 | return 1; | ||
599 | } | ||
600 | |||
601 | void dm_rh_recovery_prepare(struct dm_region_hash *rh) | ||
602 | { | ||
603 | /* Extra reference to avoid race with dm_rh_stop_recovery */ | ||
604 | atomic_inc(&rh->recovery_in_flight); | ||
605 | |||
606 | while (!down_trylock(&rh->recovery_count)) { | ||
607 | atomic_inc(&rh->recovery_in_flight); | ||
608 | if (__rh_recovery_prepare(rh) <= 0) { | ||
609 | atomic_dec(&rh->recovery_in_flight); | ||
610 | up(&rh->recovery_count); | ||
611 | break; | ||
612 | } | ||
613 | } | ||
614 | |||
615 | /* Drop the extra reference */ | ||
616 | if (atomic_dec_and_test(&rh->recovery_in_flight)) | ||
617 | rh->wakeup_all_recovery_waiters(rh->context); | ||
618 | } | ||
619 | EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare); | ||
620 | |||
621 | /* | ||
622 | * Returns any quiesced regions. | ||
623 | */ | ||
624 | struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh) | ||
625 | { | ||
626 | struct dm_region *reg = NULL; | ||
627 | |||
628 | spin_lock_irq(&rh->region_lock); | ||
629 | if (!list_empty(&rh->quiesced_regions)) { | ||
630 | reg = list_entry(rh->quiesced_regions.next, | ||
631 | struct dm_region, list); | ||
632 | list_del_init(®->list); /* remove from the quiesced list */ | ||
633 | } | ||
634 | spin_unlock_irq(&rh->region_lock); | ||
635 | |||
636 | return reg; | ||
637 | } | ||
638 | EXPORT_SYMBOL_GPL(dm_rh_recovery_start); | ||
639 | |||
640 | void dm_rh_recovery_end(struct dm_region *reg, int success) | ||
641 | { | ||
642 | struct dm_region_hash *rh = reg->rh; | ||
643 | |||
644 | spin_lock_irq(&rh->region_lock); | ||
645 | if (success) | ||
646 | list_add(®->list, ®->rh->recovered_regions); | ||
647 | else { | ||
648 | reg->state = DM_RH_NOSYNC; | ||
649 | list_add(®->list, ®->rh->failed_recovered_regions); | ||
650 | } | ||
651 | spin_unlock_irq(&rh->region_lock); | ||
652 | |||
653 | rh->wakeup_workers(rh->context); | ||
654 | } | ||
655 | EXPORT_SYMBOL_GPL(dm_rh_recovery_end); | ||
656 | |||
657 | /* Return recovery in flight count. */ | ||
658 | int dm_rh_recovery_in_flight(struct dm_region_hash *rh) | ||
659 | { | ||
660 | return atomic_read(&rh->recovery_in_flight); | ||
661 | } | ||
662 | EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight); | ||
663 | |||
664 | int dm_rh_flush(struct dm_region_hash *rh) | ||
665 | { | ||
666 | return rh->log->type->flush(rh->log); | ||
667 | } | ||
668 | EXPORT_SYMBOL_GPL(dm_rh_flush); | ||
669 | |||
670 | void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio) | ||
671 | { | ||
672 | struct dm_region *reg; | ||
673 | |||
674 | read_lock(&rh->hash_lock); | ||
675 | reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio)); | ||
676 | bio_list_add(®->delayed_bios, bio); | ||
677 | read_unlock(&rh->hash_lock); | ||
678 | } | ||
679 | EXPORT_SYMBOL_GPL(dm_rh_delay); | ||
680 | |||
681 | void dm_rh_stop_recovery(struct dm_region_hash *rh) | ||
682 | { | ||
683 | int i; | ||
684 | |||
685 | /* wait for any recovering regions */ | ||
686 | for (i = 0; i < rh->max_recovery; i++) | ||
687 | down(&rh->recovery_count); | ||
688 | } | ||
689 | EXPORT_SYMBOL_GPL(dm_rh_stop_recovery); | ||
690 | |||
691 | void dm_rh_start_recovery(struct dm_region_hash *rh) | ||
692 | { | ||
693 | int i; | ||
694 | |||
695 | for (i = 0; i < rh->max_recovery; i++) | ||
696 | up(&rh->recovery_count); | ||
697 | |||
698 | rh->wakeup_workers(rh->context); | ||
699 | } | ||
700 | EXPORT_SYMBOL_GPL(dm_rh_start_recovery); | ||
701 | |||
702 | MODULE_DESCRIPTION(DM_NAME " region hash"); | ||
703 | MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>"); | ||
704 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index 391dfa2ad434..cdfbf65b28cb 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c | |||
@@ -9,7 +9,8 @@ | |||
9 | * Round-robin path selector. | 9 | * Round-robin path selector. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include "dm.h" | 12 | #include <linux/device-mapper.h> |
13 | |||
13 | #include "dm-path-selector.h" | 14 | #include "dm-path-selector.h" |
14 | 15 | ||
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 6e5528aecc98..b2d9d1ac28ad 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -600,7 +600,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
600 | 600 | ||
601 | s->valid = 1; | 601 | s->valid = 1; |
602 | s->active = 0; | 602 | s->active = 0; |
603 | s->last_percent = 0; | ||
604 | init_rwsem(&s->lock); | 603 | init_rwsem(&s->lock); |
605 | spin_lock_init(&s->pe_lock); | 604 | spin_lock_init(&s->pe_lock); |
606 | s->ti = ti; | 605 | s->ti = ti; |
@@ -824,8 +823,10 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) | |||
824 | * the bios for the original write to the origin. | 823 | * the bios for the original write to the origin. |
825 | */ | 824 | */ |
826 | if (primary_pe && | 825 | if (primary_pe && |
827 | atomic_dec_and_test(&primary_pe->ref_count)) | 826 | atomic_dec_and_test(&primary_pe->ref_count)) { |
828 | origin_bios = bio_list_get(&primary_pe->origin_bios); | 827 | origin_bios = bio_list_get(&primary_pe->origin_bios); |
828 | free_pending_exception(primary_pe); | ||
829 | } | ||
829 | 830 | ||
830 | /* | 831 | /* |
831 | * Free the pe if it's not linked to an origin write or if | 832 | * Free the pe if it's not linked to an origin write or if |
@@ -834,12 +835,6 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) | |||
834 | if (!primary_pe || primary_pe != pe) | 835 | if (!primary_pe || primary_pe != pe) |
835 | free_pending_exception(pe); | 836 | free_pending_exception(pe); |
836 | 837 | ||
837 | /* | ||
838 | * Free the primary pe if nothing references it. | ||
839 | */ | ||
840 | if (primary_pe && !atomic_read(&primary_pe->ref_count)) | ||
841 | free_pending_exception(primary_pe); | ||
842 | |||
843 | return origin_bios; | 838 | return origin_bios; |
844 | } | 839 | } |
845 | 840 | ||
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h index 292c15609ae3..f07315fe2362 100644 --- a/drivers/md/dm-snap.h +++ b/drivers/md/dm-snap.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #ifndef DM_SNAPSHOT_H | 9 | #ifndef DM_SNAPSHOT_H |
10 | #define DM_SNAPSHOT_H | 10 | #define DM_SNAPSHOT_H |
11 | 11 | ||
12 | #include "dm.h" | 12 | #include <linux/device-mapper.h> |
13 | #include "dm-bio-list.h" | 13 | #include "dm-bio-list.h" |
14 | #include <linux/blkdev.h> | 14 | #include <linux/blkdev.h> |
15 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
@@ -158,9 +158,6 @@ struct dm_snapshot { | |||
158 | /* Used for display of table */ | 158 | /* Used for display of table */ |
159 | char type; | 159 | char type; |
160 | 160 | ||
161 | /* The last percentage we notified */ | ||
162 | int last_percent; | ||
163 | |||
164 | mempool_t *pending_pool; | 161 | mempool_t *pending_pool; |
165 | 162 | ||
166 | struct exception_table pending; | 163 | struct exception_table pending; |
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index b745d8ac625b..a2d068dbe9e2 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * This file is released under the GPL. | 4 | * This file is released under the GPL. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "dm.h" | 7 | #include <linux/device-mapper.h> |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
@@ -60,8 +60,8 @@ static inline struct stripe_c *alloc_context(unsigned int stripes) | |||
60 | { | 60 | { |
61 | size_t len; | 61 | size_t len; |
62 | 62 | ||
63 | if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), | 63 | if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), |
64 | stripes)) | 64 | stripes)) |
65 | return NULL; | 65 | return NULL; |
66 | 66 | ||
67 | len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); | 67 | len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); |
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index bdec206c404b..cdbf126ec106 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * This file is released under the GPL. | 4 | * This file is released under the GPL. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "dm.h" | 7 | #include <linux/device-mapper.h> |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 327de03a5bdf..d1d0cd0f5750 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -76,7 +76,6 @@ union map_info *dm_get_mapinfo(struct bio *bio) | |||
76 | */ | 76 | */ |
77 | struct dm_wq_req { | 77 | struct dm_wq_req { |
78 | enum { | 78 | enum { |
79 | DM_WQ_FLUSH_ALL, | ||
80 | DM_WQ_FLUSH_DEFERRED, | 79 | DM_WQ_FLUSH_DEFERRED, |
81 | } type; | 80 | } type; |
82 | struct work_struct work; | 81 | struct work_struct work; |
@@ -151,40 +150,40 @@ static struct kmem_cache *_tio_cache; | |||
151 | 150 | ||
152 | static int __init local_init(void) | 151 | static int __init local_init(void) |
153 | { | 152 | { |
154 | int r; | 153 | int r = -ENOMEM; |
155 | 154 | ||
156 | /* allocate a slab for the dm_ios */ | 155 | /* allocate a slab for the dm_ios */ |
157 | _io_cache = KMEM_CACHE(dm_io, 0); | 156 | _io_cache = KMEM_CACHE(dm_io, 0); |
158 | if (!_io_cache) | 157 | if (!_io_cache) |
159 | return -ENOMEM; | 158 | return r; |
160 | 159 | ||
161 | /* allocate a slab for the target ios */ | 160 | /* allocate a slab for the target ios */ |
162 | _tio_cache = KMEM_CACHE(dm_target_io, 0); | 161 | _tio_cache = KMEM_CACHE(dm_target_io, 0); |
163 | if (!_tio_cache) { | 162 | if (!_tio_cache) |
164 | kmem_cache_destroy(_io_cache); | 163 | goto out_free_io_cache; |
165 | return -ENOMEM; | ||
166 | } | ||
167 | 164 | ||
168 | r = dm_uevent_init(); | 165 | r = dm_uevent_init(); |
169 | if (r) { | 166 | if (r) |
170 | kmem_cache_destroy(_tio_cache); | 167 | goto out_free_tio_cache; |
171 | kmem_cache_destroy(_io_cache); | ||
172 | return r; | ||
173 | } | ||
174 | 168 | ||
175 | _major = major; | 169 | _major = major; |
176 | r = register_blkdev(_major, _name); | 170 | r = register_blkdev(_major, _name); |
177 | if (r < 0) { | 171 | if (r < 0) |
178 | kmem_cache_destroy(_tio_cache); | 172 | goto out_uevent_exit; |
179 | kmem_cache_destroy(_io_cache); | ||
180 | dm_uevent_exit(); | ||
181 | return r; | ||
182 | } | ||
183 | 173 | ||
184 | if (!_major) | 174 | if (!_major) |
185 | _major = r; | 175 | _major = r; |
186 | 176 | ||
187 | return 0; | 177 | return 0; |
178 | |||
179 | out_uevent_exit: | ||
180 | dm_uevent_exit(); | ||
181 | out_free_tio_cache: | ||
182 | kmem_cache_destroy(_tio_cache); | ||
183 | out_free_io_cache: | ||
184 | kmem_cache_destroy(_io_cache); | ||
185 | |||
186 | return r; | ||
188 | } | 187 | } |
189 | 188 | ||
190 | static void local_exit(void) | 189 | static void local_exit(void) |
@@ -669,6 +668,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, | |||
669 | clone->bi_size = to_bytes(len); | 668 | clone->bi_size = to_bytes(len); |
670 | clone->bi_io_vec->bv_offset = offset; | 669 | clone->bi_io_vec->bv_offset = offset; |
671 | clone->bi_io_vec->bv_len = clone->bi_size; | 670 | clone->bi_io_vec->bv_len = clone->bi_size; |
671 | clone->bi_flags |= 1 << BIO_CLONED; | ||
672 | 672 | ||
673 | return clone; | 673 | return clone; |
674 | } | 674 | } |
@@ -1394,9 +1394,6 @@ static void dm_wq_work(struct work_struct *work) | |||
1394 | 1394 | ||
1395 | down_write(&md->io_lock); | 1395 | down_write(&md->io_lock); |
1396 | switch (req->type) { | 1396 | switch (req->type) { |
1397 | case DM_WQ_FLUSH_ALL: | ||
1398 | __merge_pushback_list(md); | ||
1399 | /* pass through */ | ||
1400 | case DM_WQ_FLUSH_DEFERRED: | 1397 | case DM_WQ_FLUSH_DEFERRED: |
1401 | __flush_deferred_io(md); | 1398 | __flush_deferred_io(md); |
1402 | break; | 1399 | break; |
@@ -1526,7 +1523,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1526 | if (!md->suspended_bdev) { | 1523 | if (!md->suspended_bdev) { |
1527 | DMWARN("bdget failed in dm_suspend"); | 1524 | DMWARN("bdget failed in dm_suspend"); |
1528 | r = -ENOMEM; | 1525 | r = -ENOMEM; |
1529 | goto flush_and_out; | 1526 | goto out; |
1530 | } | 1527 | } |
1531 | 1528 | ||
1532 | /* | 1529 | /* |
@@ -1577,14 +1574,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1577 | 1574 | ||
1578 | set_bit(DMF_SUSPENDED, &md->flags); | 1575 | set_bit(DMF_SUSPENDED, &md->flags); |
1579 | 1576 | ||
1580 | flush_and_out: | ||
1581 | if (r && noflush) | ||
1582 | /* | ||
1583 | * Because there may be already I/Os in the pushback list, | ||
1584 | * flush them before return. | ||
1585 | */ | ||
1586 | dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL); | ||
1587 | |||
1588 | out: | 1577 | out: |
1589 | if (r && md->suspended_bdev) { | 1578 | if (r && md->suspended_bdev) { |
1590 | bdput(md->suspended_bdev); | 1579 | bdput(md->suspended_bdev); |
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index cd189da2b2fa..0ade60cdef42 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -62,15 +62,6 @@ void dm_put_target_type(struct target_type *t); | |||
62 | int dm_target_iterate(void (*iter_func)(struct target_type *tt, | 62 | int dm_target_iterate(void (*iter_func)(struct target_type *tt, |
63 | void *param), void *param); | 63 | void *param), void *param); |
64 | 64 | ||
65 | /*----------------------------------------------------------------- | ||
66 | * Useful inlines. | ||
67 | *---------------------------------------------------------------*/ | ||
68 | static inline int array_too_big(unsigned long fixed, unsigned long obj, | ||
69 | unsigned long num) | ||
70 | { | ||
71 | return (num > (ULONG_MAX - fixed) / obj); | ||
72 | } | ||
73 | |||
74 | int dm_split_args(int *argc, char ***argvp, char *input); | 65 | int dm_split_args(int *argc, char ***argvp, char *input); |
75 | 66 | ||
76 | /* | 67 | /* |
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 68e237b830ad..0acefe8aff87 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile | |||
@@ -17,7 +17,7 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o | |||
17 | obj-$(CONFIG_MFD_WM8350) += wm8350.o | 17 | obj-$(CONFIG_MFD_WM8350) += wm8350.o |
18 | obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o | 18 | obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o |
19 | 19 | ||
20 | obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o | 20 | obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o |
21 | 21 | ||
22 | obj-$(CONFIG_MFD_CORE) += mfd-core.o | 22 | obj-$(CONFIG_MFD_CORE) += mfd-core.o |
23 | 23 | ||
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 220e4371266b..170f9d47c2f9 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c | |||
@@ -1374,31 +1374,31 @@ static int sm501_init_dev(struct sm501_devdata *sm) | |||
1374 | static int sm501_plat_probe(struct platform_device *dev) | 1374 | static int sm501_plat_probe(struct platform_device *dev) |
1375 | { | 1375 | { |
1376 | struct sm501_devdata *sm; | 1376 | struct sm501_devdata *sm; |
1377 | int err; | 1377 | int ret; |
1378 | 1378 | ||
1379 | sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL); | 1379 | sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL); |
1380 | if (sm == NULL) { | 1380 | if (sm == NULL) { |
1381 | dev_err(&dev->dev, "no memory for device data\n"); | 1381 | dev_err(&dev->dev, "no memory for device data\n"); |
1382 | err = -ENOMEM; | 1382 | ret = -ENOMEM; |
1383 | goto err1; | 1383 | goto err1; |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | sm->dev = &dev->dev; | 1386 | sm->dev = &dev->dev; |
1387 | sm->pdev_id = dev->id; | 1387 | sm->pdev_id = dev->id; |
1388 | sm->irq = platform_get_irq(dev, 0); | ||
1389 | sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1); | ||
1390 | sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
1391 | sm->platdata = dev->dev.platform_data; | 1388 | sm->platdata = dev->dev.platform_data; |
1392 | 1389 | ||
1393 | if (sm->irq < 0) { | 1390 | ret = platform_get_irq(dev, 0); |
1391 | if (ret < 0) { | ||
1394 | dev_err(&dev->dev, "failed to get irq resource\n"); | 1392 | dev_err(&dev->dev, "failed to get irq resource\n"); |
1395 | err = sm->irq; | ||
1396 | goto err_res; | 1393 | goto err_res; |
1397 | } | 1394 | } |
1395 | sm->irq = ret; | ||
1398 | 1396 | ||
1397 | sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1); | ||
1398 | sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
1399 | if (sm->io_res == NULL || sm->mem_res == NULL) { | 1399 | if (sm->io_res == NULL || sm->mem_res == NULL) { |
1400 | dev_err(&dev->dev, "failed to get IO resource\n"); | 1400 | dev_err(&dev->dev, "failed to get IO resource\n"); |
1401 | err = -ENOENT; | 1401 | ret = -ENOENT; |
1402 | goto err_res; | 1402 | goto err_res; |
1403 | } | 1403 | } |
1404 | 1404 | ||
@@ -1407,7 +1407,7 @@ static int sm501_plat_probe(struct platform_device *dev) | |||
1407 | 1407 | ||
1408 | if (sm->regs_claim == NULL) { | 1408 | if (sm->regs_claim == NULL) { |
1409 | dev_err(&dev->dev, "cannot claim registers\n"); | 1409 | dev_err(&dev->dev, "cannot claim registers\n"); |
1410 | err= -EBUSY; | 1410 | ret = -EBUSY; |
1411 | goto err_res; | 1411 | goto err_res; |
1412 | } | 1412 | } |
1413 | 1413 | ||
@@ -1418,7 +1418,7 @@ static int sm501_plat_probe(struct platform_device *dev) | |||
1418 | 1418 | ||
1419 | if (sm->regs == NULL) { | 1419 | if (sm->regs == NULL) { |
1420 | dev_err(&dev->dev, "cannot remap registers\n"); | 1420 | dev_err(&dev->dev, "cannot remap registers\n"); |
1421 | err = -EIO; | 1421 | ret = -EIO; |
1422 | goto err_claim; | 1422 | goto err_claim; |
1423 | } | 1423 | } |
1424 | 1424 | ||
@@ -1430,7 +1430,7 @@ static int sm501_plat_probe(struct platform_device *dev) | |||
1430 | err_res: | 1430 | err_res: |
1431 | kfree(sm); | 1431 | kfree(sm); |
1432 | err1: | 1432 | err1: |
1433 | return err; | 1433 | return ret; |
1434 | 1434 | ||
1435 | } | 1435 | } |
1436 | 1436 | ||
@@ -1625,8 +1625,7 @@ static int sm501_pci_probe(struct pci_dev *dev, | |||
1625 | goto err3; | 1625 | goto err3; |
1626 | } | 1626 | } |
1627 | 1627 | ||
1628 | sm->regs = ioremap(pci_resource_start(dev, 1), | 1628 | sm->regs = pci_ioremap_bar(dev, 1); |
1629 | pci_resource_len(dev, 1)); | ||
1630 | 1629 | ||
1631 | if (sm->regs == NULL) { | 1630 | if (sm->regs == NULL) { |
1632 | dev_err(&dev->dev, "cannot remap registers\n"); | 1631 | dev_err(&dev->dev, "cannot remap registers\n"); |
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c index fd9a0160202c..dd843c4fbcc7 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c | |||
@@ -27,15 +27,11 @@ | |||
27 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 27 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/kernel_stat.h> | ||
31 | #include <linux/init.h> | 30 | #include <linux/init.h> |
32 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/irq.h> | ||
35 | #include <linux/random.h> | ||
36 | #include <linux/kthread.h> | ||
37 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
38 | #include <linux/clk.h> | 33 | #include <linux/clk.h> |
34 | #include <linux/err.h> | ||
39 | 35 | ||
40 | #include <linux/i2c.h> | 36 | #include <linux/i2c.h> |
41 | #include <linux/i2c/twl4030.h> | 37 | #include <linux/i2c/twl4030.h> |
@@ -93,26 +89,6 @@ | |||
93 | #define twl_has_usb() false | 89 | #define twl_has_usb() false |
94 | #endif | 90 | #endif |
95 | 91 | ||
96 | static inline void activate_irq(int irq) | ||
97 | { | ||
98 | #ifdef CONFIG_ARM | ||
99 | /* ARM requires an extra step to clear IRQ_NOREQUEST, which it | ||
100 | * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE. | ||
101 | */ | ||
102 | set_irq_flags(irq, IRQF_VALID); | ||
103 | #else | ||
104 | /* same effect on other architectures */ | ||
105 | set_irq_noprobe(irq); | ||
106 | #endif | ||
107 | } | ||
108 | |||
109 | /* Primary Interrupt Handler on TWL4030 Registers */ | ||
110 | |||
111 | /* Register Definitions */ | ||
112 | |||
113 | #define REG_PIH_ISR_P1 (0x1) | ||
114 | #define REG_PIH_ISR_P2 (0x2) | ||
115 | #define REG_PIH_SIR (0x3) | ||
116 | 92 | ||
117 | /* Triton Core internal information (BEGIN) */ | 93 | /* Triton Core internal information (BEGIN) */ |
118 | 94 | ||
@@ -175,138 +151,6 @@ static inline void activate_irq(int irq) | |||
175 | 151 | ||
176 | /*----------------------------------------------------------------------*/ | 152 | /*----------------------------------------------------------------------*/ |
177 | 153 | ||
178 | /** | ||
179 | * struct twl4030_mod_iregs - TWL module IMR/ISR regs to mask/clear at init | ||
180 | * @mod_no: TWL4030 module number (e.g., TWL4030_MODULE_GPIO) | ||
181 | * @sih_ctrl: address of module SIH_CTRL register | ||
182 | * @reg_cnt: number of IMR/ISR regs | ||
183 | * @imrs: pointer to array of TWL module interrupt mask register indices | ||
184 | * @isrs: pointer to array of TWL module interrupt status register indices | ||
185 | * | ||
186 | * Ties together TWL4030 modules and lists of IMR/ISR registers to mask/clear | ||
187 | * during twl_init_irq(). | ||
188 | */ | ||
189 | struct twl4030_mod_iregs { | ||
190 | const u8 mod_no; | ||
191 | const u8 sih_ctrl; | ||
192 | const u8 reg_cnt; | ||
193 | const u8 *imrs; | ||
194 | const u8 *isrs; | ||
195 | }; | ||
196 | |||
197 | /* TWL4030 INT module interrupt mask registers */ | ||
198 | static const u8 __initconst twl4030_int_imr_regs[] = { | ||
199 | TWL4030_INT_PWR_IMR1, | ||
200 | TWL4030_INT_PWR_IMR2, | ||
201 | }; | ||
202 | |||
203 | /* TWL4030 INT module interrupt status registers */ | ||
204 | static const u8 __initconst twl4030_int_isr_regs[] = { | ||
205 | TWL4030_INT_PWR_ISR1, | ||
206 | TWL4030_INT_PWR_ISR2, | ||
207 | }; | ||
208 | |||
209 | /* TWL4030 INTERRUPTS module interrupt mask registers */ | ||
210 | static const u8 __initconst twl4030_interrupts_imr_regs[] = { | ||
211 | TWL4030_INTERRUPTS_BCIIMR1A, | ||
212 | TWL4030_INTERRUPTS_BCIIMR1B, | ||
213 | TWL4030_INTERRUPTS_BCIIMR2A, | ||
214 | TWL4030_INTERRUPTS_BCIIMR2B, | ||
215 | }; | ||
216 | |||
217 | /* TWL4030 INTERRUPTS module interrupt status registers */ | ||
218 | static const u8 __initconst twl4030_interrupts_isr_regs[] = { | ||
219 | TWL4030_INTERRUPTS_BCIISR1A, | ||
220 | TWL4030_INTERRUPTS_BCIISR1B, | ||
221 | TWL4030_INTERRUPTS_BCIISR2A, | ||
222 | TWL4030_INTERRUPTS_BCIISR2B, | ||
223 | }; | ||
224 | |||
225 | /* TWL4030 MADC module interrupt mask registers */ | ||
226 | static const u8 __initconst twl4030_madc_imr_regs[] = { | ||
227 | TWL4030_MADC_IMR1, | ||
228 | TWL4030_MADC_IMR2, | ||
229 | }; | ||
230 | |||
231 | /* TWL4030 MADC module interrupt status registers */ | ||
232 | static const u8 __initconst twl4030_madc_isr_regs[] = { | ||
233 | TWL4030_MADC_ISR1, | ||
234 | TWL4030_MADC_ISR2, | ||
235 | }; | ||
236 | |||
237 | /* TWL4030 keypad module interrupt mask registers */ | ||
238 | static const u8 __initconst twl4030_keypad_imr_regs[] = { | ||
239 | TWL4030_KEYPAD_KEYP_IMR1, | ||
240 | TWL4030_KEYPAD_KEYP_IMR2, | ||
241 | }; | ||
242 | |||
243 | /* TWL4030 keypad module interrupt status registers */ | ||
244 | static const u8 __initconst twl4030_keypad_isr_regs[] = { | ||
245 | TWL4030_KEYPAD_KEYP_ISR1, | ||
246 | TWL4030_KEYPAD_KEYP_ISR2, | ||
247 | }; | ||
248 | |||
249 | /* TWL4030 GPIO module interrupt mask registers */ | ||
250 | static const u8 __initconst twl4030_gpio_imr_regs[] = { | ||
251 | REG_GPIO_IMR1A, | ||
252 | REG_GPIO_IMR1B, | ||
253 | REG_GPIO_IMR2A, | ||
254 | REG_GPIO_IMR2B, | ||
255 | REG_GPIO_IMR3A, | ||
256 | REG_GPIO_IMR3B, | ||
257 | }; | ||
258 | |||
259 | /* TWL4030 GPIO module interrupt status registers */ | ||
260 | static const u8 __initconst twl4030_gpio_isr_regs[] = { | ||
261 | REG_GPIO_ISR1A, | ||
262 | REG_GPIO_ISR1B, | ||
263 | REG_GPIO_ISR2A, | ||
264 | REG_GPIO_ISR2B, | ||
265 | REG_GPIO_ISR3A, | ||
266 | REG_GPIO_ISR3B, | ||
267 | }; | ||
268 | |||
269 | /* TWL4030 modules that have IMR/ISR registers that must be masked/cleared */ | ||
270 | static const struct twl4030_mod_iregs __initconst twl4030_mod_regs[] = { | ||
271 | { | ||
272 | .mod_no = TWL4030_MODULE_INT, | ||
273 | .sih_ctrl = TWL4030_INT_PWR_SIH_CTRL, | ||
274 | .reg_cnt = ARRAY_SIZE(twl4030_int_imr_regs), | ||
275 | .imrs = twl4030_int_imr_regs, | ||
276 | .isrs = twl4030_int_isr_regs, | ||
277 | }, | ||
278 | { | ||
279 | .mod_no = TWL4030_MODULE_INTERRUPTS, | ||
280 | .sih_ctrl = TWL4030_INTERRUPTS_BCISIHCTRL, | ||
281 | .reg_cnt = ARRAY_SIZE(twl4030_interrupts_imr_regs), | ||
282 | .imrs = twl4030_interrupts_imr_regs, | ||
283 | .isrs = twl4030_interrupts_isr_regs, | ||
284 | }, | ||
285 | { | ||
286 | .mod_no = TWL4030_MODULE_MADC, | ||
287 | .sih_ctrl = TWL4030_MADC_SIH_CTRL, | ||
288 | .reg_cnt = ARRAY_SIZE(twl4030_madc_imr_regs), | ||
289 | .imrs = twl4030_madc_imr_regs, | ||
290 | .isrs = twl4030_madc_isr_regs, | ||
291 | }, | ||
292 | { | ||
293 | .mod_no = TWL4030_MODULE_KEYPAD, | ||
294 | .sih_ctrl = TWL4030_KEYPAD_KEYP_SIH_CTRL, | ||
295 | .reg_cnt = ARRAY_SIZE(twl4030_keypad_imr_regs), | ||
296 | .imrs = twl4030_keypad_imr_regs, | ||
297 | .isrs = twl4030_keypad_isr_regs, | ||
298 | }, | ||
299 | { | ||
300 | .mod_no = TWL4030_MODULE_GPIO, | ||
301 | .sih_ctrl = REG_GPIO_SIH_CTRL, | ||
302 | .reg_cnt = ARRAY_SIZE(twl4030_gpio_imr_regs), | ||
303 | .imrs = twl4030_gpio_imr_regs, | ||
304 | .isrs = twl4030_gpio_isr_regs, | ||
305 | }, | ||
306 | }; | ||
307 | |||
308 | /*----------------------------------------------------------------*/ | ||
309 | |||
310 | /* is driver active, bound to a chip? */ | 154 | /* is driver active, bound to a chip? */ |
311 | static bool inuse; | 155 | static bool inuse; |
312 | 156 | ||
@@ -367,33 +211,6 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { | |||
367 | 211 | ||
368 | /*----------------------------------------------------------------------*/ | 212 | /*----------------------------------------------------------------------*/ |
369 | 213 | ||
370 | /* | ||
371 | * TWL4030 doesn't have PIH mask, hence dummy function for mask | ||
372 | * and unmask of the (eight) interrupts reported at that level ... | ||
373 | * masking is only available from SIH (secondary) modules. | ||
374 | */ | ||
375 | |||
376 | static void twl4030_i2c_ackirq(unsigned int irq) | ||
377 | { | ||
378 | } | ||
379 | |||
380 | static void twl4030_i2c_disableint(unsigned int irq) | ||
381 | { | ||
382 | } | ||
383 | |||
384 | static void twl4030_i2c_enableint(unsigned int irq) | ||
385 | { | ||
386 | } | ||
387 | |||
388 | static struct irq_chip twl4030_irq_chip = { | ||
389 | .name = "twl4030", | ||
390 | .ack = twl4030_i2c_ackirq, | ||
391 | .mask = twl4030_i2c_disableint, | ||
392 | .unmask = twl4030_i2c_enableint, | ||
393 | }; | ||
394 | |||
395 | /*----------------------------------------------------------------------*/ | ||
396 | |||
397 | /* Exported Functions */ | 214 | /* Exported Functions */ |
398 | 215 | ||
399 | /** | 216 | /** |
@@ -535,108 +352,11 @@ EXPORT_SYMBOL(twl4030_i2c_read_u8); | |||
535 | 352 | ||
536 | /*----------------------------------------------------------------------*/ | 353 | /*----------------------------------------------------------------------*/ |
537 | 354 | ||
538 | static unsigned twl4030_irq_base; | ||
539 | |||
540 | static struct completion irq_event; | ||
541 | |||
542 | /* | ||
543 | * This thread processes interrupts reported by the Primary Interrupt Handler. | ||
544 | */ | ||
545 | static int twl4030_irq_thread(void *data) | ||
546 | { | ||
547 | long irq = (long)data; | ||
548 | irq_desc_t *desc = irq_desc + irq; | ||
549 | static unsigned i2c_errors; | ||
550 | const static unsigned max_i2c_errors = 100; | ||
551 | |||
552 | current->flags |= PF_NOFREEZE; | ||
553 | |||
554 | while (!kthread_should_stop()) { | ||
555 | int ret; | ||
556 | int module_irq; | ||
557 | u8 pih_isr; | ||
558 | |||
559 | /* Wait for IRQ, then read PIH irq status (also blocking) */ | ||
560 | wait_for_completion_interruptible(&irq_event); | ||
561 | |||
562 | ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr, | ||
563 | REG_PIH_ISR_P1); | ||
564 | if (ret) { | ||
565 | pr_warning("%s: I2C error %d reading PIH ISR\n", | ||
566 | DRIVER_NAME, ret); | ||
567 | if (++i2c_errors >= max_i2c_errors) { | ||
568 | printk(KERN_ERR "Maximum I2C error count" | ||
569 | " exceeded. Terminating %s.\n", | ||
570 | __func__); | ||
571 | break; | ||
572 | } | ||
573 | complete(&irq_event); | ||
574 | continue; | ||
575 | } | ||
576 | |||
577 | /* these handlers deal with the relevant SIH irq status */ | ||
578 | local_irq_disable(); | ||
579 | for (module_irq = twl4030_irq_base; | ||
580 | pih_isr; | ||
581 | pih_isr >>= 1, module_irq++) { | ||
582 | if (pih_isr & 0x1) { | ||
583 | irq_desc_t *d = irq_desc + module_irq; | ||
584 | |||
585 | d->handle_irq(module_irq, d); | ||
586 | } | ||
587 | } | ||
588 | local_irq_enable(); | ||
589 | |||
590 | desc->chip->unmask(irq); | ||
591 | } | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | /* | 355 | /* |
597 | * do_twl4030_irq() is the desc->handle method for the twl4030 interrupt. | 356 | * NOTE: We know the first 8 IRQs after pdata->base_irq are |
598 | * This is a chained interrupt, so there is no desc->action method for it. | 357 | * for the PIH, and the next are for the PWR_INT SIH, since |
599 | * Now we need to query the interrupt controller in the twl4030 to determine | 358 | * that's how twl_init_irq() sets things up. |
600 | * which module is generating the interrupt request. However, we can't do i2c | ||
601 | * transactions in interrupt context, so we must defer that work to a kernel | ||
602 | * thread. All we do here is acknowledge and mask the interrupt and wakeup | ||
603 | * the kernel thread. | ||
604 | */ | 359 | */ |
605 | static void do_twl4030_irq(unsigned int irq, irq_desc_t *desc) | ||
606 | { | ||
607 | const unsigned int cpu = smp_processor_id(); | ||
608 | |||
609 | /* | ||
610 | * Earlier this was desc->triggered = 1; | ||
611 | */ | ||
612 | desc->status |= IRQ_LEVEL; | ||
613 | |||
614 | /* | ||
615 | * Acknowledge, clear _AND_ disable the interrupt. | ||
616 | */ | ||
617 | desc->chip->ack(irq); | ||
618 | |||
619 | if (!desc->depth) { | ||
620 | kstat_cpu(cpu).irqs[irq]++; | ||
621 | |||
622 | complete(&irq_event); | ||
623 | } | ||
624 | } | ||
625 | |||
626 | static struct task_struct * __init start_twl4030_irq_thread(long irq) | ||
627 | { | ||
628 | struct task_struct *thread; | ||
629 | |||
630 | init_completion(&irq_event); | ||
631 | thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq"); | ||
632 | if (!thread) | ||
633 | pr_err("%s: could not create twl4030 irq %ld thread!\n", | ||
634 | DRIVER_NAME, irq); | ||
635 | |||
636 | return thread; | ||
637 | } | ||
638 | |||
639 | /*----------------------------------------------------------------------*/ | ||
640 | 360 | ||
641 | static int add_children(struct twl4030_platform_data *pdata) | 361 | static int add_children(struct twl4030_platform_data *pdata) |
642 | { | 362 | { |
@@ -668,7 +388,7 @@ static int add_children(struct twl4030_platform_data *pdata) | |||
668 | 388 | ||
669 | if (status == 0) { | 389 | if (status == 0) { |
670 | struct resource r = { | 390 | struct resource r = { |
671 | .start = TWL4030_PWRIRQ_CHG_PRES, | 391 | .start = pdata->irq_base + 8 + 1, |
672 | .flags = IORESOURCE_IRQ, | 392 | .flags = IORESOURCE_IRQ, |
673 | }; | 393 | }; |
674 | 394 | ||
@@ -817,8 +537,7 @@ static int add_children(struct twl4030_platform_data *pdata) | |||
817 | /* RTC module IRQ */ | 537 | /* RTC module IRQ */ |
818 | if (status == 0) { | 538 | if (status == 0) { |
819 | struct resource r = { | 539 | struct resource r = { |
820 | /* REVISIT don't hard-wire this stuff */ | 540 | .start = pdata->irq_base + 8 + 3, |
821 | .start = TWL4030_PWRIRQ_RTC, | ||
822 | .flags = IORESOURCE_IRQ, | 541 | .flags = IORESOURCE_IRQ, |
823 | }; | 542 | }; |
824 | 543 | ||
@@ -863,7 +582,7 @@ static int add_children(struct twl4030_platform_data *pdata) | |||
863 | 582 | ||
864 | if (status == 0) { | 583 | if (status == 0) { |
865 | struct resource r = { | 584 | struct resource r = { |
866 | .start = TWL4030_PWRIRQ_USB_PRES, | 585 | .start = pdata->irq_base + 8 + 2, |
867 | .flags = IORESOURCE_IRQ, | 586 | .flags = IORESOURCE_IRQ, |
868 | }; | 587 | }; |
869 | 588 | ||
@@ -965,123 +684,17 @@ static void __init clocks_init(void) | |||
965 | 684 | ||
966 | /*----------------------------------------------------------------------*/ | 685 | /*----------------------------------------------------------------------*/ |
967 | 686 | ||
968 | /** | 687 | int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); |
969 | * twl4030_i2c_clear_isr - clear TWL4030 SIH ISR regs via read + write | 688 | int twl_exit_irq(void); |
970 | * @mod_no: TWL4030 module number | ||
971 | * @reg: register index to clear | ||
972 | * @cor: value of the <module>_SIH_CTRL.COR bit (1 or 0) | ||
973 | * | ||
974 | * Either reads (cor == 1) or writes (cor == 0) to a TWL4030 interrupt | ||
975 | * status register to ensure that any prior interrupts are cleared. | ||
976 | * Returns the status from the I2C read operation. | ||
977 | */ | ||
978 | static int __init twl4030_i2c_clear_isr(u8 mod_no, u8 reg, u8 cor) | ||
979 | { | ||
980 | u8 tmp; | ||
981 | |||
982 | return (cor) ? twl4030_i2c_read_u8(mod_no, &tmp, reg) : | ||
983 | twl4030_i2c_write_u8(mod_no, 0xff, reg); | ||
984 | } | ||
985 | |||
986 | /** | ||
987 | * twl4030_read_cor_bit - are TWL module ISRs cleared by reads or writes? | ||
988 | * @mod_no: TWL4030 module number | ||
989 | * @reg: register index to clear | ||
990 | * | ||
991 | * Returns 1 if the TWL4030 SIH interrupt status registers (ISRs) for | ||
992 | * the specified TWL module are cleared by reads, or 0 if cleared by | ||
993 | * writes. | ||
994 | */ | ||
995 | static int twl4030_read_cor_bit(u8 mod_no, u8 reg) | ||
996 | { | ||
997 | u8 tmp = 0; | ||
998 | |||
999 | WARN_ON(twl4030_i2c_read_u8(mod_no, &tmp, reg) < 0); | ||
1000 | |||
1001 | tmp &= TWL4030_SIH_CTRL_COR_MASK; | ||
1002 | tmp >>= __ffs(TWL4030_SIH_CTRL_COR_MASK); | ||
1003 | |||
1004 | return tmp; | ||
1005 | } | ||
1006 | |||
1007 | /** | ||
1008 | * twl4030_mask_clear_intrs - mask and clear all TWL4030 interrupts | ||
1009 | * @t: pointer to twl4030_mod_iregs array | ||
1010 | * @t_sz: ARRAY_SIZE(t) (starting at 1) | ||
1011 | * | ||
1012 | * Mask all TWL4030 interrupt mask registers (IMRs) and clear all | ||
1013 | * interrupt status registers (ISRs). No return value, but will WARN if | ||
1014 | * any I2C operations fail. | ||
1015 | */ | ||
1016 | static void __init twl4030_mask_clear_intrs(const struct twl4030_mod_iregs *t, | ||
1017 | const u8 t_sz) | ||
1018 | { | ||
1019 | int i, j; | ||
1020 | |||
1021 | /* | ||
1022 | * N.B. - further efficiency is possible here. Eight I2C | ||
1023 | * operations on BCI and GPIO modules are avoidable if I2C | ||
1024 | * burst read/write transactions were implemented. Would | ||
1025 | * probably save about 1ms of boot time and a small amount of | ||
1026 | * power. | ||
1027 | */ | ||
1028 | for (i = 0; i < t_sz; i++) { | ||
1029 | const struct twl4030_mod_iregs tmr = t[i]; | ||
1030 | int cor; | ||
1031 | |||
1032 | /* Are ISRs cleared by reads or writes? */ | ||
1033 | cor = twl4030_read_cor_bit(tmr.mod_no, tmr.sih_ctrl); | ||
1034 | |||
1035 | for (j = 0; j < tmr.reg_cnt; j++) { | ||
1036 | |||
1037 | /* Mask interrupts at the TWL4030 */ | ||
1038 | WARN_ON(twl4030_i2c_write_u8(tmr.mod_no, 0xff, | ||
1039 | tmr.imrs[j]) < 0); | ||
1040 | |||
1041 | /* Clear TWL4030 ISRs */ | ||
1042 | WARN_ON(twl4030_i2c_clear_isr(tmr.mod_no, | ||
1043 | tmr.isrs[j], cor) < 0); | ||
1044 | } | ||
1045 | } | ||
1046 | } | ||
1047 | |||
1048 | |||
1049 | static void twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) | ||
1050 | { | ||
1051 | int i; | ||
1052 | |||
1053 | /* | ||
1054 | * Mask and clear all TWL4030 interrupts since initially we do | ||
1055 | * not have any TWL4030 module interrupt handlers present | ||
1056 | */ | ||
1057 | twl4030_mask_clear_intrs(twl4030_mod_regs, | ||
1058 | ARRAY_SIZE(twl4030_mod_regs)); | ||
1059 | |||
1060 | twl4030_irq_base = irq_base; | ||
1061 | |||
1062 | /* install an irq handler for each of the PIH modules */ | ||
1063 | for (i = irq_base; i < irq_end; i++) { | ||
1064 | set_irq_chip_and_handler(i, &twl4030_irq_chip, | ||
1065 | handle_simple_irq); | ||
1066 | activate_irq(i); | ||
1067 | } | ||
1068 | |||
1069 | /* install an irq handler to demultiplex the TWL4030 interrupt */ | ||
1070 | set_irq_data(irq_num, start_twl4030_irq_thread(irq_num)); | ||
1071 | set_irq_chained_handler(irq_num, do_twl4030_irq); | ||
1072 | } | ||
1073 | |||
1074 | /*----------------------------------------------------------------------*/ | ||
1075 | 689 | ||
1076 | static int twl4030_remove(struct i2c_client *client) | 690 | static int twl4030_remove(struct i2c_client *client) |
1077 | { | 691 | { |
1078 | unsigned i; | 692 | unsigned i; |
693 | int status; | ||
1079 | 694 | ||
1080 | /* FIXME undo twl_init_irq() */ | 695 | status = twl_exit_irq(); |
1081 | if (twl4030_irq_base) { | 696 | if (status < 0) |
1082 | dev_err(&client->dev, "can't yet clean up IRQs?\n"); | 697 | return status; |
1083 | return -ENOSYS; | ||
1084 | } | ||
1085 | 698 | ||
1086 | for (i = 0; i < TWL4030_NUM_SLAVES; i++) { | 699 | for (i = 0; i < TWL4030_NUM_SLAVES; i++) { |
1087 | struct twl4030_client *twl = &twl4030_modules[i]; | 700 | struct twl4030_client *twl = &twl4030_modules[i]; |
@@ -1112,7 +725,7 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1112 | return -EIO; | 725 | return -EIO; |
1113 | } | 726 | } |
1114 | 727 | ||
1115 | if (inuse || twl4030_irq_base) { | 728 | if (inuse) { |
1116 | dev_dbg(&client->dev, "driver is already in use\n"); | 729 | dev_dbg(&client->dev, "driver is already in use\n"); |
1117 | return -EBUSY; | 730 | return -EBUSY; |
1118 | } | 731 | } |
@@ -1146,9 +759,9 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1146 | if (client->irq | 759 | if (client->irq |
1147 | && pdata->irq_base | 760 | && pdata->irq_base |
1148 | && pdata->irq_end > pdata->irq_base) { | 761 | && pdata->irq_end > pdata->irq_base) { |
1149 | twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end); | 762 | status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end); |
1150 | dev_info(&client->dev, "IRQ %d chains IRQs %d..%d\n", | 763 | if (status < 0) |
1151 | client->irq, pdata->irq_base, pdata->irq_end - 1); | 764 | goto fail; |
1152 | } | 765 | } |
1153 | 766 | ||
1154 | status = add_children(pdata); | 767 | status = add_children(pdata); |
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c new file mode 100644 index 000000000000..fae868a8d499 --- /dev/null +++ b/drivers/mfd/twl4030-irq.c | |||
@@ -0,0 +1,743 @@ | |||
1 | /* | ||
2 | * twl4030-irq.c - TWL4030/TPS659x0 irq support | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
5 | * | ||
6 | * Modifications to defer interrupt handling to a kernel thread: | ||
7 | * Copyright (C) 2006 MontaVista Software, Inc. | ||
8 | * | ||
9 | * Based on tlv320aic23.c: | ||
10 | * Copyright (c) by Kai Svahn <kai.svahn@nokia.com> | ||
11 | * | ||
12 | * Code cleanup and modifications to IRQ handler. | ||
13 | * by syed khasim <x0khasim@ti.com> | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | * This program is distributed in the hope that it will be useful, | ||
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
23 | * GNU General Public License for more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License | ||
26 | * along with this program; if not, write to the Free Software | ||
27 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
28 | */ | ||
29 | |||
30 | #include <linux/init.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/irq.h> | ||
33 | #include <linux/kthread.h> | ||
34 | |||
35 | #include <linux/i2c/twl4030.h> | ||
36 | |||
37 | |||
38 | /* | ||
39 | * TWL4030 IRQ handling has two stages in hardware, and thus in software. | ||
40 | * The Primary Interrupt Handler (PIH) stage exposes status bits saying | ||
41 | * which Secondary Interrupt Handler (SIH) stage is raising an interrupt. | ||
42 | * SIH modules are more traditional IRQ components, which support per-IRQ | ||
43 | * enable/disable and trigger controls; they do most of the work. | ||
44 | * | ||
45 | * These chips are designed to support IRQ handling from two different | ||
46 | * I2C masters. Each has a dedicated IRQ line, and dedicated IRQ status | ||
47 | * and mask registers in the PIH and SIH modules. | ||
48 | * | ||
49 | * We set up IRQs starting at a platform-specified base, always starting | ||
50 | * with PIH and the SIH for PWR_INT and then usually adding GPIO: | ||
51 | * base + 0 .. base + 7 PIH | ||
52 | * base + 8 .. base + 15 SIH for PWR_INT | ||
53 | * base + 16 .. base + 33 SIH for GPIO | ||
54 | */ | ||
55 | |||
56 | /* PIH register offsets */ | ||
57 | #define REG_PIH_ISR_P1 0x01 | ||
58 | #define REG_PIH_ISR_P2 0x02 | ||
59 | #define REG_PIH_SIR 0x03 /* for testing */ | ||
60 | |||
61 | |||
62 | /* Linux could (eventually) use either IRQ line */ | ||
63 | static int irq_line; | ||
64 | |||
65 | struct sih { | ||
66 | char name[8]; | ||
67 | u8 module; /* module id */ | ||
68 | u8 control_offset; /* for SIH_CTRL */ | ||
69 | bool set_cor; | ||
70 | |||
71 | u8 bits; /* valid in isr/imr */ | ||
72 | u8 bytes_ixr; /* bytelen of ISR/IMR/SIR */ | ||
73 | |||
74 | u8 edr_offset; | ||
75 | u8 bytes_edr; /* bytelen of EDR */ | ||
76 | |||
77 | /* SIR ignored -- set interrupt, for testing only */ | ||
78 | struct irq_data { | ||
79 | u8 isr_offset; | ||
80 | u8 imr_offset; | ||
81 | } mask[2]; | ||
82 | /* + 2 bytes padding */ | ||
83 | }; | ||
84 | |||
85 | #define SIH_INITIALIZER(modname, nbits) \ | ||
86 | .module = TWL4030_MODULE_ ## modname, \ | ||
87 | .control_offset = TWL4030_ ## modname ## _SIH_CTRL, \ | ||
88 | .bits = nbits, \ | ||
89 | .bytes_ixr = DIV_ROUND_UP(nbits, 8), \ | ||
90 | .edr_offset = TWL4030_ ## modname ## _EDR, \ | ||
91 | .bytes_edr = DIV_ROUND_UP((2*(nbits)), 8), \ | ||
92 | .mask = { { \ | ||
93 | .isr_offset = TWL4030_ ## modname ## _ISR1, \ | ||
94 | .imr_offset = TWL4030_ ## modname ## _IMR1, \ | ||
95 | }, \ | ||
96 | { \ | ||
97 | .isr_offset = TWL4030_ ## modname ## _ISR2, \ | ||
98 | .imr_offset = TWL4030_ ## modname ## _IMR2, \ | ||
99 | }, }, | ||
100 | |||
101 | /* register naming policies are inconsistent ... */ | ||
102 | #define TWL4030_INT_PWR_EDR TWL4030_INT_PWR_EDR1 | ||
103 | #define TWL4030_MODULE_KEYPAD_KEYP TWL4030_MODULE_KEYPAD | ||
104 | #define TWL4030_MODULE_INT_PWR TWL4030_MODULE_INT | ||
105 | |||
106 | |||
107 | /* Order in this table matches order in PIH_ISR. That is, | ||
108 | * BIT(n) in PIH_ISR is sih_modules[n]. | ||
109 | */ | ||
110 | static const struct sih sih_modules[6] = { | ||
111 | [0] = { | ||
112 | .name = "gpio", | ||
113 | .module = TWL4030_MODULE_GPIO, | ||
114 | .control_offset = REG_GPIO_SIH_CTRL, | ||
115 | .set_cor = true, | ||
116 | .bits = TWL4030_GPIO_MAX, | ||
117 | .bytes_ixr = 3, | ||
118 | /* Note: *all* of these IRQs default to no-trigger */ | ||
119 | .edr_offset = REG_GPIO_EDR1, | ||
120 | .bytes_edr = 5, | ||
121 | .mask = { { | ||
122 | .isr_offset = REG_GPIO_ISR1A, | ||
123 | .imr_offset = REG_GPIO_IMR1A, | ||
124 | }, { | ||
125 | .isr_offset = REG_GPIO_ISR1B, | ||
126 | .imr_offset = REG_GPIO_IMR1B, | ||
127 | }, }, | ||
128 | }, | ||
129 | [1] = { | ||
130 | .name = "keypad", | ||
131 | .set_cor = true, | ||
132 | SIH_INITIALIZER(KEYPAD_KEYP, 4) | ||
133 | }, | ||
134 | [2] = { | ||
135 | .name = "bci", | ||
136 | .module = TWL4030_MODULE_INTERRUPTS, | ||
137 | .control_offset = TWL4030_INTERRUPTS_BCISIHCTRL, | ||
138 | .bits = 12, | ||
139 | .bytes_ixr = 2, | ||
140 | .edr_offset = TWL4030_INTERRUPTS_BCIEDR1, | ||
141 | /* Note: most of these IRQs default to no-trigger */ | ||
142 | .bytes_edr = 3, | ||
143 | .mask = { { | ||
144 | .isr_offset = TWL4030_INTERRUPTS_BCIISR1A, | ||
145 | .imr_offset = TWL4030_INTERRUPTS_BCIIMR1A, | ||
146 | }, { | ||
147 | .isr_offset = TWL4030_INTERRUPTS_BCIISR1B, | ||
148 | .imr_offset = TWL4030_INTERRUPTS_BCIIMR1B, | ||
149 | }, }, | ||
150 | }, | ||
151 | [3] = { | ||
152 | .name = "madc", | ||
153 | SIH_INITIALIZER(MADC, 4) | ||
154 | }, | ||
155 | [4] = { | ||
156 | /* USB doesn't use the same SIH organization */ | ||
157 | .name = "usb", | ||
158 | }, | ||
159 | [5] = { | ||
160 | .name = "power", | ||
161 | .set_cor = true, | ||
162 | SIH_INITIALIZER(INT_PWR, 8) | ||
163 | }, | ||
164 | /* there are no SIH modules #6 or #7 ... */ | ||
165 | }; | ||
166 | |||
167 | #undef TWL4030_MODULE_KEYPAD_KEYP | ||
168 | #undef TWL4030_MODULE_INT_PWR | ||
169 | #undef TWL4030_INT_PWR_EDR | ||
170 | |||
171 | /*----------------------------------------------------------------------*/ | ||
172 | |||
173 | static unsigned twl4030_irq_base; | ||
174 | |||
175 | static struct completion irq_event; | ||
176 | |||
177 | /* | ||
178 | * This thread processes interrupts reported by the Primary Interrupt Handler. | ||
179 | */ | ||
180 | static int twl4030_irq_thread(void *data) | ||
181 | { | ||
182 | long irq = (long)data; | ||
183 | irq_desc_t *desc = irq_desc + irq; | ||
184 | static unsigned i2c_errors; | ||
185 | const static unsigned max_i2c_errors = 100; | ||
186 | |||
187 | current->flags |= PF_NOFREEZE; | ||
188 | |||
189 | while (!kthread_should_stop()) { | ||
190 | int ret; | ||
191 | int module_irq; | ||
192 | u8 pih_isr; | ||
193 | |||
194 | /* Wait for IRQ, then read PIH irq status (also blocking) */ | ||
195 | wait_for_completion_interruptible(&irq_event); | ||
196 | |||
197 | ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr, | ||
198 | REG_PIH_ISR_P1); | ||
199 | if (ret) { | ||
200 | pr_warning("twl4030: I2C error %d reading PIH ISR\n", | ||
201 | ret); | ||
202 | if (++i2c_errors >= max_i2c_errors) { | ||
203 | printk(KERN_ERR "Maximum I2C error count" | ||
204 | " exceeded. Terminating %s.\n", | ||
205 | __func__); | ||
206 | break; | ||
207 | } | ||
208 | complete(&irq_event); | ||
209 | continue; | ||
210 | } | ||
211 | |||
212 | /* these handlers deal with the relevant SIH irq status */ | ||
213 | local_irq_disable(); | ||
214 | for (module_irq = twl4030_irq_base; | ||
215 | pih_isr; | ||
216 | pih_isr >>= 1, module_irq++) { | ||
217 | if (pih_isr & 0x1) { | ||
218 | irq_desc_t *d = irq_desc + module_irq; | ||
219 | |||
220 | /* These can't be masked ... always warn | ||
221 | * if we get any surprises. | ||
222 | */ | ||
223 | if (d->status & IRQ_DISABLED) | ||
224 | note_interrupt(module_irq, d, | ||
225 | IRQ_NONE); | ||
226 | else | ||
227 | d->handle_irq(module_irq, d); | ||
228 | } | ||
229 | } | ||
230 | local_irq_enable(); | ||
231 | |||
232 | desc->chip->unmask(irq); | ||
233 | } | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | /* | ||
239 | * handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt. | ||
240 | * This is a chained interrupt, so there is no desc->action method for it. | ||
241 | * Now we need to query the interrupt controller in the twl4030 to determine | ||
242 | * which module is generating the interrupt request. However, we can't do i2c | ||
243 | * transactions in interrupt context, so we must defer that work to a kernel | ||
244 | * thread. All we do here is acknowledge and mask the interrupt and wakeup | ||
245 | * the kernel thread. | ||
246 | */ | ||
247 | static void handle_twl4030_pih(unsigned int irq, irq_desc_t *desc) | ||
248 | { | ||
249 | /* Acknowledge, clear *AND* mask the interrupt... */ | ||
250 | desc->chip->ack(irq); | ||
251 | complete(&irq_event); | ||
252 | } | ||
253 | |||
254 | static struct task_struct *start_twl4030_irq_thread(long irq) | ||
255 | { | ||
256 | struct task_struct *thread; | ||
257 | |||
258 | init_completion(&irq_event); | ||
259 | thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq"); | ||
260 | if (!thread) | ||
261 | pr_err("twl4030: could not create irq %ld thread!\n", irq); | ||
262 | |||
263 | return thread; | ||
264 | } | ||
265 | |||
266 | /*----------------------------------------------------------------------*/ | ||
267 | |||
268 | /* | ||
269 | * twl4030_init_sih_modules() ... start from a known state where no | ||
270 | * IRQs will be coming in, and where we can quickly enable them then | ||
271 | * handle them as they arrive. Mask all IRQs: maybe init SIH_CTRL. | ||
272 | * | ||
273 | * NOTE: we don't touch EDR registers here; they stay with hardware | ||
274 | * defaults or whatever the last value was. Note that when both EDR | ||
275 | * bits for an IRQ are clear, that's as if its IMR bit is set... | ||
276 | */ | ||
277 | static int twl4030_init_sih_modules(unsigned line) | ||
278 | { | ||
279 | const struct sih *sih; | ||
280 | u8 buf[4]; | ||
281 | int i; | ||
282 | int status; | ||
283 | |||
284 | /* line 0 == int1_n signal; line 1 == int2_n signal */ | ||
285 | if (line > 1) | ||
286 | return -EINVAL; | ||
287 | |||
288 | irq_line = line; | ||
289 | |||
290 | /* disable all interrupts on our line */ | ||
291 | memset(buf, 0xff, sizeof buf); | ||
292 | sih = sih_modules; | ||
293 | for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) { | ||
294 | |||
295 | /* skip USB -- it's funky */ | ||
296 | if (!sih->bytes_ixr) | ||
297 | continue; | ||
298 | |||
299 | status = twl4030_i2c_write(sih->module, buf, | ||
300 | sih->mask[line].imr_offset, sih->bytes_ixr); | ||
301 | if (status < 0) | ||
302 | pr_err("twl4030: err %d initializing %s %s\n", | ||
303 | status, sih->name, "IMR"); | ||
304 | |||
305 | /* Maybe disable "exclusive" mode; buffer second pending irq; | ||
306 | * set Clear-On-Read (COR) bit. | ||
307 | * | ||
308 | * NOTE that sometimes COR polarity is documented as being | ||
309 | * inverted: for MADC and BCI, COR=1 means "clear on write". | ||
310 | * And for PWR_INT it's not documented... | ||
311 | */ | ||
312 | if (sih->set_cor) { | ||
313 | status = twl4030_i2c_write_u8(sih->module, | ||
314 | TWL4030_SIH_CTRL_COR_MASK, | ||
315 | sih->control_offset); | ||
316 | if (status < 0) | ||
317 | pr_err("twl4030: err %d initializing %s %s\n", | ||
318 | status, sih->name, "SIH_CTRL"); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | sih = sih_modules; | ||
323 | for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) { | ||
324 | u8 rxbuf[4]; | ||
325 | int j; | ||
326 | |||
327 | /* skip USB */ | ||
328 | if (!sih->bytes_ixr) | ||
329 | continue; | ||
330 | |||
331 | /* Clear pending interrupt status. Either the read was | ||
332 | * enough, or we need to write those bits. Repeat, in | ||
333 | * case an IRQ is pending (PENDDIS=0) ... that's not | ||
334 | * uncommon with PWR_INT.PWRON. | ||
335 | */ | ||
336 | for (j = 0; j < 2; j++) { | ||
337 | status = twl4030_i2c_read(sih->module, rxbuf, | ||
338 | sih->mask[line].isr_offset, sih->bytes_ixr); | ||
339 | if (status < 0) | ||
340 | pr_err("twl4030: err %d initializing %s %s\n", | ||
341 | status, sih->name, "ISR"); | ||
342 | |||
343 | if (!sih->set_cor) | ||
344 | status = twl4030_i2c_write(sih->module, buf, | ||
345 | sih->mask[line].isr_offset, | ||
346 | sih->bytes_ixr); | ||
347 | /* else COR=1 means read sufficed. | ||
348 | * (for most SIH modules...) | ||
349 | */ | ||
350 | } | ||
351 | } | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static inline void activate_irq(int irq) | ||
357 | { | ||
358 | #ifdef CONFIG_ARM | ||
359 | /* ARM requires an extra step to clear IRQ_NOREQUEST, which it | ||
360 | * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE. | ||
361 | */ | ||
362 | set_irq_flags(irq, IRQF_VALID); | ||
363 | #else | ||
364 | /* same effect on other architectures */ | ||
365 | set_irq_noprobe(irq); | ||
366 | #endif | ||
367 | } | ||
368 | |||
369 | /*----------------------------------------------------------------------*/ | ||
370 | |||
371 | static DEFINE_SPINLOCK(sih_agent_lock); | ||
372 | |||
373 | static struct workqueue_struct *wq; | ||
374 | |||
375 | struct sih_agent { | ||
376 | int irq_base; | ||
377 | const struct sih *sih; | ||
378 | |||
379 | u32 imr; | ||
380 | bool imr_change_pending; | ||
381 | struct work_struct mask_work; | ||
382 | |||
383 | u32 edge_change; | ||
384 | struct work_struct edge_work; | ||
385 | }; | ||
386 | |||
387 | static void twl4030_sih_do_mask(struct work_struct *work) | ||
388 | { | ||
389 | struct sih_agent *agent; | ||
390 | const struct sih *sih; | ||
391 | union { | ||
392 | u8 bytes[4]; | ||
393 | u32 word; | ||
394 | } imr; | ||
395 | int status; | ||
396 | |||
397 | agent = container_of(work, struct sih_agent, mask_work); | ||
398 | |||
399 | /* see what work we have */ | ||
400 | spin_lock_irq(&sih_agent_lock); | ||
401 | if (agent->imr_change_pending) { | ||
402 | sih = agent->sih; | ||
403 | /* byte[0] gets overwritten as we write ... */ | ||
404 | imr.word = cpu_to_le32(agent->imr << 8); | ||
405 | agent->imr_change_pending = false; | ||
406 | } else | ||
407 | sih = NULL; | ||
408 | spin_unlock_irq(&sih_agent_lock); | ||
409 | if (!sih) | ||
410 | return; | ||
411 | |||
412 | /* write the whole mask ... simpler than subsetting it */ | ||
413 | status = twl4030_i2c_write(sih->module, imr.bytes, | ||
414 | sih->mask[irq_line].imr_offset, sih->bytes_ixr); | ||
415 | if (status) | ||
416 | pr_err("twl4030: %s, %s --> %d\n", __func__, | ||
417 | "write", status); | ||
418 | } | ||
419 | |||
420 | static void twl4030_sih_do_edge(struct work_struct *work) | ||
421 | { | ||
422 | struct sih_agent *agent; | ||
423 | const struct sih *sih; | ||
424 | u8 bytes[6]; | ||
425 | u32 edge_change; | ||
426 | int status; | ||
427 | |||
428 | agent = container_of(work, struct sih_agent, edge_work); | ||
429 | |||
430 | /* see what work we have */ | ||
431 | spin_lock_irq(&sih_agent_lock); | ||
432 | edge_change = agent->edge_change; | ||
433 | agent->edge_change = 0;; | ||
434 | sih = edge_change ? agent->sih : NULL; | ||
435 | spin_unlock_irq(&sih_agent_lock); | ||
436 | if (!sih) | ||
437 | return; | ||
438 | |||
439 | /* Read, reserving first byte for write scratch. Yes, this | ||
440 | * could be cached for some speedup ... but be careful about | ||
441 | * any processor on the other IRQ line, EDR registers are | ||
442 | * shared. | ||
443 | */ | ||
444 | status = twl4030_i2c_read(sih->module, bytes + 1, | ||
445 | sih->edr_offset, sih->bytes_edr); | ||
446 | if (status) { | ||
447 | pr_err("twl4030: %s, %s --> %d\n", __func__, | ||
448 | "read", status); | ||
449 | return; | ||
450 | } | ||
451 | |||
452 | /* Modify only the bits we know must change */ | ||
453 | while (edge_change) { | ||
454 | int i = fls(edge_change) - 1; | ||
455 | struct irq_desc *d = irq_desc + i + agent->irq_base; | ||
456 | int byte = 1 + (i >> 2); | ||
457 | int off = (i & 0x3) * 2; | ||
458 | |||
459 | bytes[byte] &= ~(0x03 << off); | ||
460 | |||
461 | spin_lock_irq(&d->lock); | ||
462 | if (d->status & IRQ_TYPE_EDGE_RISING) | ||
463 | bytes[byte] |= BIT(off + 1); | ||
464 | if (d->status & IRQ_TYPE_EDGE_FALLING) | ||
465 | bytes[byte] |= BIT(off + 0); | ||
466 | spin_unlock_irq(&d->lock); | ||
467 | |||
468 | edge_change &= ~BIT(i); | ||
469 | } | ||
470 | |||
471 | /* Write */ | ||
472 | status = twl4030_i2c_write(sih->module, bytes, | ||
473 | sih->edr_offset, sih->bytes_edr); | ||
474 | if (status) | ||
475 | pr_err("twl4030: %s, %s --> %d\n", __func__, | ||
476 | "write", status); | ||
477 | } | ||
478 | |||
479 | /*----------------------------------------------------------------------*/ | ||
480 | |||
481 | /* | ||
482 | * All irq_chip methods get issued from code holding irq_desc[irq].lock, | ||
483 | * which can't perform the underlying I2C operations (because they sleep). | ||
484 | * So we must hand them off to a thread (workqueue) and cope with asynch | ||
485 | * completion, potentially including some re-ordering, of these requests. | ||
486 | */ | ||
487 | |||
488 | static void twl4030_sih_mask(unsigned irq) | ||
489 | { | ||
490 | struct sih_agent *sih = get_irq_chip_data(irq); | ||
491 | unsigned long flags; | ||
492 | |||
493 | spin_lock_irqsave(&sih_agent_lock, flags); | ||
494 | sih->imr |= BIT(irq - sih->irq_base); | ||
495 | sih->imr_change_pending = true; | ||
496 | queue_work(wq, &sih->mask_work); | ||
497 | spin_unlock_irqrestore(&sih_agent_lock, flags); | ||
498 | } | ||
499 | |||
500 | static void twl4030_sih_unmask(unsigned irq) | ||
501 | { | ||
502 | struct sih_agent *sih = get_irq_chip_data(irq); | ||
503 | unsigned long flags; | ||
504 | |||
505 | spin_lock_irqsave(&sih_agent_lock, flags); | ||
506 | sih->imr &= ~BIT(irq - sih->irq_base); | ||
507 | sih->imr_change_pending = true; | ||
508 | queue_work(wq, &sih->mask_work); | ||
509 | spin_unlock_irqrestore(&sih_agent_lock, flags); | ||
510 | } | ||
511 | |||
512 | static int twl4030_sih_set_type(unsigned irq, unsigned trigger) | ||
513 | { | ||
514 | struct sih_agent *sih = get_irq_chip_data(irq); | ||
515 | struct irq_desc *desc = irq_desc + irq; | ||
516 | unsigned long flags; | ||
517 | |||
518 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | ||
519 | return -EINVAL; | ||
520 | |||
521 | spin_lock_irqsave(&sih_agent_lock, flags); | ||
522 | if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) { | ||
523 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | ||
524 | desc->status |= trigger; | ||
525 | sih->edge_change |= BIT(irq - sih->irq_base); | ||
526 | queue_work(wq, &sih->edge_work); | ||
527 | } | ||
528 | spin_unlock_irqrestore(&sih_agent_lock, flags); | ||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | static struct irq_chip twl4030_sih_irq_chip = { | ||
533 | .name = "twl4030", | ||
534 | .mask = twl4030_sih_mask, | ||
535 | .unmask = twl4030_sih_unmask, | ||
536 | .set_type = twl4030_sih_set_type, | ||
537 | }; | ||
538 | |||
539 | /*----------------------------------------------------------------------*/ | ||
540 | |||
541 | static inline int sih_read_isr(const struct sih *sih) | ||
542 | { | ||
543 | int status; | ||
544 | union { | ||
545 | u8 bytes[4]; | ||
546 | u32 word; | ||
547 | } isr; | ||
548 | |||
549 | /* FIXME need retry-on-error ... */ | ||
550 | |||
551 | isr.word = 0; | ||
552 | status = twl4030_i2c_read(sih->module, isr.bytes, | ||
553 | sih->mask[irq_line].isr_offset, sih->bytes_ixr); | ||
554 | |||
555 | return (status < 0) ? status : le32_to_cpu(isr.word); | ||
556 | } | ||
557 | |||
558 | /* | ||
559 | * Generic handler for SIH interrupts ... we "know" this is called | ||
560 | * in task context, with IRQs enabled. | ||
561 | */ | ||
562 | static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc) | ||
563 | { | ||
564 | struct sih_agent *agent = get_irq_data(irq); | ||
565 | const struct sih *sih = agent->sih; | ||
566 | int isr; | ||
567 | |||
568 | /* reading ISR acks the IRQs, using clear-on-read mode */ | ||
569 | local_irq_enable(); | ||
570 | isr = sih_read_isr(sih); | ||
571 | local_irq_disable(); | ||
572 | |||
573 | if (isr < 0) { | ||
574 | pr_err("twl4030: %s SIH, read ISR error %d\n", | ||
575 | sih->name, isr); | ||
576 | /* REVISIT: recover; eventually mask it all, etc */ | ||
577 | return; | ||
578 | } | ||
579 | |||
580 | while (isr) { | ||
581 | irq = fls(isr); | ||
582 | irq--; | ||
583 | isr &= ~BIT(irq); | ||
584 | |||
585 | if (irq < sih->bits) | ||
586 | generic_handle_irq(agent->irq_base + irq); | ||
587 | else | ||
588 | pr_err("twl4030: %s SIH, invalid ISR bit %d\n", | ||
589 | sih->name, irq); | ||
590 | } | ||
591 | } | ||
592 | |||
593 | static unsigned twl4030_irq_next; | ||
594 | |||
595 | /* returns the first IRQ used by this SIH bank, | ||
596 | * or negative errno | ||
597 | */ | ||
598 | int twl4030_sih_setup(int module) | ||
599 | { | ||
600 | int sih_mod; | ||
601 | const struct sih *sih = NULL; | ||
602 | struct sih_agent *agent; | ||
603 | int i, irq; | ||
604 | int status = -EINVAL; | ||
605 | unsigned irq_base = twl4030_irq_next; | ||
606 | |||
607 | /* only support modules with standard clear-on-read for now */ | ||
608 | for (sih_mod = 0, sih = sih_modules; | ||
609 | sih_mod < ARRAY_SIZE(sih_modules); | ||
610 | sih_mod++, sih++) { | ||
611 | if (sih->module == module && sih->set_cor) { | ||
612 | if (!WARN((irq_base + sih->bits) > NR_IRQS, | ||
613 | "irq %d for %s too big\n", | ||
614 | irq_base + sih->bits, | ||
615 | sih->name)) | ||
616 | status = 0; | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | if (status < 0) | ||
621 | return status; | ||
622 | |||
623 | agent = kzalloc(sizeof *agent, GFP_KERNEL); | ||
624 | if (!agent) | ||
625 | return -ENOMEM; | ||
626 | |||
627 | status = 0; | ||
628 | |||
629 | agent->irq_base = irq_base; | ||
630 | agent->sih = sih; | ||
631 | agent->imr = ~0; | ||
632 | INIT_WORK(&agent->mask_work, twl4030_sih_do_mask); | ||
633 | INIT_WORK(&agent->edge_work, twl4030_sih_do_edge); | ||
634 | |||
635 | for (i = 0; i < sih->bits; i++) { | ||
636 | irq = irq_base + i; | ||
637 | |||
638 | set_irq_chip_and_handler(irq, &twl4030_sih_irq_chip, | ||
639 | handle_edge_irq); | ||
640 | set_irq_chip_data(irq, agent); | ||
641 | activate_irq(irq); | ||
642 | } | ||
643 | |||
644 | status = irq_base; | ||
645 | twl4030_irq_next += i; | ||
646 | |||
647 | /* replace generic PIH handler (handle_simple_irq) */ | ||
648 | irq = sih_mod + twl4030_irq_base; | ||
649 | set_irq_data(irq, agent); | ||
650 | set_irq_chained_handler(irq, handle_twl4030_sih); | ||
651 | |||
652 | pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name, | ||
653 | irq, irq_base, twl4030_irq_next - 1); | ||
654 | |||
655 | return status; | ||
656 | } | ||
657 | |||
658 | /* FIXME need a call to reverse twl4030_sih_setup() ... */ | ||
659 | |||
660 | |||
661 | /*----------------------------------------------------------------------*/ | ||
662 | |||
663 | /* FIXME pass in which interrupt line we'll use ... */ | ||
664 | #define twl_irq_line 0 | ||
665 | |||
666 | int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) | ||
667 | { | ||
668 | static struct irq_chip twl4030_irq_chip; | ||
669 | |||
670 | int status; | ||
671 | int i; | ||
672 | struct task_struct *task; | ||
673 | |||
674 | /* | ||
675 | * Mask and clear all TWL4030 interrupts since initially we do | ||
676 | * not have any TWL4030 module interrupt handlers present | ||
677 | */ | ||
678 | status = twl4030_init_sih_modules(twl_irq_line); | ||
679 | if (status < 0) | ||
680 | return status; | ||
681 | |||
682 | wq = create_singlethread_workqueue("twl4030-irqchip"); | ||
683 | if (!wq) { | ||
684 | pr_err("twl4030: workqueue FAIL\n"); | ||
685 | return -ESRCH; | ||
686 | } | ||
687 | |||
688 | twl4030_irq_base = irq_base; | ||
689 | |||
690 | /* install an irq handler for each of the SIH modules; | ||
691 | * clone dummy irq_chip since PIH can't *do* anything | ||
692 | */ | ||
693 | twl4030_irq_chip = dummy_irq_chip; | ||
694 | twl4030_irq_chip.name = "twl4030"; | ||
695 | |||
696 | twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; | ||
697 | |||
698 | for (i = irq_base; i < irq_end; i++) { | ||
699 | set_irq_chip_and_handler(i, &twl4030_irq_chip, | ||
700 | handle_simple_irq); | ||
701 | activate_irq(i); | ||
702 | } | ||
703 | twl4030_irq_next = i; | ||
704 | pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", "PIH", | ||
705 | irq_num, irq_base, twl4030_irq_next - 1); | ||
706 | |||
707 | /* ... and the PWR_INT module ... */ | ||
708 | status = twl4030_sih_setup(TWL4030_MODULE_INT); | ||
709 | if (status < 0) { | ||
710 | pr_err("twl4030: sih_setup PWR INT --> %d\n", status); | ||
711 | goto fail; | ||
712 | } | ||
713 | |||
714 | /* install an irq handler to demultiplex the TWL4030 interrupt */ | ||
715 | task = start_twl4030_irq_thread(irq_num); | ||
716 | if (!task) { | ||
717 | pr_err("twl4030: irq thread FAIL\n"); | ||
718 | status = -ESRCH; | ||
719 | goto fail; | ||
720 | } | ||
721 | |||
722 | set_irq_data(irq_num, task); | ||
723 | set_irq_chained_handler(irq_num, handle_twl4030_pih); | ||
724 | |||
725 | return status; | ||
726 | |||
727 | fail: | ||
728 | for (i = irq_base; i < irq_end; i++) | ||
729 | set_irq_chip_and_handler(i, NULL, NULL); | ||
730 | destroy_workqueue(wq); | ||
731 | wq = NULL; | ||
732 | return status; | ||
733 | } | ||
734 | |||
735 | int twl_exit_irq(void) | ||
736 | { | ||
737 | /* FIXME undo twl_init_irq() */ | ||
738 | if (twl4030_irq_base) { | ||
739 | pr_err("twl4030: can't yet clean up IRQs?\n"); | ||
740 | return -ENOSYS; | ||
741 | } | ||
742 | return 0; | ||
743 | } | ||
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index bf87f675e7fa..0d47fb9e4b3b 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c | |||
@@ -183,6 +183,9 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src) | |||
183 | (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable) | 183 | (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable) |
184 | | src[i - reg]; | 184 | | src[i - reg]; |
185 | 185 | ||
186 | /* Don't store volatile bits */ | ||
187 | wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol; | ||
188 | |||
186 | src[i - reg] = cpu_to_be16(src[i - reg]); | 189 | src[i - reg] = cpu_to_be16(src[i - reg]); |
187 | } | 190 | } |
188 | 191 | ||
@@ -1120,6 +1123,7 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int mode) | |||
1120 | } | 1123 | } |
1121 | value = be16_to_cpu(value); | 1124 | value = be16_to_cpu(value); |
1122 | value &= wm8350_reg_io_map[i].readable; | 1125 | value &= wm8350_reg_io_map[i].readable; |
1126 | value &= ~wm8350_reg_io_map[i].vol; | ||
1123 | wm8350->reg_cache[i] = value; | 1127 | wm8350->reg_cache[i] = value; |
1124 | } else | 1128 | } else |
1125 | wm8350->reg_cache[i] = reg_map[i]; | 1129 | wm8350->reg_cache[i] = reg_map[i]; |
@@ -1128,7 +1132,6 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int mode) | |||
1128 | out: | 1132 | out: |
1129 | return ret; | 1133 | return ret; |
1130 | } | 1134 | } |
1131 | EXPORT_SYMBOL_GPL(wm8350_create_cache); | ||
1132 | 1135 | ||
1133 | /* | 1136 | /* |
1134 | * Register a client device. This is non-fatal since there is no need to | 1137 | * Register a client device. This is non-fatal since there is no need to |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index ad301ace6085..0b71ebc074b6 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -464,6 +464,12 @@ config MIPS_JAZZ_SONIC | |||
464 | This is the driver for the onboard card of MIPS Magnum 4000, | 464 | This is the driver for the onboard card of MIPS Magnum 4000, |
465 | Acer PICA, Olivetti M700-10 and a few other identical OEM systems. | 465 | Acer PICA, Olivetti M700-10 and a few other identical OEM systems. |
466 | 466 | ||
467 | config XTENSA_XT2000_SONIC | ||
468 | tristate "Xtensa XT2000 onboard SONIC Ethernet support" | ||
469 | depends on XTENSA_PLATFORM_XT2000 | ||
470 | help | ||
471 | This is the driver for the onboard card of the Xtensa XT2000 board. | ||
472 | |||
467 | config MIPS_AU1X00_ENET | 473 | config MIPS_AU1X00_ENET |
468 | bool "MIPS AU1000 Ethernet support" | 474 | bool "MIPS AU1000 Ethernet support" |
469 | depends on SOC_AU1X00 | 475 | depends on SOC_AU1X00 |
@@ -2504,6 +2510,15 @@ config PASEMI_MAC | |||
2504 | This driver supports the on-chip 1/10Gbit Ethernet controller on | 2510 | This driver supports the on-chip 1/10Gbit Ethernet controller on |
2505 | PA Semi's PWRficient line of chips. | 2511 | PA Semi's PWRficient line of chips. |
2506 | 2512 | ||
2513 | config MLX4_EN | ||
2514 | tristate "Mellanox Technologies 10Gbit Ethernet support" | ||
2515 | depends on PCI && INET | ||
2516 | select MLX4_CORE | ||
2517 | select INET_LRO | ||
2518 | help | ||
2519 | This driver supports Mellanox Technologies ConnectX Ethernet | ||
2520 | devices. | ||
2521 | |||
2507 | config MLX4_CORE | 2522 | config MLX4_CORE |
2508 | tristate | 2523 | tristate |
2509 | depends on PCI | 2524 | depends on PCI |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index fa2510b2e609..f19acf8b9220 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -227,6 +227,8 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o | |||
227 | obj-$(CONFIG_MLX4_CORE) += mlx4/ | 227 | obj-$(CONFIG_MLX4_CORE) += mlx4/ |
228 | obj-$(CONFIG_ENC28J60) += enc28j60.o | 228 | obj-$(CONFIG_ENC28J60) += enc28j60.o |
229 | 229 | ||
230 | obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o | ||
231 | |||
230 | obj-$(CONFIG_MACB) += macb.o | 232 | obj-$(CONFIG_MACB) += macb.o |
231 | 233 | ||
232 | obj-$(CONFIG_ARM) += arm/ | 234 | obj-$(CONFIG_ARM) += arm/ |
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index 08e18bcb970f..45dd9bdc5d62 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver | 2 | * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Domen Puncer, Telargo, Inc. | 4 | * Copyright (C) 2007 Domen Puncer, Telargo, Inc. |
5 | * Copyright (C) 2008 Wolfram Sang, Pengutronix | ||
5 | * | 6 | * |
6 | * This file is licensed under the terms of the GNU General Public License | 7 | * This file is licensed under the terms of the GNU General Public License |
7 | * version 2. This program is licensed "as is" without any warranty of any | 8 | * version 2. This program is licensed "as is" without any warranty of any |
@@ -21,58 +22,45 @@ struct mpc52xx_fec_mdio_priv { | |||
21 | struct mpc52xx_fec __iomem *regs; | 22 | struct mpc52xx_fec __iomem *regs; |
22 | }; | 23 | }; |
23 | 24 | ||
24 | static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) | 25 | static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, |
26 | int reg, u32 value) | ||
25 | { | 27 | { |
26 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | 28 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; |
27 | struct mpc52xx_fec __iomem *fec; | 29 | struct mpc52xx_fec __iomem *fec; |
28 | int tries = 100; | 30 | int tries = 100; |
29 | u32 request = FEC_MII_READ_FRAME; | 31 | |
32 | value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | ||
33 | value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | ||
30 | 34 | ||
31 | fec = priv->regs; | 35 | fec = priv->regs; |
32 | out_be32(&fec->ievent, FEC_IEVENT_MII); | 36 | out_be32(&fec->ievent, FEC_IEVENT_MII); |
33 | 37 | out_be32(&priv->regs->mii_data, value); | |
34 | request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | ||
35 | request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | ||
36 | |||
37 | out_be32(&priv->regs->mii_data, request); | ||
38 | 38 | ||
39 | /* wait for it to finish, this takes about 23 us on lite5200b */ | 39 | /* wait for it to finish, this takes about 23 us on lite5200b */ |
40 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) | 40 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) |
41 | udelay(5); | 41 | udelay(5); |
42 | 42 | ||
43 | if (tries == 0) | 43 | if (!tries) |
44 | return -ETIMEDOUT; | 44 | return -ETIMEDOUT; |
45 | 45 | ||
46 | return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK; | 46 | return value & FEC_MII_DATA_OP_RD ? |
47 | in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0; | ||
47 | } | 48 | } |
48 | 49 | ||
49 | static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) | 50 | static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) |
50 | { | 51 | { |
51 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | 52 | return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME); |
52 | struct mpc52xx_fec __iomem *fec; | 53 | } |
53 | u32 value = data; | ||
54 | int tries = 100; | ||
55 | |||
56 | fec = priv->regs; | ||
57 | out_be32(&fec->ievent, FEC_IEVENT_MII); | ||
58 | |||
59 | value |= FEC_MII_WRITE_FRAME; | ||
60 | value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | ||
61 | value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | ||
62 | |||
63 | out_be32(&priv->regs->mii_data, value); | ||
64 | |||
65 | /* wait for request to finish */ | ||
66 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) | ||
67 | udelay(5); | ||
68 | |||
69 | if (tries == 0) | ||
70 | return -ETIMEDOUT; | ||
71 | 54 | ||
72 | return 0; | 55 | static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, |
56 | u16 data) | ||
57 | { | ||
58 | return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, | ||
59 | data | FEC_MII_WRITE_FRAME); | ||
73 | } | 60 | } |
74 | 61 | ||
75 | static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match) | 62 | static int mpc52xx_fec_mdio_probe(struct of_device *of, |
63 | const struct of_device_id *match) | ||
76 | { | 64 | { |
77 | struct device *dev = &of->dev; | 65 | struct device *dev = &of->dev; |
78 | struct device_node *np = of->node; | 66 | struct device_node *np = of->node; |
@@ -131,7 +119,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i | |||
131 | dev_set_drvdata(dev, bus); | 119 | dev_set_drvdata(dev, bus); |
132 | 120 | ||
133 | /* set MII speed */ | 121 | /* set MII speed */ |
134 | out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1); | 122 | out_be32(&priv->regs->mii_speed, |
123 | ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1); | ||
135 | 124 | ||
136 | /* enable MII interrupt */ | 125 | /* enable MII interrupt */ |
137 | out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII); | 126 | out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII); |
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index efcf21c9f5c7..2ee2622258f5 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c | |||
@@ -2604,8 +2604,16 @@ static int __devinit emac_init_config(struct emac_instance *dev) | |||
2604 | if (of_device_is_compatible(np, "ibm,emac-440ep") || | 2604 | if (of_device_is_compatible(np, "ibm,emac-440ep") || |
2605 | of_device_is_compatible(np, "ibm,emac-440gr")) | 2605 | of_device_is_compatible(np, "ibm,emac-440gr")) |
2606 | dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX; | 2606 | dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX; |
2607 | if (of_device_is_compatible(np, "ibm,emac-405ez")) | 2607 | if (of_device_is_compatible(np, "ibm,emac-405ez")) { |
2608 | #ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CONTROL | ||
2608 | dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; | 2609 | dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; |
2610 | #else | ||
2611 | printk(KERN_ERR "%s: Flow control not disabled!\n", | ||
2612 | np->full_name); | ||
2613 | return -ENXIO; | ||
2614 | #endif | ||
2615 | } | ||
2616 | |||
2609 | } | 2617 | } |
2610 | 2618 | ||
2611 | /* Fixup some feature bits based on the device tree */ | 2619 | /* Fixup some feature bits based on the device tree */ |
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c index 1839d3f154a3..ecf9798987fa 100644 --- a/drivers/net/ibm_newemac/mal.c +++ b/drivers/net/ibm_newemac/mal.c | |||
@@ -280,9 +280,11 @@ static irqreturn_t mal_txeob(int irq, void *dev_instance) | |||
280 | mal_schedule_poll(mal); | 280 | mal_schedule_poll(mal); |
281 | set_mal_dcrn(mal, MAL_TXEOBISR, r); | 281 | set_mal_dcrn(mal, MAL_TXEOBISR, r); |
282 | 282 | ||
283 | #ifdef CONFIG_PPC_DCR_NATIVE | ||
283 | if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) | 284 | if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) |
284 | mtdcri(SDR0, DCRN_SDR_ICINTSTAT, | 285 | mtdcri(SDR0, DCRN_SDR_ICINTSTAT, |
285 | (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX)); | 286 | (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX)); |
287 | #endif | ||
286 | 288 | ||
287 | return IRQ_HANDLED; | 289 | return IRQ_HANDLED; |
288 | } | 290 | } |
@@ -298,9 +300,11 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance) | |||
298 | mal_schedule_poll(mal); | 300 | mal_schedule_poll(mal); |
299 | set_mal_dcrn(mal, MAL_RXEOBISR, r); | 301 | set_mal_dcrn(mal, MAL_RXEOBISR, r); |
300 | 302 | ||
303 | #ifdef CONFIG_PPC_DCR_NATIVE | ||
301 | if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) | 304 | if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) |
302 | mtdcri(SDR0, DCRN_SDR_ICINTSTAT, | 305 | mtdcri(SDR0, DCRN_SDR_ICINTSTAT, |
303 | (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX)); | 306 | (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX)); |
307 | #endif | ||
304 | 308 | ||
305 | return IRQ_HANDLED; | 309 | return IRQ_HANDLED; |
306 | } | 310 | } |
@@ -572,9 +576,18 @@ static int __devinit mal_probe(struct of_device *ofdev, | |||
572 | goto fail; | 576 | goto fail; |
573 | } | 577 | } |
574 | 578 | ||
575 | if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) | 579 | if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) { |
580 | #if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \ | ||
581 | defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR) | ||
576 | mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | | 582 | mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | |
577 | MAL_FTR_COMMON_ERR_INT); | 583 | MAL_FTR_COMMON_ERR_INT); |
584 | #else | ||
585 | printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", | ||
586 | ofdev->node->full_name); | ||
587 | err = -ENODEV; | ||
588 | goto fail; | ||
589 | #endif | ||
590 | } | ||
578 | 591 | ||
579 | mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); | 592 | mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); |
580 | mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); | 593 | mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); |
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile index 0952a6528f58..a7a97bf998f8 100644 --- a/drivers/net/mlx4/Makefile +++ b/drivers/net/mlx4/Makefile | |||
@@ -1,4 +1,9 @@ | |||
1 | obj-$(CONFIG_MLX4_CORE) += mlx4_core.o | 1 | obj-$(CONFIG_MLX4_CORE) += mlx4_core.o |
2 | 2 | ||
3 | mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ | 3 | mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ |
4 | mr.o pd.o profile.o qp.o reset.o srq.o | 4 | mr.o pd.o port.o profile.o qp.o reset.o srq.o |
5 | |||
6 | obj-$(CONFIG_MLX4_EN) += mlx4_en.o | ||
7 | |||
8 | mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \ | ||
9 | en_resources.o en_netdev.o | ||
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index b411b79d72ad..ad95d5f7b630 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c | |||
@@ -48,13 +48,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) | |||
48 | 48 | ||
49 | obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); | 49 | obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); |
50 | if (obj >= bitmap->max) { | 50 | if (obj >= bitmap->max) { |
51 | bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; | 51 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) |
52 | & bitmap->mask; | ||
52 | obj = find_first_zero_bit(bitmap->table, bitmap->max); | 53 | obj = find_first_zero_bit(bitmap->table, bitmap->max); |
53 | } | 54 | } |
54 | 55 | ||
55 | if (obj < bitmap->max) { | 56 | if (obj < bitmap->max) { |
56 | set_bit(obj, bitmap->table); | 57 | set_bit(obj, bitmap->table); |
57 | bitmap->last = (obj + 1) & (bitmap->max - 1); | 58 | bitmap->last = (obj + 1); |
59 | if (bitmap->last == bitmap->max) | ||
60 | bitmap->last = 0; | ||
58 | obj |= bitmap->top; | 61 | obj |= bitmap->top; |
59 | } else | 62 | } else |
60 | obj = -1; | 63 | obj = -1; |
@@ -66,16 +69,90 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) | |||
66 | 69 | ||
67 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) | 70 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) |
68 | { | 71 | { |
69 | obj &= bitmap->max - 1; | 72 | mlx4_bitmap_free_range(bitmap, obj, 1); |
73 | } | ||
74 | |||
75 | static unsigned long find_aligned_range(unsigned long *bitmap, | ||
76 | u32 start, u32 nbits, | ||
77 | int len, int align) | ||
78 | { | ||
79 | unsigned long end, i; | ||
80 | |||
81 | again: | ||
82 | start = ALIGN(start, align); | ||
83 | |||
84 | while ((start < nbits) && test_bit(start, bitmap)) | ||
85 | start += align; | ||
86 | |||
87 | if (start >= nbits) | ||
88 | return -1; | ||
89 | |||
90 | end = start+len; | ||
91 | if (end > nbits) | ||
92 | return -1; | ||
93 | |||
94 | for (i = start + 1; i < end; i++) { | ||
95 | if (test_bit(i, bitmap)) { | ||
96 | start = i + 1; | ||
97 | goto again; | ||
98 | } | ||
99 | } | ||
100 | |||
101 | return start; | ||
102 | } | ||
103 | |||
104 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) | ||
105 | { | ||
106 | u32 obj, i; | ||
107 | |||
108 | if (likely(cnt == 1 && align == 1)) | ||
109 | return mlx4_bitmap_alloc(bitmap); | ||
110 | |||
111 | spin_lock(&bitmap->lock); | ||
112 | |||
113 | obj = find_aligned_range(bitmap->table, bitmap->last, | ||
114 | bitmap->max, cnt, align); | ||
115 | if (obj >= bitmap->max) { | ||
116 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) | ||
117 | & bitmap->mask; | ||
118 | obj = find_aligned_range(bitmap->table, 0, bitmap->max, | ||
119 | cnt, align); | ||
120 | } | ||
121 | |||
122 | if (obj < bitmap->max) { | ||
123 | for (i = 0; i < cnt; i++) | ||
124 | set_bit(obj + i, bitmap->table); | ||
125 | if (obj == bitmap->last) { | ||
126 | bitmap->last = (obj + cnt); | ||
127 | if (bitmap->last >= bitmap->max) | ||
128 | bitmap->last = 0; | ||
129 | } | ||
130 | obj |= bitmap->top; | ||
131 | } else | ||
132 | obj = -1; | ||
133 | |||
134 | spin_unlock(&bitmap->lock); | ||
135 | |||
136 | return obj; | ||
137 | } | ||
138 | |||
139 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) | ||
140 | { | ||
141 | u32 i; | ||
142 | |||
143 | obj &= bitmap->max + bitmap->reserved_top - 1; | ||
70 | 144 | ||
71 | spin_lock(&bitmap->lock); | 145 | spin_lock(&bitmap->lock); |
72 | clear_bit(obj, bitmap->table); | 146 | for (i = 0; i < cnt; i++) |
147 | clear_bit(obj + i, bitmap->table); | ||
73 | bitmap->last = min(bitmap->last, obj); | 148 | bitmap->last = min(bitmap->last, obj); |
74 | bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; | 149 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) |
150 | & bitmap->mask; | ||
75 | spin_unlock(&bitmap->lock); | 151 | spin_unlock(&bitmap->lock); |
76 | } | 152 | } |
77 | 153 | ||
78 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved) | 154 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, |
155 | u32 reserved_bot, u32 reserved_top) | ||
79 | { | 156 | { |
80 | int i; | 157 | int i; |
81 | 158 | ||
@@ -85,14 +162,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved | |||
85 | 162 | ||
86 | bitmap->last = 0; | 163 | bitmap->last = 0; |
87 | bitmap->top = 0; | 164 | bitmap->top = 0; |
88 | bitmap->max = num; | 165 | bitmap->max = num - reserved_top; |
89 | bitmap->mask = mask; | 166 | bitmap->mask = mask; |
167 | bitmap->reserved_top = reserved_top; | ||
90 | spin_lock_init(&bitmap->lock); | 168 | spin_lock_init(&bitmap->lock); |
91 | bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL); | 169 | bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * |
170 | sizeof (long), GFP_KERNEL); | ||
92 | if (!bitmap->table) | 171 | if (!bitmap->table) |
93 | return -ENOMEM; | 172 | return -ENOMEM; |
94 | 173 | ||
95 | for (i = 0; i < reserved; ++i) | 174 | for (i = 0; i < reserved_bot; ++i) |
96 | set_bit(i, bitmap->table); | 175 | set_bit(i, bitmap->table); |
97 | 176 | ||
98 | return 0; | 177 | return 0; |
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index 9bb50e3f8974..b7ad2829d67e 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c | |||
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev) | |||
300 | INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); | 300 | INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); |
301 | 301 | ||
302 | err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, | 302 | err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, |
303 | dev->caps.num_cqs - 1, dev->caps.reserved_cqs); | 303 | dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); |
304 | if (err) | 304 | if (err) |
305 | return err; | 305 | return err; |
306 | 306 | ||
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c new file mode 100644 index 000000000000..1368a8010af4 --- /dev/null +++ b/drivers/net/mlx4/en_cq.c | |||
@@ -0,0 +1,146 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/mlx4/cq.h> | ||
35 | #include <linux/mlx4/qp.h> | ||
36 | #include <linux/mlx4/cmd.h> | ||
37 | |||
38 | #include "mlx4_en.h" | ||
39 | |||
40 | static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) | ||
41 | { | ||
42 | return; | ||
43 | } | ||
44 | |||
45 | |||
46 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, | ||
47 | struct mlx4_en_cq *cq, | ||
48 | int entries, int ring, enum cq_type mode) | ||
49 | { | ||
50 | struct mlx4_en_dev *mdev = priv->mdev; | ||
51 | int err; | ||
52 | |||
53 | cq->size = entries; | ||
54 | if (mode == RX) | ||
55 | cq->buf_size = cq->size * sizeof(struct mlx4_cqe); | ||
56 | else | ||
57 | cq->buf_size = sizeof(struct mlx4_cqe); | ||
58 | |||
59 | cq->ring = ring; | ||
60 | cq->is_tx = mode; | ||
61 | spin_lock_init(&cq->lock); | ||
62 | |||
63 | err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, | ||
64 | cq->buf_size, 2 * PAGE_SIZE); | ||
65 | if (err) | ||
66 | return err; | ||
67 | |||
68 | err = mlx4_en_map_buffer(&cq->wqres.buf); | ||
69 | if (err) | ||
70 | mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); | ||
71 | |||
72 | return err; | ||
73 | } | ||
74 | |||
75 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
76 | { | ||
77 | struct mlx4_en_dev *mdev = priv->mdev; | ||
78 | int err; | ||
79 | |||
80 | cq->dev = mdev->pndev[priv->port]; | ||
81 | cq->mcq.set_ci_db = cq->wqres.db.db; | ||
82 | cq->mcq.arm_db = cq->wqres.db.db + 1; | ||
83 | *cq->mcq.set_ci_db = 0; | ||
84 | *cq->mcq.arm_db = 0; | ||
85 | cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf; | ||
86 | memset(cq->buf, 0, cq->buf_size); | ||
87 | |||
88 | err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, | ||
89 | cq->wqres.db.dma, &cq->mcq, cq->is_tx); | ||
90 | if (err) | ||
91 | return err; | ||
92 | |||
93 | cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; | ||
94 | cq->mcq.event = mlx4_en_cq_event; | ||
95 | |||
96 | if (cq->is_tx) { | ||
97 | init_timer(&cq->timer); | ||
98 | cq->timer.function = mlx4_en_poll_tx_cq; | ||
99 | cq->timer.data = (unsigned long) cq; | ||
100 | } else { | ||
101 | netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); | ||
102 | napi_enable(&cq->napi); | ||
103 | } | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
109 | { | ||
110 | struct mlx4_en_dev *mdev = priv->mdev; | ||
111 | |||
112 | mlx4_en_unmap_buffer(&cq->wqres.buf); | ||
113 | mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); | ||
114 | cq->buf_size = 0; | ||
115 | cq->buf = NULL; | ||
116 | } | ||
117 | |||
118 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
119 | { | ||
120 | struct mlx4_en_dev *mdev = priv->mdev; | ||
121 | |||
122 | if (cq->is_tx) | ||
123 | del_timer(&cq->timer); | ||
124 | else | ||
125 | napi_disable(&cq->napi); | ||
126 | |||
127 | mlx4_cq_free(mdev->dev, &cq->mcq); | ||
128 | } | ||
129 | |||
130 | /* Set rx cq moderation parameters */ | ||
131 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
132 | { | ||
133 | return mlx4_cq_modify(priv->mdev->dev, &cq->mcq, | ||
134 | cq->moder_cnt, cq->moder_time); | ||
135 | } | ||
136 | |||
137 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
138 | { | ||
139 | cq->armed = 1; | ||
140 | mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map, | ||
141 | &priv->mdev->uar_lock); | ||
142 | |||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | |||
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c new file mode 100644 index 000000000000..1b0eebf84f76 --- /dev/null +++ b/drivers/net/mlx4/en_main.c | |||
@@ -0,0 +1,254 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/cpumask.h> | ||
35 | #include <linux/module.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/netdevice.h> | ||
38 | #include <linux/cpumask.h> | ||
39 | |||
40 | #include <linux/mlx4/driver.h> | ||
41 | #include <linux/mlx4/device.h> | ||
42 | #include <linux/mlx4/cmd.h> | ||
43 | |||
44 | #include "mlx4_en.h" | ||
45 | |||
46 | MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin"); | ||
47 | MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver"); | ||
48 | MODULE_LICENSE("Dual BSD/GPL"); | ||
49 | MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")"); | ||
50 | |||
51 | static const char mlx4_en_version[] = | ||
52 | DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v" | ||
53 | DRV_VERSION " (" DRV_RELDATE ")\n"; | ||
54 | |||
55 | static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, | ||
56 | enum mlx4_dev_event event, int port) | ||
57 | { | ||
58 | struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; | ||
59 | struct mlx4_en_priv *priv; | ||
60 | |||
61 | if (!mdev->pndev[port]) | ||
62 | return; | ||
63 | |||
64 | priv = netdev_priv(mdev->pndev[port]); | ||
65 | switch (event) { | ||
66 | case MLX4_DEV_EVENT_PORT_UP: | ||
67 | case MLX4_DEV_EVENT_PORT_DOWN: | ||
68 | /* To prevent races, we poll the link state in a separate | ||
69 | task rather than changing it here */ | ||
70 | priv->link_state = event; | ||
71 | queue_work(mdev->workqueue, &priv->linkstate_task); | ||
72 | break; | ||
73 | |||
74 | case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: | ||
75 | mlx4_err(mdev, "Internal error detected, restarting device\n"); | ||
76 | break; | ||
77 | |||
78 | default: | ||
79 | mlx4_warn(mdev, "Unhandled event: %d\n", event); | ||
80 | } | ||
81 | } | ||
82 | |||
83 | static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) | ||
84 | { | ||
85 | struct mlx4_en_dev *mdev = endev_ptr; | ||
86 | int i; | ||
87 | |||
88 | mutex_lock(&mdev->state_lock); | ||
89 | mdev->device_up = false; | ||
90 | mutex_unlock(&mdev->state_lock); | ||
91 | |||
92 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) | ||
93 | if (mdev->pndev[i]) | ||
94 | mlx4_en_destroy_netdev(mdev->pndev[i]); | ||
95 | |||
96 | flush_workqueue(mdev->workqueue); | ||
97 | destroy_workqueue(mdev->workqueue); | ||
98 | mlx4_mr_free(dev, &mdev->mr); | ||
99 | mlx4_uar_free(dev, &mdev->priv_uar); | ||
100 | mlx4_pd_free(dev, mdev->priv_pdn); | ||
101 | kfree(mdev); | ||
102 | } | ||
103 | |||
104 | static void *mlx4_en_add(struct mlx4_dev *dev) | ||
105 | { | ||
106 | static int mlx4_en_version_printed; | ||
107 | struct mlx4_en_dev *mdev; | ||
108 | int i; | ||
109 | int err; | ||
110 | |||
111 | if (!mlx4_en_version_printed) { | ||
112 | printk(KERN_INFO "%s", mlx4_en_version); | ||
113 | mlx4_en_version_printed++; | ||
114 | } | ||
115 | |||
116 | mdev = kzalloc(sizeof *mdev, GFP_KERNEL); | ||
117 | if (!mdev) { | ||
118 | dev_err(&dev->pdev->dev, "Device struct alloc failed, " | ||
119 | "aborting.\n"); | ||
120 | err = -ENOMEM; | ||
121 | goto err_free_res; | ||
122 | } | ||
123 | |||
124 | if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) | ||
125 | goto err_free_dev; | ||
126 | |||
127 | if (mlx4_uar_alloc(dev, &mdev->priv_uar)) | ||
128 | goto err_pd; | ||
129 | |||
130 | mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); | ||
131 | if (!mdev->uar_map) | ||
132 | goto err_uar; | ||
133 | spin_lock_init(&mdev->uar_lock); | ||
134 | |||
135 | mdev->dev = dev; | ||
136 | mdev->dma_device = &(dev->pdev->dev); | ||
137 | mdev->pdev = dev->pdev; | ||
138 | mdev->device_up = false; | ||
139 | |||
140 | mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); | ||
141 | if (!mdev->LSO_support) | ||
142 | mlx4_warn(mdev, "LSO not supported, please upgrade to later " | ||
143 | "FW version to enable LSO\n"); | ||
144 | |||
145 | if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, | ||
146 | MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, | ||
147 | 0, 0, &mdev->mr)) { | ||
148 | mlx4_err(mdev, "Failed allocating memory region\n"); | ||
149 | goto err_uar; | ||
150 | } | ||
151 | if (mlx4_mr_enable(mdev->dev, &mdev->mr)) { | ||
152 | mlx4_err(mdev, "Failed enabling memory region\n"); | ||
153 | goto err_mr; | ||
154 | } | ||
155 | |||
156 | /* Build device profile according to supplied module parameters */ | ||
157 | err = mlx4_en_get_profile(mdev); | ||
158 | if (err) { | ||
159 | mlx4_err(mdev, "Bad module parameters, aborting.\n"); | ||
160 | goto err_mr; | ||
161 | } | ||
162 | |||
163 | /* Configure wich ports to start according to module parameters */ | ||
164 | mdev->port_cnt = 0; | ||
165 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) | ||
166 | mdev->port_cnt++; | ||
167 | |||
168 | /* If we did not receive an explicit number of Rx rings, default to | ||
169 | * the number of completion vectors populated by the mlx4_core */ | ||
170 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { | ||
171 | mlx4_info(mdev, "Using %d tx rings for port:%d\n", | ||
172 | mdev->profile.prof[i].tx_ring_num, i); | ||
173 | if (!mdev->profile.prof[i].rx_ring_num) { | ||
174 | mdev->profile.prof[i].rx_ring_num = 1; | ||
175 | mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", | ||
176 | 1, i); | ||
177 | } else | ||
178 | mlx4_info(mdev, "Using %d rx rings for port:%d\n", | ||
179 | mdev->profile.prof[i].rx_ring_num, i); | ||
180 | } | ||
181 | |||
182 | /* Create our own workqueue for reset/multicast tasks | ||
183 | * Note: we cannot use the shared workqueue because of deadlocks caused | ||
184 | * by the rtnl lock */ | ||
185 | mdev->workqueue = create_singlethread_workqueue("mlx4_en"); | ||
186 | if (!mdev->workqueue) { | ||
187 | err = -ENOMEM; | ||
188 | goto err_close_nic; | ||
189 | } | ||
190 | |||
191 | /* At this stage all non-port specific tasks are complete: | ||
192 | * mark the card state as up */ | ||
193 | mutex_init(&mdev->state_lock); | ||
194 | mdev->device_up = true; | ||
195 | |||
196 | /* Setup ports */ | ||
197 | |||
198 | /* Create a netdev for each port */ | ||
199 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { | ||
200 | mlx4_info(mdev, "Activating port:%d\n", i); | ||
201 | if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) { | ||
202 | mdev->pndev[i] = NULL; | ||
203 | goto err_free_netdev; | ||
204 | } | ||
205 | } | ||
206 | return mdev; | ||
207 | |||
208 | |||
209 | err_free_netdev: | ||
210 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { | ||
211 | if (mdev->pndev[i]) | ||
212 | mlx4_en_destroy_netdev(mdev->pndev[i]); | ||
213 | } | ||
214 | |||
215 | mutex_lock(&mdev->state_lock); | ||
216 | mdev->device_up = false; | ||
217 | mutex_unlock(&mdev->state_lock); | ||
218 | flush_workqueue(mdev->workqueue); | ||
219 | |||
220 | /* Stop event queue before we drop down to release shared SW state */ | ||
221 | |||
222 | err_close_nic: | ||
223 | destroy_workqueue(mdev->workqueue); | ||
224 | err_mr: | ||
225 | mlx4_mr_free(dev, &mdev->mr); | ||
226 | err_uar: | ||
227 | mlx4_uar_free(dev, &mdev->priv_uar); | ||
228 | err_pd: | ||
229 | mlx4_pd_free(dev, mdev->priv_pdn); | ||
230 | err_free_dev: | ||
231 | kfree(mdev); | ||
232 | err_free_res: | ||
233 | return NULL; | ||
234 | } | ||
235 | |||
236 | static struct mlx4_interface mlx4_en_interface = { | ||
237 | .add = mlx4_en_add, | ||
238 | .remove = mlx4_en_remove, | ||
239 | .event = mlx4_en_event, | ||
240 | }; | ||
241 | |||
242 | static int __init mlx4_en_init(void) | ||
243 | { | ||
244 | return mlx4_register_interface(&mlx4_en_interface); | ||
245 | } | ||
246 | |||
247 | static void __exit mlx4_en_cleanup(void) | ||
248 | { | ||
249 | mlx4_unregister_interface(&mlx4_en_interface); | ||
250 | } | ||
251 | |||
252 | module_init(mlx4_en_init); | ||
253 | module_exit(mlx4_en_cleanup); | ||
254 | |||
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c new file mode 100644 index 000000000000..a339afbeed38 --- /dev/null +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -0,0 +1,1088 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/etherdevice.h> | ||
35 | #include <linux/tcp.h> | ||
36 | #include <linux/if_vlan.h> | ||
37 | #include <linux/delay.h> | ||
38 | |||
39 | #include <linux/mlx4/driver.h> | ||
40 | #include <linux/mlx4/device.h> | ||
41 | #include <linux/mlx4/cmd.h> | ||
42 | #include <linux/mlx4/cq.h> | ||
43 | |||
44 | #include "mlx4_en.h" | ||
45 | #include "en_port.h" | ||
46 | |||
47 | |||
48 | static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
49 | { | ||
50 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
51 | struct mlx4_en_dev *mdev = priv->mdev; | ||
52 | int err; | ||
53 | |||
54 | mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp); | ||
55 | priv->vlgrp = grp; | ||
56 | |||
57 | mutex_lock(&mdev->state_lock); | ||
58 | if (mdev->device_up && priv->port_up) { | ||
59 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp); | ||
60 | if (err) | ||
61 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | ||
62 | } | ||
63 | mutex_unlock(&mdev->state_lock); | ||
64 | } | ||
65 | |||
66 | static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | ||
67 | { | ||
68 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
69 | struct mlx4_en_dev *mdev = priv->mdev; | ||
70 | int err; | ||
71 | |||
72 | if (!priv->vlgrp) | ||
73 | return; | ||
74 | |||
75 | mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n", | ||
76 | vid, vlan_group_get_device(priv->vlgrp, vid)); | ||
77 | |||
78 | /* Add VID to port VLAN filter */ | ||
79 | mutex_lock(&mdev->state_lock); | ||
80 | if (mdev->device_up && priv->port_up) { | ||
81 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | ||
82 | if (err) | ||
83 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | ||
84 | } | ||
85 | mutex_unlock(&mdev->state_lock); | ||
86 | } | ||
87 | |||
88 | static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
89 | { | ||
90 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
91 | struct mlx4_en_dev *mdev = priv->mdev; | ||
92 | int err; | ||
93 | |||
94 | if (!priv->vlgrp) | ||
95 | return; | ||
96 | |||
97 | mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp " | ||
98 | "entry:%p)\n", vid, priv->vlgrp, | ||
99 | vlan_group_get_device(priv->vlgrp, vid)); | ||
100 | vlan_group_set_device(priv->vlgrp, vid, NULL); | ||
101 | |||
102 | /* Remove VID from port VLAN filter */ | ||
103 | mutex_lock(&mdev->state_lock); | ||
104 | if (mdev->device_up && priv->port_up) { | ||
105 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | ||
106 | if (err) | ||
107 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | ||
108 | } | ||
109 | mutex_unlock(&mdev->state_lock); | ||
110 | } | ||
111 | |||
112 | static u64 mlx4_en_mac_to_u64(u8 *addr) | ||
113 | { | ||
114 | u64 mac = 0; | ||
115 | int i; | ||
116 | |||
117 | for (i = 0; i < ETH_ALEN; i++) { | ||
118 | mac <<= 8; | ||
119 | mac |= addr[i]; | ||
120 | } | ||
121 | return mac; | ||
122 | } | ||
123 | |||
124 | static int mlx4_en_set_mac(struct net_device *dev, void *addr) | ||
125 | { | ||
126 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
127 | struct mlx4_en_dev *mdev = priv->mdev; | ||
128 | struct sockaddr *saddr = addr; | ||
129 | |||
130 | if (!is_valid_ether_addr(saddr->sa_data)) | ||
131 | return -EADDRNOTAVAIL; | ||
132 | |||
133 | memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); | ||
134 | priv->mac = mlx4_en_mac_to_u64(dev->dev_addr); | ||
135 | queue_work(mdev->workqueue, &priv->mac_task); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static void mlx4_en_do_set_mac(struct work_struct *work) | ||
140 | { | ||
141 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
142 | mac_task); | ||
143 | struct mlx4_en_dev *mdev = priv->mdev; | ||
144 | int err = 0; | ||
145 | |||
146 | mutex_lock(&mdev->state_lock); | ||
147 | if (priv->port_up) { | ||
148 | /* Remove old MAC and insert the new one */ | ||
149 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
150 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
151 | priv->mac, &priv->mac_index); | ||
152 | if (err) | ||
153 | mlx4_err(mdev, "Failed changing HW MAC address\n"); | ||
154 | } else | ||
155 | mlx4_dbg(HW, priv, "Port is down, exiting...\n"); | ||
156 | |||
157 | mutex_unlock(&mdev->state_lock); | ||
158 | } | ||
159 | |||
160 | static void mlx4_en_clear_list(struct net_device *dev) | ||
161 | { | ||
162 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
163 | struct dev_mc_list *plist = priv->mc_list; | ||
164 | struct dev_mc_list *next; | ||
165 | |||
166 | while (plist) { | ||
167 | next = plist->next; | ||
168 | kfree(plist); | ||
169 | plist = next; | ||
170 | } | ||
171 | priv->mc_list = NULL; | ||
172 | } | ||
173 | |||
174 | static void mlx4_en_cache_mclist(struct net_device *dev) | ||
175 | { | ||
176 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
177 | struct mlx4_en_dev *mdev = priv->mdev; | ||
178 | struct dev_mc_list *mclist; | ||
179 | struct dev_mc_list *tmp; | ||
180 | struct dev_mc_list *plist = NULL; | ||
181 | |||
182 | for (mclist = dev->mc_list; mclist; mclist = mclist->next) { | ||
183 | tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); | ||
184 | if (!tmp) { | ||
185 | mlx4_err(mdev, "failed to allocate multicast list\n"); | ||
186 | mlx4_en_clear_list(dev); | ||
187 | return; | ||
188 | } | ||
189 | memcpy(tmp, mclist, sizeof(struct dev_mc_list)); | ||
190 | tmp->next = NULL; | ||
191 | if (plist) | ||
192 | plist->next = tmp; | ||
193 | else | ||
194 | priv->mc_list = tmp; | ||
195 | plist = tmp; | ||
196 | } | ||
197 | } | ||
198 | |||
199 | |||
200 | static void mlx4_en_set_multicast(struct net_device *dev) | ||
201 | { | ||
202 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
203 | |||
204 | if (!priv->port_up) | ||
205 | return; | ||
206 | |||
207 | queue_work(priv->mdev->workqueue, &priv->mcast_task); | ||
208 | } | ||
209 | |||
/*
 * Deferred RX-filter reprogramming for mlx4_en_set_multicast().  Runs on
 * mdev->workqueue because it issues (sleeping) firmware commands.
 * mdev->state_lock serializes it against device/port state changes.
 *
 * Three regimes, applied in order:
 *   IFF_PROMISC     - steer everything to the default QP, drop the
 *                     multicast and VLAN filters;
 *   IFF_ALLMULTI    - leave unicast filtering on, disable the multicast
 *                     filter so all multicast passes;
 *   otherwise       - rebuild the HW multicast filter from a cached copy
 *                     of dev->mc_list (plus broadcast).
 */
static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct dev_mc_list *mclist;
	u64 mcast_addr = 0;
	int err;

	mutex_lock(&mdev->state_lock);
	/* If the device or port went down since the work was queued there is
	 * nothing to program; filters are reinstalled on the next port up. */
	if (!mdev->device_up) {
		mlx4_dbg(HW, priv, "Card is not up, ignoring "
				   "multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		mlx4_dbg(HW, priv, "Port is down, ignoring "
				   "multicast change.\n");
		goto out;
	}

	/*
	 * Promsicuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		/* Only transition if we were not already promiscuous */
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
					  priv->port);
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscouos mode */
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 1);
			if (err)
				mlx4_err(mdev, "Failed enabling "
					 "promiscous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				mlx4_err(mdev, "Failed disabling "
					 "multicast filter\n");

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
			if (err)
				mlx4_err(mdev, "Failed disabling "
					 "VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscous mode
	 */

	/* Leaving promiscuous: restore default steering and VLAN filtering */
	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
				  priv->port);
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscouos mode */
		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
					     priv->base_qpn, 0);
		if (err)
			mlx4_err(mdev, "Failed disabling promiscous mode\n");

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
		if (err)
			mlx4_err(mdev, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			mlx4_err(mdev, "Failed disabling multicast filter\n");
	} else {
		/* Deliberately disable the filter first so multicast traffic
		 * keeps flowing unfiltered while the list is rewritten below,
		 * then re-enable once the new entries are in place. */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			mlx4_err(mdev, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphor */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			mlx4_err(mdev, "Failed enabling multicast filter\n");

		/* Drop the cached copy now that HW has been programmed */
		mlx4_en_clear_list(dev);
	}
out:
	mutex_unlock(&mdev->state_lock);
}
324 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: drain every RX completion queue with interrupts logically
 * disabled.  The cq lock plus napi_synchronize() keep the normal NAPI
 * poller from running concurrently on the same CQ.
 */
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int ring;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		struct mlx4_en_cq *cq = &priv->rx_cq[ring];

		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif
342 | |||
343 | static void mlx4_en_tx_timeout(struct net_device *dev) | ||
344 | { | ||
345 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
346 | struct mlx4_en_dev *mdev = priv->mdev; | ||
347 | |||
348 | if (netif_msg_timer(priv)) | ||
349 | mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port); | ||
350 | |||
351 | if (netif_carrier_ok(dev)) { | ||
352 | priv->port_stats.tx_timeout++; | ||
353 | mlx4_dbg(DRV, priv, "Scheduling watchdog\n"); | ||
354 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
355 | } | ||
356 | } | ||
357 | |||
358 | |||
359 | static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) | ||
360 | { | ||
361 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
362 | |||
363 | spin_lock_bh(&priv->stats_lock); | ||
364 | memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); | ||
365 | spin_unlock_bh(&priv->stats_lock); | ||
366 | |||
367 | return &priv->ret_stats; | ||
368 | } | ||
369 | |||
370 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | ||
371 | { | ||
372 | struct mlx4_en_dev *mdev = priv->mdev; | ||
373 | struct mlx4_en_cq *cq; | ||
374 | int i; | ||
375 | |||
376 | /* If we haven't received a specific coalescing setting | ||
377 | * (module param), we set the moderation paramters as follows: | ||
378 | * - moder_cnt is set to the number of mtu sized packets to | ||
379 | * satisfy our coelsing target. | ||
380 | * - moder_time is set to a fixed value. | ||
381 | */ | ||
382 | priv->rx_frames = (mdev->profile.rx_moder_cnt == | ||
383 | MLX4_EN_AUTO_CONF) ? | ||
384 | MLX4_EN_RX_COAL_TARGET / | ||
385 | priv->dev->mtu + 1 : | ||
386 | mdev->profile.rx_moder_cnt; | ||
387 | priv->rx_usecs = (mdev->profile.rx_moder_time == | ||
388 | MLX4_EN_AUTO_CONF) ? | ||
389 | MLX4_EN_RX_COAL_TIME : | ||
390 | mdev->profile.rx_moder_time; | ||
391 | mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - " | ||
392 | "rx_frames:%d rx_usecs:%d\n", | ||
393 | priv->dev->mtu, priv->rx_frames, priv->rx_usecs); | ||
394 | |||
395 | /* Setup cq moderation params */ | ||
396 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
397 | cq = &priv->rx_cq[i]; | ||
398 | cq->moder_cnt = priv->rx_frames; | ||
399 | cq->moder_time = priv->rx_usecs; | ||
400 | } | ||
401 | |||
402 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
403 | cq = &priv->tx_cq[i]; | ||
404 | cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; | ||
405 | cq->moder_time = MLX4_EN_TX_COAL_TIME; | ||
406 | } | ||
407 | |||
408 | /* Reset auto-moderation params */ | ||
409 | priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; | ||
410 | priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; | ||
411 | priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; | ||
412 | priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; | ||
413 | priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; | ||
414 | priv->adaptive_rx_coal = mdev->profile.auto_moder; | ||
415 | priv->last_moder_time = MLX4_EN_AUTO_CONF; | ||
416 | priv->last_moder_jiffies = 0; | ||
417 | priv->last_moder_packets = 0; | ||
418 | priv->last_moder_tx_packets = 0; | ||
419 | priv->last_moder_bytes = 0; | ||
420 | } | ||
421 | |||
/*
 * Adaptive interrupt moderation.  Invoked periodically from the stats
 * worker: measures the packet rate since the previous run and, when the
 * rate is high enough to matter, picks a new RX CQ moderation time —
 * either the maximum (for unbalanced, bandwidth-bound traffic) or a value
 * interpolated between the low/high bounds according to the rate.  The
 * chosen time is written to every RX CQ only when it changed.
 */
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long tx_packets;
	unsigned long tx_pkt_diff;
	unsigned long rx_pkt_diff;
	int moder_time;
	int i, err;

	/* Nothing to do unless adaptive coalescing is on and a full sample
	 * interval has elapsed since the last run. */
	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	/* Snapshot the counters under stats_lock for a consistent view */
	spin_lock_bh(&priv->stats_lock);
	rx_packets = priv->stats.rx_packets;
	rx_bytes = priv->stats.rx_bytes;
	tx_packets = priv->stats.tx_packets;
	spin_unlock_bh(&priv->stats_lock);

	/* First invocation (or zero-length period): only record a baseline */
	if (!priv->last_moder_jiffies || !period)
		goto out;

	tx_pkt_diff = ((unsigned long) (tx_packets -
					priv->last_moder_tx_packets));
	rx_pkt_diff = ((unsigned long) (rx_packets -
					priv->last_moder_packets));
	packets = max(tx_pkt_diff, rx_pkt_diff);
	rate = packets * HZ / period;
	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
						   priv->last_moder_bytes)) / packets : 0;

	/* Apply auto-moderation only when packet rate exceeds a rate that
	 * it matters */
	if (rate > MLX4_EN_RX_RATE_THRESH) {
		/* If tx and rx packet rates are not balanced, assume that
		 * traffic is mainly BW bound and apply maximum moderation.
		 * Otherwise, moderate according to packet rate */
		if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
			moder_time = priv->rx_usecs_high;
		} else {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				/* Linear interpolation between the low/high
				 * usec bounds over the rate window */
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		}
	} else {
		/* When packet rate is low, use default moderation rather than
		 * 0 to prevent interrupt storms if traffic suddenly increases */
		moder_time = priv->rx_usecs;
	}

	mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
		 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);

	mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
		 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
		 priv->last_moder_time, moder_time, period, packets,
		 avg_pkt_size, rate);

	/* Reprogram every RX CQ only if the moderation time changed */
	if (moder_time != priv->last_moder_time) {
		priv->last_moder_time = moder_time;
		for (i = 0; i < priv->rx_ring_num; i++) {
			cq = &priv->rx_cq[i];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				mlx4_err(mdev, "Failed modifying moderation for cq:%d "
					 "on port:%d\n", i, priv->port);
				break;
			}
		}
	}

out:
	/* Record the baseline for the next sample interval */
	priv->last_moder_packets = rx_packets;
	priv->last_moder_tx_packets = tx_packets;
	priv->last_moder_bytes = rx_bytes;
	priv->last_moder_jiffies = jiffies;
}
513 | |||
514 | static void mlx4_en_do_get_stats(struct work_struct *work) | ||
515 | { | ||
516 | struct delayed_work *delay = container_of(work, struct delayed_work, work); | ||
517 | struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, | ||
518 | stats_task); | ||
519 | struct mlx4_en_dev *mdev = priv->mdev; | ||
520 | int err; | ||
521 | |||
522 | err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); | ||
523 | if (err) | ||
524 | mlx4_dbg(HW, priv, "Could not update stats for " | ||
525 | "port:%d\n", priv->port); | ||
526 | |||
527 | mutex_lock(&mdev->state_lock); | ||
528 | if (mdev->device_up) { | ||
529 | if (priv->port_up) | ||
530 | mlx4_en_auto_moderation(priv); | ||
531 | |||
532 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | ||
533 | } | ||
534 | mutex_unlock(&mdev->state_lock); | ||
535 | } | ||
536 | |||
537 | static void mlx4_en_linkstate(struct work_struct *work) | ||
538 | { | ||
539 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
540 | linkstate_task); | ||
541 | struct mlx4_en_dev *mdev = priv->mdev; | ||
542 | int linkstate = priv->link_state; | ||
543 | |||
544 | mutex_lock(&mdev->state_lock); | ||
545 | /* If observable port state changed set carrier state and | ||
546 | * report to system log */ | ||
547 | if (priv->last_link_state != linkstate) { | ||
548 | if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { | ||
549 | if (netif_msg_link(priv)) | ||
550 | mlx4_info(mdev, "Port %d - link down\n", priv->port); | ||
551 | netif_carrier_off(priv->dev); | ||
552 | } else { | ||
553 | if (netif_msg_link(priv)) | ||
554 | mlx4_info(mdev, "Port %d - link up\n", priv->port); | ||
555 | netif_carrier_on(priv->dev); | ||
556 | } | ||
557 | } | ||
558 | priv->last_link_state = linkstate; | ||
559 | mutex_unlock(&mdev->state_lock); | ||
560 | } | ||
561 | |||
562 | |||
563 | static int mlx4_en_start_port(struct net_device *dev) | ||
564 | { | ||
565 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
566 | struct mlx4_en_dev *mdev = priv->mdev; | ||
567 | struct mlx4_en_cq *cq; | ||
568 | struct mlx4_en_tx_ring *tx_ring; | ||
569 | struct mlx4_en_rx_ring *rx_ring; | ||
570 | int rx_index = 0; | ||
571 | int tx_index = 0; | ||
572 | u16 stride; | ||
573 | int err = 0; | ||
574 | int i; | ||
575 | int j; | ||
576 | |||
577 | if (priv->port_up) { | ||
578 | mlx4_dbg(DRV, priv, "start port called while port already up\n"); | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | /* Calculate Rx buf size */ | ||
583 | dev->mtu = min(dev->mtu, priv->max_mtu); | ||
584 | mlx4_en_calc_rx_buf(dev); | ||
585 | mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); | ||
586 | stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
587 | DS_SIZE * priv->num_frags); | ||
588 | /* Configure rx cq's and rings */ | ||
589 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
590 | cq = &priv->rx_cq[i]; | ||
591 | rx_ring = &priv->rx_ring[i]; | ||
592 | |||
593 | err = mlx4_en_activate_cq(priv, cq); | ||
594 | if (err) { | ||
595 | mlx4_err(mdev, "Failed activating Rx CQ\n"); | ||
596 | goto rx_err; | ||
597 | } | ||
598 | for (j = 0; j < cq->size; j++) | ||
599 | cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; | ||
600 | err = mlx4_en_set_cq_moder(priv, cq); | ||
601 | if (err) { | ||
602 | mlx4_err(mdev, "Failed setting cq moderation parameters"); | ||
603 | mlx4_en_deactivate_cq(priv, cq); | ||
604 | goto cq_err; | ||
605 | } | ||
606 | mlx4_en_arm_cq(priv, cq); | ||
607 | |||
608 | ++rx_index; | ||
609 | } | ||
610 | |||
611 | err = mlx4_en_activate_rx_rings(priv); | ||
612 | if (err) { | ||
613 | mlx4_err(mdev, "Failed to activate RX rings\n"); | ||
614 | goto cq_err; | ||
615 | } | ||
616 | |||
617 | err = mlx4_en_config_rss_steer(priv); | ||
618 | if (err) { | ||
619 | mlx4_err(mdev, "Failed configuring rss steering\n"); | ||
620 | goto rx_err; | ||
621 | } | ||
622 | |||
623 | /* Configure tx cq's and rings */ | ||
624 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
625 | /* Configure cq */ | ||
626 | cq = &priv->tx_cq[i]; | ||
627 | err = mlx4_en_activate_cq(priv, cq); | ||
628 | if (err) { | ||
629 | mlx4_err(mdev, "Failed allocating Tx CQ\n"); | ||
630 | goto tx_err; | ||
631 | } | ||
632 | err = mlx4_en_set_cq_moder(priv, cq); | ||
633 | if (err) { | ||
634 | mlx4_err(mdev, "Failed setting cq moderation parameters"); | ||
635 | mlx4_en_deactivate_cq(priv, cq); | ||
636 | goto tx_err; | ||
637 | } | ||
638 | mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); | ||
639 | cq->buf->wqe_index = cpu_to_be16(0xffff); | ||
640 | |||
641 | /* Configure ring */ | ||
642 | tx_ring = &priv->tx_ring[i]; | ||
643 | err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, | ||
644 | priv->rx_ring[0].srq.srqn); | ||
645 | if (err) { | ||
646 | mlx4_err(mdev, "Failed allocating Tx ring\n"); | ||
647 | mlx4_en_deactivate_cq(priv, cq); | ||
648 | goto tx_err; | ||
649 | } | ||
650 | /* Set initial ownership of all Tx TXBBs to SW (1) */ | ||
651 | for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) | ||
652 | *((u32 *) (tx_ring->buf + j)) = 0xffffffff; | ||
653 | ++tx_index; | ||
654 | } | ||
655 | |||
656 | /* Configure port */ | ||
657 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
658 | priv->rx_skb_size + ETH_FCS_LEN, | ||
659 | mdev->profile.tx_pause, | ||
660 | mdev->profile.tx_ppp, | ||
661 | mdev->profile.rx_pause, | ||
662 | mdev->profile.rx_ppp); | ||
663 | if (err) { | ||
664 | mlx4_err(mdev, "Failed setting port general configurations" | ||
665 | " for port %d, with error %d\n", priv->port, err); | ||
666 | goto tx_err; | ||
667 | } | ||
668 | /* Set default qp number */ | ||
669 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); | ||
670 | if (err) { | ||
671 | mlx4_err(mdev, "Failed setting default qp numbers\n"); | ||
672 | goto tx_err; | ||
673 | } | ||
674 | /* Set port mac number */ | ||
675 | mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | ||
676 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
677 | priv->mac, &priv->mac_index); | ||
678 | if (err) { | ||
679 | mlx4_err(mdev, "Failed setting port mac\n"); | ||
680 | goto tx_err; | ||
681 | } | ||
682 | |||
683 | /* Init port */ | ||
684 | mlx4_dbg(HW, priv, "Initializing port\n"); | ||
685 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | ||
686 | if (err) { | ||
687 | mlx4_err(mdev, "Failed Initializing port\n"); | ||
688 | goto mac_err; | ||
689 | } | ||
690 | |||
691 | /* Schedule multicast task to populate multicast list */ | ||
692 | queue_work(mdev->workqueue, &priv->mcast_task); | ||
693 | |||
694 | priv->port_up = true; | ||
695 | netif_start_queue(dev); | ||
696 | return 0; | ||
697 | |||
698 | mac_err: | ||
699 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
700 | tx_err: | ||
701 | while (tx_index--) { | ||
702 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); | ||
703 | mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); | ||
704 | } | ||
705 | |||
706 | mlx4_en_release_rss_steer(priv); | ||
707 | rx_err: | ||
708 | for (i = 0; i < priv->rx_ring_num; i++) | ||
709 | mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[rx_index]); | ||
710 | cq_err: | ||
711 | while (rx_index--) | ||
712 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); | ||
713 | |||
714 | return err; /* need to close devices */ | ||
715 | } | ||
716 | |||
717 | |||
/*
 * Tear down an active port: stop and fence the TX path, CLOSE_PORT and
 * drop the HW MAC, then deactivate TX rings/CQs, RSS steering and RX
 * rings/CQs in that order.  Caller must hold mdev->state_lock.  No-op if
 * the port is already down.
 */
static void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!priv->port_up) {
		mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
			 priv->port);
		return;
	}
	netif_stop_queue(dev);

	/* Synchronize with tx routine: clearing port_up under the tx lock
	 * guarantees no transmit runs after this point sees the port up. */
	netif_tx_lock_bh(dev);
	priv->port_up = false;
	netif_tx_unlock_bh(dev);

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Unregister Mac address for the port */
	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	/* NOTE(review): short delay before reclaiming TX buffers -
	 * presumably lets in-flight completions land; confirm against HW
	 * documentation. */
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		/* Wait for any scheduled NAPI poll on this CQ to finish
		 * before tearing the CQ down. */
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}
}
763 | |||
764 | static void mlx4_en_restart(struct work_struct *work) | ||
765 | { | ||
766 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
767 | watchdog_task); | ||
768 | struct mlx4_en_dev *mdev = priv->mdev; | ||
769 | struct net_device *dev = priv->dev; | ||
770 | |||
771 | mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); | ||
772 | mlx4_en_stop_port(dev); | ||
773 | if (mlx4_en_start_port(dev)) | ||
774 | mlx4_err(mdev, "Failed restarting port %d\n", priv->port); | ||
775 | } | ||
776 | |||
777 | |||
778 | static int mlx4_en_open(struct net_device *dev) | ||
779 | { | ||
780 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
781 | struct mlx4_en_dev *mdev = priv->mdev; | ||
782 | int i; | ||
783 | int err = 0; | ||
784 | |||
785 | mutex_lock(&mdev->state_lock); | ||
786 | |||
787 | if (!mdev->device_up) { | ||
788 | mlx4_err(mdev, "Cannot open - device down/disabled\n"); | ||
789 | err = -EBUSY; | ||
790 | goto out; | ||
791 | } | ||
792 | |||
793 | /* Reset HW statistics and performance counters */ | ||
794 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | ||
795 | mlx4_dbg(HW, priv, "Failed dumping statistics\n"); | ||
796 | |||
797 | memset(&priv->stats, 0, sizeof(priv->stats)); | ||
798 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | ||
799 | |||
800 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
801 | priv->tx_ring[i].bytes = 0; | ||
802 | priv->tx_ring[i].packets = 0; | ||
803 | } | ||
804 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
805 | priv->rx_ring[i].bytes = 0; | ||
806 | priv->rx_ring[i].packets = 0; | ||
807 | } | ||
808 | |||
809 | mlx4_en_set_default_moderation(priv); | ||
810 | err = mlx4_en_start_port(dev); | ||
811 | if (err) | ||
812 | mlx4_err(mdev, "Failed starting port:%d\n", priv->port); | ||
813 | |||
814 | out: | ||
815 | mutex_unlock(&mdev->state_lock); | ||
816 | return err; | ||
817 | } | ||
818 | |||
819 | |||
820 | static int mlx4_en_close(struct net_device *dev) | ||
821 | { | ||
822 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
823 | struct mlx4_en_dev *mdev = priv->mdev; | ||
824 | |||
825 | if (netif_msg_ifdown(priv)) | ||
826 | mlx4_info(mdev, "Close called for port:%d\n", priv->port); | ||
827 | |||
828 | mutex_lock(&mdev->state_lock); | ||
829 | |||
830 | mlx4_en_stop_port(dev); | ||
831 | netif_carrier_off(dev); | ||
832 | |||
833 | mutex_unlock(&mdev->state_lock); | ||
834 | return 0; | ||
835 | } | ||
836 | |||
837 | static void mlx4_en_free_resources(struct mlx4_en_priv *priv) | ||
838 | { | ||
839 | int i; | ||
840 | |||
841 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
842 | if (priv->tx_ring[i].tx_info) | ||
843 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); | ||
844 | if (priv->tx_cq[i].buf) | ||
845 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); | ||
846 | } | ||
847 | |||
848 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
849 | if (priv->rx_ring[i].rx_info) | ||
850 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); | ||
851 | if (priv->rx_cq[i].buf) | ||
852 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); | ||
853 | } | ||
854 | } | ||
855 | |||
/*
 * Allocate TX/RX rings and their completion queues per the port profile.
 *
 * No rollback is performed here on failure; the caller
 * (mlx4_en_init_netdev) unwinds via mlx4_en_destroy_netdev(), whose
 * mlx4_en_free_resources() tolerates partially created rings.
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

	return 0;

err:
	mlx4_err(mdev, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}
890 | |||
891 | |||
/*
 * Tear down a netdev created by mlx4_en_init_netdev().
 *
 * Also serves as that function's error-unwind path, so it must tolerate
 * partially initialized state (hence the 'registered' and 'allocated'
 * flags checked below).
 */
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	/* cancel_delayed_work() does not wait for a running callback;
	 * the flush_workqueue() below is what guarantees completion */
	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->refill_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);
	free_netdev(dev);
}
919 | |||
920 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | ||
921 | { | ||
922 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
923 | struct mlx4_en_dev *mdev = priv->mdev; | ||
924 | int err = 0; | ||
925 | |||
926 | mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", | ||
927 | dev->mtu, new_mtu); | ||
928 | |||
929 | if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { | ||
930 | mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu); | ||
931 | return -EPERM; | ||
932 | } | ||
933 | dev->mtu = new_mtu; | ||
934 | |||
935 | if (netif_running(dev)) { | ||
936 | mutex_lock(&mdev->state_lock); | ||
937 | if (!mdev->device_up) { | ||
938 | /* NIC is probably restarting - let watchdog task reset | ||
939 | * the port */ | ||
940 | mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n"); | ||
941 | } else { | ||
942 | mlx4_en_stop_port(dev); | ||
943 | mlx4_en_set_default_moderation(priv); | ||
944 | err = mlx4_en_start_port(dev); | ||
945 | if (err) { | ||
946 | mlx4_err(mdev, "Failed restarting port:%d\n", | ||
947 | priv->port); | ||
948 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
949 | } | ||
950 | } | ||
951 | mutex_unlock(&mdev->state_lock); | ||
952 | } | ||
953 | return 0; | ||
954 | } | ||
955 | |||
/*
 * Create and register the Ethernet netdev for one physical port.
 *
 * Allocates the net_device, initializes driver private state and work
 * items, validates the burned-in MAC and MTU caps, allocates rings/CQs
 * and HW queue memory, wires up the netdev entry points and registers
 * the device.  Every failure path unwinds through
 * mlx4_en_destroy_netdev(), which tolerates partial initialization.
 * Returns 0 on success or a negative errno.
 */
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
	if (dev == NULL) {
		mlx4_err(mdev, "Net device allocation failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->rx_csum = 1;	/* RX checksum offload on by default */
	priv->flags = prof->flags;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mc_list = NULL;
	priv->mac_index = -1;	/* no MAC index registered with HW yet */
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
			 priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	/* RX descriptor stride: descriptor header plus the maximum number
	 * of data segments, rounded up to a power of two */
	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Populate Rx default RSS mappings */
	mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
				    RSS_FACTOR, priv->rx_ring_num);
	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		mlx4_err(mdev, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/* Populate Tx priority mappings */
	mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);

	/*
	 * Initialize netdev entry points
	 */

	dev->open = &mlx4_en_open;
	dev->stop = &mlx4_en_close;
	dev->hard_start_xmit = &mlx4_en_xmit;
	dev->get_stats = &mlx4_en_get_stats;
	dev->set_multicast_list = &mlx4_en_set_multicast;
	dev->set_mac_address = &mlx4_en_set_mac;
	dev->change_mtu = &mlx4_en_change_mtu;
	dev->tx_timeout = &mlx4_en_tx_timeout;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	dev->vlan_rx_register = mlx4_en_vlan_rx_register;
	dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mlx4_en_netpoll;
#endif
	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC: priv->mac holds it as a u64 with the first
	 * address byte in the most significant position */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[ETH_ALEN - 1 - i] =
		(u8) (priv->mac >> (8 * i));

	/*
	 * Set driver features
	 */
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_HW_VLAN_TX |
			 NETIF_F_HW_VLAN_RX |
			 NETIF_F_HW_VLAN_FILTER;
	if (mdev->profile.num_lro)
		dev->features |= NETIF_F_LRO;
	if (mdev->LSO_support) {
		dev->features |= NETIF_F_TSO;
		dev->features |= NETIF_F_TSO6;
	}

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		mlx4_err(mdev, "Netdev registration failed\n");
		goto out;
	}
	priv->registered = 1;
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
1088 | |||
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c new file mode 100644 index 000000000000..c2e69b1bcd0a --- /dev/null +++ b/drivers/net/mlx4/en_params.c | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/ethtool.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | |||
38 | #include "mlx4_en.h" | ||
39 | #include "en_port.h" | ||
40 | |||
41 | #define MLX4_EN_PARM_INT(X, def_val, desc) \ | ||
42 | static unsigned int X = def_val;\ | ||
43 | module_param(X , uint, 0444); \ | ||
44 | MODULE_PARM_DESC(X, desc); | ||
45 | |||
46 | |||
47 | /* | ||
48 | * Device scope module parameters | ||
49 | */ | ||
50 | |||
51 | |||
52 | /* Use a XOR rathern than Toeplitz hash function for RSS */ | ||
53 | MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS"); | ||
54 | |||
55 | /* RSS hash type mask - default to <saddr, daddr, sport, dport> */ | ||
56 | MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask"); | ||
57 | |||
58 | /* Number of LRO sessions per Rx ring (rounded up to a power of two) */ | ||
59 | MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS, | ||
60 | "Number of LRO sessions per ring or disabled (0)"); | ||
61 | |||
62 | /* Priority pausing */ | ||
63 | MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE, | ||
64 | "Pause policy on TX: 0 never generate pause frames " | ||
65 | "1 generate pause frames according to RX buffer threshold"); | ||
66 | MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE, | ||
67 | "Pause policy on RX: 0 ignore received pause frames " | ||
68 | "1 respect received pause frames"); | ||
69 | MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." | ||
70 | " Per priority bit mask"); | ||
71 | MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." | ||
72 | " Per priority bit mask"); | ||
73 | |||
74 | /* Interrupt moderation tunning */ | ||
75 | MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF, | ||
76 | "Max coalesced descriptors for Rx interrupt moderation"); | ||
77 | MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF, | ||
78 | "Timeout following last packet for Rx interrupt moderation"); | ||
79 | MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation"); | ||
80 | |||
81 | MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)"); | ||
82 | MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)"); | ||
83 | |||
84 | MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1"); | ||
85 | MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2"); | ||
86 | MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1"); | ||
87 | MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2"); | ||
88 | |||
89 | |||
/*
 * Translate the module parameters into the device-wide profile used
 * for all ports.  MLX4_EN_AUTO_CONF acts as a "use driver default"
 * sentinel for the ring-size parameters; ring sizes are clamped to a
 * minimum and rounded up to a power of two.  Always returns 0.
 */
int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
	struct mlx4_en_profile *params = &mdev->profile;

	params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
	params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
	params->auto_moder = auto_moder;
	params->rss_xor = (rss_xor != 0);
	params->rss_mask = rss_mask & 0x1f;
	params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
	params->rx_pause = pprx;
	params->rx_ppp = pfcrx;
	params->tx_pause = pptx;
	params->tx_ppp = pfctx;
	/* per-priority flow control needs one TX ring per priority;
	 * otherwise a single TX ring suffices */
	if (params->rx_ppp || params->tx_ppp) {
		params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
		params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
	} else {
		params->prof[1].tx_ring_num = 1;
		params->prof[2].tx_ring_num = 1;
	}
	params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
	params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);

	if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
		tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
	params->prof[1].tx_ring_size =
		(tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);

	if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
		tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
	params->prof[2].tx_ring_size =
		(tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);

	if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
		rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
	params->prof[1].rx_ring_size =
		(rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);

	if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
		rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
	params->prof[2].rx_ring_size =
		(rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
	return 0;
}
139 | |||
140 | |||
141 | /* | ||
142 | * Ethtool support | ||
143 | */ | ||
144 | |||
145 | static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv) | ||
146 | { | ||
147 | int i; | ||
148 | |||
149 | priv->port_stats.lro_aggregated = 0; | ||
150 | priv->port_stats.lro_flushed = 0; | ||
151 | priv->port_stats.lro_no_desc = 0; | ||
152 | |||
153 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
154 | priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated; | ||
155 | priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed; | ||
156 | priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | static void | ||
161 | mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) | ||
162 | { | ||
163 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
164 | struct mlx4_en_dev *mdev = priv->mdev; | ||
165 | |||
166 | sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id); | ||
167 | strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); | ||
168 | sprintf(drvinfo->fw_version, "%d.%d.%d", | ||
169 | (u16) (mdev->dev->caps.fw_ver >> 32), | ||
170 | (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), | ||
171 | (u16) (mdev->dev->caps.fw_ver & 0xffff)); | ||
172 | strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32); | ||
173 | drvinfo->n_stats = 0; | ||
174 | drvinfo->regdump_len = 0; | ||
175 | drvinfo->eedump_len = 0; | ||
176 | } | ||
177 | |||
178 | static u32 mlx4_en_get_tso(struct net_device *dev) | ||
179 | { | ||
180 | return (dev->features & NETIF_F_TSO) != 0; | ||
181 | } | ||
182 | |||
183 | static int mlx4_en_set_tso(struct net_device *dev, u32 data) | ||
184 | { | ||
185 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
186 | |||
187 | if (data) { | ||
188 | if (!priv->mdev->LSO_support) | ||
189 | return -EPERM; | ||
190 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); | ||
191 | } else | ||
192 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static u32 mlx4_en_get_rx_csum(struct net_device *dev) | ||
197 | { | ||
198 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
199 | return priv->rx_csum; | ||
200 | } | ||
201 | |||
202 | static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data) | ||
203 | { | ||
204 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
205 | priv->rx_csum = (data != 0); | ||
206 | return 0; | ||
207 | } | ||
208 | |||
/*
 * Statistics name table consumed by mlx4_en_get_strings() and, by
 * index, mlx4_en_get_ethtool_stats().  The three sections below must
 * stay in sync - order and count - with NUM_MAIN_STATS, NUM_PORT_STATS
 * and NUM_PKT_STATS respectively.
 */
static const char main_strings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",

	/* packet statistics */
	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
	"tx_prio_6", "tx_prio_7",
};
/* Count of the first (netdev stats) section of main_strings above */
#define NUM_MAIN_STATS 21
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
230 | |||
231 | static u32 mlx4_en_get_msglevel(struct net_device *dev) | ||
232 | { | ||
233 | return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable; | ||
234 | } | ||
235 | |||
236 | static void mlx4_en_set_msglevel(struct net_device *dev, u32 val) | ||
237 | { | ||
238 | ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val; | ||
239 | } | ||
240 | |||
241 | static void mlx4_en_get_wol(struct net_device *netdev, | ||
242 | struct ethtool_wolinfo *wol) | ||
243 | { | ||
244 | wol->supported = 0; | ||
245 | wol->wolopts = 0; | ||
246 | |||
247 | return; | ||
248 | } | ||
249 | |||
250 | static int mlx4_en_get_sset_count(struct net_device *dev, int sset) | ||
251 | { | ||
252 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
253 | |||
254 | if (sset != ETH_SS_STATS) | ||
255 | return -EOPNOTSUPP; | ||
256 | |||
257 | return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2; | ||
258 | } | ||
259 | |||
/*
 * ethtool: copy statistics values into 'data' in the exact order that
 * mlx4_en_get_strings() emits the names.
 *
 * NOTE(review): the casts assume priv->stats, priv->port_stats and
 * priv->pkstats are laid out as contiguous unsigned longs - confirm
 * against the struct definitions in mlx4_en.h.
 */
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;

	/* stats_lock keeps the snapshot consistent with the stats task */
	spin_lock_bh(&priv->stats_lock);

	mlx4_en_update_lro_stats(priv);

	for (i = 0; i < NUM_MAIN_STATS; i++)
		data[index++] = ((unsigned long *) &priv->stats)[i];
	for (i = 0; i < NUM_PORT_STATS; i++)
		data[index++] = ((unsigned long *) &priv->port_stats)[i];
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i].packets;
		data[index++] = priv->tx_ring[i].bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i].packets;
		data[index++] = priv->rx_ring[i].bytes;
	}
	for (i = 0; i < NUM_PKT_STATS; i++)
		data[index++] = ((unsigned long *) &priv->pkstats)[i];
	spin_unlock_bh(&priv->stats_lock);

}
288 | |||
/*
 * ethtool: emit the statistics names, in the exact order that
 * mlx4_en_get_ethtool_stats() fills the corresponding values.
 */
static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	/* Add main counters */
	for (i = 0; i < NUM_MAIN_STATS; i++)
		strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
	for (i = 0; i < NUM_PORT_STATS; i++)
		strcpy(data + (index++) * ETH_GSTRING_LEN,
			main_strings[i + NUM_MAIN_STATS]);
	/* per-TX-ring packet/byte counter names */
	for (i = 0; i < priv->tx_ring_num; i++) {
		sprintf(data + (index++) * ETH_GSTRING_LEN,
			"tx%d_packets", i);
		sprintf(data + (index++) * ETH_GSTRING_LEN,
			"tx%d_bytes", i);
	}
	/* per-RX-ring packet/byte counter names */
	for (i = 0; i < priv->rx_ring_num; i++) {
		sprintf(data + (index++) * ETH_GSTRING_LEN,
			"rx%d_packets", i);
		sprintf(data + (index++) * ETH_GSTRING_LEN,
			"rx%d_bytes", i);
	}
	for (i = 0; i < NUM_PKT_STATS; i++)
		strcpy(data + (index++) * ETH_GSTRING_LEN,
			main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
}
321 | |||
322 | static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
323 | { | ||
324 | cmd->autoneg = AUTONEG_DISABLE; | ||
325 | cmd->supported = SUPPORTED_10000baseT_Full; | ||
326 | cmd->advertising = SUPPORTED_10000baseT_Full; | ||
327 | if (netif_carrier_ok(dev)) { | ||
328 | cmd->speed = SPEED_10000; | ||
329 | cmd->duplex = DUPLEX_FULL; | ||
330 | } else { | ||
331 | cmd->speed = -1; | ||
332 | cmd->duplex = -1; | ||
333 | } | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
338 | { | ||
339 | if ((cmd->autoneg == AUTONEG_ENABLE) || | ||
340 | (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL)) | ||
341 | return -EINVAL; | ||
342 | |||
343 | /* Nothing to change */ | ||
344 | return 0; | ||
345 | } | ||
346 | |||
/*
 * ethtool -c handler: report interrupt-moderation settings.  TX
 * moderation is not configurable on this device, so its fields read
 * as zero.
 */
static int mlx4_en_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	coal->tx_coalesce_usecs = 0;
	coal->tx_max_coalesced_frames = 0;
	coal->rx_coalesce_usecs = priv->rx_usecs;
	coal->rx_max_coalesced_frames = priv->rx_frames;

	/* adaptive (dynamic) RX moderation parameters */
	coal->pkt_rate_low = priv->pkt_rate_low;
	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
	coal->pkt_rate_high = priv->pkt_rate_high;
	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
	coal->rate_sample_interval = priv->sample_interval;
	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
	return 0;
}
365 | |||
/*
 * ethtool -C handler: store the new moderation parameters and, unless
 * adaptive moderation is enabled, program them into every RX CQ.
 * MLX4_EN_AUTO_CONF in a field selects the driver default.
 * Returns 0 or the first CQ-programming error.
 */
static int mlx4_en_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int err, i;

	/* auto frame count: roughly RX_COAL_TARGET bytes worth of MTUs */
	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET /
				priv->dev->mtu + 1 :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	/* force the auto-moderation logic to re-evaluate on next run */
	priv->last_moder_time = MLX4_EN_AUTO_CONF;
	if (priv->adaptive_rx_coal)
		return 0;

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i].moder_cnt = priv->rx_frames;
		priv->rx_cq[i].moder_time = priv->rx_usecs;
		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
		if (err)
			return err;
	}
	return 0;
}
402 | |||
403 | static int mlx4_en_set_pauseparam(struct net_device *dev, | ||
404 | struct ethtool_pauseparam *pause) | ||
405 | { | ||
406 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
407 | struct mlx4_en_dev *mdev = priv->mdev; | ||
408 | int err; | ||
409 | |||
410 | mdev->profile.tx_pause = pause->tx_pause != 0; | ||
411 | mdev->profile.rx_pause = pause->rx_pause != 0; | ||
412 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
413 | priv->rx_skb_size + ETH_FCS_LEN, | ||
414 | mdev->profile.tx_pause, | ||
415 | mdev->profile.tx_ppp, | ||
416 | mdev->profile.rx_pause, | ||
417 | mdev->profile.rx_ppp); | ||
418 | if (err) | ||
419 | mlx4_err(mdev, "Failed setting pause params to\n"); | ||
420 | |||
421 | return err; | ||
422 | } | ||
423 | |||
424 | static void mlx4_en_get_pauseparam(struct net_device *dev, | ||
425 | struct ethtool_pauseparam *pause) | ||
426 | { | ||
427 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
428 | struct mlx4_en_dev *mdev = priv->mdev; | ||
429 | |||
430 | pause->tx_pause = mdev->profile.tx_pause; | ||
431 | pause->rx_pause = mdev->profile.rx_pause; | ||
432 | } | ||
433 | |||
/*
 * ethtool -g handler: report current ring sizes from the port profile.
 * There is no set_ringparam; sizes are fixed by module parameters.
 *
 * NOTE(review): rx_max_pending/tx_max_pending come from
 * caps.max_rq_sg/max_sq_sg, which name scatter/gather entry limits -
 * confirm these are really the intended maximum ring sizes.
 */
static void mlx4_en_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	memset(param, 0, sizeof(*param));
	param->rx_max_pending = mdev->dev->caps.max_rq_sg;
	param->tx_max_pending = mdev->dev->caps.max_sq_sg;
	param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
}
446 | |||
/*
 * ethtool entry points for mlx4_en netdevs, registered via
 * SET_ETHTOOL_OPS() in mlx4_en_init_netdev().  No set_ringparam is
 * provided: ring sizes are fixed at module load time.
 */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
#ifdef NETIF_F_TSO
	.get_tso = mlx4_en_get_tso,
	.set_tso = mlx4_en_set_tso,
#endif
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_rx_csum = mlx4_en_get_rx_csum,
	.set_rx_csum = mlx4_en_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.get_wol = mlx4_en_get_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};
476 | |||
477 | |||
478 | |||
479 | |||
480 | |||
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c new file mode 100644 index 000000000000..c5a4c0389752 --- /dev/null +++ b/drivers/net/mlx4/en_port.c | |||
@@ -0,0 +1,261 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | |||
35 | #include <linux/if_vlan.h> | ||
36 | |||
37 | #include <linux/mlx4/device.h> | ||
38 | #include <linux/mlx4/cmd.h> | ||
39 | |||
40 | #include "en_port.h" | ||
41 | #include "mlx4_en.h" | ||
42 | |||
43 | |||
44 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, | ||
45 | u64 mac, u64 clear, u8 mode) | ||
46 | { | ||
47 | return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, | ||
48 | MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B); | ||
49 | } | ||
50 | |||
51 | int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp) | ||
52 | { | ||
53 | struct mlx4_cmd_mailbox *mailbox; | ||
54 | struct mlx4_set_vlan_fltr_mbox *filter; | ||
55 | int i; | ||
56 | int j; | ||
57 | int index = 0; | ||
58 | u32 entry; | ||
59 | int err = 0; | ||
60 | |||
61 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
62 | if (IS_ERR(mailbox)) | ||
63 | return PTR_ERR(mailbox); | ||
64 | |||
65 | filter = mailbox->buf; | ||
66 | if (grp) { | ||
67 | memset(filter, 0, sizeof *filter); | ||
68 | for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) { | ||
69 | entry = 0; | ||
70 | for (j = 0; j < 32; j++) | ||
71 | if (vlan_group_get_device(grp, index++)) | ||
72 | entry |= 1 << j; | ||
73 | filter->entry[i] = cpu_to_be32(entry); | ||
74 | } | ||
75 | } else { | ||
76 | /* When no vlans are configured we block all vlans */ | ||
77 | memset(filter, 0, sizeof(*filter)); | ||
78 | } | ||
79 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR, | ||
80 | MLX4_CMD_TIME_CLASS_B); | ||
81 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
82 | return err; | ||
83 | } | ||
84 | |||
85 | |||
86 | int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, | ||
87 | u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) | ||
88 | { | ||
89 | struct mlx4_cmd_mailbox *mailbox; | ||
90 | struct mlx4_set_port_general_context *context; | ||
91 | int err; | ||
92 | u32 in_mod; | ||
93 | |||
94 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
95 | if (IS_ERR(mailbox)) | ||
96 | return PTR_ERR(mailbox); | ||
97 | context = mailbox->buf; | ||
98 | memset(context, 0, sizeof *context); | ||
99 | |||
100 | context->flags = SET_PORT_GEN_ALL_VALID; | ||
101 | context->mtu = cpu_to_be16(mtu); | ||
102 | context->pptx = (pptx * (!pfctx)) << 7; | ||
103 | context->pfctx = pfctx; | ||
104 | context->pprx = (pprx * (!pfcrx)) << 7; | ||
105 | context->pfcrx = pfcrx; | ||
106 | |||
107 | in_mod = MLX4_SET_PORT_GENERAL << 8 | port; | ||
108 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
109 | MLX4_CMD_TIME_CLASS_B); | ||
110 | |||
111 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
112 | return err; | ||
113 | } | ||
114 | |||
115 | int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | ||
116 | u8 promisc) | ||
117 | { | ||
118 | struct mlx4_cmd_mailbox *mailbox; | ||
119 | struct mlx4_set_port_rqp_calc_context *context; | ||
120 | int err; | ||
121 | u32 in_mod; | ||
122 | |||
123 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
124 | if (IS_ERR(mailbox)) | ||
125 | return PTR_ERR(mailbox); | ||
126 | context = mailbox->buf; | ||
127 | memset(context, 0, sizeof *context); | ||
128 | |||
129 | context->base_qpn = cpu_to_be32(base_qpn); | ||
130 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn); | ||
131 | context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn); | ||
132 | context->intra_no_vlan = 0; | ||
133 | context->no_vlan = MLX4_NO_VLAN_IDX; | ||
134 | context->intra_vlan_miss = 0; | ||
135 | context->vlan_miss = MLX4_VLAN_MISS_IDX; | ||
136 | |||
137 | in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; | ||
138 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
139 | MLX4_CMD_TIME_CLASS_B); | ||
140 | |||
141 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
142 | return err; | ||
143 | } | ||
144 | |||
145 | |||
/*
 * mlx4_en_DUMP_ETH_STATS - read the hardware ethernet counters for
 * @port and fold them into the netdev stats and per-priority software
 * counters of the corresponding mlx4_en device.
 *
 * @mdev:  mlx4 ethernet device state
 * @port:  port whose counters to dump
 * @reset: encoded into bits 15:8 of the input modifier; presumably a
 *         clear-after-read request -- TODO confirm against firmware spec
 *
 * Returns 0 on success or a negative errno from mailbox allocation or
 * the DUMP_ETH_STATS firmware command.
 */
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct net_device_stats *stats = &priv->stats;
	struct mlx4_cmd_mailbox *mailbox;
	u64 in_mod = reset << 8 | port;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	/* NOTE(review): assumes the command mailbox buffer is at least
	 * sizeof(struct mlx4_en_stat_out_mbox) bytes -- confirm */
	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
	if (err)
		goto out;

	mlx4_en_stats = mailbox->buf;

	/* stats_lock serialises against concurrent readers of
	 * priv->stats / priv->pkstats */
	spin_lock_bh(&priv->stats_lock);

	/* good RX frames minus dropped packets */
	stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
			    be32_to_cpu(mlx4_en_stats->RDROP);
	/* TX totals are per-priority counters plus novlan and loopback */
	stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
			    be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
			    be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
	stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
			  be64_to_cpu(mlx4_en_stats->ROCT_novlan);

	stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);

	/* RX errors: PCS errors, length drops, jabbers, CRC errors, runts */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
			   be32_to_cpu(mlx4_en_stats->RdropLength) +
			   be32_to_cpu(mlx4_en_stats->RJBBR) +
			   be32_to_cpu(mlx4_en_stats->RCRC) +
			   be32_to_cpu(mlx4_en_stats->RRUNT);
	stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
	stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
			   be64_to_cpu(mlx4_en_stats->MCAST_novlan);
	/* full-duplex fabric: no collision / carrier style counters */
	stats->collisions = 0;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	/* the same overflow-drop counter feeds over/fifo/missed */
	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = 0;
	stats->tx_fifo_errors = 0;
	stats->tx_heartbeat_errors = 0;
	stats->tx_window_errors = 0;

	/* driver-private per-priority counters (shown via ethtool) */
	priv->pkstats.broadcast =
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
				be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
	priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
	priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
	priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
	priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
	priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
	priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
	priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
	priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
	priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
	priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
	priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
	priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
	priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
	priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
	priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
	priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
	spin_unlock_bh(&priv->stats_lock);

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
261 | |||
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h new file mode 100644 index 000000000000..e6477f12beb5 --- /dev/null +++ b/drivers/net/mlx4/en_port.h | |||
@@ -0,0 +1,570 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #ifndef _MLX4_EN_PORT_H_ | ||
35 | #define _MLX4_EN_PORT_H_ | ||
36 | |||
37 | |||
/* "all fields valid" mask for mlx4_set_port_general_context.flags */
#define SET_PORT_GEN_ALL_VALID 0x7
/* bit position of the promiscuous flag in the rqp_calc promisc/mcast words */
#define SET_PORT_PROMISC_SHIFT 31
40 | |||
/* Ethernet-specific firmware command opcodes used by mlx4_en */
enum {
	MLX4_CMD_SET_VLAN_FLTR  = 0x47,
	MLX4_CMD_SET_MCAST_FLTR = 0x48,
	MLX4_CMD_DUMP_ETH_STATS = 0x49,
};
46 | |||
/*
 * Mailbox layout for SET_PORT with the "general" input modifier
 * (see mlx4_SET_PORT_general()). Firmware-defined layout: do not
 * reorder or resize fields.
 */
struct mlx4_set_port_general_context {
	u8 reserved[3];
	u8 flags;	/* SET_PORT_GEN_ALL_VALID marks all fields valid */
	u16 reserved2;
	__be16 mtu;
	u8 pptx;	/* global pause TX enable in bit 7 */
	u8 pfctx;	/* per-priority flow control TX mask */
	u16 reserved3;
	u8 pprx;	/* global pause RX enable in bit 7 */
	u8 pfcrx;	/* per-priority flow control RX mask */
	u16 reserved4;
};
59 | |||
/*
 * Mailbox layout for SET_PORT with the RQP_CALC input modifier
 * (see mlx4_SET_PORT_qpn_calc()). Firmware-defined layout: do not
 * reorder or resize fields.
 */
struct mlx4_set_port_rqp_calc_context {
	__be32 base_qpn;	/* base RX QP number for the port */
	__be32 flags;
	u8 reserved[3];
	u8 mac_miss;
	u8 intra_no_vlan;
	u8 no_vlan;		/* set to MLX4_NO_VLAN_IDX by the driver */
	u8 intra_vlan_miss;
	u8 vlan_miss;		/* set to MLX4_VLAN_MISS_IDX by the driver */
	u8 reserved2[3];
	u8 no_vlan_prio;
	__be32 promisc;		/* promisc flag (bit 31) | base_qpn */
	__be32 mcast;		/* bit 31 set unconditionally | base_qpn */
};
74 | |||
/* 128 32-bit words = one bit for each of the 4096 VLAN ids */
#define VLAN_FLTR_SIZE 128
struct mlx4_set_vlan_fltr_mbox {
	/* NOTE(review): mlx4_SET_VLAN_FLTR() fills entries from the last
	 * element downwards, so vid 0 is bit 0 of entry[VLAN_FLTR_SIZE-1] */
	__be32 entry[VLAN_FLTR_SIZE];
};
79 | |||
80 | |||
/* "mode" values for the SET_MCAST_FLTR command (mlx4_SET_MCAST_FLTR()) */
enum {
	MLX4_MCAST_CONFIG       = 0,
	MLX4_MCAST_DISABLE      = 1,
	MLX4_MCAST_ENABLE       = 2,
};
86 | |||
87 | |||
/*
 * Output mailbox layout of the DUMP_ETH_STATS firmware command
 * (consumed by mlx4_en_DUMP_ETH_STATS()).
 *
 * Most counter families have one 64-bit counter per priority (0-7)
 * plus a "novlan" bucket; TX families additionally have a "loopbk"
 * (loopback) bucket. Field order mirrors the firmware layout exactly:
 * do not reorder, insert or resize fields.
 */
struct mlx4_en_stat_out_mbox {
	/* Received frames with a length of 64 octets */
	__be64 R64_prio_0;
	__be64 R64_prio_1;
	__be64 R64_prio_2;
	__be64 R64_prio_3;
	__be64 R64_prio_4;
	__be64 R64_prio_5;
	__be64 R64_prio_6;
	__be64 R64_prio_7;
	__be64 R64_novlan;
	/* Received frames with a length of 127 octets */
	__be64 R127_prio_0;
	__be64 R127_prio_1;
	__be64 R127_prio_2;
	__be64 R127_prio_3;
	__be64 R127_prio_4;
	__be64 R127_prio_5;
	__be64 R127_prio_6;
	__be64 R127_prio_7;
	__be64 R127_novlan;
	/* Received frames with a length of 255 octets */
	__be64 R255_prio_0;
	__be64 R255_prio_1;
	__be64 R255_prio_2;
	__be64 R255_prio_3;
	__be64 R255_prio_4;
	__be64 R255_prio_5;
	__be64 R255_prio_6;
	__be64 R255_prio_7;
	__be64 R255_novlan;
	/* Received frames with a length of 511 octets */
	__be64 R511_prio_0;
	__be64 R511_prio_1;
	__be64 R511_prio_2;
	__be64 R511_prio_3;
	__be64 R511_prio_4;
	__be64 R511_prio_5;
	__be64 R511_prio_6;
	__be64 R511_prio_7;
	__be64 R511_novlan;
	/* Received frames with a length of 1023 octets */
	__be64 R1023_prio_0;
	__be64 R1023_prio_1;
	__be64 R1023_prio_2;
	__be64 R1023_prio_3;
	__be64 R1023_prio_4;
	__be64 R1023_prio_5;
	__be64 R1023_prio_6;
	__be64 R1023_prio_7;
	__be64 R1023_novlan;
	/* Received frames with a length of 1518 octets */
	__be64 R1518_prio_0;
	__be64 R1518_prio_1;
	__be64 R1518_prio_2;
	__be64 R1518_prio_3;
	__be64 R1518_prio_4;
	__be64 R1518_prio_5;
	__be64 R1518_prio_6;
	__be64 R1518_prio_7;
	__be64 R1518_novlan;
	/* Received frames with a length of 1522 octets */
	__be64 R1522_prio_0;
	__be64 R1522_prio_1;
	__be64 R1522_prio_2;
	__be64 R1522_prio_3;
	__be64 R1522_prio_4;
	__be64 R1522_prio_5;
	__be64 R1522_prio_6;
	__be64 R1522_prio_7;
	__be64 R1522_novlan;
	/* Received frames with a length of 1548 octets */
	__be64 R1548_prio_0;
	__be64 R1548_prio_1;
	__be64 R1548_prio_2;
	__be64 R1548_prio_3;
	__be64 R1548_prio_4;
	__be64 R1548_prio_5;
	__be64 R1548_prio_6;
	__be64 R1548_prio_7;
	__be64 R1548_novlan;
	/* Received frames with a length of 1548 < octets < MTU */
	__be64 R2MTU_prio_0;
	__be64 R2MTU_prio_1;
	__be64 R2MTU_prio_2;
	__be64 R2MTU_prio_3;
	__be64 R2MTU_prio_4;
	__be64 R2MTU_prio_5;
	__be64 R2MTU_prio_6;
	__be64 R2MTU_prio_7;
	__be64 R2MTU_novlan;
	/* Received frames with a length of MTU< octets and good CRC */
	__be64 RGIANT_prio_0;
	__be64 RGIANT_prio_1;
	__be64 RGIANT_prio_2;
	__be64 RGIANT_prio_3;
	__be64 RGIANT_prio_4;
	__be64 RGIANT_prio_5;
	__be64 RGIANT_prio_6;
	__be64 RGIANT_prio_7;
	__be64 RGIANT_novlan;
	/* Received broadcast frames with good CRC */
	__be64 RBCAST_prio_0;
	__be64 RBCAST_prio_1;
	__be64 RBCAST_prio_2;
	__be64 RBCAST_prio_3;
	__be64 RBCAST_prio_4;
	__be64 RBCAST_prio_5;
	__be64 RBCAST_prio_6;
	__be64 RBCAST_prio_7;
	__be64 RBCAST_novlan;
	/* Received multicast frames with good CRC */
	__be64 MCAST_prio_0;
	__be64 MCAST_prio_1;
	__be64 MCAST_prio_2;
	__be64 MCAST_prio_3;
	__be64 MCAST_prio_4;
	__be64 MCAST_prio_5;
	__be64 MCAST_prio_6;
	__be64 MCAST_prio_7;
	__be64 MCAST_novlan;
	/* Received unicast not short or GIANT frames with good CRC */
	__be64 RTOTG_prio_0;
	__be64 RTOTG_prio_1;
	__be64 RTOTG_prio_2;
	__be64 RTOTG_prio_3;
	__be64 RTOTG_prio_4;
	__be64 RTOTG_prio_5;
	__be64 RTOTG_prio_6;
	__be64 RTOTG_prio_7;
	__be64 RTOTG_novlan;

	/* Count of total octets of received frames, includes framing characters */
	__be64 RTTLOCT_prio_0;
	/* Count of total octets of received frames, not including framing
	   characters */
	__be64 RTTLOCT_NOFRM_prio_0;
	/* Count of Total number of octets received
	   (only for frames without errors) */
	__be64 ROCT_prio_0;

	__be64 RTTLOCT_prio_1;
	__be64 RTTLOCT_NOFRM_prio_1;
	__be64 ROCT_prio_1;

	__be64 RTTLOCT_prio_2;
	__be64 RTTLOCT_NOFRM_prio_2;
	__be64 ROCT_prio_2;

	__be64 RTTLOCT_prio_3;
	__be64 RTTLOCT_NOFRM_prio_3;
	__be64 ROCT_prio_3;

	__be64 RTTLOCT_prio_4;
	__be64 RTTLOCT_NOFRM_prio_4;
	__be64 ROCT_prio_4;

	__be64 RTTLOCT_prio_5;
	__be64 RTTLOCT_NOFRM_prio_5;
	__be64 ROCT_prio_5;

	__be64 RTTLOCT_prio_6;
	__be64 RTTLOCT_NOFRM_prio_6;
	__be64 ROCT_prio_6;

	__be64 RTTLOCT_prio_7;
	__be64 RTTLOCT_NOFRM_prio_7;
	__be64 ROCT_prio_7;

	__be64 RTTLOCT_novlan;
	__be64 RTTLOCT_NOFRM_novlan;
	__be64 ROCT_novlan;

	/* Count of Total received frames including bad frames */
	__be64 RTOT_prio_0;
	/* Count of Total number of received frames with 802.1Q encapsulation */
	__be64 R1Q_prio_0;
	__be64 reserved1;

	__be64 RTOT_prio_1;
	__be64 R1Q_prio_1;
	__be64 reserved2;

	__be64 RTOT_prio_2;
	__be64 R1Q_prio_2;
	__be64 reserved3;

	__be64 RTOT_prio_3;
	__be64 R1Q_prio_3;
	__be64 reserved4;

	__be64 RTOT_prio_4;
	__be64 R1Q_prio_4;
	__be64 reserved5;

	__be64 RTOT_prio_5;
	__be64 R1Q_prio_5;
	__be64 reserved6;

	__be64 RTOT_prio_6;
	__be64 R1Q_prio_6;
	__be64 reserved7;

	__be64 RTOT_prio_7;
	__be64 R1Q_prio_7;
	__be64 reserved8;

	__be64 RTOT_novlan;
	__be64 R1Q_novlan;
	__be64 reserved9;

	/* Total number of Successfully Received Control Frames */
	__be64 RCNTL;
	__be64 reserved10;
	__be64 reserved11;
	__be64 reserved12;
	/* Count of received frames with a length/type field value between 46
	   (42 for VLANtagged frames) and 1500 (also 1500 for VLAN-tagged frames),
	   inclusive */
	__be64 RInRangeLengthErr;
	/* Count of received frames with length/type field between 1501 and 1535
	   decimal, inclusive */
	__be64 ROutRangeLengthErr;
	/* Count of received frames that are longer than max allowed size for
	   802.3 frames (1518/1522) */
	__be64 RFrmTooLong;
	/* Count frames received with PCS error */
	__be64 PCS;

	/* Transmit frames with a length of 64 octets */
	__be64 T64_prio_0;
	__be64 T64_prio_1;
	__be64 T64_prio_2;
	__be64 T64_prio_3;
	__be64 T64_prio_4;
	__be64 T64_prio_5;
	__be64 T64_prio_6;
	__be64 T64_prio_7;
	__be64 T64_novlan;
	__be64 T64_loopbk;
	/* Transmit frames with a length of 65 to 127 octets. */
	__be64 T127_prio_0;
	__be64 T127_prio_1;
	__be64 T127_prio_2;
	__be64 T127_prio_3;
	__be64 T127_prio_4;
	__be64 T127_prio_5;
	__be64 T127_prio_6;
	__be64 T127_prio_7;
	__be64 T127_novlan;
	__be64 T127_loopbk;
	/* Transmit frames with a length of 128 to 255 octets */
	__be64 T255_prio_0;
	__be64 T255_prio_1;
	__be64 T255_prio_2;
	__be64 T255_prio_3;
	__be64 T255_prio_4;
	__be64 T255_prio_5;
	__be64 T255_prio_6;
	__be64 T255_prio_7;
	__be64 T255_novlan;
	__be64 T255_loopbk;
	/* Transmit frames with a length of 256 to 511 octets */
	__be64 T511_prio_0;
	__be64 T511_prio_1;
	__be64 T511_prio_2;
	__be64 T511_prio_3;
	__be64 T511_prio_4;
	__be64 T511_prio_5;
	__be64 T511_prio_6;
	__be64 T511_prio_7;
	__be64 T511_novlan;
	__be64 T511_loopbk;
	/* Transmit frames with a length of 512 to 1023 octets */
	__be64 T1023_prio_0;
	__be64 T1023_prio_1;
	__be64 T1023_prio_2;
	__be64 T1023_prio_3;
	__be64 T1023_prio_4;
	__be64 T1023_prio_5;
	__be64 T1023_prio_6;
	__be64 T1023_prio_7;
	__be64 T1023_novlan;
	__be64 T1023_loopbk;
	/* Transmit frames with a length of 1024 to 1518 octets */
	__be64 T1518_prio_0;
	__be64 T1518_prio_1;
	__be64 T1518_prio_2;
	__be64 T1518_prio_3;
	__be64 T1518_prio_4;
	__be64 T1518_prio_5;
	__be64 T1518_prio_6;
	__be64 T1518_prio_7;
	__be64 T1518_novlan;
	__be64 T1518_loopbk;
	/* Counts transmit frames with a length of 1519 to 1522 bytes */
	__be64 T1522_prio_0;
	__be64 T1522_prio_1;
	__be64 T1522_prio_2;
	__be64 T1522_prio_3;
	__be64 T1522_prio_4;
	__be64 T1522_prio_5;
	__be64 T1522_prio_6;
	__be64 T1522_prio_7;
	__be64 T1522_novlan;
	__be64 T1522_loopbk;
	/* Transmit frames with a length of 1523 to 1548 octets */
	__be64 T1548_prio_0;
	__be64 T1548_prio_1;
	__be64 T1548_prio_2;
	__be64 T1548_prio_3;
	__be64 T1548_prio_4;
	__be64 T1548_prio_5;
	__be64 T1548_prio_6;
	__be64 T1548_prio_7;
	__be64 T1548_novlan;
	__be64 T1548_loopbk;
	/* Counts transmit frames with a length of 1549 to MTU bytes */
	__be64 T2MTU_prio_0;
	__be64 T2MTU_prio_1;
	__be64 T2MTU_prio_2;
	__be64 T2MTU_prio_3;
	__be64 T2MTU_prio_4;
	__be64 T2MTU_prio_5;
	__be64 T2MTU_prio_6;
	__be64 T2MTU_prio_7;
	__be64 T2MTU_novlan;
	__be64 T2MTU_loopbk;
	/* Transmit frames with a length greater than MTU octets and a good CRC. */
	__be64 TGIANT_prio_0;
	__be64 TGIANT_prio_1;
	__be64 TGIANT_prio_2;
	__be64 TGIANT_prio_3;
	__be64 TGIANT_prio_4;
	__be64 TGIANT_prio_5;
	__be64 TGIANT_prio_6;
	__be64 TGIANT_prio_7;
	__be64 TGIANT_novlan;
	__be64 TGIANT_loopbk;
	/* Transmit broadcast frames with a good CRC */
	__be64 TBCAST_prio_0;
	__be64 TBCAST_prio_1;
	__be64 TBCAST_prio_2;
	__be64 TBCAST_prio_3;
	__be64 TBCAST_prio_4;
	__be64 TBCAST_prio_5;
	__be64 TBCAST_prio_6;
	__be64 TBCAST_prio_7;
	__be64 TBCAST_novlan;
	__be64 TBCAST_loopbk;
	/* Transmit multicast frames with a good CRC */
	__be64 TMCAST_prio_0;
	__be64 TMCAST_prio_1;
	__be64 TMCAST_prio_2;
	__be64 TMCAST_prio_3;
	__be64 TMCAST_prio_4;
	__be64 TMCAST_prio_5;
	__be64 TMCAST_prio_6;
	__be64 TMCAST_prio_7;
	__be64 TMCAST_novlan;
	__be64 TMCAST_loopbk;
	/* Transmit good frames that are neither broadcast nor multicast */
	__be64 TTOTG_prio_0;
	__be64 TTOTG_prio_1;
	__be64 TTOTG_prio_2;
	__be64 TTOTG_prio_3;
	__be64 TTOTG_prio_4;
	__be64 TTOTG_prio_5;
	__be64 TTOTG_prio_6;
	__be64 TTOTG_prio_7;
	__be64 TTOTG_novlan;
	__be64 TTOTG_loopbk;

	/* total octets of transmitted frames, including framing characters */
	__be64 TTTLOCT_prio_0;
	/* total octets of transmitted frames, not including framing characters */
	__be64 TTTLOCT_NOFRM_prio_0;
	/* ifOutOctets */
	__be64 TOCT_prio_0;

	__be64 TTTLOCT_prio_1;
	__be64 TTTLOCT_NOFRM_prio_1;
	__be64 TOCT_prio_1;

	__be64 TTTLOCT_prio_2;
	__be64 TTTLOCT_NOFRM_prio_2;
	__be64 TOCT_prio_2;

	__be64 TTTLOCT_prio_3;
	__be64 TTTLOCT_NOFRM_prio_3;
	__be64 TOCT_prio_3;

	__be64 TTTLOCT_prio_4;
	__be64 TTTLOCT_NOFRM_prio_4;
	__be64 TOCT_prio_4;

	__be64 TTTLOCT_prio_5;
	__be64 TTTLOCT_NOFRM_prio_5;
	__be64 TOCT_prio_5;

	__be64 TTTLOCT_prio_6;
	__be64 TTTLOCT_NOFRM_prio_6;
	__be64 TOCT_prio_6;

	__be64 TTTLOCT_prio_7;
	__be64 TTTLOCT_NOFRM_prio_7;
	__be64 TOCT_prio_7;

	__be64 TTTLOCT_novlan;
	__be64 TTTLOCT_NOFRM_novlan;
	__be64 TOCT_novlan;

	__be64 TTTLOCT_loopbk;
	__be64 TTTLOCT_NOFRM_loopbk;
	__be64 TOCT_loopbk;

	/* Total frames transmitted with a good CRC that are not aborted */
	__be64 TTOT_prio_0;
	/* Total number of frames transmitted with 802.1Q encapsulation */
	__be64 T1Q_prio_0;
	__be64 reserved13;

	__be64 TTOT_prio_1;
	__be64 T1Q_prio_1;
	__be64 reserved14;

	__be64 TTOT_prio_2;
	__be64 T1Q_prio_2;
	__be64 reserved15;

	__be64 TTOT_prio_3;
	__be64 T1Q_prio_3;
	__be64 reserved16;

	__be64 TTOT_prio_4;
	__be64 T1Q_prio_4;
	__be64 reserved17;

	__be64 TTOT_prio_5;
	__be64 T1Q_prio_5;
	__be64 reserved18;

	__be64 TTOT_prio_6;
	__be64 T1Q_prio_6;
	__be64 reserved19;

	__be64 TTOT_prio_7;
	__be64 T1Q_prio_7;
	__be64 reserved20;

	__be64 TTOT_novlan;
	__be64 T1Q_novlan;
	__be64 reserved21;

	__be64 TTOT_loopbk;
	__be64 T1Q_loopbk;
	__be64 reserved22;

	/* The remaining counters are 32-bit */
	/* Received frames with a length greater than MTU octets and a bad CRC */
	__be32 RJBBR;
	/* Received frames with a bad CRC that are not runts, jabbers,
	   or alignment errors */
	__be32 RCRC;
	/* Received frames with SFD with a length of less than 64 octets and a
	   bad CRC */
	__be32 RRUNT;
	/* Received frames with a length less than 64 octets and a good CRC */
	__be32 RSHORT;
	/* Total Number of Received Packets Dropped */
	__be32 RDROP;
	/* Drop due to overflow  */
	__be32 RdropOvflw;
	/* Drop due to overflow */
	__be32 RdropLength;
	/* Total of good frames. Does not include frames received with
	   frame-too-long, FCS, or length errors */
	__be32 RTOTFRMS;
	/* Total dropped Xmited packets */
	__be32 TDROP;
};
568 | |||
569 | |||
570 | #endif | ||
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c new file mode 100644 index 000000000000..a0545209e507 --- /dev/null +++ b/drivers/net/mlx4/en_resources.c | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/vmalloc.h> | ||
35 | #include <linux/mlx4/qp.h> | ||
36 | |||
37 | #include "mlx4_en.h" | ||
38 | |||
39 | void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, | ||
40 | int is_tx, int rss, int qpn, int cqn, int srqn, | ||
41 | struct mlx4_qp_context *context) | ||
42 | { | ||
43 | struct mlx4_en_dev *mdev = priv->mdev; | ||
44 | |||
45 | memset(context, 0, sizeof *context); | ||
46 | context->flags = cpu_to_be32(7 << 16 | rss << 13); | ||
47 | context->pd = cpu_to_be32(mdev->priv_pdn); | ||
48 | context->mtu_msgmax = 0xff; | ||
49 | context->rq_size_stride = 0; | ||
50 | if (is_tx) | ||
51 | context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); | ||
52 | else | ||
53 | context->sq_size_stride = 1; | ||
54 | context->usr_page = cpu_to_be32(mdev->priv_uar.index); | ||
55 | context->local_qpn = cpu_to_be32(qpn); | ||
56 | context->pri_path.ackto = 1 & 0x07; | ||
57 | context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; | ||
58 | context->pri_path.counter_index = 0xff; | ||
59 | context->cqn_send = cpu_to_be32(cqn); | ||
60 | context->cqn_recv = cpu_to_be32(cqn); | ||
61 | context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); | ||
62 | if (!rss) | ||
63 | context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn); | ||
64 | } | ||
65 | |||
66 | |||
67 | int mlx4_en_map_buffer(struct mlx4_buf *buf) | ||
68 | { | ||
69 | struct page **pages; | ||
70 | int i; | ||
71 | |||
72 | if (BITS_PER_LONG == 64 || buf->nbufs == 1) | ||
73 | return 0; | ||
74 | |||
75 | pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); | ||
76 | if (!pages) | ||
77 | return -ENOMEM; | ||
78 | |||
79 | for (i = 0; i < buf->nbufs; ++i) | ||
80 | pages[i] = virt_to_page(buf->page_list[i].buf); | ||
81 | |||
82 | buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | ||
83 | kfree(pages); | ||
84 | if (!buf->direct.buf) | ||
85 | return -ENOMEM; | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | void mlx4_en_unmap_buffer(struct mlx4_buf *buf) | ||
91 | { | ||
92 | if (BITS_PER_LONG == 64 || buf->nbufs == 1) | ||
93 | return; | ||
94 | |||
95 | vunmap(buf->direct.buf); | ||
96 | } | ||
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c new file mode 100644 index 000000000000..6232227f56c3 --- /dev/null +++ b/drivers/net/mlx4/en_rx.c | |||
@@ -0,0 +1,1080 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/mlx4/cq.h> | ||
35 | #include <linux/mlx4/qp.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/if_ether.h> | ||
38 | #include <linux/if_vlan.h> | ||
39 | #include <linux/vmalloc.h> | ||
40 | |||
41 | #include "mlx4_en.h" | ||
42 | |||
43 | static void *get_wqe(struct mlx4_en_rx_ring *ring, int n) | ||
44 | { | ||
45 | int offset = n << ring->srq.wqe_shift; | ||
46 | return ring->buf + offset; | ||
47 | } | ||
48 | |||
49 | static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type) | ||
50 | { | ||
51 | return; | ||
52 | } | ||
53 | |||
54 | static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr, | ||
55 | void **ip_hdr, void **tcpudp_hdr, | ||
56 | u64 *hdr_flags, void *priv) | ||
57 | { | ||
58 | *mac_hdr = page_address(frags->page) + frags->page_offset; | ||
59 | *ip_hdr = *mac_hdr + ETH_HLEN; | ||
60 | *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr)); | ||
61 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
/*
 * Attach a page fragment to data segment @i of Rx descriptor @rx_desc and
 * record it in @skb_frags[i].  Fragments are carved sequentially out of
 * high-order pages tracked per fragment slot in @ring_alloc.
 *
 * Returns 0 on success or -ENOMEM if a new page could not be allocated
 * (allocator state is left untouched in that case).
 */
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_desc *rx_desc,
			      struct skb_frag_struct *skb_frags,
			      struct mlx4_en_rx_alloc *ring_alloc,
			      int i)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
	struct page *page;
	dma_addr_t dma;

	if (page_alloc->offset == frag_info->last_offset) {
		/* Current page is exhausted: allocate the replacement first
		 * so failure leaves the allocator intact, then hand the old
		 * page's (last) reference over to the skb fragment. */
		page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
		if (!page)
			return -ENOMEM;

		skb_frags[i].page = page_alloc->page;
		skb_frags[i].page_offset = page_alloc->offset;
		page_alloc->page = page;
		page_alloc->offset = frag_info->frag_align;
	} else {
		/* Page still has room: take an extra reference for the skb
		 * fragment and advance the carve offset. */
		page = page_alloc->page;
		get_page(page);

		skb_frags[i].page = page;
		skb_frags[i].page_offset = page_alloc->offset;
		page_alloc->offset += frag_info->frag_stride;
	}
	/* Map the fragment for device writes and publish the bus address in
	 * the descriptor.  NOTE(review): the pci_map_single() result is not
	 * checked for mapping failure -- confirm this is acceptable on the
	 * supported platforms. */
	dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
			     skb_frags[i].page_offset, frag_info->frag_size,
			     PCI_DMA_FROMDEVICE);
	rx_desc->data[i].addr = cpu_to_be64(dma);
	return 0;
}
102 | |||
/*
 * Set up the per-fragment page allocators of an Rx ring: one high-order
 * page per fragment slot, with the initial carve offset taken from the
 * fragment's alignment requirement.
 *
 * Returns 0 on success or -ENOMEM; pages allocated before the failure are
 * released again so the ring is left clean.
 */
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		/* NOTE(review): GFP_ATOMIC in what looks like setup context;
		 * GFP_KERNEL would likely suffice -- confirm callers. */
		page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					       MLX4_EN_ALLOC_ORDER);
		if (!page_alloc->page)
			goto out;

		page_alloc->offset = priv->frag_info[i].frag_align;
		mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
			 i, page_alloc->page);
	}
	return 0;

out:
	/* Unwind: free the pages of the slots initialized so far */
	while (i--) {
		page_alloc = &ring->page_alloc[i];
		put_page(page_alloc->page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}
130 | |||
131 | static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, | ||
132 | struct mlx4_en_rx_ring *ring) | ||
133 | { | ||
134 | struct mlx4_en_rx_alloc *page_alloc; | ||
135 | int i; | ||
136 | |||
137 | for (i = 0; i < priv->num_frags; i++) { | ||
138 | page_alloc = &ring->page_alloc[i]; | ||
139 | mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", | ||
140 | i, page_count(page_alloc->page)); | ||
141 | |||
142 | put_page(page_alloc->page); | ||
143 | page_alloc->page = NULL; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | |||
/*
 * One-time initialization of Rx descriptor @index: link it to the next
 * descriptor (SRQ free list), program the per-fragment sizes and memory
 * key, and pad any unused data segments.  Buffer addresses are filled in
 * later by mlx4_en_alloc_frag().
 */
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	struct skb_frag_struct *skb_frags = ring->rx_info +
					    (index << priv->log_rx_info);
	int possible_frags;
	int i;

	/* Pre-link descriptor */
	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		skb_frags[i].size = priv->frag_info[i].frag_size;
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key so the HW skips them */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
178 | |||
179 | |||
/*
 * Populate all data fragments of Rx descriptor @index with freshly
 * allocated pages.  On failure the fragments attached so far are released
 * and -ENOMEM is returned, leaving the descriptor unposted.
 */
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct skb_frag_struct *skb_frags = ring->rx_info +
					    (index << priv->log_rx_info);
	int i;

	for (i = 0; i < priv->num_frags; i++)
		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
			goto err;

	return 0;

err:
	/* Unwind the fragments already attached to this descriptor */
	while (i--)
		put_page(skb_frags[i].page);
	return -ENOMEM;
}
199 | |||
200 | static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) | ||
201 | { | ||
202 | *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); | ||
203 | } | ||
204 | |||
/*
 * Initial fill of all Rx rings, round-robin across rings one descriptor at
 * a time so memory pressure degrades all rings evenly.  A partial fill is
 * tolerated (with a warning) as long as every ring reached at least
 * MLX4_EN_MIN_RX_SIZE descriptors; below that the fill is a hard failure.
 *
 * Returns 0 on (possibly partial) success, -ENOMEM otherwise.
 */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					mlx4_err(mdev, "Failed to allocate "
						       "enough rx buffers\n");
					return -ENOMEM;
				} else {
					/* Enough buffers for a usable ring:
					 * stop filling and report success */
					if (netif_msg_rx_err(priv))
						mlx4_warn(mdev,
							  "Only %d buffers allocated\n",
							  ring->actual_size);
					goto out;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
out:
	return 0;
}
237 | |||
/*
 * Refill an Rx ring up to its established working size (actual_size)
 * outstanding descriptors.  Stops early (with a stats bump) on allocation
 * failure; marks the ring full when producer catches up a whole ring size
 * ahead of the consumer.
 *
 * Returns the number of descriptors successfully posted.
 */
static int mlx4_en_fill_rx_buf(struct net_device *dev,
			       struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int num = 0;
	int err;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
					      ring->size_mask);
		if (err) {
			if (netif_msg_rx_err(priv))
				mlx4_warn(priv->mdev,
					  "Failed preparing rx descriptor\n");
			priv->port_stats.rx_alloc_failed++;
			break;
		}
		++num;
		++ring->prod;
	}
	if ((u32) (ring->prod - ring->cons) == ring->size)
		ring->full = 1;

	return num;
}
263 | |||
/*
 * Release every outstanding (posted but unconsumed) Rx buffer of a ring:
 * for each descriptor between cons and prod, unmap each fragment's DMA
 * mapping and drop the page reference, advancing the consumer as we go.
 */
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	dma_addr_t dma;
	int index;
	int nr;

	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
			ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		rx_desc = ring->buf + (index << ring->log_stride);
		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);

		for (nr = 0; nr < priv->num_frags; nr++) {
			mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
			/* DMA address lives big-endian in the descriptor */
			dma = be64_to_cpu(rx_desc->data[nr].addr);

			mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
					 PCI_DMA_FROMDEVICE);
			put_page(skb_frags[nr].page);
		}
		++ring->cons;
	}
}
297 | |||
298 | |||
/*
 * Delayed-work handler that retries Rx buffer allocation for rings that
 * previously ran dry (ring->need_refill set).  If any ring still cannot be
 * refilled, the work is re-queued one HZ later.  Runs under state_lock and
 * bails out if the device or port has gone down meanwhile.
 */
void mlx4_en_rx_refill(struct work_struct *work)
{
	struct delayed_work *delay = container_of(work, struct delayed_work, work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 refill_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_rx_ring *ring;
	int need_refill = 0;
	int i;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up || !priv->port_up)
		goto out;

	/* We only get here if there are no receive buffers, so we can't race
	 * with Rx interrupts while filling buffers */
	for (i = 0; i < priv->rx_ring_num; i++) {
		ring = &priv->rx_ring[i];
		if (ring->need_refill) {
			if (mlx4_en_fill_rx_buf(dev, ring)) {
				/* Posted at least one buffer: ring is live
				 * again, tell HW about the new producer */
				ring->need_refill = 0;
				mlx4_en_update_rx_prod_db(ring);
			} else
				need_refill = 1;
		}
	}
	if (need_refill)
		queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);

out:
	mutex_unlock(&mdev->state_lock);
}
332 | |||
333 | |||
334 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | ||
335 | struct mlx4_en_rx_ring *ring, u32 size, u16 stride) | ||
336 | { | ||
337 | struct mlx4_en_dev *mdev = priv->mdev; | ||
338 | int err; | ||
339 | int tmp; | ||
340 | |||
341 | /* Sanity check SRQ size before proceeding */ | ||
342 | if (size >= mdev->dev->caps.max_srq_wqes) | ||
343 | return -EINVAL; | ||
344 | |||
345 | ring->prod = 0; | ||
346 | ring->cons = 0; | ||
347 | ring->size = size; | ||
348 | ring->size_mask = size - 1; | ||
349 | ring->stride = stride; | ||
350 | ring->log_stride = ffs(ring->stride) - 1; | ||
351 | ring->buf_size = ring->size * ring->stride; | ||
352 | |||
353 | tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * | ||
354 | sizeof(struct skb_frag_struct)); | ||
355 | ring->rx_info = vmalloc(tmp); | ||
356 | if (!ring->rx_info) { | ||
357 | mlx4_err(mdev, "Failed allocating rx_info ring\n"); | ||
358 | return -ENOMEM; | ||
359 | } | ||
360 | mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", | ||
361 | ring->rx_info, tmp); | ||
362 | |||
363 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, | ||
364 | ring->buf_size, 2 * PAGE_SIZE); | ||
365 | if (err) | ||
366 | goto err_ring; | ||
367 | |||
368 | err = mlx4_en_map_buffer(&ring->wqres.buf); | ||
369 | if (err) { | ||
370 | mlx4_err(mdev, "Failed to map RX buffer\n"); | ||
371 | goto err_hwq; | ||
372 | } | ||
373 | ring->buf = ring->wqres.buf.direct.buf; | ||
374 | |||
375 | /* Configure lro mngr */ | ||
376 | memset(&ring->lro, 0, sizeof(struct net_lro_mgr)); | ||
377 | ring->lro.dev = priv->dev; | ||
378 | ring->lro.features = LRO_F_NAPI; | ||
379 | ring->lro.frag_align_pad = NET_IP_ALIGN; | ||
380 | ring->lro.ip_summed = CHECKSUM_UNNECESSARY; | ||
381 | ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
382 | ring->lro.max_desc = mdev->profile.num_lro; | ||
383 | ring->lro.max_aggr = MAX_SKB_FRAGS; | ||
384 | ring->lro.lro_arr = kzalloc(mdev->profile.num_lro * | ||
385 | sizeof(struct net_lro_desc), | ||
386 | GFP_KERNEL); | ||
387 | if (!ring->lro.lro_arr) { | ||
388 | mlx4_err(mdev, "Failed to allocate lro array\n"); | ||
389 | goto err_map; | ||
390 | } | ||
391 | ring->lro.get_frag_header = mlx4_en_get_frag_header; | ||
392 | |||
393 | return 0; | ||
394 | |||
395 | err_map: | ||
396 | mlx4_en_unmap_buffer(&ring->wqres.buf); | ||
397 | err_hwq: | ||
398 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); | ||
399 | err_ring: | ||
400 | vfree(ring->rx_info); | ||
401 | ring->rx_info = NULL; | ||
402 | return err; | ||
403 | } | ||
404 | |||
405 | int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) | ||
406 | { | ||
407 | struct mlx4_en_dev *mdev = priv->mdev; | ||
408 | struct mlx4_wqe_srq_next_seg *next; | ||
409 | struct mlx4_en_rx_ring *ring; | ||
410 | int i; | ||
411 | int ring_ind; | ||
412 | int err; | ||
413 | int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
414 | DS_SIZE * priv->num_frags); | ||
415 | int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE; | ||
416 | |||
417 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
418 | ring = &priv->rx_ring[ring_ind]; | ||
419 | |||
420 | ring->prod = 0; | ||
421 | ring->cons = 0; | ||
422 | ring->actual_size = 0; | ||
423 | ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; | ||
424 | |||
425 | ring->stride = stride; | ||
426 | ring->log_stride = ffs(ring->stride) - 1; | ||
427 | ring->buf_size = ring->size * ring->stride; | ||
428 | |||
429 | memset(ring->buf, 0, ring->buf_size); | ||
430 | mlx4_en_update_rx_prod_db(ring); | ||
431 | |||
432 | /* Initailize all descriptors */ | ||
433 | for (i = 0; i < ring->size; i++) | ||
434 | mlx4_en_init_rx_desc(priv, ring, i); | ||
435 | |||
436 | /* Initialize page allocators */ | ||
437 | err = mlx4_en_init_allocator(priv, ring); | ||
438 | if (err) { | ||
439 | mlx4_err(mdev, "Failed initializing ring allocator\n"); | ||
440 | goto err_allocator; | ||
441 | } | ||
442 | |||
443 | /* Fill Rx buffers */ | ||
444 | ring->full = 0; | ||
445 | } | ||
446 | if (mlx4_en_fill_rx_buffers(priv)) | ||
447 | goto err_buffers; | ||
448 | |||
449 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
450 | ring = &priv->rx_ring[ring_ind]; | ||
451 | |||
452 | mlx4_en_update_rx_prod_db(ring); | ||
453 | |||
454 | /* Configure SRQ representing the ring */ | ||
455 | ring->srq.max = ring->size; | ||
456 | ring->srq.max_gs = max_gs; | ||
457 | ring->srq.wqe_shift = ilog2(ring->stride); | ||
458 | |||
459 | for (i = 0; i < ring->srq.max; ++i) { | ||
460 | next = get_wqe(ring, i); | ||
461 | next->next_wqe_index = | ||
462 | cpu_to_be16((i + 1) & (ring->srq.max - 1)); | ||
463 | } | ||
464 | |||
465 | err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt, | ||
466 | ring->wqres.db.dma, &ring->srq); | ||
467 | if (err){ | ||
468 | mlx4_err(mdev, "Failed to allocate srq\n"); | ||
469 | goto err_srq; | ||
470 | } | ||
471 | ring->srq.event = mlx4_en_srq_event; | ||
472 | } | ||
473 | |||
474 | return 0; | ||
475 | |||
476 | err_srq: | ||
477 | while (ring_ind >= 0) { | ||
478 | ring = &priv->rx_ring[ring_ind]; | ||
479 | mlx4_srq_free(mdev->dev, &ring->srq); | ||
480 | ring_ind--; | ||
481 | } | ||
482 | |||
483 | err_buffers: | ||
484 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) | ||
485 | mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]); | ||
486 | |||
487 | ring_ind = priv->rx_ring_num - 1; | ||
488 | err_allocator: | ||
489 | while (ring_ind >= 0) { | ||
490 | mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); | ||
491 | ring_ind--; | ||
492 | } | ||
493 | return err; | ||
494 | } | ||
495 | |||
/*
 * Release everything acquired by mlx4_en_create_rx_ring(): the LRO
 * descriptor array, the kernel mapping of the queue buffer, the HW queue
 * resources and the rx_info bookkeeping array, in reverse acquisition
 * order.
 */
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	kfree(ring->lro.lro_arr);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
}
507 | |||
/*
 * Undo mlx4_en_activate_rx_rings() for one ring: free its SRQ first so HW
 * stops using the buffers, then release all posted Rx buffers and tear
 * down the page allocators.
 */
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_srq_free(mdev->dev, &ring->srq);
	mlx4_en_free_rx_buf(priv, ring);
	mlx4_en_destroy_allocator(priv, ring);
}
517 | |||
518 | |||
/* Unmap a completed descriptor and free unused pages.
 *
 * Collect the fragments covering @length bytes from @skb_frags into
 * @skb_frags_rx (for handing to the stack), replacing each one in the HW
 * descriptor with a fresh allocation from @page_alloc before unmapping its
 * DMA address.  The last used fragment's size is trimmed to the actual
 * packet length.
 *
 * Returns the number of fragments collected, or 0 if a replacement
 * allocation failed -- in that case the already-collected pages are
 * dropped (their descriptor slots were replaced) and the caller must drop
 * the packet; the remaining fragments stay posted for reuse. */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct skb_frag_struct *skb_frags,
				    struct skb_frag_struct *skb_frags_rx,
				    struct mlx4_en_rx_alloc *page_alloc,
				    int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		/* frag_prefix_size is the cumulative size up to and
		 * including this fragment; stop once length is covered */
		if (length <= frag_info->frag_prefix_size)
			break;

		/* Save page reference in skb */
		skb_frags_rx[nr].page = skb_frags[nr].page;
		skb_frags_rx[nr].size = skb_frags[nr].size;
		skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
		dma = be64_to_cpu(rx_desc->data[nr].addr);

		/* Allocate a replacement page */
		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
			goto fail;

		/* Unmap buffer */
		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
				 PCI_DMA_FROMDEVICE);
	}
	/* Adjust size of last fragment to match actual length */
	skb_frags_rx[nr - 1].size = length -
		priv->frag_info[nr - 1].frag_prefix_size;
	return nr;

fail:
	/* Drop all accumulated fragments (which have already been replaced in
	 * the descriptor) of this packet; remaining fragments are reused... */
	while (nr > 0) {
		nr--;
		put_page(skb_frags_rx[nr].page);
	}
	return 0;
}
566 | |||
567 | |||
/*
 * Build an sk_buff for a completed Rx descriptor.
 *
 * Small packets (<= SMALL_PACKET_SIZE) are copied entirely into the skb's
 * linear area and the posted fragments stay in place for reuse.  Larger
 * packets get their fragments moved into the skb (with replacements posted
 * via mlx4_en_complete_rx_desc()), and only the headers are copied linear.
 *
 * Returns the skb, or NULL on allocation failure (caller drops the packet).
 */
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct skb_frag_struct *skb_frags,
				      struct mlx4_en_rx_alloc *page_alloc,
				      unsigned int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb->dev = priv->dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * synch buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
					      length, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		/* Hand the buffer back to the device for reuse */
		dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
						 length, DMA_FROM_DEVICE);
		skb->tail += length;
	} else {

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
						      skb_shinfo(skb)->frags,
						      page_alloc, length);
		/* NOTE(review): used_frags == 0 (replacement alloc failure)
		 * is not handled here -- confirm an empty-frag skb is safe
		 * for the callers / the stack. */
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}
625 | |||
626 | static void mlx4_en_copy_desc(struct mlx4_en_priv *priv, | ||
627 | struct mlx4_en_rx_ring *ring, | ||
628 | int from, int to, int num) | ||
629 | { | ||
630 | struct skb_frag_struct *skb_frags_from; | ||
631 | struct skb_frag_struct *skb_frags_to; | ||
632 | struct mlx4_en_rx_desc *rx_desc_from; | ||
633 | struct mlx4_en_rx_desc *rx_desc_to; | ||
634 | int from_index, to_index; | ||
635 | int nr, i; | ||
636 | |||
637 | for (i = 0; i < num; i++) { | ||
638 | from_index = (from + i) & ring->size_mask; | ||
639 | to_index = (to + i) & ring->size_mask; | ||
640 | skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info); | ||
641 | skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info); | ||
642 | rx_desc_from = ring->buf + (from_index << ring->log_stride); | ||
643 | rx_desc_to = ring->buf + (to_index << ring->log_stride); | ||
644 | |||
645 | for (nr = 0; nr < priv->num_frags; nr++) { | ||
646 | skb_frags_to[nr].page = skb_frags_from[nr].page; | ||
647 | skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset; | ||
648 | rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr; | ||
649 | } | ||
650 | } | ||
651 | } | ||
652 | |||
653 | |||
/*
 * NAPI Rx completion handler: consume up to @budget CQEs from @cq,
 * building skbs (or LRO aggregations) for good packets and recycling the
 * descriptors of dropped ones in place.
 *
 * Returns the number of CQEs processed (== descriptors recycled).
 */
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs: the ownership bit toggles every time
	 * the consumer index wraps the CQ, so XNOR with the wrap parity
	 * tells us whether this CQE belongs to software yet */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			mlx4_err(mdev, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (mlx4_en_can_lro(cqe->status) &&
				    dev->features & NETIF_F_LRO) {

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, lro_frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					/* Push the fragments straight into
					 * the LRO engine, skb-less */
					if (priv->vlgrp && (cqe->vlan_my_qpn &
						cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
						lro_vlan_hwaccel_receive_frags(
						       &ring->lro, lro_frags,
						       length, length,
						       priv->vlgrp,
						       be16_to_cpu(cqe->sl_vid),
						       NULL, 0);
					} else
						lro_receive_frags(&ring->lro,
								  lro_frags,
								  length,
								  length,
								  NULL, 0);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

		dev->last_rx = jiffies;

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			lro_flush_all(&ring->lro);
			goto out;
		}
	}

	/* If CQ is empty flush all LRO sessions unconditionally */
	lro_flush_all(&ring->lro);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were realocated in place */
	if (unlikely(!ring->full)) {
		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
				  ring->prod - polled, polled);
		mlx4_en_fill_rx_buf(dev, ring);
	}
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
810 | |||
811 | |||
812 | void mlx4_en_rx_irq(struct mlx4_cq *mcq) | ||
813 | { | ||
814 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); | ||
815 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | ||
816 | |||
817 | if (priv->port_up) | ||
818 | netif_rx_schedule(cq->dev, &cq->napi); | ||
819 | else | ||
820 | mlx4_en_arm_cq(priv, cq); | ||
821 | } | ||
822 | |||
823 | /* Rx CQ polling - called by NAPI */ | ||
824 | int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) | ||
825 | { | ||
826 | struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); | ||
827 | struct net_device *dev = cq->dev; | ||
828 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
829 | int done; | ||
830 | |||
831 | done = mlx4_en_process_rx_cq(dev, cq, budget); | ||
832 | |||
833 | /* If we used up all the quota - we're probably not done yet... */ | ||
834 | if (done == budget) | ||
835 | INC_PERF_COUNTER(priv->pstats.napi_quota); | ||
836 | else { | ||
837 | /* Done for now */ | ||
838 | netif_rx_complete(dev, napi); | ||
839 | mlx4_en_arm_cq(priv, cq); | ||
840 | } | ||
841 | return done; | ||
842 | } | ||
843 | |||
844 | |||
845 | /* Calculate the last offset position that accomodates a full fragment | ||
846 | * (assuming fagment size = stride-align) */ | ||
847 | static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align) | ||
848 | { | ||
849 | u16 res = MLX4_EN_ALLOC_SIZE % stride; | ||
850 | u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align; | ||
851 | |||
852 | mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " | ||
853 | "res:%d offset:%d\n", stride, align, res, offset); | ||
854 | return offset; | ||
855 | } | ||
856 | |||
857 | |||
/* Candidate sizes for the Rx scatter-list fragments; mlx4_en_calc_rx_buf()
 * walks this table in order until the effective MTU is covered.
 * (FRAG_SZ0..FRAG_SZ3 come from the driver header - not visible here.) */
static int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};
864 | |||
/*
 * Build the per-port Rx fragment layout: decide how many fragments each
 * received frame is scattered into and the size/alignment/stride of each,
 * based on the device MTU and the frag_sizes[] table.
 */
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	/* Worst-case frame: MTU + Ethernet + VLAN + LLC/SNAP headers */
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
	int buf_size = 0;
	int i = 0;

	/* Greedily cover eff_mtu using frag_sizes[] entries in order.
	 * NOTE(review): i is not bounded against the table length here -
	 * presumably the FRAG_SZ* sizes always sum past the largest
	 * supported MTU; confirm before raising MTU limits. */
	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i) {
			/* First fragment holds the headers: shift by
			 * NET_IP_ALIGN so the IP header is aligned */
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
						priv, priv->frag_info[i].frag_stride,
						priv->frag_info[i].frag_align);
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));

	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
				"stride:%d last_offset:%d\n", i,
				priv->frag_info[i].frag_size,
				priv->frag_info[i].frag_prefix_size,
				priv->frag_info[i].frag_align,
				priv->frag_info[i].frag_stride,
				priv->frag_info[i].last_offset);
	}
}
909 | |||
910 | /* RSS related functions */ | ||
911 | |||
912 | /* Calculate rss size and map each entry in rss table to rx ring */ | ||
913 | void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, | ||
914 | struct mlx4_en_rss_map *rss_map, | ||
915 | int num_entries, int num_rings) | ||
916 | { | ||
917 | int i; | ||
918 | |||
919 | rss_map->size = roundup_pow_of_two(num_entries); | ||
920 | mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n", | ||
921 | rss_map->size); | ||
922 | |||
923 | for (i = 0; i < rss_map->size; i++) { | ||
924 | rss_map->map[i] = i % num_rings; | ||
925 | mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]); | ||
926 | } | ||
927 | } | ||
928 | |||
929 | static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) | ||
930 | { | ||
931 | return; | ||
932 | } | ||
933 | |||
934 | |||
935 | static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, | ||
936 | int qpn, int srqn, int cqn, | ||
937 | enum mlx4_qp_state *state, | ||
938 | struct mlx4_qp *qp) | ||
939 | { | ||
940 | struct mlx4_en_dev *mdev = priv->mdev; | ||
941 | struct mlx4_qp_context *context; | ||
942 | int err = 0; | ||
943 | |||
944 | context = kmalloc(sizeof *context , GFP_KERNEL); | ||
945 | if (!context) { | ||
946 | mlx4_err(mdev, "Failed to allocate qp context\n"); | ||
947 | return -ENOMEM; | ||
948 | } | ||
949 | |||
950 | err = mlx4_qp_alloc(mdev->dev, qpn, qp); | ||
951 | if (err) { | ||
952 | mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn); | ||
953 | goto out; | ||
954 | return err; | ||
955 | } | ||
956 | qp->event = mlx4_en_sqp_event; | ||
957 | |||
958 | memset(context, 0, sizeof *context); | ||
959 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context); | ||
960 | |||
961 | err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state); | ||
962 | if (err) { | ||
963 | mlx4_qp_remove(mdev->dev, qp); | ||
964 | mlx4_qp_free(mdev->dev, qp); | ||
965 | } | ||
966 | out: | ||
967 | kfree(context); | ||
968 | return err; | ||
969 | } | ||
970 | |||
/* Allocate rx qp's and configure them according to rss map.
 * Builds one QP per RSS-table entry plus a single indirection QP that
 * spreads traffic across them.  On any failure the error path unwinds
 * exactly what was constructed so far. */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_en_rss_context *rss_context;
	void *ptr;
	int rss_xor = mdev->profile.rss_xor;
	u8 rss_mask = mdev->profile.rss_mask;
	int i, srqn, qpn, cqn;
	int err = 0;
	int good_qps = 0;	/* how many QPs came up, for the unwind path */

	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
	/* Reserve a contiguous, size-aligned QPN range - one per entry */
	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
				    rss_map->size, &rss_map->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
			 rss_map->size, priv->port);
		return err;
	}

	for (i = 0; i < rss_map->size; i++) {
		cqn = priv->rx_ring[rss_map->map[i]].cqn;
		srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed to reserve range for RSS "
			       "indirection qp\n");
		goto rss_err;
	}
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
		goto reserve_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, 0, &context);

	/* The RSS context is overlaid at a fixed offset inside the QP
	 * context.  NOTE(review): 0x3c is a magic hardware-layout offset -
	 * confirm against the device spec if the layout ever changes. */
	ptr = ((void *) &context) + 0x3c;
	rss_context = (struct mlx4_en_rss_context *) ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	rss_context->hash_fn = rss_xor & 0x3;
	rss_context->flags = rss_mask << 2;

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

	/* Error unwinding - strictly the reverse of construction order */
indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
reserve_err:
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
	return err;
}
1055 | |||
/* Tear down everything built by mlx4_en_config_rss_steer(): first the
 * indirection QP, then every per-entry RSS QP, then the QPN ranges. */
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	/* Each QP is moved back to RESET before being removed and freed */
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);

	for (i = 0; i < rss_map->size; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
}
1076 | |||
1077 | |||
1078 | |||
1079 | |||
1080 | |||
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c new file mode 100644 index 000000000000..8592f8fb8475 --- /dev/null +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -0,0 +1,820 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <asm/page.h> | ||
35 | #include <linux/mlx4/cq.h> | ||
36 | #include <linux/mlx4/qp.h> | ||
37 | #include <linux/skbuff.h> | ||
38 | #include <linux/if_vlan.h> | ||
39 | #include <linux/vmalloc.h> | ||
40 | |||
41 | #include "mlx4_en.h" | ||
42 | |||
enum {
	/* Upper bound on bytes copied inline into a Tx descriptor;
	 * inline_thold is clamped to this at ring creation time. */
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};
46 | |||
47 | static int inline_thold __read_mostly = MAX_INLINE; | ||
48 | |||
49 | module_param_named(inline_thold, inline_thold, int, 0444); | ||
50 | MODULE_PARM_DESC(inline_thold, "treshold for using inline data"); | ||
51 | |||
/*
 * Allocate all software and hardware resources for one Tx ring:
 * per-descriptor bookkeeping (tx_info), a bounce buffer for descriptors
 * that wrap the ring end, the hardware work queue, and its QP.
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is released via the goto chain at the bottom.
 */
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	/* size is used with size_mask for wrap-around indexing, so it is
	 * assumed to be a power of two.  NOTE(review): not validated here. */
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	/* The module parameter may exceed the hard limit - clamp it */
	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info) {
		mlx4_err(mdev, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}
	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
		 ring->tx_info, tmp);

	/* Staging area for descriptors that would wrap past the ring end */
	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		mlx4_err(mdev, "Failed allocating bounce buffer\n");
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		mlx4_err(mdev, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		mlx4_err(mdev, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
	if (err) {
		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}

	return 0;

	/* Error unwinding - reverse order of allocation */
err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}
132 | |||
/* Release every resource acquired by mlx4_en_create_tx_ring(), in the
 * reverse order of allocation. */
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;	/* guard against double free */
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}
149 | |||
150 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | ||
151 | struct mlx4_en_tx_ring *ring, | ||
152 | int cq, int srqn) | ||
153 | { | ||
154 | struct mlx4_en_dev *mdev = priv->mdev; | ||
155 | int err; | ||
156 | |||
157 | ring->cqn = cq; | ||
158 | ring->prod = 0; | ||
159 | ring->cons = 0xffffffff; | ||
160 | ring->last_nr_txbb = 1; | ||
161 | ring->poll_cnt = 0; | ||
162 | ring->blocked = 0; | ||
163 | memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); | ||
164 | memset(ring->buf, 0, ring->buf_size); | ||
165 | |||
166 | ring->qp_state = MLX4_QP_STATE_RST; | ||
167 | ring->doorbell_qpn = swab32(ring->qp.qpn << 8); | ||
168 | |||
169 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, | ||
170 | ring->cqn, srqn, &ring->context); | ||
171 | |||
172 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, | ||
173 | &ring->qp, &ring->qp_state); | ||
174 | |||
175 | return err; | ||
176 | } | ||
177 | |||
/* Move the Tx ring's QP back to the RESET state, stopping transmission. */
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
186 | |||
187 | |||
/*
 * Unmap the DMA buffers and free the skb of one completed Tx descriptor,
 * then stamp the freed TXBBs with the given ownership bit so ownership
 * tracking stays consistent.  Returns the number of TXBBs the descriptor
 * occupied (tx_info->nr_txbb).
 */
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (tx_info->linear) {
			/* First segment is the skb linear data */
			pci_unmap_single(mdev->pdev,
				(dma_addr_t) be64_to_cpu(data->addr),
				 be32_to_cpu(data->byte_count),
				 PCI_DMA_TODEVICE);
			++data;
		}

		for (i = 0; i < frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			pci_unmap_page(mdev->pdev,
				(dma_addr_t) be64_to_cpu(data[i].addr),
				frag->size, PCI_DMA_TODEVICE);
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		/* Descriptor (or its data segments) wraps past the ring end */
		if ((void *) data >= end) {
			data = (struct mlx4_wqe_data_seg *)
					(ring->buf + ((void *) data - end));
		}

		if (tx_info->linear) {
			pci_unmap_single(mdev->pdev,
				(dma_addr_t) be64_to_cpu(data->addr),
				 be32_to_cpu(data->byte_count),
				 PCI_DMA_TODEVICE);
			++data;
		}

		for (i = 0; i < frags; i++) {
			/* Check for wraparound before unmapping */
			if ((void *) data >= end)
				data = (struct mlx4_wqe_data_seg *) ring->buf;
			frag = &skb_shinfo(skb)->frags[i];
			pci_unmap_page(mdev->pdev,
				(dma_addr_t) be64_to_cpu(data->addr),
				 frag->size, PCI_DMA_TODEVICE);
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				/* Ownership bit flips when stamping wraps
				 * back to the ring start */
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}
263 | |||
264 | |||
265 | int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) | ||
266 | { | ||
267 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
268 | int cnt = 0; | ||
269 | |||
270 | /* Skip last polled descriptor */ | ||
271 | ring->cons += ring->last_nr_txbb; | ||
272 | mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", | ||
273 | ring->cons, ring->prod); | ||
274 | |||
275 | if ((u32) (ring->prod - ring->cons) > ring->size) { | ||
276 | if (netif_msg_tx_err(priv)) | ||
277 | mlx4_warn(priv->mdev, "Tx consumer passed producer!\n"); | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | while (ring->cons != ring->prod) { | ||
282 | ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, | ||
283 | ring->cons & ring->size_mask, | ||
284 | !!(ring->cons & ring->size)); | ||
285 | ring->cons += ring->last_nr_txbb; | ||
286 | cnt++; | ||
287 | } | ||
288 | |||
289 | if (cnt) | ||
290 | mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); | ||
291 | |||
292 | return cnt; | ||
293 | } | ||
294 | |||
295 | void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num) | ||
296 | { | ||
297 | int block = 8 / ring_num; | ||
298 | int extra = 8 - (block * ring_num); | ||
299 | int num = 0; | ||
300 | u16 ring = 1; | ||
301 | int prio; | ||
302 | |||
303 | if (ring_num == 1) { | ||
304 | for (prio = 0; prio < 8; prio++) | ||
305 | prio_map[prio] = 0; | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | for (prio = 0; prio < 8; prio++) { | ||
310 | if (extra && (num == block + 1)) { | ||
311 | ring++; | ||
312 | num = 0; | ||
313 | extra--; | ||
314 | } else if (!extra && (num == block)) { | ||
315 | ring++; | ||
316 | num = 0; | ||
317 | } | ||
318 | prio_map[prio] = ring; | ||
319 | mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring); | ||
320 | num++; | ||
321 | } | ||
322 | } | ||
323 | |||
/*
 * Reap Tx completions: free completed descriptors up to the index the
 * hardware last reported, advance the CQ consumer index, and wake the
 * netdev queue if this ring had stopped it.  Most callers hold
 * ring->comp_lock (see mlx4_en_tx_irq / mlx4_en_poll_tx_cq).
 * NOTE(review): mlx4_en_xmit_poll() calls this without that lock - verify.
 */
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe = cq->buf;
	u16 index;
	u16 new_index;
	u32 txbbs_skipped = 0;
	u32 cq_last_sav;

	/* index always points to the first TXBB of the last polled descriptor */
	index = ring->cons & ring->size_mask;
	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	if (index == new_index)
		return;

	if (!priv->port_up)
		return;

	/*
	 * We use a two-stage loop:
	 * - the first samples the HW-updated CQE
	 * - the second frees TXBBs until the last sample
	 * This lets us amortize CQE cache misses, while still polling the CQ
	 * until is quiescent.
	 */
	cq_last_sav = mcq->cons_index;
	do {
		do {
			/* Skip over last polled CQE */
			index = (index + ring->last_nr_txbb) & ring->size_mask;
			txbbs_skipped += ring->last_nr_txbb;

			/* Poll next CQE */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
						priv, ring, index,
						!!((ring->cons + txbbs_skipped) &
						   ring->size));
			++mcq->cons_index;

		} while (index != new_index);

		/* Re-sample: HW may have reported more completions meanwhile */
		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	} while (index != new_index);
	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
			 (u32) (mcq->cons_index - cq_last_sav));

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if (((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {

			/* TODO: support multiqueue netdevs. Currently, we block
			 * when *any* ring is full. Note that:
			 * - 2 Tx rings can unblock at the same time and call
			 *   netif_wake_queue(), which is OK since this
			 *   operation is idempotent.
			 * - We might wake the queue just after another ring
			 *   stopped it. This is no big deal because the next
			 *   transmission on that ring would stop the queue.
			 */
			ring->blocked = 0;
			netif_wake_queue(dev);
			priv->port_stats.wake_queue++;
		}
	}
}
400 | |||
/* Tx completion interrupt handler: processes the CQ directly in IRQ
 * context under the ring's completion lock (Tx does not use NAPI). */
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	spin_lock_irq(&ring->comp_lock);
	cq->armed = 0;
	mlx4_en_process_tx_cq(cq->dev, cq);
	if (ring->blocked)
		/* Queue still stopped - re-arm so the unblocking completion
		 * raises another interrupt */
		mlx4_en_arm_cq(priv, cq);
	else
		mod_timer(&cq->timer, jiffies + 1);
	spin_unlock_irq(&ring->comp_lock);
}
416 | |||
417 | |||
/* Timer-driven Tx completion polling (cq->timer callback); guarantees
 * completions are eventually reaped even when the interface goes idle. */
void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	/* Lock order: netif tx lock, then the ring's completion lock */
	netif_tx_lock(priv->dev);
	spin_lock_irq(&ring->comp_lock);
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
	netif_tx_unlock(priv->dev);
}
441 | |||
/*
 * Copy a descriptor that was assembled in the bounce buffer (because it
 * would wrap past the end of the ring) into its real location, in two
 * chunks: the tail at the ring start first, then the head at `index`.
 * Both copies run backwards with a wmb() at each TXBB boundary -
 * presumably so the hardware never observes a partially written TXBB;
 * confirm against the device programming model before changing.
 * Returns a pointer to the real descriptor location in the ring.
 */
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
					   struct mlx4_en_tx_ring *ring,
					   u32 index,
					   unsigned int desc_size)
{
	/* Bytes of the descriptor that fit before the ring end */
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}
469 | |||
/* Opportunistic completion reaping from the transmit path: ensure a
 * fallback timer is pending, and poll the CQ directly once every
 * MLX4_EN_TX_POLL_MODER posted packets.
 * NOTE(review): mlx4_en_process_tx_cq() is called here without
 * ring->comp_lock, unlike the other call sites - confirm this is safe. */
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		mlx4_en_process_tx_cq(priv->dev, cq);
}
484 | |||
485 | static void *get_frag_ptr(struct sk_buff *skb) | ||
486 | { | ||
487 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | ||
488 | struct page *page = frag->page; | ||
489 | void *ptr; | ||
490 | |||
491 | ptr = page_address(page); | ||
492 | if (unlikely(!ptr)) | ||
493 | return NULL; | ||
494 | |||
495 | return ptr + frag->page_offset; | ||
496 | } | ||
497 | |||
498 | static int is_inline(struct sk_buff *skb, void **pfrag) | ||
499 | { | ||
500 | void *ptr; | ||
501 | |||
502 | if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) { | ||
503 | if (skb_shinfo(skb)->nr_frags == 1) { | ||
504 | ptr = get_frag_ptr(skb); | ||
505 | if (unlikely(!ptr)) | ||
506 | return 0; | ||
507 | |||
508 | if (pfrag) | ||
509 | *pfrag = ptr; | ||
510 | |||
511 | return 1; | ||
512 | } else if (unlikely(skb_shinfo(skb)->nr_frags)) | ||
513 | return 0; | ||
514 | else | ||
515 | return 1; | ||
516 | } | ||
517 | |||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static int inline_size(struct sk_buff *skb) | ||
522 | { | ||
523 | if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) | ||
524 | <= MLX4_INLINE_ALIGN) | ||
525 | return ALIGN(skb->len + CTRL_SIZE + | ||
526 | sizeof(struct mlx4_wqe_inline_seg), 16); | ||
527 | else | ||
528 | return ALIGN(skb->len + CTRL_SIZE + 2 * | ||
529 | sizeof(struct mlx4_wqe_inline_seg), 16); | ||
530 | } | ||
531 | |||
/*
 * Compute the Tx descriptor size in bytes needed for this skb, and return
 * the LSO header length through *lso_header_size (0 for non-GSO packets).
 * Returns 0 for unsupported packets - in that case the skb has already
 * been freed here, so the caller must not touch it again.
 */
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/* ctrl segment + one data segment per frag + padded LSO header */
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				/* Headers split across pages - unsupported */
				if (netif_msg_tx_err(priv))
					mlx4_warn(mdev, "Non-linear headers\n");
				dev_kfree_skb_any(skb);
				return 0;
			}
		}
		if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
			if (netif_msg_tx_err(priv))
				mlx4_warn(mdev, "LSO header size too big\n");
			dev_kfree_skb_any(skb);
			return 0;
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}
571 | |||
/* Copy the whole packet directly into the descriptor (inline send).
 *
 * The packet data -- the linear part plus the single page fragment pointed
 * to by @fragptr, if the skb has one -- is placed immediately after one or
 * two inline segment headers.  When the data does not fit in the space left
 * in the first MLX4_INLINE_ALIGN chunk (spc) it is split into two inline
 * segments.  Bit 31 of byte_count marks a segment as inline.  The second
 * segment's byte_count is written only after a wmb() so the HW never sees
 * a valid count before the data bytes are in place.
 * Also fills the ctrl segment's vlan/fence fields for the inline case. */
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	/* Bytes of data that fit after the first inline header */
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		/* Everything fits in a single inline segment */
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_shinfo(skb)->frags[0].size);

	} else {
		/* Two inline segments: the first holds exactly spc bytes */
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			/* Linear data fits entirely in the first segment;
			 * the fragment is split across both segments */
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
				       fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			/* The linear data itself is split across segments */
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
							 skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
				       fragptr, skb_shinfo(skb)->frags[0].size);
		}

		/* Data must be globally visible before the second segment's
		 * byte_count makes it valid to the HW */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
613 | |||
614 | static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb, | ||
615 | u16 *vlan_tag) | ||
616 | { | ||
617 | int tx_ind; | ||
618 | |||
619 | /* Obtain VLAN information if present */ | ||
620 | if (priv->vlgrp && vlan_tx_tag_present(skb)) { | ||
621 | *vlan_tag = vlan_tx_tag_get(skb); | ||
622 | /* Set the Tx ring to use according to vlan priority */ | ||
623 | tx_ind = priv->tx_prio_map[*vlan_tag >> 13]; | ||
624 | } else { | ||
625 | *vlan_tag = 0; | ||
626 | tx_ind = 0; | ||
627 | } | ||
628 | return tx_ind; | ||
629 | } | ||
630 | |||
631 | int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | ||
632 | { | ||
633 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
634 | struct mlx4_en_dev *mdev = priv->mdev; | ||
635 | struct mlx4_en_tx_ring *ring; | ||
636 | struct mlx4_en_cq *cq; | ||
637 | struct mlx4_en_tx_desc *tx_desc; | ||
638 | struct mlx4_wqe_data_seg *data; | ||
639 | struct skb_frag_struct *frag; | ||
640 | struct mlx4_en_tx_info *tx_info; | ||
641 | int tx_ind = 0; | ||
642 | int nr_txbb; | ||
643 | int desc_size; | ||
644 | int real_size; | ||
645 | dma_addr_t dma; | ||
646 | u32 index; | ||
647 | __be32 op_own; | ||
648 | u16 vlan_tag; | ||
649 | int i; | ||
650 | int lso_header_size; | ||
651 | void *fragptr; | ||
652 | |||
653 | if (unlikely(!skb->len)) { | ||
654 | dev_kfree_skb_any(skb); | ||
655 | return NETDEV_TX_OK; | ||
656 | } | ||
657 | real_size = get_real_size(skb, dev, &lso_header_size); | ||
658 | if (unlikely(!real_size)) | ||
659 | return NETDEV_TX_OK; | ||
660 | |||
661 | /* Allign descriptor to TXBB size */ | ||
662 | desc_size = ALIGN(real_size, TXBB_SIZE); | ||
663 | nr_txbb = desc_size / TXBB_SIZE; | ||
664 | if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { | ||
665 | if (netif_msg_tx_err(priv)) | ||
666 | mlx4_warn(mdev, "Oversized header or SG list\n"); | ||
667 | dev_kfree_skb_any(skb); | ||
668 | return NETDEV_TX_OK; | ||
669 | } | ||
670 | |||
671 | tx_ind = get_vlan_info(priv, skb, &vlan_tag); | ||
672 | ring = &priv->tx_ring[tx_ind]; | ||
673 | |||
674 | /* Check available TXBBs And 2K spare for prefetch */ | ||
675 | if (unlikely(((int)(ring->prod - ring->cons)) > | ||
676 | ring->size - HEADROOM - MAX_DESC_TXBBS)) { | ||
677 | /* every full Tx ring stops queue. | ||
678 | * TODO: implement multi-queue support (per-queue stop) */ | ||
679 | netif_stop_queue(dev); | ||
680 | ring->blocked = 1; | ||
681 | priv->port_stats.queue_stopped++; | ||
682 | |||
683 | /* Use interrupts to find out when queue opened */ | ||
684 | cq = &priv->tx_cq[tx_ind]; | ||
685 | mlx4_en_arm_cq(priv, cq); | ||
686 | return NETDEV_TX_BUSY; | ||
687 | } | ||
688 | |||
689 | /* Now that we know what Tx ring to use */ | ||
690 | if (unlikely(!priv->port_up)) { | ||
691 | if (netif_msg_tx_err(priv)) | ||
692 | mlx4_warn(mdev, "xmit: port down!\n"); | ||
693 | dev_kfree_skb_any(skb); | ||
694 | return NETDEV_TX_OK; | ||
695 | } | ||
696 | |||
697 | /* Track current inflight packets for performance analysis */ | ||
698 | AVG_PERF_COUNTER(priv->pstats.inflight_avg, | ||
699 | (u32) (ring->prod - ring->cons - 1)); | ||
700 | |||
701 | /* Packet is good - grab an index and transmit it */ | ||
702 | index = ring->prod & ring->size_mask; | ||
703 | |||
704 | /* See if we have enough space for whole descriptor TXBB for setting | ||
705 | * SW ownership on next descriptor; if not, use a bounce buffer. */ | ||
706 | if (likely(index + nr_txbb <= ring->size)) | ||
707 | tx_desc = ring->buf + index * TXBB_SIZE; | ||
708 | else | ||
709 | tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; | ||
710 | |||
711 | /* Save skb in tx_info ring */ | ||
712 | tx_info = &ring->tx_info[index]; | ||
713 | tx_info->skb = skb; | ||
714 | tx_info->nr_txbb = nr_txbb; | ||
715 | |||
716 | /* Prepare ctrl segement apart opcode+ownership, which depends on | ||
717 | * whether LSO is used */ | ||
718 | tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); | ||
719 | tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag; | ||
720 | tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; | ||
721 | tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | | ||
722 | MLX4_WQE_CTRL_SOLICITED); | ||
723 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | ||
724 | tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | | ||
725 | MLX4_WQE_CTRL_TCP_UDP_CSUM); | ||
726 | priv->port_stats.tx_chksum_offload++; | ||
727 | } | ||
728 | |||
729 | /* Handle LSO (TSO) packets */ | ||
730 | if (lso_header_size) { | ||
731 | /* Mark opcode as LSO */ | ||
732 | op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | | ||
733 | ((ring->prod & ring->size) ? | ||
734 | cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); | ||
735 | |||
736 | /* Fill in the LSO prefix */ | ||
737 | tx_desc->lso.mss_hdr_size = cpu_to_be32( | ||
738 | skb_shinfo(skb)->gso_size << 16 | lso_header_size); | ||
739 | |||
740 | /* Copy headers; | ||
741 | * note that we already verified that it is linear */ | ||
742 | memcpy(tx_desc->lso.header, skb->data, lso_header_size); | ||
743 | data = ((void *) &tx_desc->lso + | ||
744 | ALIGN(lso_header_size + 4, DS_SIZE)); | ||
745 | |||
746 | priv->port_stats.tso_packets++; | ||
747 | i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + | ||
748 | !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); | ||
749 | ring->bytes += skb->len + (i - 1) * lso_header_size; | ||
750 | ring->packets += i; | ||
751 | } else { | ||
752 | /* Normal (Non LSO) packet */ | ||
753 | op_own = cpu_to_be32(MLX4_OPCODE_SEND) | | ||
754 | ((ring->prod & ring->size) ? | ||
755 | cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); | ||
756 | data = &tx_desc->data; | ||
757 | ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); | ||
758 | ring->packets++; | ||
759 | |||
760 | } | ||
761 | AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); | ||
762 | |||
763 | |||
764 | /* valid only for none inline segments */ | ||
765 | tx_info->data_offset = (void *) data - (void *) tx_desc; | ||
766 | |||
767 | tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0; | ||
768 | data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; | ||
769 | |||
770 | if (!is_inline(skb, &fragptr)) { | ||
771 | /* Map fragments */ | ||
772 | for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { | ||
773 | frag = &skb_shinfo(skb)->frags[i]; | ||
774 | dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset, | ||
775 | frag->size, PCI_DMA_TODEVICE); | ||
776 | data->addr = cpu_to_be64(dma); | ||
777 | data->lkey = cpu_to_be32(mdev->mr.key); | ||
778 | wmb(); | ||
779 | data->byte_count = cpu_to_be32(frag->size); | ||
780 | --data; | ||
781 | } | ||
782 | |||
783 | /* Map linear part */ | ||
784 | if (tx_info->linear) { | ||
785 | dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size, | ||
786 | skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE); | ||
787 | data->addr = cpu_to_be64(dma); | ||
788 | data->lkey = cpu_to_be32(mdev->mr.key); | ||
789 | wmb(); | ||
790 | data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size); | ||
791 | } | ||
792 | } else | ||
793 | build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); | ||
794 | |||
795 | ring->prod += nr_txbb; | ||
796 | |||
797 | /* If we used a bounce buffer then copy descriptor back into place */ | ||
798 | if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) | ||
799 | tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); | ||
800 | |||
801 | /* Run destructor before passing skb to HW */ | ||
802 | if (likely(!skb_shared(skb))) | ||
803 | skb_orphan(skb); | ||
804 | |||
805 | /* Ensure new descirptor hits memory | ||
806 | * before setting ownership of this descriptor to HW */ | ||
807 | wmb(); | ||
808 | tx_desc->ctrl.owner_opcode = op_own; | ||
809 | |||
810 | /* Ring doorbell! */ | ||
811 | wmb(); | ||
812 | writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); | ||
813 | dev->trans_start = jiffies; | ||
814 | |||
815 | /* Poll CQ here */ | ||
816 | mlx4_en_xmit_poll(priv, tx_ind); | ||
817 | |||
818 | return 0; | ||
819 | } | ||
820 | |||
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 8a8b56135a58..de169338cd90 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
558 | int i; | 558 | int i; |
559 | 559 | ||
560 | err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, | 560 | err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, |
561 | dev->caps.num_eqs - 1, dev->caps.reserved_eqs); | 561 | dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); |
562 | if (err) | 562 | if (err) |
563 | return err; | 563 | return err; |
564 | 564 | ||
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 7e32955da982..be09fdb79cb8 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) | |||
88 | [ 8] = "P_Key violation counter", | 88 | [ 8] = "P_Key violation counter", |
89 | [ 9] = "Q_Key violation counter", | 89 | [ 9] = "Q_Key violation counter", |
90 | [10] = "VMM", | 90 | [10] = "VMM", |
91 | [12] = "DPDP", | ||
91 | [16] = "MW support", | 92 | [16] = "MW support", |
92 | [17] = "APM support", | 93 | [17] = "APM support", |
93 | [18] = "Atomic ops support", | 94 | [18] = "Atomic ops support", |
@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
346 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); | 347 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); |
347 | dev_cap->max_vl[i] = field >> 4; | 348 | dev_cap->max_vl[i] = field >> 4; |
348 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); | 349 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); |
349 | dev_cap->max_mtu[i] = field >> 4; | 350 | dev_cap->ib_mtu[i] = field >> 4; |
350 | dev_cap->max_port_width[i] = field & 0xf; | 351 | dev_cap->max_port_width[i] = field & 0xf; |
351 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); | 352 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); |
352 | dev_cap->max_gids[i] = 1 << (field & 0xf); | 353 | dev_cap->max_gids[i] = 1 << (field & 0xf); |
@@ -354,9 +355,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
354 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); | 355 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); |
355 | } | 356 | } |
356 | } else { | 357 | } else { |
358 | #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 | ||
357 | #define QUERY_PORT_MTU_OFFSET 0x01 | 359 | #define QUERY_PORT_MTU_OFFSET 0x01 |
360 | #define QUERY_PORT_ETH_MTU_OFFSET 0x02 | ||
358 | #define QUERY_PORT_WIDTH_OFFSET 0x06 | 361 | #define QUERY_PORT_WIDTH_OFFSET 0x06 |
359 | #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 | 362 | #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 |
363 | #define QUERY_PORT_MAC_OFFSET 0x08 | ||
364 | #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a | ||
360 | #define QUERY_PORT_MAX_VL_OFFSET 0x0b | 365 | #define QUERY_PORT_MAX_VL_OFFSET 0x0b |
361 | 366 | ||
362 | for (i = 1; i <= dev_cap->num_ports; ++i) { | 367 | for (i = 1; i <= dev_cap->num_ports; ++i) { |
@@ -365,8 +370,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
365 | if (err) | 370 | if (err) |
366 | goto out; | 371 | goto out; |
367 | 372 | ||
373 | MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); | ||
374 | dev_cap->supported_port_types[i] = field & 3; | ||
368 | MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); | 375 | MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); |
369 | dev_cap->max_mtu[i] = field & 0xf; | 376 | dev_cap->ib_mtu[i] = field & 0xf; |
370 | MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); | 377 | MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); |
371 | dev_cap->max_port_width[i] = field & 0xf; | 378 | dev_cap->max_port_width[i] = field & 0xf; |
372 | MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); | 379 | MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); |
@@ -374,6 +381,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
374 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); | 381 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); |
375 | MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); | 382 | MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); |
376 | dev_cap->max_vl[i] = field & 0xf; | 383 | dev_cap->max_vl[i] = field & 0xf; |
384 | MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); | ||
385 | dev_cap->log_max_macs[i] = field & 0xf; | ||
386 | dev_cap->log_max_vlans[i] = field >> 4; | ||
387 | MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); | ||
388 | MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); | ||
377 | } | 389 | } |
378 | } | 390 | } |
379 | 391 | ||
@@ -407,7 +419,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
407 | mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", | 419 | mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", |
408 | dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); | 420 | dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); |
409 | mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", | 421 | mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", |
410 | dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1], | 422 | dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1], |
411 | dev_cap->max_port_width[1]); | 423 | dev_cap->max_port_width[1]); |
412 | mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", | 424 | mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", |
413 | dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); | 425 | dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); |
@@ -819,7 +831,7 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) | |||
819 | flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; | 831 | flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; |
820 | MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); | 832 | MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); |
821 | 833 | ||
822 | field = 128 << dev->caps.mtu_cap[port]; | 834 | field = 128 << dev->caps.ib_mtu_cap[port]; |
823 | MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); | 835 | MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); |
824 | field = dev->caps.gid_table_len[port]; | 836 | field = dev->caps.gid_table_len[port]; |
825 | MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); | 837 | MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); |
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h index decbb5c2ad41..526d7f30c041 100644 --- a/drivers/net/mlx4/fw.h +++ b/drivers/net/mlx4/fw.h | |||
@@ -66,11 +66,13 @@ struct mlx4_dev_cap { | |||
66 | int local_ca_ack_delay; | 66 | int local_ca_ack_delay; |
67 | int num_ports; | 67 | int num_ports; |
68 | u32 max_msg_sz; | 68 | u32 max_msg_sz; |
69 | int max_mtu[MLX4_MAX_PORTS + 1]; | 69 | int ib_mtu[MLX4_MAX_PORTS + 1]; |
70 | int max_port_width[MLX4_MAX_PORTS + 1]; | 70 | int max_port_width[MLX4_MAX_PORTS + 1]; |
71 | int max_vl[MLX4_MAX_PORTS + 1]; | 71 | int max_vl[MLX4_MAX_PORTS + 1]; |
72 | int max_gids[MLX4_MAX_PORTS + 1]; | 72 | int max_gids[MLX4_MAX_PORTS + 1]; |
73 | int max_pkeys[MLX4_MAX_PORTS + 1]; | 73 | int max_pkeys[MLX4_MAX_PORTS + 1]; |
74 | u64 def_mac[MLX4_MAX_PORTS + 1]; | ||
75 | u16 eth_mtu[MLX4_MAX_PORTS + 1]; | ||
74 | u16 stat_rate_support; | 76 | u16 stat_rate_support; |
75 | u32 flags; | 77 | u32 flags; |
76 | int reserved_uars; | 78 | int reserved_uars; |
@@ -102,6 +104,9 @@ struct mlx4_dev_cap { | |||
102 | u32 reserved_lkey; | 104 | u32 reserved_lkey; |
103 | u64 max_icm_sz; | 105 | u64 max_icm_sz; |
104 | int max_gso_sz; | 106 | int max_gso_sz; |
107 | u8 supported_port_types[MLX4_MAX_PORTS + 1]; | ||
108 | u8 log_max_macs[MLX4_MAX_PORTS + 1]; | ||
109 | u8 log_max_vlans[MLX4_MAX_PORTS + 1]; | ||
105 | }; | 110 | }; |
106 | 111 | ||
107 | struct mlx4_adapter { | 112 | struct mlx4_adapter { |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 1252a919de2e..468921b8f4b6 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -85,6 +85,57 @@ static struct mlx4_profile default_profile = { | |||
85 | .num_mtt = 1 << 20, | 85 | .num_mtt = 1 << 20, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static int log_num_mac = 2; | ||
89 | module_param_named(log_num_mac, log_num_mac, int, 0444); | ||
90 | MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); | ||
91 | |||
92 | static int log_num_vlan; | ||
93 | module_param_named(log_num_vlan, log_num_vlan, int, 0444); | ||
94 | MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); | ||
95 | |||
96 | static int use_prio; | ||
97 | module_param_named(use_prio, use_prio, bool, 0444); | ||
98 | MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " | ||
99 | "(0/1, default 0)"); | ||
100 | |||
/* Validate a requested per-port type configuration (IB/ETH) against the
 * device's capabilities.  @port_type is a 0-based array of num_ports
 * entries, while caps.supported_type[] is 1-based.
 * Returns 0 when the combination is acceptable, -EINVAL otherwise. */
static int mlx4_check_port_params(struct mlx4_dev *dev,
				  enum mlx4_port_type *port_type)
{
	int i;

	/* Mixed port types require the DPDP (Dual Port Different Protocol)
	 * capability */
	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i+1] &&
		    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
			mlx4_err(dev, "Only same port types supported "
				 "on this HCA, aborting.\n");
			return -EINVAL;
		}
	}
	/* Of the mixed configurations only (ib, eth) is allowed; the
	 * (eth, ib) ordering is rejected */
	if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
	    (port_type[1] == MLX4_PORT_TYPE_IB)) {
		mlx4_err(dev, "eth-ib configuration is not supported.\n");
		return -EINVAL;
	}

	/* Each requested type must be advertised by the FW for that port */
	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
129 | |||
130 | static void mlx4_set_port_mask(struct mlx4_dev *dev) | ||
131 | { | ||
132 | int i; | ||
133 | |||
134 | dev->caps.port_mask = 0; | ||
135 | for (i = 1; i <= dev->caps.num_ports; ++i) | ||
136 | if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) | ||
137 | dev->caps.port_mask |= 1 << (i - 1); | ||
138 | } | ||
88 | static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | 139 | static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) |
89 | { | 140 | { |
90 | int err; | 141 | int err; |
@@ -120,10 +171,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
120 | dev->caps.num_ports = dev_cap->num_ports; | 171 | dev->caps.num_ports = dev_cap->num_ports; |
121 | for (i = 1; i <= dev->caps.num_ports; ++i) { | 172 | for (i = 1; i <= dev->caps.num_ports; ++i) { |
122 | dev->caps.vl_cap[i] = dev_cap->max_vl[i]; | 173 | dev->caps.vl_cap[i] = dev_cap->max_vl[i]; |
123 | dev->caps.mtu_cap[i] = dev_cap->max_mtu[i]; | 174 | dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; |
124 | dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; | 175 | dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; |
125 | dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; | 176 | dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; |
126 | dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; | 177 | dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; |
178 | dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; | ||
179 | dev->caps.def_mac[i] = dev_cap->def_mac[i]; | ||
180 | dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; | ||
127 | } | 181 | } |
128 | 182 | ||
129 | dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; | 183 | dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; |
@@ -134,7 +188,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
134 | dev->caps.max_rq_sg = dev_cap->max_rq_sg; | 188 | dev->caps.max_rq_sg = dev_cap->max_rq_sg; |
135 | dev->caps.max_wqes = dev_cap->max_qp_sz; | 189 | dev->caps.max_wqes = dev_cap->max_qp_sz; |
136 | dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; | 190 | dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; |
137 | dev->caps.reserved_qps = dev_cap->reserved_qps; | ||
138 | dev->caps.max_srq_wqes = dev_cap->max_srq_sz; | 191 | dev->caps.max_srq_wqes = dev_cap->max_srq_sz; |
139 | dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; | 192 | dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; |
140 | dev->caps.reserved_srqs = dev_cap->reserved_srqs; | 193 | dev->caps.reserved_srqs = dev_cap->reserved_srqs; |
@@ -163,9 +216,138 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
163 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; | 216 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; |
164 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | 217 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; |
165 | 218 | ||
219 | dev->caps.log_num_macs = log_num_mac; | ||
220 | dev->caps.log_num_vlans = log_num_vlan; | ||
221 | dev->caps.log_num_prios = use_prio ? 3 : 0; | ||
222 | |||
223 | for (i = 1; i <= dev->caps.num_ports; ++i) { | ||
224 | if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH) | ||
225 | dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; | ||
226 | else | ||
227 | dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; | ||
228 | |||
229 | if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { | ||
230 | dev->caps.log_num_macs = dev_cap->log_max_macs[i]; | ||
231 | mlx4_warn(dev, "Requested number of MACs is too much " | ||
232 | "for port %d, reducing to %d.\n", | ||
233 | i, 1 << dev->caps.log_num_macs); | ||
234 | } | ||
235 | if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { | ||
236 | dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; | ||
237 | mlx4_warn(dev, "Requested number of VLANs is too much " | ||
238 | "for port %d, reducing to %d.\n", | ||
239 | i, 1 << dev->caps.log_num_vlans); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | mlx4_set_port_mask(dev); | ||
244 | |||
245 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; | ||
246 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = | ||
247 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = | ||
248 | (1 << dev->caps.log_num_macs) * | ||
249 | (1 << dev->caps.log_num_vlans) * | ||
250 | (1 << dev->caps.log_num_prios) * | ||
251 | dev->caps.num_ports; | ||
252 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; | ||
253 | |||
254 | dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + | ||
255 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + | ||
256 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + | ||
257 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; | ||
258 | |||
166 | return 0; | 259 | return 0; |
167 | } | 260 | } |
168 | 261 | ||
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 *
 * @port_types is 0-based while caps.port_type[] is 1-based.  When any
 * port actually changes type, the whole device is unregistered, every
 * port is closed and re-programmed via SET_PORT, and the device is
 * registered again with the new configuration.
 */
static int mlx4_change_port_types(struct mlx4_dev *dev,
				  enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	/* Record the new types and detect whether anything changed */
	for (port = 0; port < dev->caps.num_ports; port++) {
		if (port_types[port] != dev->caps.port_type[port + 1]) {
			change = 1;
			dev->caps.port_type[port + 1] = port_types[port];
		}
	}
	if (change) {
		/* Interfaces must be torn down before re-programming ports */
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}
297 | |||
298 | static ssize_t show_port_type(struct device *dev, | ||
299 | struct device_attribute *attr, | ||
300 | char *buf) | ||
301 | { | ||
302 | struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, | ||
303 | port_attr); | ||
304 | struct mlx4_dev *mdev = info->dev; | ||
305 | |||
306 | return sprintf(buf, "%s\n", | ||
307 | mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ? | ||
308 | "ib" : "eth"); | ||
309 | } | ||
310 | |||
/* sysfs store handler for the per-port type attribute.
 * Accepts "ib" or "eth" (with the trailing newline that echo appends).
 * The requested type is first staged in info->tmp_type so that, under
 * port_mutex, a configuration combining all ports' staged and current
 * types can be validated as a whole before being applied.
 * Returns @count on success (including the no-change case) or a negative
 * errno. */
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		return -EINVAL;
	}

	mutex_lock(&priv->port_mutex);
	/* Build the full requested configuration: staged type where one
	 * exists, current type otherwise (types[] is 0-based) */
	for (i = 0; i < mdev->caps.num_ports; i++)
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.port_type[i+1];

	err = mlx4_check_port_params(mdev, types);
	if (err)
		goto out;

	/* Configuration accepted - clear all staging slots before applying */
	for (i = 1; i <= mdev->caps.num_ports; i++)
		priv->port[i].tmp_type = 0;

	err = mlx4_change_port_types(mdev, types);

out:
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}
350 | |||
169 | static int mlx4_load_fw(struct mlx4_dev *dev) | 351 | static int mlx4_load_fw(struct mlx4_dev *dev) |
170 | { | 352 | { |
171 | struct mlx4_priv *priv = mlx4_priv(dev); | 353 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -211,7 +393,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | |||
211 | ((u64) (MLX4_CMPT_TYPE_QP * | 393 | ((u64) (MLX4_CMPT_TYPE_QP * |
212 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | 394 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), |
213 | cmpt_entry_sz, dev->caps.num_qps, | 395 | cmpt_entry_sz, dev->caps.num_qps, |
214 | dev->caps.reserved_qps, 0, 0); | 396 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
397 | 0, 0); | ||
215 | if (err) | 398 | if (err) |
216 | goto err; | 399 | goto err; |
217 | 400 | ||
@@ -336,7 +519,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
336 | init_hca->qpc_base, | 519 | init_hca->qpc_base, |
337 | dev_cap->qpc_entry_sz, | 520 | dev_cap->qpc_entry_sz, |
338 | dev->caps.num_qps, | 521 | dev->caps.num_qps, |
339 | dev->caps.reserved_qps, 0, 0); | 522 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
523 | 0, 0); | ||
340 | if (err) { | 524 | if (err) { |
341 | mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); | 525 | mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); |
342 | goto err_unmap_dmpt; | 526 | goto err_unmap_dmpt; |
@@ -346,7 +530,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
346 | init_hca->auxc_base, | 530 | init_hca->auxc_base, |
347 | dev_cap->aux_entry_sz, | 531 | dev_cap->aux_entry_sz, |
348 | dev->caps.num_qps, | 532 | dev->caps.num_qps, |
349 | dev->caps.reserved_qps, 0, 0); | 533 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
534 | 0, 0); | ||
350 | if (err) { | 535 | if (err) { |
351 | mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); | 536 | mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); |
352 | goto err_unmap_qp; | 537 | goto err_unmap_qp; |
@@ -356,7 +541,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
356 | init_hca->altc_base, | 541 | init_hca->altc_base, |
357 | dev_cap->altc_entry_sz, | 542 | dev_cap->altc_entry_sz, |
358 | dev->caps.num_qps, | 543 | dev->caps.num_qps, |
359 | dev->caps.reserved_qps, 0, 0); | 544 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
545 | 0, 0); | ||
360 | if (err) { | 546 | if (err) { |
361 | mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); | 547 | mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); |
362 | goto err_unmap_auxc; | 548 | goto err_unmap_auxc; |
@@ -366,7 +552,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
366 | init_hca->rdmarc_base, | 552 | init_hca->rdmarc_base, |
367 | dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, | 553 | dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, |
368 | dev->caps.num_qps, | 554 | dev->caps.num_qps, |
369 | dev->caps.reserved_qps, 0, 0); | 555 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], |
556 | 0, 0); | ||
370 | if (err) { | 557 | if (err) { |
371 | mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); | 558 | mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); |
372 | goto err_unmap_altc; | 559 | goto err_unmap_altc; |
@@ -565,6 +752,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
565 | { | 752 | { |
566 | struct mlx4_priv *priv = mlx4_priv(dev); | 753 | struct mlx4_priv *priv = mlx4_priv(dev); |
567 | int err; | 754 | int err; |
755 | int port; | ||
568 | 756 | ||
569 | err = mlx4_init_uar_table(dev); | 757 | err = mlx4_init_uar_table(dev); |
570 | if (err) { | 758 | if (err) { |
@@ -663,8 +851,20 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
663 | goto err_qp_table_free; | 851 | goto err_qp_table_free; |
664 | } | 852 | } |
665 | 853 | ||
854 | for (port = 1; port <= dev->caps.num_ports; port++) { | ||
855 | err = mlx4_SET_PORT(dev, port); | ||
856 | if (err) { | ||
857 | mlx4_err(dev, "Failed to set port %d, aborting\n", | ||
858 | port); | ||
859 | goto err_mcg_table_free; | ||
860 | } | ||
861 | } | ||
862 | |||
666 | return 0; | 863 | return 0; |
667 | 864 | ||
865 | err_mcg_table_free: | ||
866 | mlx4_cleanup_mcg_table(dev); | ||
867 | |||
668 | err_qp_table_free: | 868 | err_qp_table_free: |
669 | mlx4_cleanup_qp_table(dev); | 869 | mlx4_cleanup_qp_table(dev); |
670 | 870 | ||
@@ -728,11 +928,45 @@ no_msi: | |||
728 | priv->eq_table.eq[i].irq = dev->pdev->irq; | 928 | priv->eq_table.eq[i].irq = dev->pdev->irq; |
729 | } | 929 | } |
730 | 930 | ||
931 | static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | ||
932 | { | ||
933 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
934 | int err = 0; | ||
935 | |||
936 | info->dev = dev; | ||
937 | info->port = port; | ||
938 | mlx4_init_mac_table(dev, &info->mac_table); | ||
939 | mlx4_init_vlan_table(dev, &info->vlan_table); | ||
940 | |||
941 | sprintf(info->dev_name, "mlx4_port%d", port); | ||
942 | info->port_attr.attr.name = info->dev_name; | ||
943 | info->port_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
944 | info->port_attr.show = show_port_type; | ||
945 | info->port_attr.store = set_port_type; | ||
946 | |||
947 | err = device_create_file(&dev->pdev->dev, &info->port_attr); | ||
948 | if (err) { | ||
949 | mlx4_err(dev, "Failed to create file for port %d\n", port); | ||
950 | info->port = -1; | ||
951 | } | ||
952 | |||
953 | return err; | ||
954 | } | ||
955 | |||
956 | static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | ||
957 | { | ||
958 | if (info->port < 0) | ||
959 | return; | ||
960 | |||
961 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | ||
962 | } | ||
963 | |||
731 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 964 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
732 | { | 965 | { |
733 | struct mlx4_priv *priv; | 966 | struct mlx4_priv *priv; |
734 | struct mlx4_dev *dev; | 967 | struct mlx4_dev *dev; |
735 | int err; | 968 | int err; |
969 | int port; | ||
736 | 970 | ||
737 | printk(KERN_INFO PFX "Initializing %s\n", | 971 | printk(KERN_INFO PFX "Initializing %s\n", |
738 | pci_name(pdev)); | 972 | pci_name(pdev)); |
@@ -807,6 +1041,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
807 | INIT_LIST_HEAD(&priv->ctx_list); | 1041 | INIT_LIST_HEAD(&priv->ctx_list); |
808 | spin_lock_init(&priv->ctx_lock); | 1042 | spin_lock_init(&priv->ctx_lock); |
809 | 1043 | ||
1044 | mutex_init(&priv->port_mutex); | ||
1045 | |||
810 | INIT_LIST_HEAD(&priv->pgdir_list); | 1046 | INIT_LIST_HEAD(&priv->pgdir_list); |
811 | mutex_init(&priv->pgdir_mutex); | 1047 | mutex_init(&priv->pgdir_mutex); |
812 | 1048 | ||
@@ -842,15 +1078,24 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
842 | if (err) | 1078 | if (err) |
843 | goto err_close; | 1079 | goto err_close; |
844 | 1080 | ||
1081 | for (port = 1; port <= dev->caps.num_ports; port++) { | ||
1082 | err = mlx4_init_port_info(dev, port); | ||
1083 | if (err) | ||
1084 | goto err_port; | ||
1085 | } | ||
1086 | |||
845 | err = mlx4_register_device(dev); | 1087 | err = mlx4_register_device(dev); |
846 | if (err) | 1088 | if (err) |
847 | goto err_cleanup; | 1089 | goto err_port; |
848 | 1090 | ||
849 | pci_set_drvdata(pdev, dev); | 1091 | pci_set_drvdata(pdev, dev); |
850 | 1092 | ||
851 | return 0; | 1093 | return 0; |
852 | 1094 | ||
853 | err_cleanup: | 1095 | err_port: |
1096 | for (port = 1; port <= dev->caps.num_ports; port++) | ||
1097 | mlx4_cleanup_port_info(&priv->port[port]); | ||
1098 | |||
854 | mlx4_cleanup_mcg_table(dev); | 1099 | mlx4_cleanup_mcg_table(dev); |
855 | mlx4_cleanup_qp_table(dev); | 1100 | mlx4_cleanup_qp_table(dev); |
856 | mlx4_cleanup_srq_table(dev); | 1101 | mlx4_cleanup_srq_table(dev); |
@@ -907,8 +1152,10 @@ static void mlx4_remove_one(struct pci_dev *pdev) | |||
907 | if (dev) { | 1152 | if (dev) { |
908 | mlx4_unregister_device(dev); | 1153 | mlx4_unregister_device(dev); |
909 | 1154 | ||
910 | for (p = 1; p <= dev->caps.num_ports; ++p) | 1155 | for (p = 1; p <= dev->caps.num_ports; p++) { |
1156 | mlx4_cleanup_port_info(&priv->port[p]); | ||
911 | mlx4_CLOSE_PORT(dev, p); | 1157 | mlx4_CLOSE_PORT(dev, p); |
1158 | } | ||
912 | 1159 | ||
913 | mlx4_cleanup_mcg_table(dev); | 1160 | mlx4_cleanup_mcg_table(dev); |
914 | mlx4_cleanup_qp_table(dev); | 1161 | mlx4_cleanup_qp_table(dev); |
@@ -948,6 +1195,8 @@ static struct pci_device_id mlx4_pci_table[] = { | |||
948 | { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ | 1195 | { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ |
949 | { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ | 1196 | { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ |
950 | { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ | 1197 | { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ |
1198 | { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ | ||
1199 | { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ | ||
951 | { 0, } | 1200 | { 0, } |
952 | }; | 1201 | }; |
953 | 1202 | ||
@@ -960,10 +1209,28 @@ static struct pci_driver mlx4_driver = { | |||
960 | .remove = __devexit_p(mlx4_remove_one) | 1209 | .remove = __devexit_p(mlx4_remove_one) |
961 | }; | 1210 | }; |
962 | 1211 | ||
1212 | static int __init mlx4_verify_params(void) | ||
1213 | { | ||
1214 | if ((log_num_mac < 0) || (log_num_mac > 7)) { | ||
1215 | printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac); | ||
1216 | return -1; | ||
1217 | } | ||
1218 | |||
1219 | if ((log_num_vlan < 0) || (log_num_vlan > 7)) { | ||
1220 | printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan); | ||
1221 | return -1; | ||
1222 | } | ||
1223 | |||
1224 | return 0; | ||
1225 | } | ||
1226 | |||
963 | static int __init mlx4_init(void) | 1227 | static int __init mlx4_init(void) |
964 | { | 1228 | { |
965 | int ret; | 1229 | int ret; |
966 | 1230 | ||
1231 | if (mlx4_verify_params()) | ||
1232 | return -EINVAL; | ||
1233 | |||
967 | ret = mlx4_catas_init(); | 1234 | ret = mlx4_catas_init(); |
968 | if (ret) | 1235 | if (ret) |
969 | return ret; | 1236 | return ret; |
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index c83f88ce0736..592c01ae2c5d 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c | |||
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev) | |||
368 | struct mlx4_priv *priv = mlx4_priv(dev); | 368 | struct mlx4_priv *priv = mlx4_priv(dev); |
369 | int err; | 369 | int err; |
370 | 370 | ||
371 | err = mlx4_bitmap_init(&priv->mcg_table.bitmap, | 371 | err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, |
372 | dev->caps.num_amgms, dev->caps.num_amgms - 1, 0); | 372 | dev->caps.num_amgms - 1, 0, 0); |
373 | if (err) | 373 | if (err) |
374 | return err; | 374 | return err; |
375 | 375 | ||
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 5337e3ac3e78..fa431fad0eec 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -111,6 +111,7 @@ struct mlx4_bitmap { | |||
111 | u32 last; | 111 | u32 last; |
112 | u32 top; | 112 | u32 top; |
113 | u32 max; | 113 | u32 max; |
114 | u32 reserved_top; | ||
114 | u32 mask; | 115 | u32 mask; |
115 | spinlock_t lock; | 116 | spinlock_t lock; |
116 | unsigned long *table; | 117 | unsigned long *table; |
@@ -251,6 +252,38 @@ struct mlx4_catas_err { | |||
251 | struct list_head list; | 252 | struct list_head list; |
252 | }; | 253 | }; |
253 | 254 | ||
255 | #define MLX4_MAX_MAC_NUM 128 | ||
256 | #define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3) | ||
257 | |||
258 | struct mlx4_mac_table { | ||
259 | __be64 entries[MLX4_MAX_MAC_NUM]; | ||
260 | int refs[MLX4_MAX_MAC_NUM]; | ||
261 | struct mutex mutex; | ||
262 | int total; | ||
263 | int max; | ||
264 | }; | ||
265 | |||
266 | #define MLX4_MAX_VLAN_NUM 128 | ||
267 | #define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) | ||
268 | |||
269 | struct mlx4_vlan_table { | ||
270 | __be32 entries[MLX4_MAX_VLAN_NUM]; | ||
271 | int refs[MLX4_MAX_VLAN_NUM]; | ||
272 | struct mutex mutex; | ||
273 | int total; | ||
274 | int max; | ||
275 | }; | ||
276 | |||
277 | struct mlx4_port_info { | ||
278 | struct mlx4_dev *dev; | ||
279 | int port; | ||
280 | char dev_name[16]; | ||
281 | struct device_attribute port_attr; | ||
282 | enum mlx4_port_type tmp_type; | ||
283 | struct mlx4_mac_table mac_table; | ||
284 | struct mlx4_vlan_table vlan_table; | ||
285 | }; | ||
286 | |||
254 | struct mlx4_priv { | 287 | struct mlx4_priv { |
255 | struct mlx4_dev dev; | 288 | struct mlx4_dev dev; |
256 | 289 | ||
@@ -279,6 +312,8 @@ struct mlx4_priv { | |||
279 | 312 | ||
280 | struct mlx4_uar driver_uar; | 313 | struct mlx4_uar driver_uar; |
281 | void __iomem *kar; | 314 | void __iomem *kar; |
315 | struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; | ||
316 | struct mutex port_mutex; | ||
282 | }; | 317 | }; |
283 | 318 | ||
284 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) | 319 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) |
@@ -288,7 +323,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) | |||
288 | 323 | ||
289 | u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); | 324 | u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); |
290 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); | 325 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); |
291 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved); | 326 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); |
327 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); | ||
328 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, | ||
329 | u32 reserved_bot, u32 resetrved_top); | ||
292 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); | 330 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); |
293 | 331 | ||
294 | int mlx4_reset(struct mlx4_dev *dev); | 332 | int mlx4_reset(struct mlx4_dev *dev); |
@@ -346,4 +384,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); | |||
346 | 384 | ||
347 | void mlx4_handle_catas_err(struct mlx4_dev *dev); | 385 | void mlx4_handle_catas_err(struct mlx4_dev *dev); |
348 | 386 | ||
387 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); | ||
388 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); | ||
389 | |||
390 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); | ||
391 | |||
349 | #endif /* MLX4_H */ | 392 | #endif /* MLX4_H */ |
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h new file mode 100644 index 000000000000..11fb17c6e97b --- /dev/null +++ b/drivers/net/mlx4/mlx4_en.h | |||
@@ -0,0 +1,561 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #ifndef _MLX4_EN_H_ | ||
35 | #define _MLX4_EN_H_ | ||
36 | |||
37 | #include <linux/compiler.h> | ||
38 | #include <linux/list.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include <linux/netdevice.h> | ||
41 | #include <linux/inet_lro.h> | ||
42 | |||
43 | #include <linux/mlx4/device.h> | ||
44 | #include <linux/mlx4/qp.h> | ||
45 | #include <linux/mlx4/cq.h> | ||
46 | #include <linux/mlx4/srq.h> | ||
47 | #include <linux/mlx4/doorbell.h> | ||
48 | |||
49 | #include "en_port.h" | ||
50 | |||
51 | #define DRV_NAME "mlx4_en" | ||
52 | #define DRV_VERSION "1.4.0" | ||
53 | #define DRV_RELDATE "Sep 2008" | ||
54 | |||
55 | |||
56 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) | ||
57 | |||
58 | #define mlx4_dbg(mlevel, priv, format, arg...) \ | ||
59 | if (NETIF_MSG_##mlevel & priv->msg_enable) \ | ||
60 | printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\ | ||
61 | (&priv->mdev->pdev->dev)->bus_id , ## arg) | ||
62 | |||
63 | #define mlx4_err(mdev, format, arg...) \ | ||
64 | printk(KERN_ERR "%s %s: " format , DRV_NAME ,\ | ||
65 | (&mdev->pdev->dev)->bus_id , ## arg) | ||
66 | #define mlx4_info(mdev, format, arg...) \ | ||
67 | printk(KERN_INFO "%s %s: " format , DRV_NAME ,\ | ||
68 | (&mdev->pdev->dev)->bus_id , ## arg) | ||
69 | #define mlx4_warn(mdev, format, arg...) \ | ||
70 | printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\ | ||
71 | (&mdev->pdev->dev)->bus_id , ## arg) | ||
72 | |||
73 | /* | ||
74 | * Device constants | ||
75 | */ | ||
76 | |||
77 | |||
78 | #define MLX4_EN_PAGE_SHIFT 12 | ||
79 | #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) | ||
80 | #define MAX_TX_RINGS 16 | ||
81 | #define MAX_RX_RINGS 16 | ||
82 | #define MAX_RSS_MAP_SIZE 64 | ||
83 | #define RSS_FACTOR 2 | ||
84 | #define TXBB_SIZE 64 | ||
85 | #define HEADROOM (2048 / TXBB_SIZE + 1) | ||
86 | #define MAX_LSO_HDR_SIZE 92 | ||
87 | #define STAMP_STRIDE 64 | ||
88 | #define STAMP_DWORDS (STAMP_STRIDE / 4) | ||
89 | #define STAMP_SHIFT 31 | ||
90 | #define STAMP_VAL 0x7fffffff | ||
91 | #define STATS_DELAY (HZ / 4) | ||
92 | |||
93 | /* Typical TSO descriptor with 16 gather entries is 352 bytes... */ | ||
94 | #define MAX_DESC_SIZE 512 | ||
95 | #define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE) | ||
96 | |||
97 | /* | ||
98 | * OS related constants and tunables | ||
99 | */ | ||
100 | |||
101 | #define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) | ||
102 | |||
103 | #define MLX4_EN_ALLOC_ORDER 2 | ||
104 | #define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER) | ||
105 | |||
106 | #define MLX4_EN_MAX_LRO_DESCRIPTORS 32 | ||
107 | |||
108 | /* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU | ||
109 | * and 4K allocations) */ | ||
110 | enum { | ||
111 | FRAG_SZ0 = 512 - NET_IP_ALIGN, | ||
112 | FRAG_SZ1 = 1024, | ||
113 | FRAG_SZ2 = 4096, | ||
114 | FRAG_SZ3 = MLX4_EN_ALLOC_SIZE | ||
115 | }; | ||
116 | #define MLX4_EN_MAX_RX_FRAGS 4 | ||
117 | |||
118 | /* Minimum ring size for our page-allocation sceme to work */ | ||
119 | #define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES) | ||
120 | #define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) | ||
121 | |||
122 | #define MLX4_EN_TX_RING_NUM 9 | ||
123 | #define MLX4_EN_DEF_TX_RING_SIZE 1024 | ||
124 | #define MLX4_EN_DEF_RX_RING_SIZE 1024 | ||
125 | |||
126 | /* Target number of bytes to coalesce with interrupt moderation */ | ||
127 | #define MLX4_EN_RX_COAL_TARGET 0x20000 | ||
128 | #define MLX4_EN_RX_COAL_TIME 0x10 | ||
129 | |||
130 | #define MLX4_EN_TX_COAL_PKTS 5 | ||
131 | #define MLX4_EN_TX_COAL_TIME 0x80 | ||
132 | |||
133 | #define MLX4_EN_RX_RATE_LOW 400000 | ||
134 | #define MLX4_EN_RX_COAL_TIME_LOW 0 | ||
135 | #define MLX4_EN_RX_RATE_HIGH 450000 | ||
136 | #define MLX4_EN_RX_COAL_TIME_HIGH 128 | ||
137 | #define MLX4_EN_RX_SIZE_THRESH 1024 | ||
138 | #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) | ||
139 | #define MLX4_EN_SAMPLE_INTERVAL 0 | ||
140 | |||
141 | #define MLX4_EN_AUTO_CONF 0xffff | ||
142 | |||
143 | #define MLX4_EN_DEF_RX_PAUSE 1 | ||
144 | #define MLX4_EN_DEF_TX_PAUSE 1 | ||
145 | |||
146 | /* Interval between sucessive polls in the Tx routine when polling is used | ||
147 | instead of interrupts (in per-core Tx rings) - should be power of 2 */ | ||
148 | #define MLX4_EN_TX_POLL_MODER 16 | ||
149 | #define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4) | ||
150 | |||
151 | #define ETH_LLC_SNAP_SIZE 8 | ||
152 | |||
153 | #define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) | ||
154 | #define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) | ||
155 | |||
156 | #define MLX4_EN_MIN_MTU 46 | ||
157 | #define ETH_BCAST 0xffffffffffffULL | ||
158 | |||
159 | #ifdef MLX4_EN_PERF_STAT | ||
160 | /* Number of samples to 'average' */ | ||
161 | #define AVG_SIZE 128 | ||
162 | #define AVG_FACTOR 1024 | ||
163 | #define NUM_PERF_STATS NUM_PERF_COUNTERS | ||
164 | |||
165 | #define INC_PERF_COUNTER(cnt) (++(cnt)) | ||
166 | #define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add)) | ||
167 | #define AVG_PERF_COUNTER(cnt, sample) \ | ||
168 | ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE) | ||
169 | #define GET_PERF_COUNTER(cnt) (cnt) | ||
170 | #define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR) | ||
171 | |||
172 | #else | ||
173 | |||
174 | #define NUM_PERF_STATS 0 | ||
175 | #define INC_PERF_COUNTER(cnt) do {} while (0) | ||
176 | #define ADD_PERF_COUNTER(cnt, add) do {} while (0) | ||
177 | #define AVG_PERF_COUNTER(cnt, sample) do {} while (0) | ||
178 | #define GET_PERF_COUNTER(cnt) (0) | ||
179 | #define GET_AVG_PERF_COUNTER(cnt) (0) | ||
180 | #endif /* MLX4_EN_PERF_STAT */ | ||
181 | |||
182 | /* | ||
183 | * Configurables | ||
184 | */ | ||
185 | |||
186 | enum cq_type { | ||
187 | RX = 0, | ||
188 | TX = 1, | ||
189 | }; | ||
190 | |||
191 | |||
192 | /* | ||
193 | * Useful macros | ||
194 | */ | ||
195 | #define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x)) | ||
196 | #define XNOR(x, y) (!(x) == !(y)) | ||
197 | #define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0) | ||
198 | |||
199 | |||
200 | struct mlx4_en_tx_info { | ||
201 | struct sk_buff *skb; | ||
202 | u32 nr_txbb; | ||
203 | u8 linear; | ||
204 | u8 data_offset; | ||
205 | }; | ||
206 | |||
207 | |||
208 | #define MLX4_EN_BIT_DESC_OWN 0x80000000 | ||
209 | #define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg) | ||
210 | #define MLX4_EN_MEMTYPE_PAD 0x100 | ||
211 | #define DS_SIZE sizeof(struct mlx4_wqe_data_seg) | ||
212 | |||
213 | |||
214 | struct mlx4_en_tx_desc { | ||
215 | struct mlx4_wqe_ctrl_seg ctrl; | ||
216 | union { | ||
217 | struct mlx4_wqe_data_seg data; /* at least one data segment */ | ||
218 | struct mlx4_wqe_lso_seg lso; | ||
219 | struct mlx4_wqe_inline_seg inl; | ||
220 | }; | ||
221 | }; | ||
222 | |||
223 | #define MLX4_EN_USE_SRQ 0x01000000 | ||
224 | |||
225 | struct mlx4_en_rx_alloc { | ||
226 | struct page *page; | ||
227 | u16 offset; | ||
228 | }; | ||
229 | |||
230 | struct mlx4_en_tx_ring { | ||
231 | struct mlx4_hwq_resources wqres; | ||
232 | u32 size ; /* number of TXBBs */ | ||
233 | u32 size_mask; | ||
234 | u16 stride; | ||
235 | u16 cqn; /* index of port CQ associated with this ring */ | ||
236 | u32 prod; | ||
237 | u32 cons; | ||
238 | u32 buf_size; | ||
239 | u32 doorbell_qpn; | ||
240 | void *buf; | ||
241 | u16 poll_cnt; | ||
242 | int blocked; | ||
243 | struct mlx4_en_tx_info *tx_info; | ||
244 | u8 *bounce_buf; | ||
245 | u32 last_nr_txbb; | ||
246 | struct mlx4_qp qp; | ||
247 | struct mlx4_qp_context context; | ||
248 | int qpn; | ||
249 | enum mlx4_qp_state qp_state; | ||
250 | struct mlx4_srq dummy; | ||
251 | unsigned long bytes; | ||
252 | unsigned long packets; | ||
253 | spinlock_t comp_lock; | ||
254 | }; | ||
255 | |||
256 | struct mlx4_en_rx_desc { | ||
257 | struct mlx4_wqe_srq_next_seg next; | ||
258 | /* actual number of entries depends on rx ring stride */ | ||
259 | struct mlx4_wqe_data_seg data[0]; | ||
260 | }; | ||
261 | |||
262 | struct mlx4_en_rx_ring { | ||
263 | struct mlx4_srq srq; | ||
264 | struct mlx4_hwq_resources wqres; | ||
265 | struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; | ||
266 | struct net_lro_mgr lro; | ||
267 | u32 size ; /* number of Rx descs*/ | ||
268 | u32 actual_size; | ||
269 | u32 size_mask; | ||
270 | u16 stride; | ||
271 | u16 log_stride; | ||
272 | u16 cqn; /* index of port CQ associated with this ring */ | ||
273 | u32 prod; | ||
274 | u32 cons; | ||
275 | u32 buf_size; | ||
276 | int need_refill; | ||
277 | int full; | ||
278 | void *buf; | ||
279 | void *rx_info; | ||
280 | unsigned long bytes; | ||
281 | unsigned long packets; | ||
282 | }; | ||
283 | |||
284 | |||
285 | static inline int mlx4_en_can_lro(__be16 status) | ||
286 | { | ||
287 | return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | | ||
288 | MLX4_CQE_STATUS_IPV4F | | ||
289 | MLX4_CQE_STATUS_IPV6 | | ||
290 | MLX4_CQE_STATUS_IPV4OPT | | ||
291 | MLX4_CQE_STATUS_TCP | | ||
292 | MLX4_CQE_STATUS_UDP | | ||
293 | MLX4_CQE_STATUS_IPOK)) == | ||
294 | cpu_to_be16(MLX4_CQE_STATUS_IPV4 | | ||
295 | MLX4_CQE_STATUS_IPOK | | ||
296 | MLX4_CQE_STATUS_TCP); | ||
297 | } | ||
298 | |||
299 | struct mlx4_en_cq { | ||
300 | struct mlx4_cq mcq; | ||
301 | struct mlx4_hwq_resources wqres; | ||
302 | int ring; | ||
303 | spinlock_t lock; | ||
304 | struct net_device *dev; | ||
305 | struct napi_struct napi; | ||
306 | /* Per-core Tx cq processing support */ | ||
307 | struct timer_list timer; | ||
308 | int size; | ||
309 | int buf_size; | ||
310 | unsigned vector; | ||
311 | enum cq_type is_tx; | ||
312 | u16 moder_time; | ||
313 | u16 moder_cnt; | ||
314 | int armed; | ||
315 | struct mlx4_cqe *buf; | ||
316 | #define MLX4_EN_OPCODE_ERROR 0x1e | ||
317 | }; | ||
318 | |||
319 | struct mlx4_en_port_profile { | ||
320 | u32 flags; | ||
321 | u32 tx_ring_num; | ||
322 | u32 rx_ring_num; | ||
323 | u32 tx_ring_size; | ||
324 | u32 rx_ring_size; | ||
325 | }; | ||
326 | |||
327 | struct mlx4_en_profile { | ||
328 | int rss_xor; | ||
329 | int num_lro; | ||
330 | u8 rss_mask; | ||
331 | u32 active_ports; | ||
332 | u32 small_pkt_int; | ||
333 | int rx_moder_cnt; | ||
334 | int rx_moder_time; | ||
335 | int auto_moder; | ||
336 | u8 rx_pause; | ||
337 | u8 rx_ppp; | ||
338 | u8 tx_pause; | ||
339 | u8 tx_ppp; | ||
340 | u8 no_reset; | ||
341 | struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; | ||
342 | }; | ||
343 | |||
344 | struct mlx4_en_dev { | ||
345 | struct mlx4_dev *dev; | ||
346 | struct pci_dev *pdev; | ||
347 | struct mutex state_lock; | ||
348 | struct net_device *pndev[MLX4_MAX_PORTS + 1]; | ||
349 | u32 port_cnt; | ||
350 | bool device_up; | ||
351 | struct mlx4_en_profile profile; | ||
352 | u32 LSO_support; | ||
353 | struct workqueue_struct *workqueue; | ||
354 | struct device *dma_device; | ||
355 | void __iomem *uar_map; | ||
356 | struct mlx4_uar priv_uar; | ||
357 | struct mlx4_mr mr; | ||
358 | u32 priv_pdn; | ||
359 | spinlock_t uar_lock; | ||
360 | }; | ||
361 | |||
362 | |||
363 | struct mlx4_en_rss_map { | ||
364 | int size; | ||
365 | int base_qpn; | ||
366 | u16 map[MAX_RSS_MAP_SIZE]; | ||
367 | struct mlx4_qp qps[MAX_RSS_MAP_SIZE]; | ||
368 | enum mlx4_qp_state state[MAX_RSS_MAP_SIZE]; | ||
369 | struct mlx4_qp indir_qp; | ||
370 | enum mlx4_qp_state indir_state; | ||
371 | }; | ||
372 | |||
373 | struct mlx4_en_rss_context { | ||
374 | __be32 base_qpn; | ||
375 | __be32 default_qpn; | ||
376 | u16 reserved; | ||
377 | u8 hash_fn; | ||
378 | u8 flags; | ||
379 | __be32 rss_key[10]; | ||
380 | }; | ||
381 | |||
382 | struct mlx4_en_pkt_stats { | ||
383 | unsigned long broadcast; | ||
384 | unsigned long rx_prio[8]; | ||
385 | unsigned long tx_prio[8]; | ||
386 | #define NUM_PKT_STATS 17 | ||
387 | }; | ||
388 | |||
389 | struct mlx4_en_port_stats { | ||
390 | unsigned long lro_aggregated; | ||
391 | unsigned long lro_flushed; | ||
392 | unsigned long lro_no_desc; | ||
393 | unsigned long tso_packets; | ||
394 | unsigned long queue_stopped; | ||
395 | unsigned long wake_queue; | ||
396 | unsigned long tx_timeout; | ||
397 | unsigned long rx_alloc_failed; | ||
398 | unsigned long rx_chksum_good; | ||
399 | unsigned long rx_chksum_none; | ||
400 | unsigned long tx_chksum_offload; | ||
401 | #define NUM_PORT_STATS 11 | ||
402 | }; | ||
403 | |||
404 | struct mlx4_en_perf_stats { | ||
405 | u32 tx_poll; | ||
406 | u64 tx_pktsz_avg; | ||
407 | u32 inflight_avg; | ||
408 | u16 tx_coal_avg; | ||
409 | u16 rx_coal_avg; | ||
410 | u32 napi_quota; | ||
411 | #define NUM_PERF_COUNTERS 6 | ||
412 | }; | ||
413 | |||
414 | struct mlx4_en_frag_info { | ||
415 | u16 frag_size; | ||
416 | u16 frag_prefix_size; | ||
417 | u16 frag_stride; | ||
418 | u16 frag_align; | ||
419 | u16 last_offset; | ||
420 | |||
421 | }; | ||
422 | |||
423 | struct mlx4_en_priv { | ||
424 | struct mlx4_en_dev *mdev; | ||
425 | struct mlx4_en_port_profile *prof; | ||
426 | struct net_device *dev; | ||
427 | struct vlan_group *vlgrp; | ||
428 | struct net_device_stats stats; | ||
429 | struct net_device_stats ret_stats; | ||
430 | spinlock_t stats_lock; | ||
431 | |||
432 | unsigned long last_moder_packets; | ||
433 | unsigned long last_moder_tx_packets; | ||
434 | unsigned long last_moder_bytes; | ||
435 | unsigned long last_moder_jiffies; | ||
436 | int last_moder_time; | ||
437 | u16 rx_usecs; | ||
438 | u16 rx_frames; | ||
439 | u16 tx_usecs; | ||
440 | u16 tx_frames; | ||
441 | u32 pkt_rate_low; | ||
442 | u16 rx_usecs_low; | ||
443 | u32 pkt_rate_high; | ||
444 | u16 rx_usecs_high; | ||
445 | u16 sample_interval; | ||
446 | u16 adaptive_rx_coal; | ||
447 | u32 msg_enable; | ||
448 | |||
449 | struct mlx4_hwq_resources res; | ||
450 | int link_state; | ||
451 | int last_link_state; | ||
452 | bool port_up; | ||
453 | int port; | ||
454 | int registered; | ||
455 | int allocated; | ||
456 | int stride; | ||
457 | int rx_csum; | ||
458 | u64 mac; | ||
459 | int mac_index; | ||
460 | unsigned max_mtu; | ||
461 | int base_qpn; | ||
462 | |||
463 | struct mlx4_en_rss_map rss_map; | ||
464 | u16 tx_prio_map[8]; | ||
465 | u32 flags; | ||
466 | #define MLX4_EN_FLAG_PROMISC 0x1 | ||
467 | u32 tx_ring_num; | ||
468 | u32 rx_ring_num; | ||
469 | u32 rx_skb_size; | ||
470 | struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; | ||
471 | u16 num_frags; | ||
472 | u16 log_rx_info; | ||
473 | |||
474 | struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; | ||
475 | struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; | ||
476 | struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; | ||
477 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; | ||
478 | struct work_struct mcast_task; | ||
479 | struct work_struct mac_task; | ||
480 | struct delayed_work refill_task; | ||
481 | struct work_struct watchdog_task; | ||
482 | struct work_struct linkstate_task; | ||
483 | struct delayed_work stats_task; | ||
484 | struct mlx4_en_perf_stats pstats; | ||
485 | struct mlx4_en_pkt_stats pkstats; | ||
486 | struct mlx4_en_port_stats port_stats; | ||
487 | struct dev_mc_list *mc_list; | ||
488 | struct mlx4_en_stat_out_mbox hw_stats; | ||
489 | }; | ||
490 | |||
491 | |||
492 | void mlx4_en_destroy_netdev(struct net_device *dev); | ||
493 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | ||
494 | struct mlx4_en_port_profile *prof); | ||
495 | |||
496 | int mlx4_en_get_profile(struct mlx4_en_dev *mdev); | ||
497 | |||
498 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, | ||
499 | int entries, int ring, enum cq_type mode); | ||
500 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
501 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
502 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
503 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
504 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
505 | |||
506 | void mlx4_en_poll_tx_cq(unsigned long data); | ||
507 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); | ||
508 | int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | ||
509 | |||
510 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, | ||
511 | u32 size, u16 stride); | ||
512 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); | ||
513 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | ||
514 | struct mlx4_en_tx_ring *ring, | ||
515 | int cq, int srqn); | ||
516 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, | ||
517 | struct mlx4_en_tx_ring *ring); | ||
518 | |||
519 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | ||
520 | struct mlx4_en_rx_ring *ring, | ||
521 | u32 size, u16 stride); | ||
522 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, | ||
523 | struct mlx4_en_rx_ring *ring); | ||
524 | int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv); | ||
525 | void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, | ||
526 | struct mlx4_en_rx_ring *ring); | ||
527 | int mlx4_en_process_rx_cq(struct net_device *dev, | ||
528 | struct mlx4_en_cq *cq, | ||
529 | int budget); | ||
530 | int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); | ||
531 | void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, | ||
532 | int is_tx, int rss, int qpn, int cqn, int srqn, | ||
533 | struct mlx4_qp_context *context); | ||
534 | int mlx4_en_map_buffer(struct mlx4_buf *buf); | ||
535 | void mlx4_en_unmap_buffer(struct mlx4_buf *buf); | ||
536 | |||
537 | void mlx4_en_calc_rx_buf(struct net_device *dev); | ||
538 | void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, | ||
539 | struct mlx4_en_rss_map *rss_map, | ||
540 | int num_entries, int num_rings); | ||
541 | void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num); | ||
542 | int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); | ||
543 | void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); | ||
544 | int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); | ||
545 | void mlx4_en_rx_refill(struct work_struct *work); | ||
546 | void mlx4_en_rx_irq(struct mlx4_cq *mcq); | ||
547 | |||
548 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); | ||
549 | int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp); | ||
550 | int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, | ||
551 | u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); | ||
552 | int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | ||
553 | u8 promisc); | ||
554 | |||
555 | int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); | ||
556 | |||
557 | /* | ||
558 | * Globals | ||
559 | */ | ||
560 | extern const struct ethtool_ops mlx4_en_ethtool_ops; | ||
561 | #endif | ||
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index d1dd5b48dbd1..0caf74cae8bc 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) | |||
461 | int err; | 461 | int err; |
462 | 462 | ||
463 | err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, | 463 | err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, |
464 | ~0, dev->caps.reserved_mrws); | 464 | ~0, dev->caps.reserved_mrws, 0); |
465 | if (err) | 465 | if (err) |
466 | return err; | 466 | return err; |
467 | 467 | ||
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c index aa616892d09c..26d1a7a9e375 100644 --- a/drivers/net/mlx4/pd.c +++ b/drivers/net/mlx4/pd.c | |||
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev) | |||
62 | struct mlx4_priv *priv = mlx4_priv(dev); | 62 | struct mlx4_priv *priv = mlx4_priv(dev); |
63 | 63 | ||
64 | return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, | 64 | return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, |
65 | (1 << 24) - 1, dev->caps.reserved_pds); | 65 | (1 << 24) - 1, dev->caps.reserved_pds, 0); |
66 | } | 66 | } |
67 | 67 | ||
68 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev) | 68 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev) |
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev) | |||
100 | 100 | ||
101 | return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, | 101 | return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, |
102 | dev->caps.num_uars, dev->caps.num_uars - 1, | 102 | dev->caps.num_uars, dev->caps.num_uars - 1, |
103 | max(128, dev->caps.reserved_uars)); | 103 | max(128, dev->caps.reserved_uars), 0); |
104 | } | 104 | } |
105 | 105 | ||
106 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev) | 106 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev) |
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c new file mode 100644 index 000000000000..e2fdab42c4ce --- /dev/null +++ b/drivers/net/mlx4/port.c | |||
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/errno.h> | ||
34 | #include <linux/if_ether.h> | ||
35 | |||
36 | #include <linux/mlx4/cmd.h> | ||
37 | |||
38 | #include "mlx4.h" | ||
39 | |||
40 | #define MLX4_MAC_VALID (1ull << 63) | ||
41 | #define MLX4_MAC_MASK 0xffffffffffffULL | ||
42 | |||
43 | #define MLX4_VLAN_VALID (1u << 31) | ||
44 | #define MLX4_VLAN_MASK 0xfff | ||
45 | |||
46 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) | ||
47 | { | ||
48 | int i; | ||
49 | |||
50 | mutex_init(&table->mutex); | ||
51 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | ||
52 | table->entries[i] = 0; | ||
53 | table->refs[i] = 0; | ||
54 | } | ||
55 | table->max = 1 << dev->caps.log_num_macs; | ||
56 | table->total = 0; | ||
57 | } | ||
58 | |||
59 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) | ||
60 | { | ||
61 | int i; | ||
62 | |||
63 | mutex_init(&table->mutex); | ||
64 | for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { | ||
65 | table->entries[i] = 0; | ||
66 | table->refs[i] = 0; | ||
67 | } | ||
68 | table->max = 1 << dev->caps.log_num_vlans; | ||
69 | table->total = 0; | ||
70 | } | ||
71 | |||
72 | static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, | ||
73 | __be64 *entries) | ||
74 | { | ||
75 | struct mlx4_cmd_mailbox *mailbox; | ||
76 | u32 in_mod; | ||
77 | int err; | ||
78 | |||
79 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
80 | if (IS_ERR(mailbox)) | ||
81 | return PTR_ERR(mailbox); | ||
82 | |||
83 | memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); | ||
84 | |||
85 | in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; | ||
86 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
87 | MLX4_CMD_TIME_CLASS_B); | ||
88 | |||
89 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
90 | return err; | ||
91 | } | ||
92 | |||
93 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) | ||
94 | { | ||
95 | struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; | ||
96 | int i, err = 0; | ||
97 | int free = -1; | ||
98 | |||
99 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); | ||
100 | mutex_lock(&table->mutex); | ||
101 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { | ||
102 | if (free < 0 && !table->refs[i]) { | ||
103 | free = i; | ||
104 | continue; | ||
105 | } | ||
106 | |||
107 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | ||
108 | /* MAC already registered, increase refernce count */ | ||
109 | *index = i; | ||
110 | ++table->refs[i]; | ||
111 | goto out; | ||
112 | } | ||
113 | } | ||
114 | mlx4_dbg(dev, "Free MAC index is %d\n", free); | ||
115 | |||
116 | if (table->total == table->max) { | ||
117 | /* No free mac entries */ | ||
118 | err = -ENOSPC; | ||
119 | goto out; | ||
120 | } | ||
121 | |||
122 | /* Register new MAC */ | ||
123 | table->refs[free] = 1; | ||
124 | table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); | ||
125 | |||
126 | err = mlx4_set_port_mac_table(dev, port, table->entries); | ||
127 | if (unlikely(err)) { | ||
128 | mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac); | ||
129 | table->refs[free] = 0; | ||
130 | table->entries[free] = 0; | ||
131 | goto out; | ||
132 | } | ||
133 | |||
134 | *index = free; | ||
135 | ++table->total; | ||
136 | out: | ||
137 | mutex_unlock(&table->mutex); | ||
138 | return err; | ||
139 | } | ||
140 | EXPORT_SYMBOL_GPL(mlx4_register_mac); | ||
141 | |||
142 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) | ||
143 | { | ||
144 | struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; | ||
145 | |||
146 | mutex_lock(&table->mutex); | ||
147 | if (!table->refs[index]) { | ||
148 | mlx4_warn(dev, "No MAC entry for index %d\n", index); | ||
149 | goto out; | ||
150 | } | ||
151 | if (--table->refs[index]) { | ||
152 | mlx4_warn(dev, "Have more references for index %d," | ||
153 | "no need to modify MAC table\n", index); | ||
154 | goto out; | ||
155 | } | ||
156 | table->entries[index] = 0; | ||
157 | mlx4_set_port_mac_table(dev, port, table->entries); | ||
158 | --table->total; | ||
159 | out: | ||
160 | mutex_unlock(&table->mutex); | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(mlx4_unregister_mac); | ||
163 | |||
164 | static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, | ||
165 | __be32 *entries) | ||
166 | { | ||
167 | struct mlx4_cmd_mailbox *mailbox; | ||
168 | u32 in_mod; | ||
169 | int err; | ||
170 | |||
171 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
172 | if (IS_ERR(mailbox)) | ||
173 | return PTR_ERR(mailbox); | ||
174 | |||
175 | memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); | ||
176 | in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; | ||
177 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
178 | MLX4_CMD_TIME_CLASS_B); | ||
179 | |||
180 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
181 | |||
182 | return err; | ||
183 | } | ||
184 | |||
185 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) | ||
186 | { | ||
187 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | ||
188 | int i, err = 0; | ||
189 | int free = -1; | ||
190 | |||
191 | mutex_lock(&table->mutex); | ||
192 | for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { | ||
193 | if (free < 0 && (table->refs[i] == 0)) { | ||
194 | free = i; | ||
195 | continue; | ||
196 | } | ||
197 | |||
198 | if (table->refs[i] && | ||
199 | (vlan == (MLX4_VLAN_MASK & | ||
200 | be32_to_cpu(table->entries[i])))) { | ||
201 | /* Vlan already registered, increase refernce count */ | ||
202 | *index = i; | ||
203 | ++table->refs[i]; | ||
204 | goto out; | ||
205 | } | ||
206 | } | ||
207 | |||
208 | if (table->total == table->max) { | ||
209 | /* No free vlan entries */ | ||
210 | err = -ENOSPC; | ||
211 | goto out; | ||
212 | } | ||
213 | |||
214 | /* Register new MAC */ | ||
215 | table->refs[free] = 1; | ||
216 | table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); | ||
217 | |||
218 | err = mlx4_set_port_vlan_table(dev, port, table->entries); | ||
219 | if (unlikely(err)) { | ||
220 | mlx4_warn(dev, "Failed adding vlan: %u\n", vlan); | ||
221 | table->refs[free] = 0; | ||
222 | table->entries[free] = 0; | ||
223 | goto out; | ||
224 | } | ||
225 | |||
226 | *index = free; | ||
227 | ++table->total; | ||
228 | out: | ||
229 | mutex_unlock(&table->mutex); | ||
230 | return err; | ||
231 | } | ||
232 | EXPORT_SYMBOL_GPL(mlx4_register_vlan); | ||
233 | |||
234 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) | ||
235 | { | ||
236 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | ||
237 | |||
238 | if (index < MLX4_VLAN_REGULAR) { | ||
239 | mlx4_warn(dev, "Trying to free special vlan index %d\n", index); | ||
240 | return; | ||
241 | } | ||
242 | |||
243 | mutex_lock(&table->mutex); | ||
244 | if (!table->refs[index]) { | ||
245 | mlx4_warn(dev, "No vlan entry for index %d\n", index); | ||
246 | goto out; | ||
247 | } | ||
248 | if (--table->refs[index]) { | ||
249 | mlx4_dbg(dev, "Have more references for index %d," | ||
250 | "no need to modify vlan table\n", index); | ||
251 | goto out; | ||
252 | } | ||
253 | table->entries[index] = 0; | ||
254 | mlx4_set_port_vlan_table(dev, port, table->entries); | ||
255 | --table->total; | ||
256 | out: | ||
257 | mutex_unlock(&table->mutex); | ||
258 | } | ||
259 | EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); | ||
260 | |||
261 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | ||
262 | { | ||
263 | struct mlx4_cmd_mailbox *mailbox; | ||
264 | int err; | ||
265 | u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; | ||
266 | |||
267 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
268 | if (IS_ERR(mailbox)) | ||
269 | return PTR_ERR(mailbox); | ||
270 | |||
271 | memset(mailbox->buf, 0, 256); | ||
272 | if (is_eth) { | ||
273 | ((u8 *) mailbox->buf)[3] = 6; | ||
274 | ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15); | ||
275 | ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15); | ||
276 | } | ||
277 | err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT, | ||
278 | MLX4_CMD_TIME_CLASS_B); | ||
279 | |||
280 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
281 | return err; | ||
282 | } | ||
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index c49a86044bf7..1c565ef8d179 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c | |||
@@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
147 | } | 147 | } |
148 | EXPORT_SYMBOL_GPL(mlx4_qp_modify); | 148 | EXPORT_SYMBOL_GPL(mlx4_qp_modify); |
149 | 149 | ||
150 | int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp) | 150 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) |
151 | { | ||
152 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
153 | struct mlx4_qp_table *qp_table = &priv->qp_table; | ||
154 | int qpn; | ||
155 | |||
156 | qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); | ||
157 | if (qpn == -1) | ||
158 | return -ENOMEM; | ||
159 | |||
160 | *base = qpn; | ||
161 | return 0; | ||
162 | } | ||
163 | EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); | ||
164 | |||
165 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | ||
166 | { | ||
167 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
168 | struct mlx4_qp_table *qp_table = &priv->qp_table; | ||
169 | if (base_qpn < dev->caps.sqp_start + 8) | ||
170 | return; | ||
171 | |||
172 | mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); | ||
173 | } | ||
174 | EXPORT_SYMBOL_GPL(mlx4_qp_release_range); | ||
175 | |||
176 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) | ||
151 | { | 177 | { |
152 | struct mlx4_priv *priv = mlx4_priv(dev); | 178 | struct mlx4_priv *priv = mlx4_priv(dev); |
153 | struct mlx4_qp_table *qp_table = &priv->qp_table; | 179 | struct mlx4_qp_table *qp_table = &priv->qp_table; |
154 | int err; | 180 | int err; |
155 | 181 | ||
156 | if (sqpn) | 182 | if (!qpn) |
157 | qp->qpn = sqpn; | 183 | return -EINVAL; |
158 | else { | 184 | |
159 | qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap); | 185 | qp->qpn = qpn; |
160 | if (qp->qpn == -1) | ||
161 | return -ENOMEM; | ||
162 | } | ||
163 | 186 | ||
164 | err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); | 187 | err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); |
165 | if (err) | 188 | if (err) |
@@ -208,9 +231,6 @@ err_put_qp: | |||
208 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); | 231 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); |
209 | 232 | ||
210 | err_out: | 233 | err_out: |
211 | if (!sqpn) | ||
212 | mlx4_bitmap_free(&qp_table->bitmap, qp->qpn); | ||
213 | |||
214 | return err; | 234 | return err; |
215 | } | 235 | } |
216 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | 236 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); |
@@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) | |||
239 | mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); | 259 | mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); |
240 | mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); | 260 | mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); |
241 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); | 261 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); |
242 | |||
243 | if (qp->qpn >= dev->caps.sqp_start + 8) | ||
244 | mlx4_bitmap_free(&qp_table->bitmap, qp->qpn); | ||
245 | } | 262 | } |
246 | EXPORT_SYMBOL_GPL(mlx4_qp_free); | 263 | EXPORT_SYMBOL_GPL(mlx4_qp_free); |
247 | 264 | ||
@@ -255,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) | |||
255 | { | 272 | { |
256 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | 273 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; |
257 | int err; | 274 | int err; |
275 | int reserved_from_top = 0; | ||
258 | 276 | ||
259 | spin_lock_init(&qp_table->lock); | 277 | spin_lock_init(&qp_table->lock); |
260 | INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); | 278 | INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); |
@@ -264,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) | |||
264 | * block of special QPs must be aligned to a multiple of 8, so | 282 | * block of special QPs must be aligned to a multiple of 8, so |
265 | * round up. | 283 | * round up. |
266 | */ | 284 | */ |
267 | dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8); | 285 | dev->caps.sqp_start = |
286 | ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); | ||
287 | |||
288 | { | ||
289 | int sort[MLX4_NUM_QP_REGION]; | ||
290 | int i, j, tmp; | ||
291 | int last_base = dev->caps.num_qps; | ||
292 | |||
293 | for (i = 1; i < MLX4_NUM_QP_REGION; ++i) | ||
294 | sort[i] = i; | ||
295 | |||
296 | for (i = MLX4_NUM_QP_REGION; i > 0; --i) { | ||
297 | for (j = 2; j < i; ++j) { | ||
298 | if (dev->caps.reserved_qps_cnt[sort[j]] > | ||
299 | dev->caps.reserved_qps_cnt[sort[j - 1]]) { | ||
300 | tmp = sort[j]; | ||
301 | sort[j] = sort[j - 1]; | ||
302 | sort[j - 1] = tmp; | ||
303 | } | ||
304 | } | ||
305 | } | ||
306 | |||
307 | for (i = 1; i < MLX4_NUM_QP_REGION; ++i) { | ||
308 | last_base -= dev->caps.reserved_qps_cnt[sort[i]]; | ||
309 | dev->caps.reserved_qps_base[sort[i]] = last_base; | ||
310 | reserved_from_top += | ||
311 | dev->caps.reserved_qps_cnt[sort[i]]; | ||
312 | } | ||
313 | |||
314 | } | ||
315 | |||
268 | err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, | 316 | err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, |
269 | (1 << 24) - 1, dev->caps.sqp_start + 8); | 317 | (1 << 23) - 1, dev->caps.sqp_start + 8, |
318 | reserved_from_top); | ||
270 | if (err) | 319 | if (err) |
271 | return err; | 320 | return err; |
272 | 321 | ||
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c index 533eb6db24b3..fe9f218691f5 100644 --- a/drivers/net/mlx4/srq.c +++ b/drivers/net/mlx4/srq.c | |||
@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev) | |||
245 | INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); | 245 | INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); |
246 | 246 | ||
247 | err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, | 247 | err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, |
248 | dev->caps.num_srqs - 1, dev->caps.reserved_srqs); | 248 | dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); |
249 | if (err) | 249 | if (err) |
250 | return err; | 250 | return err; |
251 | 251 | ||
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c new file mode 100644 index 000000000000..da42aa06a3ba --- /dev/null +++ b/drivers/net/xtsonic.c | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * xtsonic.c | ||
3 | * | ||
4 | * (C) 2001 - 2007 Tensilica Inc. | ||
5 | * Kevin Chea <kchea@yahoo.com> | ||
6 | * Marc Gauthier <marc@linux-xtensa.org> | ||
7 | * Chris Zankel <chris@zankel.net> | ||
8 | * | ||
9 | * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) | ||
10 | * | ||
11 | * This driver is based on work from Andreas Busse, but most of | ||
12 | * the code is rewritten. | ||
13 | * | ||
14 | * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) | ||
15 | * | ||
16 | * A driver for the onboard Sonic ethernet controller on the XT2000. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/fcntl.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/ioport.h> | ||
26 | #include <linux/in.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | #include <linux/platform_device.h> | ||
35 | #include <linux/dma-mapping.h> | ||
36 | |||
37 | #include <asm/io.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/dma.h> | ||
40 | |||
41 | static char xtsonic_string[] = "xtsonic"; | ||
42 | |||
43 | extern unsigned xtboard_nvram_valid(void); | ||
44 | extern void xtboard_get_ether_addr(unsigned char *buf); | ||
45 | |||
46 | #include "sonic.h" | ||
47 | |||
48 | /* | ||
49 | * According to the documentation for the Sonic ethernet controller, | ||
50 | * EOBC should be 760 words (1520 bytes) for 32-bit applications, and, | ||
51 | * as such, 2 words less than the buffer size. The value for RBSIZE | ||
52 | * defined in sonic.h, however is only 1520. | ||
53 | * | ||
54 | * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and | ||
55 | * RBSIZE 1520 bytes) | ||
56 | */ | ||
57 | #undef SONIC_RBSIZE | ||
58 | #define SONIC_RBSIZE 1524 | ||
59 | |||
60 | /* | ||
61 | * The chip provides 256 byte register space. | ||
62 | */ | ||
63 | #define SONIC_MEM_SIZE 0x100 | ||
64 | |||
65 | /* | ||
66 | * Macros to access SONIC registers | ||
67 | */ | ||
68 | #define SONIC_READ(reg) \ | ||
69 | (0xffff & *((volatile unsigned int *)dev->base_addr+reg)) | ||
70 | |||
71 | #define SONIC_WRITE(reg,val) \ | ||
72 | *((volatile unsigned int *)dev->base_addr+reg) = val | ||
73 | |||
74 | |||
75 | /* Use 0 for production, 1 for verification, and >2 for debug */ | ||
76 | #ifdef SONIC_DEBUG | ||
77 | static unsigned int sonic_debug = SONIC_DEBUG; | ||
78 | #else | ||
79 | static unsigned int sonic_debug = 1; | ||
80 | #endif | ||
81 | |||
82 | /* | ||
83 | * We cannot use station (ethernet) address prefixes to detect the | ||
84 | * sonic controller since these are board manufacturer depended. | ||
85 | * So we check for known Silicon Revision IDs instead. | ||
86 | */ | ||
87 | static unsigned short known_revisions[] = | ||
88 | { | ||
89 | 0x101, /* SONIC 83934 */ | ||
90 | 0xffff /* end of list */ | ||
91 | }; | ||
92 | |||
93 | static int xtsonic_open(struct net_device *dev) | ||
94 | { | ||
95 | if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) { | ||
96 | printk(KERN_ERR "%s: unable to get IRQ %d.\n", | ||
97 | dev->name, dev->irq); | ||
98 | return -EAGAIN; | ||
99 | } | ||
100 | return sonic_open(dev); | ||
101 | } | ||
102 | |||
103 | static int xtsonic_close(struct net_device *dev) | ||
104 | { | ||
105 | int err; | ||
106 | err = sonic_close(dev); | ||
107 | free_irq(dev->irq, dev); | ||
108 | return err; | ||
109 | } | ||
110 | |||
111 | static int __init sonic_probe1(struct net_device *dev) | ||
112 | { | ||
113 | static unsigned version_printed = 0; | ||
114 | unsigned int silicon_revision; | ||
115 | struct sonic_local *lp = netdev_priv(dev); | ||
116 | unsigned int base_addr = dev->base_addr; | ||
117 | int i; | ||
118 | int err = 0; | ||
119 | |||
120 | if (!request_mem_region(base_addr, 0x100, xtsonic_string)) | ||
121 | return -EBUSY; | ||
122 | |||
123 | /* | ||
124 | * get the Silicon Revision ID. If this is one of the known | ||
125 | * one assume that we found a SONIC ethernet controller at | ||
126 | * the expected location. | ||
127 | */ | ||
128 | silicon_revision = SONIC_READ(SONIC_SR); | ||
129 | if (sonic_debug > 1) | ||
130 | printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision); | ||
131 | |||
132 | i = 0; | ||
133 | while ((known_revisions[i] != 0xffff) && | ||
134 | (known_revisions[i] != silicon_revision)) | ||
135 | i++; | ||
136 | |||
137 | if (known_revisions[i] == 0xffff) { | ||
138 | printk("SONIC ethernet controller not found (0x%4x)\n", | ||
139 | silicon_revision); | ||
140 | return -ENODEV; | ||
141 | } | ||
142 | |||
143 | if (sonic_debug && version_printed++ == 0) | ||
144 | printk(version); | ||
145 | |||
146 | /* | ||
147 | * Put the sonic into software reset, then retrieve ethernet address. | ||
148 | * Note: we are assuming that the boot-loader has initialized the cam. | ||
149 | */ | ||
150 | SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); | ||
151 | SONIC_WRITE(SONIC_DCR, | ||
152 | SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS); | ||
153 | SONIC_WRITE(SONIC_CEP,0); | ||
154 | SONIC_WRITE(SONIC_IMR,0); | ||
155 | |||
156 | SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); | ||
157 | SONIC_WRITE(SONIC_CEP,0); | ||
158 | |||
159 | for (i=0; i<3; i++) { | ||
160 | unsigned int val = SONIC_READ(SONIC_CAP0-i); | ||
161 | dev->dev_addr[i*2] = val; | ||
162 | dev->dev_addr[i*2+1] = val >> 8; | ||
163 | } | ||
164 | |||
165 | /* Initialize the device structure. */ | ||
166 | |||
167 | lp->dma_bitmode = SONIC_BITMODE32; | ||
168 | |||
169 | /* | ||
170 | * Allocate local private descriptor areas in uncached space. | ||
171 | * The entire structure must be located within the same 64kb segment. | ||
172 | * A simple way to ensure this is to allocate twice the | ||
173 | * size of the structure -- given that the structure is | ||
174 | * much less than 64 kB, at least one of the halves of | ||
175 | * the allocated area will be contained entirely in 64 kB. | ||
176 | * We also allocate extra space for a pointer to allow freeing | ||
177 | * this structure later on (in xtsonic_cleanup_module()). | ||
178 | */ | ||
179 | lp->descriptors = | ||
180 | dma_alloc_coherent(lp->device, | ||
181 | SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), | ||
182 | &lp->descriptors_laddr, GFP_KERNEL); | ||
183 | |||
184 | if (lp->descriptors == NULL) { | ||
185 | printk(KERN_ERR "%s: couldn't alloc DMA memory for " | ||
186 | " descriptors.\n", lp->device->bus_id); | ||
187 | goto out; | ||
188 | } | ||
189 | |||
190 | lp->cda = lp->descriptors; | ||
191 | lp->tda = lp->cda + (SIZEOF_SONIC_CDA | ||
192 | * SONIC_BUS_SCALE(lp->dma_bitmode)); | ||
193 | lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS | ||
194 | * SONIC_BUS_SCALE(lp->dma_bitmode)); | ||
195 | lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS | ||
196 | * SONIC_BUS_SCALE(lp->dma_bitmode)); | ||
197 | |||
198 | /* get the virtual dma address */ | ||
199 | |||
200 | lp->cda_laddr = lp->descriptors_laddr; | ||
201 | lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA | ||
202 | * SONIC_BUS_SCALE(lp->dma_bitmode)); | ||
203 | lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS | ||
204 | * SONIC_BUS_SCALE(lp->dma_bitmode)); | ||
205 | lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS | ||
206 | * SONIC_BUS_SCALE(lp->dma_bitmode)); | ||
207 | |||
208 | dev->open = xtsonic_open; | ||
209 | dev->stop = xtsonic_close; | ||
210 | dev->hard_start_xmit = sonic_send_packet; | ||
211 | dev->get_stats = sonic_get_stats; | ||
212 | dev->set_multicast_list = &sonic_multicast_list; | ||
213 | dev->tx_timeout = sonic_tx_timeout; | ||
214 | dev->watchdog_timeo = TX_TIMEOUT; | ||
215 | |||
216 | /* | ||
217 | * clear tally counter | ||
218 | */ | ||
219 | SONIC_WRITE(SONIC_CRCT,0xffff); | ||
220 | SONIC_WRITE(SONIC_FAET,0xffff); | ||
221 | SONIC_WRITE(SONIC_MPT,0xffff); | ||
222 | |||
223 | return 0; | ||
224 | out: | ||
225 | release_region(dev->base_addr, SONIC_MEM_SIZE); | ||
226 | return err; | ||
227 | } | ||
228 | |||
229 | |||
230 | /* | ||
231 | * Probe for a SONIC ethernet controller on an XT2000 board. | ||
232 | * Actually probing is superfluous but we're paranoid. | ||
233 | */ | ||
234 | |||
235 | int __init xtsonic_probe(struct platform_device *pdev) | ||
236 | { | ||
237 | struct net_device *dev; | ||
238 | struct sonic_local *lp; | ||
239 | struct resource *resmem, *resirq; | ||
240 | int err = 0; | ||
241 | |||
242 | DECLARE_MAC_BUF(mac); | ||
243 | |||
244 | if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL) | ||
245 | return -ENODEV; | ||
246 | |||
247 | if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL) | ||
248 | return -ENODEV; | ||
249 | |||
250 | if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL) | ||
251 | return -ENOMEM; | ||
252 | |||
253 | lp = netdev_priv(dev); | ||
254 | lp->device = &pdev->dev; | ||
255 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
256 | netdev_boot_setup_check(dev); | ||
257 | |||
258 | dev->base_addr = resmem->start; | ||
259 | dev->irq = resirq->start; | ||
260 | |||
261 | if ((err = sonic_probe1(dev))) | ||
262 | goto out; | ||
263 | if ((err = register_netdev(dev))) | ||
264 | goto out1; | ||
265 | |||
266 | printk("%s: SONIC ethernet @%08lx, MAC %s, IRQ %d\n", dev->name, | ||
267 | dev->base_addr, print_mac(mac, dev->dev_addr), dev->irq); | ||
268 | |||
269 | return 0; | ||
270 | |||
271 | out1: | ||
272 | release_region(dev->base_addr, SONIC_MEM_SIZE); | ||
273 | out: | ||
274 | free_netdev(dev); | ||
275 | |||
276 | return err; | ||
277 | } | ||
278 | |||
279 | MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver"); | ||
280 | module_param(sonic_debug, int, 0); | ||
281 | MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)"); | ||
282 | |||
283 | #include "sonic.c" | ||
284 | |||
285 | static int __devexit xtsonic_device_remove (struct platform_device *pdev) | ||
286 | { | ||
287 | struct net_device *dev = platform_get_drvdata(pdev); | ||
288 | struct sonic_local *lp = netdev_priv(dev); | ||
289 | |||
290 | unregister_netdev(dev); | ||
291 | dma_free_coherent(lp->device, | ||
292 | SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), | ||
293 | lp->descriptors, lp->descriptors_laddr); | ||
294 | release_region (dev->base_addr, SONIC_MEM_SIZE); | ||
295 | free_netdev(dev); | ||
296 | |||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static struct platform_driver xtsonic_driver = { | ||
301 | .probe = xtsonic_probe, | ||
302 | .remove = __devexit_p(xtsonic_device_remove), | ||
303 | .driver = { | ||
304 | .name = xtsonic_string, | ||
305 | }, | ||
306 | }; | ||
307 | |||
308 | static int __init xtsonic_init(void) | ||
309 | { | ||
310 | return platform_driver_register(&xtsonic_driver); | ||
311 | } | ||
312 | |||
313 | static void __exit xtsonic_cleanup(void) | ||
314 | { | ||
315 | platform_driver_unregister(&xtsonic_driver); | ||
316 | } | ||
317 | |||
318 | module_init(xtsonic_init); | ||
319 | module_exit(xtsonic_cleanup); | ||
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c index 6a98dc8aa30b..24bbef777c19 100644 --- a/drivers/of/of_i2c.c +++ b/drivers/of/of_i2c.c | |||
@@ -41,7 +41,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap, | |||
41 | 41 | ||
42 | info.addr = *addr; | 42 | info.addr = *addr; |
43 | 43 | ||
44 | request_module(info.type); | 44 | request_module("%s", info.type); |
45 | 45 | ||
46 | result = i2c_new_device(adap, &info); | 46 | result = i2c_new_device(adap, &info); |
47 | if (result == NULL) { | 47 | if (result == NULL) { |
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c index b01eec026f68..bed0ed6dcdc1 100644 --- a/drivers/of/of_spi.c +++ b/drivers/of/of_spi.c | |||
@@ -61,6 +61,8 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) | |||
61 | spi->mode |= SPI_CPHA; | 61 | spi->mode |= SPI_CPHA; |
62 | if (of_find_property(nc, "spi-cpol", NULL)) | 62 | if (of_find_property(nc, "spi-cpol", NULL)) |
63 | spi->mode |= SPI_CPOL; | 63 | spi->mode |= SPI_CPOL; |
64 | if (of_find_property(nc, "spi-cs-high", NULL)) | ||
65 | spi->mode |= SPI_CS_HIGH; | ||
64 | 66 | ||
65 | /* Device speed */ | 67 | /* Device speed */ |
66 | prop = of_get_property(nc, "spi-max-frequency", &len); | 68 | prop = of_get_property(nc, "spi-max-frequency", &len); |
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index ed982273fb8b..37681700b61a 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c | |||
@@ -628,3 +628,27 @@ void sync_buffer(int cpu) | |||
628 | 628 | ||
629 | mutex_unlock(&buffer_mutex); | 629 | mutex_unlock(&buffer_mutex); |
630 | } | 630 | } |
631 | |||
632 | /* The function can be used to add a buffer worth of data directly to | ||
633 | * the kernel buffer. The buffer is assumed to be a circular buffer. | ||
634 | * Take the entries from index start and end at index end, wrapping | ||
635 | * at max_entries. | ||
636 | */ | ||
637 | void oprofile_put_buff(unsigned long *buf, unsigned int start, | ||
638 | unsigned int stop, unsigned int max) | ||
639 | { | ||
640 | int i; | ||
641 | |||
642 | i = start; | ||
643 | |||
644 | mutex_lock(&buffer_mutex); | ||
645 | while (i != stop) { | ||
646 | add_event_entry(buf[i++]); | ||
647 | |||
648 | if (i >= max) | ||
649 | i = 0; | ||
650 | } | ||
651 | |||
652 | mutex_unlock(&buffer_mutex); | ||
653 | } | ||
654 | |||
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index e1bd5a937f6c..7ba39fe20a8a 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -38,13 +38,26 @@ static int work_enabled; | |||
38 | void free_cpu_buffers(void) | 38 | void free_cpu_buffers(void) |
39 | { | 39 | { |
40 | int i; | 40 | int i; |
41 | 41 | ||
42 | for_each_online_cpu(i) { | 42 | for_each_online_cpu(i) { |
43 | vfree(per_cpu(cpu_buffer, i).buffer); | 43 | vfree(per_cpu(cpu_buffer, i).buffer); |
44 | per_cpu(cpu_buffer, i).buffer = NULL; | 44 | per_cpu(cpu_buffer, i).buffer = NULL; |
45 | } | 45 | } |
46 | } | 46 | } |
47 | 47 | ||
48 | unsigned long oprofile_get_cpu_buffer_size(void) | ||
49 | { | ||
50 | return fs_cpu_buffer_size; | ||
51 | } | ||
52 | |||
53 | void oprofile_cpu_buffer_inc_smpl_lost(void) | ||
54 | { | ||
55 | struct oprofile_cpu_buffer *cpu_buf | ||
56 | = &__get_cpu_var(cpu_buffer); | ||
57 | |||
58 | cpu_buf->sample_lost_overflow++; | ||
59 | } | ||
60 | |||
48 | int alloc_cpu_buffers(void) | 61 | int alloc_cpu_buffers(void) |
49 | { | 62 | { |
50 | int i; | 63 | int i; |
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h index 5076ed1ebd8f..84bf324c5771 100644 --- a/drivers/oprofile/event_buffer.h +++ b/drivers/oprofile/event_buffer.h | |||
@@ -17,6 +17,13 @@ int alloc_event_buffer(void); | |||
17 | 17 | ||
18 | void free_event_buffer(void); | 18 | void free_event_buffer(void); |
19 | 19 | ||
20 | /** | ||
21 | * Add data to the event buffer. | ||
22 | * The data passed is free-form, but typically consists of | ||
23 | * file offsets, dcookies, context information, and ESCAPE codes. | ||
24 | */ | ||
25 | void add_event_entry(unsigned long data); | ||
26 | |||
20 | /* wake up the process sleeping on the event file */ | 27 | /* wake up the process sleeping on the event file */ |
21 | void wake_up_buffer_waiter(void); | 28 | void wake_up_buffer_waiter(void); |
22 | 29 | ||
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 8a846adf1dcf..96f3bdf0ec4b 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c | |||
@@ -2791,6 +2791,7 @@ enum parport_pc_pci_cards { | |||
2791 | oxsemi_952, | 2791 | oxsemi_952, |
2792 | oxsemi_954, | 2792 | oxsemi_954, |
2793 | oxsemi_840, | 2793 | oxsemi_840, |
2794 | oxsemi_pcie_pport, | ||
2794 | aks_0100, | 2795 | aks_0100, |
2795 | mobility_pp, | 2796 | mobility_pp, |
2796 | netmos_9705, | 2797 | netmos_9705, |
@@ -2868,6 +2869,7 @@ static struct parport_pc_pci { | |||
2868 | /* oxsemi_952 */ { 1, { { 0, 1 }, } }, | 2869 | /* oxsemi_952 */ { 1, { { 0, 1 }, } }, |
2869 | /* oxsemi_954 */ { 1, { { 0, -1 }, } }, | 2870 | /* oxsemi_954 */ { 1, { { 0, -1 }, } }, |
2870 | /* oxsemi_840 */ { 1, { { 0, 1 }, } }, | 2871 | /* oxsemi_840 */ { 1, { { 0, 1 }, } }, |
2872 | /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } }, | ||
2871 | /* aks_0100 */ { 1, { { 0, -1 }, } }, | 2873 | /* aks_0100 */ { 1, { { 0, -1 }, } }, |
2872 | /* mobility_pp */ { 1, { { 0, 1 }, } }, | 2874 | /* mobility_pp */ { 1, { { 0, 1 }, } }, |
2873 | /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */ | 2875 | /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */ |
@@ -2928,7 +2930,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { | |||
2928 | { 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a }, | 2930 | { 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a }, |
2929 | { 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 }, | 2931 | { 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 }, |
2930 | { 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a }, | 2932 | { 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a }, |
2931 | { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp }, | ||
2932 | { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP, | 2933 | { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP, |
2933 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp }, | 2934 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp }, |
2934 | { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP, | 2935 | { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP, |
@@ -2946,8 +2947,25 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { | |||
2946 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 }, | 2947 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 }, |
2947 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840, | 2948 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840, |
2948 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 }, | 2949 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 }, |
2950 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840, | ||
2951 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, | ||
2952 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840_G, | ||
2953 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, | ||
2954 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0, | ||
2955 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, | ||
2956 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0_G, | ||
2957 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, | ||
2958 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1, | ||
2959 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, | ||
2960 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_G, | ||
2961 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, | ||
2962 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_U, | ||
2963 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, | ||
2964 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU, | ||
2965 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, | ||
2949 | { PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD, | 2966 | { PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD, |
2950 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 }, | 2967 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 }, |
2968 | { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp }, | ||
2951 | /* NetMos communication controllers */ | 2969 | /* NetMos communication controllers */ |
2952 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705, | 2970 | { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705, |
2953 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 }, | 2971 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 }, |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 8b29c307f1a1..691b3adeb870 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -188,8 +188,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | |||
188 | return 0; | 188 | return 0; |
189 | } | 189 | } |
190 | 190 | ||
191 | static int __init | 191 | static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) |
192 | dmar_parse_dev(struct dmar_drhd_unit *dmaru) | ||
193 | { | 192 | { |
194 | struct acpi_dmar_hardware_unit *drhd; | 193 | struct acpi_dmar_hardware_unit *drhd; |
195 | static int include_all; | 194 | static int include_all; |
@@ -277,14 +276,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | |||
277 | drhd = (struct acpi_dmar_hardware_unit *)header; | 276 | drhd = (struct acpi_dmar_hardware_unit *)header; |
278 | printk (KERN_INFO PREFIX | 277 | printk (KERN_INFO PREFIX |
279 | "DRHD (flags: 0x%08x)base: 0x%016Lx\n", | 278 | "DRHD (flags: 0x%08x)base: 0x%016Lx\n", |
280 | drhd->flags, drhd->address); | 279 | drhd->flags, (unsigned long long)drhd->address); |
281 | break; | 280 | break; |
282 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: | 281 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
283 | rmrr = (struct acpi_dmar_reserved_memory *)header; | 282 | rmrr = (struct acpi_dmar_reserved_memory *)header; |
284 | 283 | ||
285 | printk (KERN_INFO PREFIX | 284 | printk (KERN_INFO PREFIX |
286 | "RMRR base: 0x%016Lx end: 0x%016Lx\n", | 285 | "RMRR base: 0x%016Lx end: 0x%016Lx\n", |
287 | rmrr->base_address, rmrr->end_address); | 286 | (unsigned long long)rmrr->base_address, |
287 | (unsigned long long)rmrr->end_address); | ||
288 | break; | 288 | break; |
289 | } | 289 | } |
290 | } | 290 | } |
@@ -328,7 +328,7 @@ parse_dmar_table(void) | |||
328 | if (!dmar) | 328 | if (!dmar) |
329 | return -ENODEV; | 329 | return -ENODEV; |
330 | 330 | ||
331 | if (dmar->width < PAGE_SHIFT_4K - 1) { | 331 | if (dmar->width < PAGE_SHIFT - 1) { |
332 | printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); | 332 | printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); |
333 | return -EINVAL; | 333 | return -EINVAL; |
334 | } | 334 | } |
@@ -460,8 +460,8 @@ void __init detect_intel_iommu(void) | |||
460 | 460 | ||
461 | ret = dmar_table_detect(); | 461 | ret = dmar_table_detect(); |
462 | 462 | ||
463 | #ifdef CONFIG_DMAR | ||
464 | { | 463 | { |
464 | #ifdef CONFIG_INTR_REMAP | ||
465 | struct acpi_table_dmar *dmar; | 465 | struct acpi_table_dmar *dmar; |
466 | /* | 466 | /* |
467 | * for now we will disable dma-remapping when interrupt | 467 | * for now we will disable dma-remapping when interrupt |
@@ -470,29 +470,17 @@ void __init detect_intel_iommu(void) | |||
470 | * is added, we will not need this any more. | 470 | * is added, we will not need this any more. |
471 | */ | 471 | */ |
472 | dmar = (struct acpi_table_dmar *) dmar_tbl; | 472 | dmar = (struct acpi_table_dmar *) dmar_tbl; |
473 | if (ret && cpu_has_x2apic && dmar->flags & 0x1) { | 473 | if (ret && cpu_has_x2apic && dmar->flags & 0x1) |
474 | printk(KERN_INFO | 474 | printk(KERN_INFO |
475 | "Queued invalidation will be enabled to support " | 475 | "Queued invalidation will be enabled to support " |
476 | "x2apic and Intr-remapping.\n"); | 476 | "x2apic and Intr-remapping.\n"); |
477 | printk(KERN_INFO | 477 | #endif |
478 | "Disabling IOMMU detection, because of missing " | 478 | #ifdef CONFIG_DMAR |
479 | "queued invalidation support for IOTLB " | ||
480 | "invalidation\n"); | ||
481 | printk(KERN_INFO | ||
482 | "Use \"nox2apic\", if you want to use Intel " | ||
483 | " IOMMU for DMA-remapping and don't care about " | ||
484 | " x2apic support\n"); | ||
485 | |||
486 | dmar_disabled = 1; | ||
487 | goto end; | ||
488 | } | ||
489 | |||
490 | if (ret && !no_iommu && !iommu_detected && !swiotlb && | 479 | if (ret && !no_iommu && !iommu_detected && !swiotlb && |
491 | !dmar_disabled) | 480 | !dmar_disabled) |
492 | iommu_detected = 1; | 481 | iommu_detected = 1; |
493 | } | ||
494 | end: | ||
495 | #endif | 482 | #endif |
483 | } | ||
496 | dmar_tbl = NULL; | 484 | dmar_tbl = NULL; |
497 | } | 485 | } |
498 | 486 | ||
@@ -510,7 +498,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
510 | 498 | ||
511 | iommu->seq_id = iommu_allocated++; | 499 | iommu->seq_id = iommu_allocated++; |
512 | 500 | ||
513 | iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); | 501 | iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); |
514 | if (!iommu->reg) { | 502 | if (!iommu->reg) { |
515 | printk(KERN_ERR "IOMMU: can't map the region\n"); | 503 | printk(KERN_ERR "IOMMU: can't map the region\n"); |
516 | goto error; | 504 | goto error; |
@@ -521,8 +509,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
521 | /* the registers might be more than one page */ | 509 | /* the registers might be more than one page */ |
522 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), | 510 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), |
523 | cap_max_fault_reg_offset(iommu->cap)); | 511 | cap_max_fault_reg_offset(iommu->cap)); |
524 | map_size = PAGE_ALIGN_4K(map_size); | 512 | map_size = VTD_PAGE_ALIGN(map_size); |
525 | if (map_size > PAGE_SIZE_4K) { | 513 | if (map_size > VTD_PAGE_SIZE) { |
526 | iounmap(iommu->reg); | 514 | iounmap(iommu->reg); |
527 | iommu->reg = ioremap(drhd->reg_base_addr, map_size); | 515 | iommu->reg = ioremap(drhd->reg_base_addr, map_size); |
528 | if (!iommu->reg) { | 516 | if (!iommu->reg) { |
@@ -533,8 +521,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
533 | 521 | ||
534 | ver = readl(iommu->reg + DMAR_VER_REG); | 522 | ver = readl(iommu->reg + DMAR_VER_REG); |
535 | pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", | 523 | pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", |
536 | drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), | 524 | (unsigned long long)drhd->reg_base_addr, |
537 | iommu->cap, iommu->ecap); | 525 | DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), |
526 | (unsigned long long)iommu->cap, | ||
527 | (unsigned long long)iommu->ecap); | ||
538 | 528 | ||
539 | spin_lock_init(&iommu->register_lock); | 529 | spin_lock_init(&iommu->register_lock); |
540 | 530 | ||
@@ -587,11 +577,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) | |||
587 | 577 | ||
588 | hw = qi->desc; | 578 | hw = qi->desc; |
589 | 579 | ||
590 | spin_lock(&qi->q_lock); | 580 | spin_lock_irqsave(&qi->q_lock, flags); |
591 | while (qi->free_cnt < 3) { | 581 | while (qi->free_cnt < 3) { |
592 | spin_unlock(&qi->q_lock); | 582 | spin_unlock_irqrestore(&qi->q_lock, flags); |
593 | cpu_relax(); | 583 | cpu_relax(); |
594 | spin_lock(&qi->q_lock); | 584 | spin_lock_irqsave(&qi->q_lock, flags); |
595 | } | 585 | } |
596 | 586 | ||
597 | index = qi->free_head; | 587 | index = qi->free_head; |
@@ -612,15 +602,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) | |||
612 | qi->free_head = (qi->free_head + 2) % QI_LENGTH; | 602 | qi->free_head = (qi->free_head + 2) % QI_LENGTH; |
613 | qi->free_cnt -= 2; | 603 | qi->free_cnt -= 2; |
614 | 604 | ||
615 | spin_lock_irqsave(&iommu->register_lock, flags); | 605 | spin_lock(&iommu->register_lock); |
616 | /* | 606 | /* |
617 | * update the HW tail register indicating the presence of | 607 | * update the HW tail register indicating the presence of |
618 | * new descriptors. | 608 | * new descriptors. |
619 | */ | 609 | */ |
620 | writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); | 610 | writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); |
621 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 611 | spin_unlock(&iommu->register_lock); |
622 | 612 | ||
623 | while (qi->desc_status[wait_index] != QI_DONE) { | 613 | while (qi->desc_status[wait_index] != QI_DONE) { |
614 | /* | ||
615 | * We will leave the interrupts disabled, to prevent interrupt | ||
616 | * context to queue another cmd while a cmd is already submitted | ||
617 | * and waiting for completion on this cpu. This is to avoid | ||
618 | * a deadlock where the interrupt context can wait indefinitely | ||
619 | * for free slots in the queue. | ||
620 | */ | ||
624 | spin_unlock(&qi->q_lock); | 621 | spin_unlock(&qi->q_lock); |
625 | cpu_relax(); | 622 | cpu_relax(); |
626 | spin_lock(&qi->q_lock); | 623 | spin_lock(&qi->q_lock); |
@@ -629,7 +626,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) | |||
629 | qi->desc_status[index] = QI_DONE; | 626 | qi->desc_status[index] = QI_DONE; |
630 | 627 | ||
631 | reclaim_free_desc(qi); | 628 | reclaim_free_desc(qi); |
632 | spin_unlock(&qi->q_lock); | 629 | spin_unlock_irqrestore(&qi->q_lock, flags); |
633 | } | 630 | } |
634 | 631 | ||
635 | /* | 632 | /* |
@@ -645,6 +642,62 @@ void qi_global_iec(struct intel_iommu *iommu) | |||
645 | qi_submit_sync(&desc, iommu); | 642 | qi_submit_sync(&desc, iommu); |
646 | } | 643 | } |
647 | 644 | ||
645 | int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, | ||
646 | u64 type, int non_present_entry_flush) | ||
647 | { | ||
648 | |||
649 | struct qi_desc desc; | ||
650 | |||
651 | if (non_present_entry_flush) { | ||
652 | if (!cap_caching_mode(iommu->cap)) | ||
653 | return 1; | ||
654 | else | ||
655 | did = 0; | ||
656 | } | ||
657 | |||
658 | desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) | ||
659 | | QI_CC_GRAN(type) | QI_CC_TYPE; | ||
660 | desc.high = 0; | ||
661 | |||
662 | qi_submit_sync(&desc, iommu); | ||
663 | |||
664 | return 0; | ||
665 | |||
666 | } | ||
667 | |||
668 | int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | ||
669 | unsigned int size_order, u64 type, | ||
670 | int non_present_entry_flush) | ||
671 | { | ||
672 | u8 dw = 0, dr = 0; | ||
673 | |||
674 | struct qi_desc desc; | ||
675 | int ih = 0; | ||
676 | |||
677 | if (non_present_entry_flush) { | ||
678 | if (!cap_caching_mode(iommu->cap)) | ||
679 | return 1; | ||
680 | else | ||
681 | did = 0; | ||
682 | } | ||
683 | |||
684 | if (cap_write_drain(iommu->cap)) | ||
685 | dw = 1; | ||
686 | |||
687 | if (cap_read_drain(iommu->cap)) | ||
688 | dr = 1; | ||
689 | |||
690 | desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) | ||
691 | | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; | ||
692 | desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) | ||
693 | | QI_IOTLB_AM(size_order); | ||
694 | |||
695 | qi_submit_sync(&desc, iommu); | ||
696 | |||
697 | return 0; | ||
698 | |||
699 | } | ||
700 | |||
648 | /* | 701 | /* |
649 | * Enable Queued Invalidation interface. This is a must to support | 702 | * Enable Queued Invalidation interface. This is a must to support |
650 | * interrupt-remapping. Also used by DMA-remapping, which replaces | 703 | * interrupt-remapping. Also used by DMA-remapping, which replaces |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 8b51e10b7783..a2692724b68f 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * Author: Ashok Raj <ashok.raj@intel.com> | 18 | * Author: Ashok Raj <ashok.raj@intel.com> |
19 | * Author: Shaohua Li <shaohua.li@intel.com> | 19 | * Author: Shaohua Li <shaohua.li@intel.com> |
20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | 20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
21 | * Author: Fenghua Yu <fenghua.yu@intel.com> | ||
21 | */ | 22 | */ |
22 | 23 | ||
23 | #include <linux/init.h> | 24 | #include <linux/init.h> |
@@ -35,11 +36,13 @@ | |||
35 | #include <linux/timer.h> | 36 | #include <linux/timer.h> |
36 | #include <linux/iova.h> | 37 | #include <linux/iova.h> |
37 | #include <linux/intel-iommu.h> | 38 | #include <linux/intel-iommu.h> |
38 | #include <asm/proto.h> /* force_iommu in this header in x86-64*/ | ||
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/iommu.h> | 40 | #include <asm/iommu.h> |
41 | #include "pci.h" | 41 | #include "pci.h" |
42 | 42 | ||
43 | #define ROOT_SIZE VTD_PAGE_SIZE | ||
44 | #define CONTEXT_SIZE VTD_PAGE_SIZE | ||
45 | |||
43 | #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) | 46 | #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) |
44 | #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) | 47 | #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) |
45 | 48 | ||
@@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, | |||
199 | spin_unlock_irqrestore(&iommu->lock, flags); | 202 | spin_unlock_irqrestore(&iommu->lock, flags); |
200 | return NULL; | 203 | return NULL; |
201 | } | 204 | } |
202 | __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); | 205 | __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); |
203 | phy_addr = virt_to_phys((void *)context); | 206 | phy_addr = virt_to_phys((void *)context); |
204 | set_root_value(root, phy_addr); | 207 | set_root_value(root, phy_addr); |
205 | set_root_present(root); | 208 | set_root_present(root); |
@@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) | |||
345 | return NULL; | 348 | return NULL; |
346 | } | 349 | } |
347 | __iommu_flush_cache(domain->iommu, tmp_page, | 350 | __iommu_flush_cache(domain->iommu, tmp_page, |
348 | PAGE_SIZE_4K); | 351 | PAGE_SIZE); |
349 | dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); | 352 | dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); |
350 | /* | 353 | /* |
351 | * high level table always sets r/w, last level page | 354 | * high level table always sets r/w, last level page |
@@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) | |||
408 | start &= (((u64)1) << addr_width) - 1; | 411 | start &= (((u64)1) << addr_width) - 1; |
409 | end &= (((u64)1) << addr_width) - 1; | 412 | end &= (((u64)1) << addr_width) - 1; |
410 | /* in case it's partial page */ | 413 | /* in case it's partial page */ |
411 | start = PAGE_ALIGN_4K(start); | 414 | start = PAGE_ALIGN(start); |
412 | end &= PAGE_MASK_4K; | 415 | end &= PAGE_MASK; |
413 | 416 | ||
414 | /* we don't need lock here, nobody else touches the iova range */ | 417 | /* we don't need lock here, nobody else touches the iova range */ |
415 | while (start < end) { | 418 | while (start < end) { |
416 | dma_pte_clear_one(domain, start); | 419 | dma_pte_clear_one(domain, start); |
417 | start += PAGE_SIZE_4K; | 420 | start += VTD_PAGE_SIZE; |
418 | } | 421 | } |
419 | } | 422 | } |
420 | 423 | ||
@@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) | |||
468 | if (!root) | 471 | if (!root) |
469 | return -ENOMEM; | 472 | return -ENOMEM; |
470 | 473 | ||
471 | __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); | 474 | __iommu_flush_cache(iommu, root, ROOT_SIZE); |
472 | 475 | ||
473 | spin_lock_irqsave(&iommu->lock, flags); | 476 | spin_lock_irqsave(&iommu->lock, flags); |
474 | iommu->root_entry = root; | 477 | iommu->root_entry = root; |
@@ -567,27 +570,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu, | |||
567 | return 0; | 570 | return 0; |
568 | } | 571 | } |
569 | 572 | ||
570 | static int inline iommu_flush_context_global(struct intel_iommu *iommu, | ||
571 | int non_present_entry_flush) | ||
572 | { | ||
573 | return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, | ||
574 | non_present_entry_flush); | ||
575 | } | ||
576 | |||
577 | static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did, | ||
578 | int non_present_entry_flush) | ||
579 | { | ||
580 | return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, | ||
581 | non_present_entry_flush); | ||
582 | } | ||
583 | |||
584 | static int inline iommu_flush_context_device(struct intel_iommu *iommu, | ||
585 | u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush) | ||
586 | { | ||
587 | return __iommu_flush_context(iommu, did, source_id, function_mask, | ||
588 | DMA_CCMD_DEVICE_INVL, non_present_entry_flush); | ||
589 | } | ||
590 | |||
591 | /* return value determine if we need a write buffer flush */ | 573 | /* return value determine if we need a write buffer flush */ |
592 | static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | 574 | static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, |
593 | u64 addr, unsigned int size_order, u64 type, | 575 | u64 addr, unsigned int size_order, u64 type, |
@@ -655,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | |||
655 | printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); | 637 | printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); |
656 | if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) | 638 | if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) |
657 | pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", | 639 | pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", |
658 | DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); | 640 | (unsigned long long)DMA_TLB_IIRG(type), |
641 | (unsigned long long)DMA_TLB_IAIG(val)); | ||
659 | /* flush iotlb entry will implicitly flush write buffer */ | 642 | /* flush iotlb entry will implicitly flush write buffer */ |
660 | return 0; | 643 | return 0; |
661 | } | 644 | } |
662 | 645 | ||
663 | static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu, | ||
664 | int non_present_entry_flush) | ||
665 | { | ||
666 | return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, | ||
667 | non_present_entry_flush); | ||
668 | } | ||
669 | |||
670 | static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did, | ||
671 | int non_present_entry_flush) | ||
672 | { | ||
673 | return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, | ||
674 | non_present_entry_flush); | ||
675 | } | ||
676 | |||
677 | static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | 646 | static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, |
678 | u64 addr, unsigned int pages, int non_present_entry_flush) | 647 | u64 addr, unsigned int pages, int non_present_entry_flush) |
679 | { | 648 | { |
680 | unsigned int mask; | 649 | unsigned int mask; |
681 | 650 | ||
682 | BUG_ON(addr & (~PAGE_MASK_4K)); | 651 | BUG_ON(addr & (~VTD_PAGE_MASK)); |
683 | BUG_ON(pages == 0); | 652 | BUG_ON(pages == 0); |
684 | 653 | ||
685 | /* Fallback to domain selective flush if no PSI support */ | 654 | /* Fallback to domain selective flush if no PSI support */ |
686 | if (!cap_pgsel_inv(iommu->cap)) | 655 | if (!cap_pgsel_inv(iommu->cap)) |
687 | return iommu_flush_iotlb_dsi(iommu, did, | 656 | return iommu->flush.flush_iotlb(iommu, did, 0, 0, |
688 | non_present_entry_flush); | 657 | DMA_TLB_DSI_FLUSH, |
658 | non_present_entry_flush); | ||
689 | 659 | ||
690 | /* | 660 | /* |
691 | * PSI requires page size to be 2 ^ x, and the base address is naturally | 661 | * PSI requires page size to be 2 ^ x, and the base address is naturally |
@@ -694,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | |||
694 | mask = ilog2(__roundup_pow_of_two(pages)); | 664 | mask = ilog2(__roundup_pow_of_two(pages)); |
695 | /* Fallback to domain selective flush if size is too big */ | 665 | /* Fallback to domain selective flush if size is too big */ |
696 | if (mask > cap_max_amask_val(iommu->cap)) | 666 | if (mask > cap_max_amask_val(iommu->cap)) |
697 | return iommu_flush_iotlb_dsi(iommu, did, | 667 | return iommu->flush.flush_iotlb(iommu, did, 0, 0, |
698 | non_present_entry_flush); | 668 | DMA_TLB_DSI_FLUSH, non_present_entry_flush); |
699 | 669 | ||
700 | return __iommu_flush_iotlb(iommu, did, addr, mask, | 670 | return iommu->flush.flush_iotlb(iommu, did, addr, mask, |
701 | DMA_TLB_PSI_FLUSH, non_present_entry_flush); | 671 | DMA_TLB_PSI_FLUSH, |
672 | non_present_entry_flush); | ||
702 | } | 673 | } |
703 | 674 | ||
704 | static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) | 675 | static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) |
@@ -831,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg) | |||
831 | } | 802 | } |
832 | 803 | ||
833 | static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, | 804 | static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, |
834 | u8 fault_reason, u16 source_id, u64 addr) | 805 | u8 fault_reason, u16 source_id, unsigned long long addr) |
835 | { | 806 | { |
836 | const char *reason; | 807 | const char *reason; |
837 | 808 | ||
@@ -1084,9 +1055,9 @@ static void dmar_init_reserved_ranges(void) | |||
1084 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) | 1055 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) |
1085 | continue; | 1056 | continue; |
1086 | addr = r->start; | 1057 | addr = r->start; |
1087 | addr &= PAGE_MASK_4K; | 1058 | addr &= PAGE_MASK; |
1088 | size = r->end - addr; | 1059 | size = r->end - addr; |
1089 | size = PAGE_ALIGN_4K(size); | 1060 | size = PAGE_ALIGN(size); |
1090 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), | 1061 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), |
1091 | IOVA_PFN(size + addr) - 1); | 1062 | IOVA_PFN(size + addr) - 1); |
1092 | if (!iova) | 1063 | if (!iova) |
@@ -1148,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1148 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); | 1119 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); |
1149 | if (!domain->pgd) | 1120 | if (!domain->pgd) |
1150 | return -ENOMEM; | 1121 | return -ENOMEM; |
1151 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K); | 1122 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); |
1152 | return 0; | 1123 | return 0; |
1153 | } | 1124 | } |
1154 | 1125 | ||
@@ -1164,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain) | |||
1164 | /* destroy iovas */ | 1135 | /* destroy iovas */ |
1165 | put_iova_domain(&domain->iovad); | 1136 | put_iova_domain(&domain->iovad); |
1166 | end = DOMAIN_MAX_ADDR(domain->gaw); | 1137 | end = DOMAIN_MAX_ADDR(domain->gaw); |
1167 | end = end & (~PAGE_MASK_4K); | 1138 | end = end & (~PAGE_MASK); |
1168 | 1139 | ||
1169 | /* clear ptes */ | 1140 | /* clear ptes */ |
1170 | dma_pte_clear_range(domain, 0, end); | 1141 | dma_pte_clear_range(domain, 0, end); |
@@ -1204,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
1204 | __iommu_flush_cache(iommu, context, sizeof(*context)); | 1175 | __iommu_flush_cache(iommu, context, sizeof(*context)); |
1205 | 1176 | ||
1206 | /* it's a non-present to present mapping */ | 1177 | /* it's a non-present to present mapping */ |
1207 | if (iommu_flush_context_device(iommu, domain->id, | 1178 | if (iommu->flush.flush_context(iommu, domain->id, |
1208 | (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) | 1179 | (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, |
1180 | DMA_CCMD_DEVICE_INVL, 1)) | ||
1209 | iommu_flush_write_buffer(iommu); | 1181 | iommu_flush_write_buffer(iommu); |
1210 | else | 1182 | else |
1211 | iommu_flush_iotlb_dsi(iommu, 0, 0); | 1183 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); |
1184 | |||
1212 | spin_unlock_irqrestore(&iommu->lock, flags); | 1185 | spin_unlock_irqrestore(&iommu->lock, flags); |
1213 | return 0; | 1186 | return 0; |
1214 | } | 1187 | } |
@@ -1283,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | |||
1283 | u64 start_pfn, end_pfn; | 1256 | u64 start_pfn, end_pfn; |
1284 | struct dma_pte *pte; | 1257 | struct dma_pte *pte; |
1285 | int index; | 1258 | int index; |
1259 | int addr_width = agaw_to_width(domain->agaw); | ||
1260 | |||
1261 | hpa &= (((u64)1) << addr_width) - 1; | ||
1286 | 1262 | ||
1287 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) | 1263 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) |
1288 | return -EINVAL; | 1264 | return -EINVAL; |
1289 | iova &= PAGE_MASK_4K; | 1265 | iova &= PAGE_MASK; |
1290 | start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; | 1266 | start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; |
1291 | end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; | 1267 | end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; |
1292 | index = 0; | 1268 | index = 0; |
1293 | while (start_pfn < end_pfn) { | 1269 | while (start_pfn < end_pfn) { |
1294 | pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); | 1270 | pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); |
1295 | if (!pte) | 1271 | if (!pte) |
1296 | return -ENOMEM; | 1272 | return -ENOMEM; |
1297 | /* We don't need lock here, nobody else | 1273 | /* We don't need lock here, nobody else |
1298 | * touches the iova range | 1274 | * touches the iova range |
1299 | */ | 1275 | */ |
1300 | BUG_ON(dma_pte_addr(*pte)); | 1276 | BUG_ON(dma_pte_addr(*pte)); |
1301 | dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); | 1277 | dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); |
1302 | dma_set_pte_prot(*pte, prot); | 1278 | dma_set_pte_prot(*pte, prot); |
1303 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); | 1279 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); |
1304 | start_pfn++; | 1280 | start_pfn++; |
@@ -1310,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | |||
1310 | static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | 1286 | static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) |
1311 | { | 1287 | { |
1312 | clear_context_table(domain->iommu, bus, devfn); | 1288 | clear_context_table(domain->iommu, bus, devfn); |
1313 | iommu_flush_context_global(domain->iommu, 0); | 1289 | domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0, |
1314 | iommu_flush_iotlb_global(domain->iommu, 0); | 1290 | DMA_CCMD_GLOBAL_INVL, 0); |
1291 | domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, | ||
1292 | DMA_TLB_GLOBAL_FLUSH, 0); | ||
1315 | } | 1293 | } |
1316 | 1294 | ||
1317 | static void domain_remove_dev_info(struct dmar_domain *domain) | 1295 | static void domain_remove_dev_info(struct dmar_domain *domain) |
@@ -1474,11 +1452,13 @@ error: | |||
1474 | return find_domain(pdev); | 1452 | return find_domain(pdev); |
1475 | } | 1453 | } |
1476 | 1454 | ||
1477 | static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) | 1455 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
1456 | unsigned long long start, | ||
1457 | unsigned long long end) | ||
1478 | { | 1458 | { |
1479 | struct dmar_domain *domain; | 1459 | struct dmar_domain *domain; |
1480 | unsigned long size; | 1460 | unsigned long size; |
1481 | u64 base; | 1461 | unsigned long long base; |
1482 | int ret; | 1462 | int ret; |
1483 | 1463 | ||
1484 | printk(KERN_INFO | 1464 | printk(KERN_INFO |
@@ -1490,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) | |||
1490 | return -ENOMEM; | 1470 | return -ENOMEM; |
1491 | 1471 | ||
1492 | /* The address might not be aligned */ | 1472 | /* The address might not be aligned */ |
1493 | base = start & PAGE_MASK_4K; | 1473 | base = start & PAGE_MASK; |
1494 | size = end - base; | 1474 | size = end - base; |
1495 | size = PAGE_ALIGN_4K(size); | 1475 | size = PAGE_ALIGN(size); |
1496 | if (!reserve_iova(&domain->iovad, IOVA_PFN(base), | 1476 | if (!reserve_iova(&domain->iovad, IOVA_PFN(base), |
1497 | IOVA_PFN(base + size) - 1)) { | 1477 | IOVA_PFN(base + size) - 1)) { |
1498 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); | 1478 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); |
@@ -1662,6 +1642,28 @@ int __init init_dmars(void) | |||
1662 | } | 1642 | } |
1663 | } | 1643 | } |
1664 | 1644 | ||
1645 | for_each_drhd_unit(drhd) { | ||
1646 | if (drhd->ignored) | ||
1647 | continue; | ||
1648 | |||
1649 | iommu = drhd->iommu; | ||
1650 | if (dmar_enable_qi(iommu)) { | ||
1651 | /* | ||
1652 | * Queued Invalidate not enabled, use Register Based | ||
1653 | * Invalidate | ||
1654 | */ | ||
1655 | iommu->flush.flush_context = __iommu_flush_context; | ||
1656 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; | ||
1657 | printk(KERN_INFO "IOMMU 0x%Lx: using Register based " | ||
1658 | "invalidation\n", drhd->reg_base_addr); | ||
1659 | } else { | ||
1660 | iommu->flush.flush_context = qi_flush_context; | ||
1661 | iommu->flush.flush_iotlb = qi_flush_iotlb; | ||
1662 | printk(KERN_INFO "IOMMU 0x%Lx: using Queued " | ||
1663 | "invalidation\n", drhd->reg_base_addr); | ||
1664 | } | ||
1665 | } | ||
1666 | |||
1665 | /* | 1667 | /* |
1666 | * For each rmrr | 1668 | * For each rmrr |
1667 | * for each dev attached to rmrr | 1669 | * for each dev attached to rmrr |
@@ -1714,9 +1716,10 @@ int __init init_dmars(void) | |||
1714 | 1716 | ||
1715 | iommu_set_root_entry(iommu); | 1717 | iommu_set_root_entry(iommu); |
1716 | 1718 | ||
1717 | iommu_flush_context_global(iommu, 0); | 1719 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, |
1718 | iommu_flush_iotlb_global(iommu, 0); | 1720 | 0); |
1719 | 1721 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, | |
1722 | 0); | ||
1720 | iommu_disable_protect_mem_regions(iommu); | 1723 | iommu_disable_protect_mem_regions(iommu); |
1721 | 1724 | ||
1722 | ret = iommu_enable_translation(iommu); | 1725 | ret = iommu_enable_translation(iommu); |
@@ -1738,8 +1741,8 @@ error: | |||
1738 | static inline u64 aligned_size(u64 host_addr, size_t size) | 1741 | static inline u64 aligned_size(u64 host_addr, size_t size) |
1739 | { | 1742 | { |
1740 | u64 addr; | 1743 | u64 addr; |
1741 | addr = (host_addr & (~PAGE_MASK_4K)) + size; | 1744 | addr = (host_addr & (~PAGE_MASK)) + size; |
1742 | return PAGE_ALIGN_4K(addr); | 1745 | return PAGE_ALIGN(addr); |
1743 | } | 1746 | } |
1744 | 1747 | ||
1745 | struct iova * | 1748 | struct iova * |
@@ -1753,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) | |||
1753 | return NULL; | 1756 | return NULL; |
1754 | 1757 | ||
1755 | piova = alloc_iova(&domain->iovad, | 1758 | piova = alloc_iova(&domain->iovad, |
1756 | size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); | 1759 | size >> PAGE_SHIFT, IOVA_PFN(end), 1); |
1757 | return piova; | 1760 | return piova; |
1758 | } | 1761 | } |
1759 | 1762 | ||
1760 | static struct iova * | 1763 | static struct iova * |
1761 | __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, | 1764 | __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, |
1762 | size_t size) | 1765 | size_t size, u64 dma_mask) |
1763 | { | 1766 | { |
1764 | struct pci_dev *pdev = to_pci_dev(dev); | 1767 | struct pci_dev *pdev = to_pci_dev(dev); |
1765 | struct iova *iova = NULL; | 1768 | struct iova *iova = NULL; |
1766 | 1769 | ||
1767 | if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { | 1770 | if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac) |
1768 | iova = iommu_alloc_iova(domain, size, pdev->dma_mask); | 1771 | iova = iommu_alloc_iova(domain, size, dma_mask); |
1769 | } else { | 1772 | else { |
1770 | /* | 1773 | /* |
1771 | * First try to allocate an io virtual address in | 1774 | * First try to allocate an io virtual address in |
1772 | * DMA_32BIT_MASK and if that fails then try allocating | 1775 | * DMA_32BIT_MASK and if that fails then try allocating |
@@ -1774,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, | |||
1774 | */ | 1777 | */ |
1775 | iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); | 1778 | iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); |
1776 | if (!iova) | 1779 | if (!iova) |
1777 | iova = iommu_alloc_iova(domain, size, pdev->dma_mask); | 1780 | iova = iommu_alloc_iova(domain, size, dma_mask); |
1778 | } | 1781 | } |
1779 | 1782 | ||
1780 | if (!iova) { | 1783 | if (!iova) { |
@@ -1813,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
1813 | return domain; | 1816 | return domain; |
1814 | } | 1817 | } |
1815 | 1818 | ||
1816 | static dma_addr_t | 1819 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, |
1817 | intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | 1820 | size_t size, int dir, u64 dma_mask) |
1818 | { | 1821 | { |
1819 | struct pci_dev *pdev = to_pci_dev(hwdev); | 1822 | struct pci_dev *pdev = to_pci_dev(hwdev); |
1820 | struct dmar_domain *domain; | 1823 | struct dmar_domain *domain; |
1821 | unsigned long start_paddr; | 1824 | phys_addr_t start_paddr; |
1822 | struct iova *iova; | 1825 | struct iova *iova; |
1823 | int prot = 0; | 1826 | int prot = 0; |
1824 | int ret; | 1827 | int ret; |
@@ -1833,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | |||
1833 | 1836 | ||
1834 | size = aligned_size((u64)paddr, size); | 1837 | size = aligned_size((u64)paddr, size); |
1835 | 1838 | ||
1836 | iova = __intel_alloc_iova(hwdev, domain, size); | 1839 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
1837 | if (!iova) | 1840 | if (!iova) |
1838 | goto error; | 1841 | goto error; |
1839 | 1842 | ||
1840 | start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; | 1843 | start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; |
1841 | 1844 | ||
1842 | /* | 1845 | /* |
1843 | * Check if DMAR supports zero-length reads on write only | 1846 | * Check if DMAR supports zero-length reads on write only |
@@ -1855,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | |||
1855 | * is not a big problem | 1858 | * is not a big problem |
1856 | */ | 1859 | */ |
1857 | ret = domain_page_mapping(domain, start_paddr, | 1860 | ret = domain_page_mapping(domain, start_paddr, |
1858 | ((u64)paddr) & PAGE_MASK_4K, size, prot); | 1861 | ((u64)paddr) & PAGE_MASK, size, prot); |
1859 | if (ret) | 1862 | if (ret) |
1860 | goto error; | 1863 | goto error; |
1861 | 1864 | ||
1862 | pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n", | ||
1863 | pci_name(pdev), size, (u64)paddr, | ||
1864 | size, (u64)start_paddr, dir); | ||
1865 | |||
1866 | /* it's a non-present to present mapping */ | 1865 | /* it's a non-present to present mapping */ |
1867 | ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, | 1866 | ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, |
1868 | start_paddr, size >> PAGE_SHIFT_4K, 1); | 1867 | start_paddr, size >> VTD_PAGE_SHIFT, 1); |
1869 | if (ret) | 1868 | if (ret) |
1870 | iommu_flush_write_buffer(domain->iommu); | 1869 | iommu_flush_write_buffer(domain->iommu); |
1871 | 1870 | ||
1872 | return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); | 1871 | return start_paddr + ((u64)paddr & (~PAGE_MASK)); |
1873 | 1872 | ||
1874 | error: | 1873 | error: |
1875 | if (iova) | 1874 | if (iova) |
1876 | __free_iova(&domain->iovad, iova); | 1875 | __free_iova(&domain->iovad, iova); |
1877 | printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", | 1876 | printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", |
1878 | pci_name(pdev), size, (u64)paddr, dir); | 1877 | pci_name(pdev), size, (unsigned long long)paddr, dir); |
1879 | return 0; | 1878 | return 0; |
1880 | } | 1879 | } |
1881 | 1880 | ||
1881 | dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, | ||
1882 | size_t size, int dir) | ||
1883 | { | ||
1884 | return __intel_map_single(hwdev, paddr, size, dir, | ||
1885 | to_pci_dev(hwdev)->dma_mask); | ||
1886 | } | ||
1887 | |||
1882 | static void flush_unmaps(void) | 1888 | static void flush_unmaps(void) |
1883 | { | 1889 | { |
1884 | int i, j; | 1890 | int i, j; |
@@ -1891,7 +1897,8 @@ static void flush_unmaps(void) | |||
1891 | struct intel_iommu *iommu = | 1897 | struct intel_iommu *iommu = |
1892 | deferred_flush[i].domain[0]->iommu; | 1898 | deferred_flush[i].domain[0]->iommu; |
1893 | 1899 | ||
1894 | iommu_flush_iotlb_global(iommu, 0); | 1900 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
1901 | DMA_TLB_GLOBAL_FLUSH, 0); | ||
1895 | for (j = 0; j < deferred_flush[i].next; j++) { | 1902 | for (j = 0; j < deferred_flush[i].next; j++) { |
1896 | __free_iova(&deferred_flush[i].domain[j]->iovad, | 1903 | __free_iova(&deferred_flush[i].domain[j]->iovad, |
1897 | deferred_flush[i].iova[j]); | 1904 | deferred_flush[i].iova[j]); |
@@ -1936,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
1936 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 1943 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
1937 | } | 1944 | } |
1938 | 1945 | ||
1939 | static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | 1946 | void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, |
1940 | size_t size, int dir) | 1947 | int dir) |
1941 | { | 1948 | { |
1942 | struct pci_dev *pdev = to_pci_dev(dev); | 1949 | struct pci_dev *pdev = to_pci_dev(dev); |
1943 | struct dmar_domain *domain; | 1950 | struct dmar_domain *domain; |
@@ -1953,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
1953 | if (!iova) | 1960 | if (!iova) |
1954 | return; | 1961 | return; |
1955 | 1962 | ||
1956 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 1963 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
1957 | size = aligned_size((u64)dev_addr, size); | 1964 | size = aligned_size((u64)dev_addr, size); |
1958 | 1965 | ||
1959 | pr_debug("Device %s unmapping: %lx@%llx\n", | 1966 | pr_debug("Device %s unmapping: %lx@%llx\n", |
1960 | pci_name(pdev), size, (u64)start_addr); | 1967 | pci_name(pdev), size, (unsigned long long)start_addr); |
1961 | 1968 | ||
1962 | /* clear the whole page */ | 1969 | /* clear the whole page */ |
1963 | dma_pte_clear_range(domain, start_addr, start_addr + size); | 1970 | dma_pte_clear_range(domain, start_addr, start_addr + size); |
@@ -1965,7 +1972,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
1965 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 1972 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
1966 | if (intel_iommu_strict) { | 1973 | if (intel_iommu_strict) { |
1967 | if (iommu_flush_iotlb_psi(domain->iommu, | 1974 | if (iommu_flush_iotlb_psi(domain->iommu, |
1968 | domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) | 1975 | domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) |
1969 | iommu_flush_write_buffer(domain->iommu); | 1976 | iommu_flush_write_buffer(domain->iommu); |
1970 | /* free iova */ | 1977 | /* free iova */ |
1971 | __free_iova(&domain->iovad, iova); | 1978 | __free_iova(&domain->iovad, iova); |
@@ -1978,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
1978 | } | 1985 | } |
1979 | } | 1986 | } |
1980 | 1987 | ||
1981 | static void * intel_alloc_coherent(struct device *hwdev, size_t size, | 1988 | void *intel_alloc_coherent(struct device *hwdev, size_t size, |
1982 | dma_addr_t *dma_handle, gfp_t flags) | 1989 | dma_addr_t *dma_handle, gfp_t flags) |
1983 | { | 1990 | { |
1984 | void *vaddr; | 1991 | void *vaddr; |
1985 | int order; | 1992 | int order; |
1986 | 1993 | ||
1987 | size = PAGE_ALIGN_4K(size); | 1994 | size = PAGE_ALIGN(size); |
1988 | order = get_order(size); | 1995 | order = get_order(size); |
1989 | flags &= ~(GFP_DMA | GFP_DMA32); | 1996 | flags &= ~(GFP_DMA | GFP_DMA32); |
1990 | 1997 | ||
@@ -1993,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size, | |||
1993 | return NULL; | 2000 | return NULL; |
1994 | memset(vaddr, 0, size); | 2001 | memset(vaddr, 0, size); |
1995 | 2002 | ||
1996 | *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); | 2003 | *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, |
2004 | DMA_BIDIRECTIONAL, | ||
2005 | hwdev->coherent_dma_mask); | ||
1997 | if (*dma_handle) | 2006 | if (*dma_handle) |
1998 | return vaddr; | 2007 | return vaddr; |
1999 | free_pages((unsigned long)vaddr, order); | 2008 | free_pages((unsigned long)vaddr, order); |
2000 | return NULL; | 2009 | return NULL; |
2001 | } | 2010 | } |
2002 | 2011 | ||
2003 | static void intel_free_coherent(struct device *hwdev, size_t size, | 2012 | void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
2004 | void *vaddr, dma_addr_t dma_handle) | 2013 | dma_addr_t dma_handle) |
2005 | { | 2014 | { |
2006 | int order; | 2015 | int order; |
2007 | 2016 | ||
2008 | size = PAGE_ALIGN_4K(size); | 2017 | size = PAGE_ALIGN(size); |
2009 | order = get_order(size); | 2018 | order = get_order(size); |
2010 | 2019 | ||
2011 | intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); | 2020 | intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); |
@@ -2013,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size, | |||
2013 | } | 2022 | } |
2014 | 2023 | ||
2015 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) | 2024 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) |
2016 | static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | 2025 | |
2017 | int nelems, int dir) | 2026 | void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, |
2027 | int nelems, int dir) | ||
2018 | { | 2028 | { |
2019 | int i; | 2029 | int i; |
2020 | struct pci_dev *pdev = to_pci_dev(hwdev); | 2030 | struct pci_dev *pdev = to_pci_dev(hwdev); |
@@ -2038,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2038 | size += aligned_size((u64)addr, sg->length); | 2048 | size += aligned_size((u64)addr, sg->length); |
2039 | } | 2049 | } |
2040 | 2050 | ||
2041 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 2051 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
2042 | 2052 | ||
2043 | /* clear the whole page */ | 2053 | /* clear the whole page */ |
2044 | dma_pte_clear_range(domain, start_addr, start_addr + size); | 2054 | dma_pte_clear_range(domain, start_addr, start_addr + size); |
@@ -2046,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2046 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 2056 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
2047 | 2057 | ||
2048 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, | 2058 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, |
2049 | size >> PAGE_SHIFT_4K, 0)) | 2059 | size >> VTD_PAGE_SHIFT, 0)) |
2050 | iommu_flush_write_buffer(domain->iommu); | 2060 | iommu_flush_write_buffer(domain->iommu); |
2051 | 2061 | ||
2052 | /* free iova */ | 2062 | /* free iova */ |
@@ -2067,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
2067 | return nelems; | 2077 | return nelems; |
2068 | } | 2078 | } |
2069 | 2079 | ||
2070 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | 2080 | int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, |
2071 | int nelems, int dir) | 2081 | int dir) |
2072 | { | 2082 | { |
2073 | void *addr; | 2083 | void *addr; |
2074 | int i; | 2084 | int i; |
@@ -2096,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2096 | size += aligned_size((u64)addr, sg->length); | 2106 | size += aligned_size((u64)addr, sg->length); |
2097 | } | 2107 | } |
2098 | 2108 | ||
2099 | iova = __intel_alloc_iova(hwdev, domain, size); | 2109 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
2100 | if (!iova) { | 2110 | if (!iova) { |
2101 | sglist->dma_length = 0; | 2111 | sglist->dma_length = 0; |
2102 | return 0; | 2112 | return 0; |
@@ -2112,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2112 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | 2122 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
2113 | prot |= DMA_PTE_WRITE; | 2123 | prot |= DMA_PTE_WRITE; |
2114 | 2124 | ||
2115 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 2125 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
2116 | offset = 0; | 2126 | offset = 0; |
2117 | for_each_sg(sglist, sg, nelems, i) { | 2127 | for_each_sg(sglist, sg, nelems, i) { |
2118 | addr = SG_ENT_VIRT_ADDRESS(sg); | 2128 | addr = SG_ENT_VIRT_ADDRESS(sg); |
2119 | addr = (void *)virt_to_phys(addr); | 2129 | addr = (void *)virt_to_phys(addr); |
2120 | size = aligned_size((u64)addr, sg->length); | 2130 | size = aligned_size((u64)addr, sg->length); |
2121 | ret = domain_page_mapping(domain, start_addr + offset, | 2131 | ret = domain_page_mapping(domain, start_addr + offset, |
2122 | ((u64)addr) & PAGE_MASK_4K, | 2132 | ((u64)addr) & PAGE_MASK, |
2123 | size, prot); | 2133 | size, prot); |
2124 | if (ret) { | 2134 | if (ret) { |
2125 | /* clear the page */ | 2135 | /* clear the page */ |
@@ -2133,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2133 | return 0; | 2143 | return 0; |
2134 | } | 2144 | } |
2135 | sg->dma_address = start_addr + offset + | 2145 | sg->dma_address = start_addr + offset + |
2136 | ((u64)addr & (~PAGE_MASK_4K)); | 2146 | ((u64)addr & (~PAGE_MASK)); |
2137 | sg->dma_length = sg->length; | 2147 | sg->dma_length = sg->length; |
2138 | offset += size; | 2148 | offset += size; |
2139 | } | 2149 | } |
2140 | 2150 | ||
2141 | /* it's a non-present to present mapping */ | 2151 | /* it's a non-present to present mapping */ |
2142 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, | 2152 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, |
2143 | start_addr, offset >> PAGE_SHIFT_4K, 1)) | 2153 | start_addr, offset >> VTD_PAGE_SHIFT, 1)) |
2144 | iommu_flush_write_buffer(domain->iommu); | 2154 | iommu_flush_write_buffer(domain->iommu); |
2145 | return nelems; | 2155 | return nelems; |
2146 | } | 2156 | } |
@@ -2180,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void) | |||
2180 | sizeof(struct device_domain_info), | 2190 | sizeof(struct device_domain_info), |
2181 | 0, | 2191 | 0, |
2182 | SLAB_HWCACHE_ALIGN, | 2192 | SLAB_HWCACHE_ALIGN, |
2183 | |||
2184 | NULL); | 2193 | NULL); |
2185 | if (!iommu_devinfo_cache) { | 2194 | if (!iommu_devinfo_cache) { |
2186 | printk(KERN_ERR "Couldn't create devinfo cache\n"); | 2195 | printk(KERN_ERR "Couldn't create devinfo cache\n"); |
@@ -2198,7 +2207,6 @@ static inline int iommu_iova_cache_init(void) | |||
2198 | sizeof(struct iova), | 2207 | sizeof(struct iova), |
2199 | 0, | 2208 | 0, |
2200 | SLAB_HWCACHE_ALIGN, | 2209 | SLAB_HWCACHE_ALIGN, |
2201 | |||
2202 | NULL); | 2210 | NULL); |
2203 | if (!iommu_iova_cache) { | 2211 | if (!iommu_iova_cache) { |
2204 | printk(KERN_ERR "Couldn't create iova cache\n"); | 2212 | printk(KERN_ERR "Couldn't create iova cache\n"); |
@@ -2327,7 +2335,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) | |||
2327 | return; | 2335 | return; |
2328 | 2336 | ||
2329 | end = DOMAIN_MAX_ADDR(domain->gaw); | 2337 | end = DOMAIN_MAX_ADDR(domain->gaw); |
2330 | end = end & (~PAGE_MASK_4K); | 2338 | end = end & (~VTD_PAGE_MASK); |
2331 | 2339 | ||
2332 | /* clear ptes */ | 2340 | /* clear ptes */ |
2333 | dma_pte_clear_range(domain, 0, end); | 2341 | dma_pte_clear_range(domain, 0, end); |
@@ -2423,6 +2431,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) | |||
2423 | if (pte) | 2431 | if (pte) |
2424 | pfn = dma_pte_addr(*pte); | 2432 | pfn = dma_pte_addr(*pte); |
2425 | 2433 | ||
2426 | return pfn >> PAGE_SHIFT_4K; | 2434 | return pfn >> VTD_PAGE_SHIFT; |
2427 | } | 2435 | } |
2428 | EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); | 2436 | EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bbf66ea8fd87..96cf8ecd04ce 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -43,6 +43,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) | |||
43 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); | 43 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); |
44 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); | 44 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); |
45 | 45 | ||
46 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ | ||
47 | int forbid_dac __read_mostly; | ||
48 | EXPORT_SYMBOL(forbid_dac); | ||
49 | |||
50 | static __devinit void via_no_dac(struct pci_dev *dev) | ||
51 | { | ||
52 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { | ||
53 | dev_info(&dev->dev, | ||
54 | "VIA PCI bridge detected. Disabling DAC.\n"); | ||
55 | forbid_dac = 1; | ||
56 | } | ||
57 | } | ||
58 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); | ||
59 | |||
46 | /* Deal with broken BIOS'es that neglect to enable passive release, | 60 | /* Deal with broken BIOS'es that neglect to enable passive release, |
47 | which can cause problems in combination with the 82441FX/PPro MTRRs */ | 61 | which can cause problems in combination with the 82441FX/PPro MTRRs */ |
48 | static void quirk_passive_release(struct pci_dev *dev) | 62 | static void quirk_passive_release(struct pci_dev *dev) |
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index b46c60b72708..23e492bf75cf 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile | |||
@@ -70,7 +70,7 @@ pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o | |||
70 | pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o | 70 | pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o |
71 | pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o | 71 | pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o |
72 | pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o | 72 | pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o |
73 | pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps.o | 73 | pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o |
74 | pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o | 74 | pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o |
75 | pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o | 75 | pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o |
76 | 76 | ||
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 814f49fde530..847481dc8d72 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -246,6 +246,16 @@ config RTC_DRV_TWL92330 | |||
246 | platforms. The support is integrated with the rest of | 246 | platforms. The support is integrated with the rest of |
247 | the Menelaus driver; it's not separate module. | 247 | the Menelaus driver; it's not separate module. |
248 | 248 | ||
249 | config RTC_DRV_TWL4030 | ||
250 | tristate "TI TWL4030/TWL5030/TPS659x0" | ||
251 | depends on RTC_CLASS && TWL4030_CORE | ||
252 | help | ||
253 | If you say yes here you get support for the RTC on the | ||
254 | TWL4030 family chips, used mostly with OMAP3 platforms. | ||
255 | |||
256 | This driver can also be built as a module. If so, the module | ||
257 | will be called rtc-twl4030. | ||
258 | |||
249 | config RTC_DRV_S35390A | 259 | config RTC_DRV_S35390A |
250 | tristate "Seiko Instruments S-35390A" | 260 | tristate "Seiko Instruments S-35390A" |
251 | select BITREVERSE | 261 | select BITREVERSE |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index d6a9ac7176ea..e9e8474cc8fe 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -63,6 +63,7 @@ obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o | |||
63 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o | 63 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o |
64 | obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o | 64 | obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o |
65 | obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o | 65 | obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o |
66 | obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o | ||
66 | obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o | 67 | obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o |
67 | obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o | 68 | obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o |
68 | obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o | 69 | obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o |
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c new file mode 100644 index 000000000000..abe87a4d2665 --- /dev/null +++ b/drivers/rtc/rtc-twl4030.c | |||
@@ -0,0 +1,564 @@ | |||
1 | /* | ||
2 | * rtc-twl4030.c -- TWL4030 Real Time Clock interface | ||
3 | * | ||
4 | * Copyright (C) 2007 MontaVista Software, Inc | ||
5 | * Author: Alexandre Rusev <source@mvista.com> | ||
6 | * | ||
7 | * Based on original TI driver twl4030-rtc.c | ||
8 | * Copyright (C) 2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * Based on rtc-omap.c | ||
11 | * Copyright (C) 2003 MontaVista Software, Inc. | ||
12 | * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com> | ||
13 | * Copyright (C) 2006 David Brownell | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/types.h> | ||
25 | #include <linux/rtc.h> | ||
26 | #include <linux/bcd.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | |||
30 | #include <linux/i2c/twl4030.h> | ||
31 | |||
32 | |||
33 | /* | ||
34 | * RTC block register offsets (use TWL_MODULE_RTC) | ||
35 | */ | ||
36 | #define REG_SECONDS_REG 0x00 | ||
37 | #define REG_MINUTES_REG 0x01 | ||
38 | #define REG_HOURS_REG 0x02 | ||
39 | #define REG_DAYS_REG 0x03 | ||
40 | #define REG_MONTHS_REG 0x04 | ||
41 | #define REG_YEARS_REG 0x05 | ||
42 | #define REG_WEEKS_REG 0x06 | ||
43 | |||
44 | #define REG_ALARM_SECONDS_REG 0x07 | ||
45 | #define REG_ALARM_MINUTES_REG 0x08 | ||
46 | #define REG_ALARM_HOURS_REG 0x09 | ||
47 | #define REG_ALARM_DAYS_REG 0x0A | ||
48 | #define REG_ALARM_MONTHS_REG 0x0B | ||
49 | #define REG_ALARM_YEARS_REG 0x0C | ||
50 | |||
51 | #define REG_RTC_CTRL_REG 0x0D | ||
52 | #define REG_RTC_STATUS_REG 0x0E | ||
53 | #define REG_RTC_INTERRUPTS_REG 0x0F | ||
54 | |||
55 | #define REG_RTC_COMP_LSB_REG 0x10 | ||
56 | #define REG_RTC_COMP_MSB_REG 0x11 | ||
57 | |||
58 | /* RTC_CTRL_REG bitfields */ | ||
59 | #define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01 | ||
60 | #define BIT_RTC_CTRL_REG_ROUND_30S_M 0x02 | ||
61 | #define BIT_RTC_CTRL_REG_AUTO_COMP_M 0x04 | ||
62 | #define BIT_RTC_CTRL_REG_MODE_12_24_M 0x08 | ||
63 | #define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10 | ||
64 | #define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20 | ||
65 | #define BIT_RTC_CTRL_REG_GET_TIME_M 0x40 | ||
66 | |||
67 | /* RTC_STATUS_REG bitfields */ | ||
68 | #define BIT_RTC_STATUS_REG_RUN_M 0x02 | ||
69 | #define BIT_RTC_STATUS_REG_1S_EVENT_M 0x04 | ||
70 | #define BIT_RTC_STATUS_REG_1M_EVENT_M 0x08 | ||
71 | #define BIT_RTC_STATUS_REG_1H_EVENT_M 0x10 | ||
72 | #define BIT_RTC_STATUS_REG_1D_EVENT_M 0x20 | ||
73 | #define BIT_RTC_STATUS_REG_ALARM_M 0x40 | ||
74 | #define BIT_RTC_STATUS_REG_POWER_UP_M 0x80 | ||
75 | |||
76 | /* RTC_INTERRUPTS_REG bitfields */ | ||
77 | #define BIT_RTC_INTERRUPTS_REG_EVERY_M 0x03 | ||
78 | #define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M 0x04 | ||
79 | #define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M 0x08 | ||
80 | |||
81 | |||
82 | /* REG_SECONDS_REG through REG_YEARS_REG is how many registers? */ | ||
83 | #define ALL_TIME_REGS 6 | ||
84 | |||
85 | /*----------------------------------------------------------------------*/ | ||
86 | |||
87 | /* | ||
88 | * Supports 1 byte read from TWL4030 RTC register. | ||
89 | */ | ||
90 | static int twl4030_rtc_read_u8(u8 *data, u8 reg) | ||
91 | { | ||
92 | int ret; | ||
93 | |||
94 | ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg); | ||
95 | if (ret < 0) | ||
96 | pr_err("twl4030_rtc: Could not read TWL4030" | ||
97 | "register %X - error %d\n", reg, ret); | ||
98 | return ret; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * Supports 1 byte write to TWL4030 RTC registers. | ||
103 | */ | ||
104 | static int twl4030_rtc_write_u8(u8 data, u8 reg) | ||
105 | { | ||
106 | int ret; | ||
107 | |||
108 | ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg); | ||
109 | if (ret < 0) | ||
110 | pr_err("twl4030_rtc: Could not write TWL4030" | ||
111 | "register %X - error %d\n", reg, ret); | ||
112 | return ret; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Cache the value for timer/alarm interrupts register; this is | ||
117 | * only changed by callers holding rtc ops lock (or resume). | ||
118 | */ | ||
119 | static unsigned char rtc_irq_bits; | ||
120 | |||
121 | /* | ||
122 | * Enable timer and/or alarm interrupts. | ||
123 | */ | ||
124 | static int set_rtc_irq_bit(unsigned char bit) | ||
125 | { | ||
126 | unsigned char val; | ||
127 | int ret; | ||
128 | |||
129 | val = rtc_irq_bits | bit; | ||
130 | ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); | ||
131 | if (ret == 0) | ||
132 | rtc_irq_bits = val; | ||
133 | |||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * Disable timer and/or alarm interrupts. | ||
139 | */ | ||
140 | static int mask_rtc_irq_bit(unsigned char bit) | ||
141 | { | ||
142 | unsigned char val; | ||
143 | int ret; | ||
144 | |||
145 | val = rtc_irq_bits & ~bit; | ||
146 | ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); | ||
147 | if (ret == 0) | ||
148 | rtc_irq_bits = val; | ||
149 | |||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | static inline int twl4030_rtc_alarm_irq_set_state(int enabled) | ||
154 | { | ||
155 | int ret; | ||
156 | |||
157 | if (enabled) | ||
158 | ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); | ||
159 | else | ||
160 | ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); | ||
161 | |||
162 | return ret; | ||
163 | } | ||
164 | |||
165 | static inline int twl4030_rtc_irq_set_state(int enabled) | ||
166 | { | ||
167 | int ret; | ||
168 | |||
169 | if (enabled) | ||
170 | ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); | ||
171 | else | ||
172 | ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); | ||
173 | |||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Gets current TWL4030 RTC time and date parameters. | ||
179 | * | ||
180 | * The RTC's time/alarm representation is not what gmtime(3) requires | ||
181 | * Linux to use: | ||
182 | * | ||
183 | * - Months are 1..12 vs Linux 0-11 | ||
184 | * - Years are 0..99 vs Linux 1900..N (we assume 21st century) | ||
185 | */ | ||
186 | static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
187 | { | ||
188 | unsigned char rtc_data[ALL_TIME_REGS + 1]; | ||
189 | int ret; | ||
190 | u8 save_control; | ||
191 | |||
192 | ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); | ||
193 | if (ret < 0) | ||
194 | return ret; | ||
195 | |||
196 | save_control |= BIT_RTC_CTRL_REG_GET_TIME_M; | ||
197 | |||
198 | ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); | ||
199 | if (ret < 0) | ||
200 | return ret; | ||
201 | |||
202 | ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data, | ||
203 | REG_SECONDS_REG, ALL_TIME_REGS); | ||
204 | |||
205 | if (ret < 0) { | ||
206 | dev_err(dev, "rtc_read_time error %d\n", ret); | ||
207 | return ret; | ||
208 | } | ||
209 | |||
210 | tm->tm_sec = bcd2bin(rtc_data[0]); | ||
211 | tm->tm_min = bcd2bin(rtc_data[1]); | ||
212 | tm->tm_hour = bcd2bin(rtc_data[2]); | ||
213 | tm->tm_mday = bcd2bin(rtc_data[3]); | ||
214 | tm->tm_mon = bcd2bin(rtc_data[4]) - 1; | ||
215 | tm->tm_year = bcd2bin(rtc_data[5]) + 100; | ||
216 | |||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
221 | { | ||
222 | unsigned char save_control; | ||
223 | unsigned char rtc_data[ALL_TIME_REGS + 1]; | ||
224 | int ret; | ||
225 | |||
226 | rtc_data[1] = bin2bcd(tm->tm_sec); | ||
227 | rtc_data[2] = bin2bcd(tm->tm_min); | ||
228 | rtc_data[3] = bin2bcd(tm->tm_hour); | ||
229 | rtc_data[4] = bin2bcd(tm->tm_mday); | ||
230 | rtc_data[5] = bin2bcd(tm->tm_mon + 1); | ||
231 | rtc_data[6] = bin2bcd(tm->tm_year - 100); | ||
232 | |||
233 | /* Stop RTC while updating the TC registers */ | ||
234 | ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); | ||
235 | if (ret < 0) | ||
236 | goto out; | ||
237 | |||
238 | save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M; | ||
239 | twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); | ||
240 | if (ret < 0) | ||
241 | goto out; | ||
242 | |||
243 | /* update all the time registers in one shot */ | ||
244 | ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data, | ||
245 | REG_SECONDS_REG, ALL_TIME_REGS); | ||
246 | if (ret < 0) { | ||
247 | dev_err(dev, "rtc_set_time error %d\n", ret); | ||
248 | goto out; | ||
249 | } | ||
250 | |||
251 | /* Start back RTC */ | ||
252 | save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M; | ||
253 | ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); | ||
254 | |||
255 | out: | ||
256 | return ret; | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Gets current TWL4030 RTC alarm time. | ||
261 | */ | ||
262 | static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) | ||
263 | { | ||
264 | unsigned char rtc_data[ALL_TIME_REGS + 1]; | ||
265 | int ret; | ||
266 | |||
267 | ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data, | ||
268 | REG_ALARM_SECONDS_REG, ALL_TIME_REGS); | ||
269 | if (ret < 0) { | ||
270 | dev_err(dev, "rtc_read_alarm error %d\n", ret); | ||
271 | return ret; | ||
272 | } | ||
273 | |||
274 | /* some of these fields may be wildcard/"match all" */ | ||
275 | alm->time.tm_sec = bcd2bin(rtc_data[0]); | ||
276 | alm->time.tm_min = bcd2bin(rtc_data[1]); | ||
277 | alm->time.tm_hour = bcd2bin(rtc_data[2]); | ||
278 | alm->time.tm_mday = bcd2bin(rtc_data[3]); | ||
279 | alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1; | ||
280 | alm->time.tm_year = bcd2bin(rtc_data[5]) + 100; | ||
281 | |||
282 | /* report cached alarm enable state */ | ||
283 | if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M) | ||
284 | alm->enabled = 1; | ||
285 | |||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) | ||
290 | { | ||
291 | unsigned char alarm_data[ALL_TIME_REGS + 1]; | ||
292 | int ret; | ||
293 | |||
294 | ret = twl4030_rtc_alarm_irq_set_state(0); | ||
295 | if (ret) | ||
296 | goto out; | ||
297 | |||
298 | alarm_data[1] = bin2bcd(alm->time.tm_sec); | ||
299 | alarm_data[2] = bin2bcd(alm->time.tm_min); | ||
300 | alarm_data[3] = bin2bcd(alm->time.tm_hour); | ||
301 | alarm_data[4] = bin2bcd(alm->time.tm_mday); | ||
302 | alarm_data[5] = bin2bcd(alm->time.tm_mon + 1); | ||
303 | alarm_data[6] = bin2bcd(alm->time.tm_year - 100); | ||
304 | |||
305 | /* update all the alarm registers in one shot */ | ||
306 | ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data, | ||
307 | REG_ALARM_SECONDS_REG, ALL_TIME_REGS); | ||
308 | if (ret) { | ||
309 | dev_err(dev, "rtc_set_alarm error %d\n", ret); | ||
310 | goto out; | ||
311 | } | ||
312 | |||
313 | if (alm->enabled) | ||
314 | ret = twl4030_rtc_alarm_irq_set_state(1); | ||
315 | out: | ||
316 | return ret; | ||
317 | } | ||
318 | |||
#ifdef CONFIG_RTC_INTF_DEV

/*
 * RTC character-device ioctl handler: maps the alarm (AIE) and
 * update (UIE) enable/disable commands onto the interrupt helpers.
 * Anything else is left for the RTC core (-ENOIOCTLCMD).
 */
static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
			     unsigned long arg)
{
	switch (cmd) {
	case RTC_AIE_OFF:
		return twl4030_rtc_alarm_irq_set_state(0);
	case RTC_AIE_ON:
		return twl4030_rtc_alarm_irq_set_state(1);
	case RTC_UIE_OFF:
		return twl4030_rtc_irq_set_state(0);
	case RTC_UIE_ON:
		return twl4030_rtc_irq_set_state(1);

	default:
		return -ENOIOCTLCMD;
	}
}

#else
/* Bug fix: the stub must be named twl4030_rtc_ioctl (the original
 * "#define omap_rtc_ioctl NULL" was a copy-paste from rtc-omap and
 * broke the build when CONFIG_RTC_INTF_DEV is disabled, since
 * twl4030_rtc_ops references twl4030_rtc_ioctl).
 */
#define twl4030_rtc_ioctl	NULL
#endif
342 | |||
/*
 * IRQ handler: determine whether the alarm or the periodic timer fired,
 * ack the event in the chip, and forward it to the RTC core.
 * Returns IRQ_HANDLED only if every register access succeeded.
 */
static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
{
	unsigned long events = 0;
	int ret = IRQ_NONE;
	int res;
	u8 rd_reg;

#ifdef CONFIG_LOCKDEP
	/* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
	 * we don't want and can't tolerate.  Although it might be
	 * friendlier not to borrow this thread context...
	 */
	local_irq_enable();
#endif

	res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
	if (res)
		goto out;
	/*
	 * Figure out source of interrupt: ALARM or TIMER in RTC_STATUS_REG.
	 * only one (ALARM or RTC) interrupt source may be enabled
	 * at time, we also could check our results
	 * by reading RTS_INTERRUPTS_REGISTER[IT_TIMER,IT_ALARM]
	 */
	if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
		events |= RTC_IRQF | RTC_AF;
	else
		events |= RTC_IRQF | RTC_UF;

	/* Write the status back with the ALARM bit set to ack/clear it. */
	res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
				   REG_RTC_STATUS_REG);
	if (res)
		goto out;

	/* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
	 * needs 2 reads to clear the interrupt. One read is done in
	 * do_twl4030_pwrirq(). Doing the second read, to clear
	 * the bit.
	 *
	 * FIXME the reason PWR_ISR1 needs an extra read is that
	 * RTC_IF retriggered until we cleared REG_ALARM_M above.
	 * But re-reading like this is a bad hack; by doing so we
	 * risk wrongly clearing status for some other IRQ (losing
	 * the interrupt).  Be smarter about handling RTC_UF ...
	 */
	res = twl4030_i2c_read_u8(TWL4030_MODULE_INT,
			&rd_reg, TWL4030_INT_PWR_ISR1);
	if (res)
		goto out;

	/* Notify RTC core on event */
	rtc_update_irq(rtc, 1, events);

	ret = IRQ_HANDLED;
out:
	return ret;
}
400 | |||
/* RTC class operations wired into the RTC core at probe time. */
static struct rtc_class_ops twl4030_rtc_ops = {
	.ioctl		= twl4030_rtc_ioctl,
	.read_time	= twl4030_rtc_read_time,
	.set_time	= twl4030_rtc_set_time,
	.read_alarm	= twl4030_rtc_read_alarm,
	.set_alarm	= twl4030_rtc_set_alarm,
};
408 | |||
409 | /*----------------------------------------------------------------------*/ | ||
410 | |||
411 | static int __devinit twl4030_rtc_probe(struct platform_device *pdev) | ||
412 | { | ||
413 | struct rtc_device *rtc; | ||
414 | int ret = 0; | ||
415 | int irq = platform_get_irq(pdev, 0); | ||
416 | u8 rd_reg; | ||
417 | |||
418 | if (irq < 0) | ||
419 | return irq; | ||
420 | |||
421 | rtc = rtc_device_register(pdev->name, | ||
422 | &pdev->dev, &twl4030_rtc_ops, THIS_MODULE); | ||
423 | if (IS_ERR(rtc)) { | ||
424 | ret = -EINVAL; | ||
425 | dev_err(&pdev->dev, "can't register RTC device, err %ld\n", | ||
426 | PTR_ERR(rtc)); | ||
427 | goto out0; | ||
428 | |||
429 | } | ||
430 | |||
431 | platform_set_drvdata(pdev, rtc); | ||
432 | |||
433 | ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); | ||
434 | |||
435 | if (ret < 0) | ||
436 | goto out1; | ||
437 | |||
438 | if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M) | ||
439 | dev_warn(&pdev->dev, "Power up reset detected.\n"); | ||
440 | |||
441 | if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M) | ||
442 | dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n"); | ||
443 | |||
444 | /* Clear RTC Power up reset and pending alarm interrupts */ | ||
445 | ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG); | ||
446 | if (ret < 0) | ||
447 | goto out1; | ||
448 | |||
449 | ret = request_irq(irq, twl4030_rtc_interrupt, | ||
450 | IRQF_TRIGGER_RISING, | ||
451 | rtc->dev.bus_id, rtc); | ||
452 | if (ret < 0) { | ||
453 | dev_err(&pdev->dev, "IRQ is not free.\n"); | ||
454 | goto out1; | ||
455 | } | ||
456 | |||
457 | /* Check RTC module status, Enable if it is off */ | ||
458 | ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); | ||
459 | if (ret < 0) | ||
460 | goto out2; | ||
461 | |||
462 | if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { | ||
463 | dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n"); | ||
464 | rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; | ||
465 | ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); | ||
466 | if (ret < 0) | ||
467 | goto out2; | ||
468 | } | ||
469 | |||
470 | /* init cached IRQ enable bits */ | ||
471 | ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); | ||
472 | if (ret < 0) | ||
473 | goto out2; | ||
474 | |||
475 | return ret; | ||
476 | |||
477 | |||
478 | out2: | ||
479 | free_irq(irq, rtc); | ||
480 | out1: | ||
481 | rtc_device_unregister(rtc); | ||
482 | out0: | ||
483 | return ret; | ||
484 | } | ||
485 | |||
/*
 * Disable all TWL4030 RTC module interrupts.
 * Sets status flag to free.
 */
static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
{
	/* leave rtc running, but disable irqs */
	struct rtc_device *rtc = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	/* Mask both sources before releasing the IRQ so no late
	 * interrupt can fire into a freed handler.
	 */
	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);

	free_irq(irq, rtc);

	rtc_device_unregister(rtc);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
505 | |||
/* Silence both RTC interrupt sources when the system shuts down. */
static void twl4030_rtc_shutdown(struct platform_device *pdev)
{
	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
			 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
}
511 | |||
#ifdef CONFIG_PM

/* Snapshot of the interrupt-enable mask taken at suspend time. */
static unsigned char irqstat;

static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
	irqstat = rtc_irq_bits;

	/* REVISIT alarm may need to wake us from sleep */
	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
			 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
	return 0;
}

static int twl4030_rtc_resume(struct platform_device *pdev)
{
	/* Restore whatever interrupt enables were active before suspend. */
	set_rtc_irq_bit(irqstat);
	return 0;
}

#else
#define twl4030_rtc_suspend NULL
#define twl4030_rtc_resume  NULL
#endif
536 | |||
/* Allow autoloading when the platform device "twl4030_rtc" appears. */
MODULE_ALIAS("platform:twl4030_rtc");

static struct platform_driver twl4030rtc_driver = {
	.probe		= twl4030_rtc_probe,
	.remove		= __devexit_p(twl4030_rtc_remove),
	.shutdown	= twl4030_rtc_shutdown,
	.suspend	= twl4030_rtc_suspend,
	.resume		= twl4030_rtc_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "twl4030_rtc",
	},
};

static int __init twl4030_rtc_init(void)
{
	return platform_driver_register(&twl4030rtc_driver);
}
module_init(twl4030_rtc_init);

static void __exit twl4030_rtc_exit(void)
{
	platform_driver_unregister(&twl4030rtc_driver);
}
module_exit(twl4030_rtc_exit);

MODULE_AUTHOR("Texas Instruments, MontaVista Software");
MODULE_LICENSE("GPL");
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index c014ffb110e9..5450a0e5ecdb 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -1100,6 +1100,8 @@ enum pci_board_num_t { | |||
1100 | pbn_b0_4_1843200_200, | 1100 | pbn_b0_4_1843200_200, |
1101 | pbn_b0_8_1843200_200, | 1101 | pbn_b0_8_1843200_200, |
1102 | 1102 | ||
1103 | pbn_b0_1_4000000, | ||
1104 | |||
1103 | pbn_b0_bt_1_115200, | 1105 | pbn_b0_bt_1_115200, |
1104 | pbn_b0_bt_2_115200, | 1106 | pbn_b0_bt_2_115200, |
1105 | pbn_b0_bt_8_115200, | 1107 | pbn_b0_bt_8_115200, |
@@ -1167,6 +1169,10 @@ enum pci_board_num_t { | |||
1167 | pbn_exsys_4055, | 1169 | pbn_exsys_4055, |
1168 | pbn_plx_romulus, | 1170 | pbn_plx_romulus, |
1169 | pbn_oxsemi, | 1171 | pbn_oxsemi, |
1172 | pbn_oxsemi_1_4000000, | ||
1173 | pbn_oxsemi_2_4000000, | ||
1174 | pbn_oxsemi_4_4000000, | ||
1175 | pbn_oxsemi_8_4000000, | ||
1170 | pbn_intel_i960, | 1176 | pbn_intel_i960, |
1171 | pbn_sgi_ioc3, | 1177 | pbn_sgi_ioc3, |
1172 | pbn_computone_4, | 1178 | pbn_computone_4, |
@@ -1290,6 +1296,12 @@ static struct pciserial_board pci_boards[] __devinitdata = { | |||
1290 | .base_baud = 1843200, | 1296 | .base_baud = 1843200, |
1291 | .uart_offset = 0x200, | 1297 | .uart_offset = 0x200, |
1292 | }, | 1298 | }, |
1299 | [pbn_b0_1_4000000] = { | ||
1300 | .flags = FL_BASE0, | ||
1301 | .num_ports = 1, | ||
1302 | .base_baud = 4000000, | ||
1303 | .uart_offset = 8, | ||
1304 | }, | ||
1293 | 1305 | ||
1294 | [pbn_b0_bt_1_115200] = { | 1306 | [pbn_b0_bt_1_115200] = { |
1295 | .flags = FL_BASE0|FL_BASE_BARS, | 1307 | .flags = FL_BASE0|FL_BASE_BARS, |
@@ -1625,6 +1637,35 @@ static struct pciserial_board pci_boards[] __devinitdata = { | |||
1625 | .base_baud = 115200, | 1637 | .base_baud = 115200, |
1626 | .uart_offset = 8, | 1638 | .uart_offset = 8, |
1627 | }, | 1639 | }, |
1640 | [pbn_oxsemi_1_4000000] = { | ||
1641 | .flags = FL_BASE0, | ||
1642 | .num_ports = 1, | ||
1643 | .base_baud = 4000000, | ||
1644 | .uart_offset = 0x200, | ||
1645 | .first_offset = 0x1000, | ||
1646 | }, | ||
1647 | [pbn_oxsemi_2_4000000] = { | ||
1648 | .flags = FL_BASE0, | ||
1649 | .num_ports = 2, | ||
1650 | .base_baud = 4000000, | ||
1651 | .uart_offset = 0x200, | ||
1652 | .first_offset = 0x1000, | ||
1653 | }, | ||
1654 | [pbn_oxsemi_4_4000000] = { | ||
1655 | .flags = FL_BASE0, | ||
1656 | .num_ports = 4, | ||
1657 | .base_baud = 4000000, | ||
1658 | .uart_offset = 0x200, | ||
1659 | .first_offset = 0x1000, | ||
1660 | }, | ||
1661 | [pbn_oxsemi_8_4000000] = { | ||
1662 | .flags = FL_BASE0, | ||
1663 | .num_ports = 8, | ||
1664 | .base_baud = 4000000, | ||
1665 | .uart_offset = 0x200, | ||
1666 | .first_offset = 0x1000, | ||
1667 | }, | ||
1668 | |||
1628 | 1669 | ||
1629 | /* | 1670 | /* |
1630 | * EKF addition for i960 Boards form EKF with serial port. | 1671 | * EKF addition for i960 Boards form EKF with serial port. |
@@ -1813,6 +1854,39 @@ serial_pci_matches(struct pciserial_board *board, | |||
1813 | board->first_offset == guessed->first_offset; | 1854 | board->first_offset == guessed->first_offset; |
1814 | } | 1855 | } |
1815 | 1856 | ||
/*
 * Oxford Semiconductor Inc.
 * Check that device is part of the Tornado range of devices, then determine
 * the number of ports available on the device.
 */
static int pci_oxsemi_tornado_init(struct pci_dev *dev, struct pciserial_board *board)
{
	u8 __iomem *p;
	unsigned long deviceID;
	unsigned int number_uarts;

	/* OxSemi Tornado devices are all 0xCxxx */
	if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
	    (dev->device & 0xF000) != 0xC000)
		return 0;

	/* Map the first 5 bytes of BAR 0: a 32-bit ID word followed by
	 * the UART-count byte.
	 */
	p = pci_iomap(dev, 0, 5);
	if (p == NULL)
		return -ENOMEM;

	deviceID = ioread32(p);
	/* Tornado device */
	if (deviceID == 0x07000200) {
		number_uarts = ioread8(p + 4);
		board->num_ports = number_uarts;
		printk(KERN_DEBUG
			"%d ports detected on Oxford PCI Express device\n",
								number_uarts);
	}
	pci_iounmap(dev, p);
	return 0;
}
1889 | |||
1816 | struct serial_private * | 1890 | struct serial_private * |
1817 | pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board) | 1891 | pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board) |
1818 | { | 1892 | { |
@@ -1821,6 +1895,13 @@ pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board) | |||
1821 | struct pci_serial_quirk *quirk; | 1895 | struct pci_serial_quirk *quirk; |
1822 | int rc, nr_ports, i; | 1896 | int rc, nr_ports, i; |
1823 | 1897 | ||
1898 | /* | ||
1899 | * Find number of ports on board | ||
1900 | */ | ||
1901 | if (dev->vendor == PCI_VENDOR_ID_OXSEMI || | ||
1902 | dev->vendor == PCI_VENDOR_ID_MAINPINE) | ||
1903 | pci_oxsemi_tornado_init(dev, board); | ||
1904 | |||
1824 | nr_ports = board->num_ports; | 1905 | nr_ports = board->num_ports; |
1825 | 1906 | ||
1826 | /* | 1907 | /* |
@@ -2301,6 +2382,156 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2301 | pbn_b0_bt_2_921600 }, | 2382 | pbn_b0_bt_2_921600 }, |
2302 | 2383 | ||
2303 | /* | 2384 | /* |
2385 | * Oxford Semiconductor Inc. Tornado PCI express device range. | ||
2386 | */ | ||
2387 | { PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */ | ||
2388 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2389 | pbn_b0_1_4000000 }, | ||
2390 | { PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */ | ||
2391 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2392 | pbn_b0_1_4000000 }, | ||
2393 | { PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */ | ||
2394 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2395 | pbn_oxsemi_1_4000000 }, | ||
2396 | { PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */ | ||
2397 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2398 | pbn_oxsemi_1_4000000 }, | ||
2399 | { PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */ | ||
2400 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2401 | pbn_b0_1_4000000 }, | ||
2402 | { PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */ | ||
2403 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2404 | pbn_b0_1_4000000 }, | ||
2405 | { PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */ | ||
2406 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2407 | pbn_oxsemi_1_4000000 }, | ||
2408 | { PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */ | ||
2409 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2410 | pbn_oxsemi_1_4000000 }, | ||
2411 | { PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */ | ||
2412 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2413 | pbn_b0_1_4000000 }, | ||
2414 | { PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */ | ||
2415 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2416 | pbn_b0_1_4000000 }, | ||
2417 | { PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */ | ||
2418 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2419 | pbn_b0_1_4000000 }, | ||
2420 | { PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */ | ||
2421 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2422 | pbn_b0_1_4000000 }, | ||
2423 | { PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */ | ||
2424 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2425 | pbn_oxsemi_2_4000000 }, | ||
2426 | { PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */ | ||
2427 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2428 | pbn_oxsemi_2_4000000 }, | ||
2429 | { PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */ | ||
2430 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2431 | pbn_oxsemi_4_4000000 }, | ||
2432 | { PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */ | ||
2433 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2434 | pbn_oxsemi_4_4000000 }, | ||
2435 | { PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */ | ||
2436 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2437 | pbn_oxsemi_8_4000000 }, | ||
2438 | { PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */ | ||
2439 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2440 | pbn_oxsemi_8_4000000 }, | ||
2441 | { PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */ | ||
2442 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2443 | pbn_oxsemi_1_4000000 }, | ||
2444 | { PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */ | ||
2445 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2446 | pbn_oxsemi_1_4000000 }, | ||
2447 | { PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */ | ||
2448 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2449 | pbn_oxsemi_1_4000000 }, | ||
2450 | { PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */ | ||
2451 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2452 | pbn_oxsemi_1_4000000 }, | ||
2453 | { PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */ | ||
2454 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2455 | pbn_oxsemi_1_4000000 }, | ||
2456 | { PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */ | ||
2457 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2458 | pbn_oxsemi_1_4000000 }, | ||
2459 | { PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */ | ||
2460 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2461 | pbn_oxsemi_1_4000000 }, | ||
2462 | { PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */ | ||
2463 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2464 | pbn_oxsemi_1_4000000 }, | ||
2465 | { PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */ | ||
2466 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2467 | pbn_oxsemi_1_4000000 }, | ||
2468 | { PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */ | ||
2469 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2470 | pbn_oxsemi_1_4000000 }, | ||
2471 | { PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */ | ||
2472 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2473 | pbn_oxsemi_1_4000000 }, | ||
2474 | { PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */ | ||
2475 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2476 | pbn_oxsemi_1_4000000 }, | ||
2477 | { PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */ | ||
2478 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2479 | pbn_oxsemi_1_4000000 }, | ||
2480 | { PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */ | ||
2481 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2482 | pbn_oxsemi_1_4000000 }, | ||
2483 | { PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */ | ||
2484 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2485 | pbn_oxsemi_1_4000000 }, | ||
2486 | { PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */ | ||
2487 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2488 | pbn_oxsemi_1_4000000 }, | ||
2489 | { PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */ | ||
2490 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2491 | pbn_oxsemi_1_4000000 }, | ||
2492 | { PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */ | ||
2493 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2494 | pbn_oxsemi_1_4000000 }, | ||
2495 | { PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */ | ||
2496 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2497 | pbn_oxsemi_1_4000000 }, | ||
2498 | { PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */ | ||
2499 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2500 | pbn_oxsemi_1_4000000 }, | ||
2501 | { PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */ | ||
2502 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2503 | pbn_oxsemi_1_4000000 }, | ||
2504 | { PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */ | ||
2505 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2506 | pbn_oxsemi_1_4000000 }, | ||
2507 | { PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */ | ||
2508 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2509 | pbn_oxsemi_1_4000000 }, | ||
2510 | { PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */ | ||
2511 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2512 | pbn_oxsemi_1_4000000 }, | ||
2513 | { PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */ | ||
2514 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2515 | pbn_oxsemi_1_4000000 }, | ||
2516 | { PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */ | ||
2517 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2518 | pbn_oxsemi_1_4000000 }, | ||
2519 | /* | ||
2520 | * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado | ||
2521 | */ | ||
2522 | { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */ | ||
2523 | PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0, | ||
2524 | pbn_oxsemi_1_4000000 }, | ||
2525 | { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */ | ||
2526 | PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0, | ||
2527 | pbn_oxsemi_2_4000000 }, | ||
2528 | { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */ | ||
2529 | PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0, | ||
2530 | pbn_oxsemi_4_4000000 }, | ||
2531 | { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */ | ||
2532 | PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0, | ||
2533 | pbn_oxsemi_8_4000000 }, | ||
2534 | /* | ||
2304 | * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards, | 2535 | * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards, |
2305 | * from skokodyn@yahoo.com | 2536 | * from skokodyn@yahoo.com |
2306 | */ | 2537 | */ |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index db783b77a881..c94d3c4b7521 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -457,7 +457,7 @@ config SERIAL_SAMSUNG | |||
457 | 457 | ||
458 | config SERIAL_SAMSUNG_DEBUG | 458 | config SERIAL_SAMSUNG_DEBUG |
459 | bool "Samsung SoC serial debug" | 459 | bool "Samsung SoC serial debug" |
460 | depends on SERIAL_SAMSUNG | 460 | depends on SERIAL_SAMSUNG && DEBUG_LL |
461 | help | 461 | help |
462 | Add support for debugging the serial driver. Since this is | 462 | Add support for debugging the serial driver. Since this is |
463 | generally being used as a console, we use our own output | 463 | generally being used as a console, we use our own output |
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index bcefbddeba50..c23a9857ee67 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig | |||
@@ -97,6 +97,8 @@ source "drivers/usb/core/Kconfig" | |||
97 | 97 | ||
98 | source "drivers/usb/mon/Kconfig" | 98 | source "drivers/usb/mon/Kconfig" |
99 | 99 | ||
100 | source "drivers/usb/wusbcore/Kconfig" | ||
101 | |||
100 | source "drivers/usb/host/Kconfig" | 102 | source "drivers/usb/host/Kconfig" |
101 | 103 | ||
102 | source "drivers/usb/musb/Kconfig" | 104 | source "drivers/usb/musb/Kconfig" |
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index a419c42e880e..8b7c419b876e 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile | |||
@@ -16,9 +16,12 @@ obj-$(CONFIG_USB_UHCI_HCD) += host/ | |||
16 | obj-$(CONFIG_USB_SL811_HCD) += host/ | 16 | obj-$(CONFIG_USB_SL811_HCD) += host/ |
17 | obj-$(CONFIG_USB_U132_HCD) += host/ | 17 | obj-$(CONFIG_USB_U132_HCD) += host/ |
18 | obj-$(CONFIG_USB_R8A66597_HCD) += host/ | 18 | obj-$(CONFIG_USB_R8A66597_HCD) += host/ |
19 | obj-$(CONFIG_USB_HWA_HCD) += host/ | ||
19 | 20 | ||
20 | obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ | 21 | obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ |
21 | 22 | ||
23 | obj-$(CONFIG_USB_WUSB) += wusbcore/ | ||
24 | |||
22 | obj-$(CONFIG_USB_ACM) += class/ | 25 | obj-$(CONFIG_USB_ACM) += class/ |
23 | obj-$(CONFIG_USB_PRINTER) += class/ | 26 | obj-$(CONFIG_USB_PRINTER) += class/ |
24 | 27 | ||
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 228797e54f9c..72fb655e6033 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig | |||
@@ -305,3 +305,31 @@ config SUPERH_ON_CHIP_R8A66597 | |||
305 | help | 305 | help |
306 | This driver enables support for the on-chip R8A66597 in the | 306 | This driver enables support for the on-chip R8A66597 in the |
307 | SH7366 and SH7723 processors. | 307 | SH7366 and SH7723 processors. |
308 | |||
309 | config USB_WHCI_HCD | ||
310 | tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" | ||
311 | depends on EXPERIMENTAL | ||
312 | depends on PCI && USB | ||
313 | select USB_WUSB | ||
314 | select UWB_WHCI | ||
315 | help | ||
316 | A driver for PCI-based Wireless USB Host Controllers that are | ||
317 | compliant with the WHCI specification. | ||
318 | |||
319 | To compile this driver as a module, choose M here: the module | ||
320 | will be called "whci-hcd". | ||
321 | |||
322 | config USB_HWA_HCD | ||
323 | tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)" | ||
324 | depends on EXPERIMENTAL | ||
325 | depends on USB | ||
326 | select USB_WUSB | ||
327 | select UWB_HWA | ||
328 | help | ||
329 | This driver enables you to connect Wireless USB devices to | ||
330 | your system using a Host Wire Adaptor USB dongle. This is an | ||
331 | UWB Radio Controller and WUSB Host Controller connected to | ||
332 | your machine via USB (specified in WUSB1.0). | ||
333 | |||
334 | To compile this driver as a module, choose M here: the module | ||
335 | will be called "hwa-hc". | ||
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index f1edda2dcfde..23be22224044 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile | |||
@@ -8,6 +8,8 @@ endif | |||
8 | 8 | ||
9 | isp1760-objs := isp1760-hcd.o isp1760-if.o | 9 | isp1760-objs := isp1760-hcd.o isp1760-if.o |
10 | 10 | ||
11 | obj-$(CONFIG_USB_WHCI_HCD) += whci/ | ||
12 | |||
11 | obj-$(CONFIG_PCI) += pci-quirks.o | 13 | obj-$(CONFIG_PCI) += pci-quirks.o |
12 | 14 | ||
13 | obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o | 15 | obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o |
@@ -19,3 +21,4 @@ obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o | |||
19 | obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o | 21 | obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o |
20 | obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o | 22 | obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o |
21 | obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o | 23 | obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o |
24 | obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o | ||
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c new file mode 100644 index 000000000000..64be4d88df11 --- /dev/null +++ b/drivers/usb/host/hwa-hc.c | |||
@@ -0,0 +1,925 @@ | |||
1 | /* | ||
2 | * Host Wire Adapter: | ||
3 | * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * The HWA driver is a simple layer that forwards requests to the WAHC | ||
24 | * (Wire Adater Host Controller) or WUSBHC (Wireless USB Host | ||
25 | * Controller) layers. | ||
26 | * | ||
27 | * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB | ||
28 | * Host Controller that is connected to your system via USB (a USB | ||
29 | * dongle that implements a USB host...). There is also a Device Wired | ||
30 | * Adaptor, DWA (Wireless USB hub) that uses the same mechanism for | ||
31 | * transferring data (it is after all a USB host connected via | ||
32 | * Wireless USB), we have a common layer called Wire Adapter Host | ||
33 | * Controller that does all the hard work. The WUSBHC (Wireless USB | ||
34 | * Host Controller) is the part common to WUSB Host Controllers, the | ||
35 | * HWA and the PCI-based one, that is implemented following the WHCI | ||
36 | * spec. All these layers are implemented in ../wusbcore. | ||
37 | * | ||
38 | * The main functions are hwahc_op_urb_{en,de}queue(), that pass the | ||
39 | * job of converting a URB to a Wire Adapter | ||
40 | * | ||
41 | * Entry points: | ||
42 | * | ||
43 | * hwahc_driver_*() Driver initialization, registration and | ||
44 | * teardown. | ||
45 | * | ||
46 | * hwahc_probe() New device came up, create an instance for | ||
47 | * it [from device enumeration]. | ||
48 | * | ||
49 | * hwahc_disconnect() Remove device instance [from device | ||
50 | * enumeration]. | ||
51 | * | ||
52 | * [__]hwahc_op_*() Host-Wire-Adaptor specific functions for | ||
53 | * starting/stopping/etc (some might be made also | ||
54 | * DWA). | ||
55 | */ | ||
56 | #include <linux/kernel.h> | ||
57 | #include <linux/version.h> | ||
58 | #include <linux/init.h> | ||
59 | #include <linux/module.h> | ||
60 | #include <linux/workqueue.h> | ||
61 | #include <linux/wait.h> | ||
62 | #include <linux/completion.h> | ||
63 | #include "../wusbcore/wa-hc.h" | ||
64 | #include "../wusbcore/wusbhc.h" | ||
65 | |||
66 | #define D_LOCAL 0 | ||
67 | #include <linux/uwb/debug.h> | ||
68 | |||
69 | struct hwahc { | ||
70 | struct wusbhc wusbhc; /* has to be 1st */ | ||
71 | struct wahc wa; | ||
72 | u8 buffer[16]; /* for misc usb transactions */ | ||
73 | }; | ||
74 | |||
75 | /** | ||
76 | * FIXME should be wusbhc | ||
77 | * | ||
78 | * NOTE: we need to cache the Cluster ID because later...there is no | ||
79 | * way to get it :) | ||
80 | */ | ||
81 | static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id) | ||
82 | { | ||
83 | int result; | ||
84 | struct wusbhc *wusbhc = &hwahc->wusbhc; | ||
85 | struct wahc *wa = &hwahc->wa; | ||
86 | struct device *dev = &wa->usb_iface->dev; | ||
87 | |||
88 | result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
89 | WUSB_REQ_SET_CLUSTER_ID, | ||
90 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
91 | cluster_id, | ||
92 | wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
93 | NULL, 0, 1000 /* FIXME: arbitrary */); | ||
94 | if (result < 0) | ||
95 | dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n", | ||
96 | cluster_id, result); | ||
97 | else | ||
98 | wusbhc->cluster_id = cluster_id; | ||
99 | dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id); | ||
100 | return result; | ||
101 | } | ||
102 | |||
103 | static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) | ||
104 | { | ||
105 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
106 | struct wahc *wa = &hwahc->wa; | ||
107 | |||
108 | return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
109 | WUSB_REQ_SET_NUM_DNTS, | ||
110 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
111 | interval << 8 | slots, | ||
112 | wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
113 | NULL, 0, 1000 /* FIXME: arbitrary */); | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Reset a WUSB host controller and wait for it to complete doing it. | ||
118 | * | ||
119 | * @usb_hcd: Pointer to WUSB Host Controller instance. | ||
120 | * | ||
121 | */ | ||
122 | static int hwahc_op_reset(struct usb_hcd *usb_hcd) | ||
123 | { | ||
124 | int result; | ||
125 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
126 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
127 | struct device *dev = &hwahc->wa.usb_iface->dev; | ||
128 | |||
129 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
130 | mutex_lock(&wusbhc->mutex); | ||
131 | wa_nep_disarm(&hwahc->wa); | ||
132 | result = __wa_set_feature(&hwahc->wa, WA_RESET); | ||
133 | if (result < 0) { | ||
134 | dev_err(dev, "error commanding HC to reset: %d\n", result); | ||
135 | goto error_unlock; | ||
136 | } | ||
137 | d_printf(3, dev, "reset: waiting for device to change state\n"); | ||
138 | result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0); | ||
139 | if (result < 0) { | ||
140 | dev_err(dev, "error waiting for HC to reset: %d\n", result); | ||
141 | goto error_unlock; | ||
142 | } | ||
143 | error_unlock: | ||
144 | mutex_unlock(&wusbhc->mutex); | ||
145 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
146 | return result; | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * FIXME: break this function up | ||
151 | */ | ||
152 | static int hwahc_op_start(struct usb_hcd *usb_hcd) | ||
153 | { | ||
154 | u8 addr; | ||
155 | int result; | ||
156 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
157 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
158 | struct device *dev = &hwahc->wa.usb_iface->dev; | ||
159 | |||
160 | /* Set up a Host Info WUSB Information Element */ | ||
161 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
162 | result = -ENOSPC; | ||
163 | mutex_lock(&wusbhc->mutex); | ||
164 | /* Start the numbering from the top so that the bottom | ||
165 | * range of the unauth addr space is used for devices, | ||
166 | * the top for HCs; use 0xfe - RC# */ | ||
167 | addr = wusb_cluster_id_get(); | ||
168 | if (addr == 0) | ||
169 | goto error_cluster_id_get; | ||
170 | result = __hwahc_set_cluster_id(hwahc, addr); | ||
171 | if (result < 0) | ||
172 | goto error_set_cluster_id; | ||
173 | |||
174 | result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); | ||
175 | if (result < 0) { | ||
176 | dev_err(dev, "cannot listen to notifications: %d\n", result); | ||
177 | goto error_stop; | ||
178 | } | ||
179 | usb_hcd->uses_new_polling = 1; | ||
180 | usb_hcd->poll_rh = 1; | ||
181 | usb_hcd->state = HC_STATE_RUNNING; | ||
182 | result = 0; | ||
183 | out: | ||
184 | mutex_unlock(&wusbhc->mutex); | ||
185 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
186 | return result; | ||
187 | |||
188 | error_stop: | ||
189 | __wa_stop(&hwahc->wa); | ||
190 | error_set_cluster_id: | ||
191 | wusb_cluster_id_put(wusbhc->cluster_id); | ||
192 | error_cluster_id_get: | ||
193 | goto out; | ||
194 | |||
195 | } | ||
196 | |||
197 | /* | ||
198 | * FIXME: break this function up | ||
199 | */ | ||
200 | static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) | ||
201 | { | ||
202 | int result; | ||
203 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
204 | struct device *dev = &hwahc->wa.usb_iface->dev; | ||
205 | |||
206 | /* Set up a Host Info WUSB Information Element */ | ||
207 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
208 | result = -ENOSPC; | ||
209 | |||
210 | result = __wa_set_feature(&hwahc->wa, WA_ENABLE); | ||
211 | if (result < 0) { | ||
212 | dev_err(dev, "error commanding HC to start: %d\n", result); | ||
213 | goto error_stop; | ||
214 | } | ||
215 | result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); | ||
216 | if (result < 0) { | ||
217 | dev_err(dev, "error waiting for HC to start: %d\n", result); | ||
218 | goto error_stop; | ||
219 | } | ||
220 | result = 0; | ||
221 | out: | ||
222 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
223 | return result; | ||
224 | |||
225 | error_stop: | ||
226 | result = __wa_clear_feature(&hwahc->wa, WA_ENABLE); | ||
227 | goto out; | ||
228 | } | ||
229 | |||
230 | static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg) | ||
231 | { | ||
232 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
233 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
234 | dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__, | ||
235 | usb_hcd, hwahc, *(unsigned long *) &msg); | ||
236 | return -ENOSYS; | ||
237 | } | ||
238 | |||
239 | static int hwahc_op_resume(struct usb_hcd *usb_hcd) | ||
240 | { | ||
241 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
242 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
243 | |||
244 | dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__, | ||
245 | usb_hcd, hwahc); | ||
246 | return -ENOSYS; | ||
247 | } | ||
248 | |||
249 | static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc) | ||
250 | { | ||
251 | int result; | ||
252 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
253 | struct device *dev = &hwahc->wa.usb_iface->dev; | ||
254 | |||
255 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
256 | /* Nothing for now */ | ||
257 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
258 | return; | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * No need to abort pipes, as when this is called, all the children | ||
263 | * has been disconnected and that has done it [through | ||
264 | * usb_disable_interface() -> usb_disable_endpoint() -> | ||
265 | * hwahc_op_ep_disable() - >rpipe_ep_disable()]. | ||
266 | */ | ||
267 | static void hwahc_op_stop(struct usb_hcd *usb_hcd) | ||
268 | { | ||
269 | int result; | ||
270 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
271 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
272 | struct wahc *wa = &hwahc->wa; | ||
273 | struct device *dev = &wa->usb_iface->dev; | ||
274 | |||
275 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
276 | mutex_lock(&wusbhc->mutex); | ||
277 | wusbhc_stop(wusbhc); | ||
278 | wa_nep_disarm(&hwahc->wa); | ||
279 | result = __wa_stop(&hwahc->wa); | ||
280 | wusb_cluster_id_put(wusbhc->cluster_id); | ||
281 | mutex_unlock(&wusbhc->mutex); | ||
282 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
283 | return; | ||
284 | } | ||
285 | |||
286 | static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd) | ||
287 | { | ||
288 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
289 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
290 | |||
291 | dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__, | ||
292 | usb_hcd, hwahc); | ||
293 | return -ENOSYS; | ||
294 | } | ||
295 | |||
296 | static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, | ||
297 | gfp_t gfp) | ||
298 | { | ||
299 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
300 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
301 | |||
302 | return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp); | ||
303 | } | ||
304 | |||
305 | static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, | ||
306 | int status) | ||
307 | { | ||
308 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
309 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
310 | |||
311 | return wa_urb_dequeue(&hwahc->wa, urb); | ||
312 | } | ||
313 | |||
314 | /* | ||
315 | * Release resources allocated for an endpoint | ||
316 | * | ||
317 | * If there is an associated rpipe to this endpoint, go ahead and put it. | ||
318 | */ | ||
319 | static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd, | ||
320 | struct usb_host_endpoint *ep) | ||
321 | { | ||
322 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
323 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
324 | |||
325 | rpipe_ep_disable(&hwahc->wa, ep); | ||
326 | } | ||
327 | |||
328 | /* | ||
329 | * Set the UWB MAS allocation for the WUSB cluster | ||
330 | * | ||
331 | * @stream_index: stream to use (-1 for cancelling the allocation) | ||
332 | * @mas: mas bitmap to use | ||
333 | */ | ||
334 | static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, | ||
335 | const struct uwb_mas_bm *mas) | ||
336 | { | ||
337 | int result; | ||
338 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
339 | struct wahc *wa = &hwahc->wa; | ||
340 | struct device *dev = &wa->usb_iface->dev; | ||
341 | u8 mas_le[UWB_NUM_MAS/8]; | ||
342 | |||
343 | /* Set the stream index */ | ||
344 | result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
345 | WUSB_REQ_SET_STREAM_IDX, | ||
346 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
347 | stream_index, | ||
348 | wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
349 | NULL, 0, 1000 /* FIXME: arbitrary */); | ||
350 | if (result < 0) { | ||
351 | dev_err(dev, "Cannot set WUSB stream index: %d\n", result); | ||
352 | goto out; | ||
353 | } | ||
354 | uwb_mas_bm_copy_le(mas_le, mas); | ||
355 | /* Set the MAS allocation */ | ||
356 | result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
357 | WUSB_REQ_SET_WUSB_MAS, | ||
358 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
359 | 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
360 | mas_le, 32, 1000 /* FIXME: arbitrary */); | ||
361 | if (result < 0) | ||
362 | dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result); | ||
363 | out: | ||
364 | return result; | ||
365 | } | ||
366 | |||
367 | /* | ||
368 | * Add an IE to the host's MMC | ||
369 | * | ||
370 | * @interval: See WUSB1.0[8.5.3.1] | ||
371 | * @repeat_cnt: See WUSB1.0[8.5.3.1] | ||
372 | * @handle: See WUSB1.0[8.5.3.1] | ||
373 | * @wuie: Pointer to the header of the WUSB IE data to add. | ||
374 | * MUST BE allocated in a kmalloc buffer (no stack or | ||
375 | * vmalloc). | ||
376 | * | ||
377 | * NOTE: the format of the WUSB IEs for MMCs are different to the | ||
378 | * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length + | ||
379 | * Id in WUSB IEs). Standards...you gotta love'em. | ||
380 | */ | ||
381 | static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval, | ||
382 | u8 repeat_cnt, u8 handle, | ||
383 | struct wuie_hdr *wuie) | ||
384 | { | ||
385 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
386 | struct wahc *wa = &hwahc->wa; | ||
387 | u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; | ||
388 | |||
389 | return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
390 | WUSB_REQ_ADD_MMC_IE, | ||
391 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
392 | interval << 8 | repeat_cnt, | ||
393 | handle << 8 | iface_no, | ||
394 | wuie, wuie->bLength, 1000 /* FIXME: arbitrary */); | ||
395 | } | ||
396 | |||
397 | /* | ||
398 | * Remove an IE to the host's MMC | ||
399 | * | ||
400 | * @handle: See WUSB1.0[8.5.3.1] | ||
401 | */ | ||
402 | static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle) | ||
403 | { | ||
404 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
405 | struct wahc *wa = &hwahc->wa; | ||
406 | u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; | ||
407 | return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
408 | WUSB_REQ_REMOVE_MMC_IE, | ||
409 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
410 | 0, handle << 8 | iface_no, | ||
411 | NULL, 0, 1000 /* FIXME: arbitrary */); | ||
412 | } | ||
413 | |||
414 | /* | ||
415 | * Update device information for a given fake port | ||
416 | * | ||
417 | * @port_idx: Fake port to which device is connected (wusbhc index, not | ||
418 | * USB port number). | ||
419 | */ | ||
420 | static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc, | ||
421 | struct wusb_dev *wusb_dev) | ||
422 | { | ||
423 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
424 | struct wahc *wa = &hwahc->wa; | ||
425 | u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; | ||
426 | struct hwa_dev_info *dev_info; | ||
427 | int ret; | ||
428 | |||
429 | /* fill out the Device Info buffer and send it */ | ||
430 | dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL); | ||
431 | if (!dev_info) | ||
432 | return -ENOMEM; | ||
433 | uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability, | ||
434 | &wusb_dev->availability); | ||
435 | dev_info->bDeviceAddress = wusb_dev->addr; | ||
436 | |||
437 | /* | ||
438 | * If the descriptors haven't been read yet, use a default PHY | ||
439 | * rate of 53.3 Mbit/s only. The correct value will be used | ||
440 | * when this will be called again as part of the | ||
441 | * authentication process (which occurs after the descriptors | ||
442 | * have been read). | ||
443 | */ | ||
444 | if (wusb_dev->wusb_cap_descr) | ||
445 | dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates; | ||
446 | else | ||
447 | dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53); | ||
448 | |||
449 | ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
450 | WUSB_REQ_SET_DEV_INFO, | ||
451 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
452 | 0, wusb_dev->port_idx << 8 | iface_no, | ||
453 | dev_info, sizeof(struct hwa_dev_info), | ||
454 | 1000 /* FIXME: arbitrary */); | ||
455 | kfree(dev_info); | ||
456 | return ret; | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * Set host's idea of which encryption (and key) method to use when | ||
461 | * talking to ad evice on a given port. | ||
462 | * | ||
463 | * If key is NULL, it means disable encryption for that "virtual port" | ||
464 | * (used when we disconnect). | ||
465 | */ | ||
466 | static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, | ||
467 | const void *key, size_t key_size, | ||
468 | u8 key_idx) | ||
469 | { | ||
470 | int result = -ENOMEM; | ||
471 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
472 | struct wahc *wa = &hwahc->wa; | ||
473 | u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; | ||
474 | struct usb_key_descriptor *keyd; | ||
475 | size_t keyd_len; | ||
476 | |||
477 | keyd_len = sizeof(*keyd) + key_size; | ||
478 | keyd = kzalloc(keyd_len, GFP_KERNEL); | ||
479 | if (keyd == NULL) | ||
480 | return -ENOMEM; | ||
481 | |||
482 | keyd->bLength = keyd_len; | ||
483 | keyd->bDescriptorType = USB_DT_KEY; | ||
484 | keyd->tTKID[0] = (tkid >> 0) & 0xff; | ||
485 | keyd->tTKID[1] = (tkid >> 8) & 0xff; | ||
486 | keyd->tTKID[2] = (tkid >> 16) & 0xff; | ||
487 | memcpy(keyd->bKeyData, key, key_size); | ||
488 | |||
489 | result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
490 | USB_REQ_SET_DESCRIPTOR, | ||
491 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
492 | USB_DT_KEY << 8 | key_idx, | ||
493 | port_idx << 8 | iface_no, | ||
494 | keyd, keyd_len, 1000 /* FIXME: arbitrary */); | ||
495 | |||
496 | memset(keyd, 0, sizeof(*keyd)); /* clear keys etc. */ | ||
497 | kfree(keyd); | ||
498 | return result; | ||
499 | } | ||
500 | |||
501 | /* | ||
502 | * Set host's idea of which encryption (and key) method to use when | ||
503 | * talking to ad evice on a given port. | ||
504 | * | ||
505 | * If key is NULL, it means disable encryption for that "virtual port" | ||
506 | * (used when we disconnect). | ||
507 | */ | ||
508 | static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, | ||
509 | const void *key, size_t key_size) | ||
510 | { | ||
511 | int result = -ENOMEM; | ||
512 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
513 | struct wahc *wa = &hwahc->wa; | ||
514 | u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; | ||
515 | u8 encryption_value; | ||
516 | |||
517 | /* Tell the host which key to use to talk to the device */ | ||
518 | if (key) { | ||
519 | u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK, | ||
520 | WUSB_KEY_INDEX_ORIGINATOR_HOST); | ||
521 | |||
522 | result = __hwahc_dev_set_key(wusbhc, port_idx, tkid, | ||
523 | key, key_size, key_idx); | ||
524 | if (result < 0) | ||
525 | goto error_set_key; | ||
526 | encryption_value = wusbhc->ccm1_etd->bEncryptionValue; | ||
527 | } else { | ||
528 | /* FIXME: this should come from wusbhc->etd[UNSECURE].value */ | ||
529 | encryption_value = 0; | ||
530 | } | ||
531 | |||
532 | /* Set the encryption type for commmunicating with the device */ | ||
533 | result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
534 | USB_REQ_SET_ENCRYPTION, | ||
535 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
536 | encryption_value, port_idx << 8 | iface_no, | ||
537 | NULL, 0, 1000 /* FIXME: arbitrary */); | ||
538 | if (result < 0) | ||
539 | dev_err(wusbhc->dev, "Can't set host's WUSB encryption for " | ||
540 | "port index %u to %s (value %d): %d\n", port_idx, | ||
541 | wusb_et_name(wusbhc->ccm1_etd->bEncryptionType), | ||
542 | wusbhc->ccm1_etd->bEncryptionValue, result); | ||
543 | error_set_key: | ||
544 | return result; | ||
545 | } | ||
546 | |||
547 | /* | ||
548 | * Set host's GTK key | ||
549 | */ | ||
550 | static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid, | ||
551 | const void *key, size_t key_size) | ||
552 | { | ||
553 | u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, | ||
554 | WUSB_KEY_INDEX_ORIGINATOR_HOST); | ||
555 | |||
556 | return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx); | ||
557 | } | ||
558 | |||
559 | /* | ||
560 | * Get the Wire Adapter class-specific descriptor | ||
561 | * | ||
562 | * NOTE: this descriptor comes with the big bundled configuration | ||
563 | * descriptor that includes the interfaces' and endpoints', so | ||
564 | * we just look for it in the cached copy kept by the USB stack. | ||
565 | * | ||
566 | * NOTE2: We convert LE fields to CPU order. | ||
567 | */ | ||
568 | static int wa_fill_descr(struct wahc *wa) | ||
569 | { | ||
570 | int result; | ||
571 | struct device *dev = &wa->usb_iface->dev; | ||
572 | char *itr; | ||
573 | struct usb_device *usb_dev = wa->usb_dev; | ||
574 | struct usb_descriptor_header *hdr; | ||
575 | struct usb_wa_descriptor *wa_descr; | ||
576 | size_t itr_size, actconfig_idx; | ||
577 | |||
578 | actconfig_idx = (usb_dev->actconfig - usb_dev->config) / | ||
579 | sizeof(usb_dev->config[0]); | ||
580 | itr = usb_dev->rawdescriptors[actconfig_idx]; | ||
581 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); | ||
582 | while (itr_size >= sizeof(*hdr)) { | ||
583 | hdr = (struct usb_descriptor_header *) itr; | ||
584 | d_printf(3, dev, "Extra device descriptor: " | ||
585 | "type %02x/%u bytes @ %zu (%zu left)\n", | ||
586 | hdr->bDescriptorType, hdr->bLength, | ||
587 | (itr - usb_dev->rawdescriptors[actconfig_idx]), | ||
588 | itr_size); | ||
589 | if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER) | ||
590 | goto found; | ||
591 | itr += hdr->bLength; | ||
592 | itr_size -= hdr->bLength; | ||
593 | } | ||
594 | dev_err(dev, "cannot find Wire Adapter Class descriptor\n"); | ||
595 | return -ENODEV; | ||
596 | |||
597 | found: | ||
598 | result = -EINVAL; | ||
599 | if (hdr->bLength > itr_size) { /* is it available? */ | ||
600 | dev_err(dev, "incomplete Wire Adapter Class descriptor " | ||
601 | "(%zu bytes left, %u needed)\n", | ||
602 | itr_size, hdr->bLength); | ||
603 | goto error; | ||
604 | } | ||
605 | if (hdr->bLength < sizeof(*wa->wa_descr)) { | ||
606 | dev_err(dev, "short Wire Adapter Class descriptor\n"); | ||
607 | goto error; | ||
608 | } | ||
609 | wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr; | ||
610 | /* Make LE fields CPU order */ | ||
611 | wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion); | ||
612 | wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes); | ||
613 | wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock); | ||
614 | if (wa_descr->bcdWAVersion > 0x0100) | ||
615 | dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n", | ||
616 | wa_descr->bcdWAVersion & 0xff00 >> 8, | ||
617 | wa_descr->bcdWAVersion & 0x00ff); | ||
618 | result = 0; | ||
619 | error: | ||
620 | return result; | ||
621 | } | ||
622 | |||
623 | static struct hc_driver hwahc_hc_driver = { | ||
624 | .description = "hwa-hcd", | ||
625 | .product_desc = "Wireless USB HWA host controller", | ||
626 | .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd), | ||
627 | .irq = NULL, /* FIXME */ | ||
628 | .flags = HCD_USB2, /* FIXME */ | ||
629 | .reset = hwahc_op_reset, | ||
630 | .start = hwahc_op_start, | ||
631 | .pci_suspend = hwahc_op_suspend, | ||
632 | .pci_resume = hwahc_op_resume, | ||
633 | .stop = hwahc_op_stop, | ||
634 | .get_frame_number = hwahc_op_get_frame_number, | ||
635 | .urb_enqueue = hwahc_op_urb_enqueue, | ||
636 | .urb_dequeue = hwahc_op_urb_dequeue, | ||
637 | .endpoint_disable = hwahc_op_endpoint_disable, | ||
638 | |||
639 | .hub_status_data = wusbhc_rh_status_data, | ||
640 | .hub_control = wusbhc_rh_control, | ||
641 | .bus_suspend = wusbhc_rh_suspend, | ||
642 | .bus_resume = wusbhc_rh_resume, | ||
643 | .start_port_reset = wusbhc_rh_start_port_reset, | ||
644 | }; | ||
645 | |||
646 | static int hwahc_security_create(struct hwahc *hwahc) | ||
647 | { | ||
648 | int result; | ||
649 | struct wusbhc *wusbhc = &hwahc->wusbhc; | ||
650 | struct usb_device *usb_dev = hwahc->wa.usb_dev; | ||
651 | struct device *dev = &usb_dev->dev; | ||
652 | struct usb_security_descriptor *secd; | ||
653 | struct usb_encryption_descriptor *etd; | ||
654 | void *itr, *top; | ||
655 | size_t itr_size, needed, bytes; | ||
656 | u8 index; | ||
657 | char buf[64]; | ||
658 | |||
659 | /* Find the host's security descriptors in the config descr bundle */ | ||
660 | index = (usb_dev->actconfig - usb_dev->config) / | ||
661 | sizeof(usb_dev->config[0]); | ||
662 | itr = usb_dev->rawdescriptors[index]; | ||
663 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); | ||
664 | top = itr + itr_size; | ||
665 | result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index], | ||
666 | le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), | ||
667 | USB_DT_SECURITY, (void **) &secd); | ||
668 | if (result == -1) { | ||
669 | dev_warn(dev, "BUG? WUSB host has no security descriptors\n"); | ||
670 | return 0; | ||
671 | } | ||
672 | needed = sizeof(*secd); | ||
673 | if (top - (void *)secd < needed) { | ||
674 | dev_err(dev, "BUG? Not enough data to process security " | ||
675 | "descriptor header (%zu bytes left vs %zu needed)\n", | ||
676 | top - (void *) secd, needed); | ||
677 | return 0; | ||
678 | } | ||
679 | needed = le16_to_cpu(secd->wTotalLength); | ||
680 | if (top - (void *)secd < needed) { | ||
681 | dev_err(dev, "BUG? Not enough data to process security " | ||
682 | "descriptors (%zu bytes left vs %zu needed)\n", | ||
683 | top - (void *) secd, needed); | ||
684 | return 0; | ||
685 | } | ||
686 | /* Walk over the sec descriptors and store CCM1's on wusbhc */ | ||
687 | itr = (void *) secd + sizeof(*secd); | ||
688 | top = (void *) secd + le16_to_cpu(secd->wTotalLength); | ||
689 | index = 0; | ||
690 | bytes = 0; | ||
691 | while (itr < top) { | ||
692 | etd = itr; | ||
693 | if (top - itr < sizeof(*etd)) { | ||
694 | dev_err(dev, "BUG: bad host security descriptor; " | ||
695 | "not enough data (%zu vs %zu left)\n", | ||
696 | top - itr, sizeof(*etd)); | ||
697 | break; | ||
698 | } | ||
699 | if (etd->bLength < sizeof(*etd)) { | ||
700 | dev_err(dev, "BUG: bad host encryption descriptor; " | ||
701 | "descriptor is too short " | ||
702 | "(%zu vs %zu needed)\n", | ||
703 | (size_t)etd->bLength, sizeof(*etd)); | ||
704 | break; | ||
705 | } | ||
706 | itr += etd->bLength; | ||
707 | bytes += snprintf(buf + bytes, sizeof(buf) - bytes, | ||
708 | "%s (0x%02x) ", | ||
709 | wusb_et_name(etd->bEncryptionType), | ||
710 | etd->bEncryptionValue); | ||
711 | wusbhc->ccm1_etd = etd; | ||
712 | } | ||
713 | dev_info(dev, "supported encryption types: %s\n", buf); | ||
714 | if (wusbhc->ccm1_etd == NULL) { | ||
715 | dev_err(dev, "E: host doesn't support CCM-1 crypto\n"); | ||
716 | return 0; | ||
717 | } | ||
718 | /* Pretty print what we support */ | ||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | static void hwahc_security_release(struct hwahc *hwahc) | ||
723 | { | ||
724 | /* nothing to do here so far... */ | ||
725 | } | ||
726 | |||
727 | static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface) | ||
728 | { | ||
729 | int result; | ||
730 | struct device *dev = &iface->dev; | ||
731 | struct wusbhc *wusbhc = &hwahc->wusbhc; | ||
732 | struct wahc *wa = &hwahc->wa; | ||
733 | struct usb_device *usb_dev = interface_to_usbdev(iface); | ||
734 | |||
735 | wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */ | ||
736 | wa->usb_iface = usb_get_intf(iface); | ||
737 | wusbhc->dev = dev; | ||
738 | wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent); | ||
739 | if (wusbhc->uwb_rc == NULL) { | ||
740 | result = -ENODEV; | ||
741 | dev_err(dev, "Cannot get associated UWB Host Controller\n"); | ||
742 | goto error_rc_get; | ||
743 | } | ||
744 | result = wa_fill_descr(wa); /* Get the device descriptor */ | ||
745 | if (result < 0) | ||
746 | goto error_fill_descriptor; | ||
747 | if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) { | ||
748 | dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB " | ||
749 | "adapter (%u ports)\n", wa->wa_descr->bNumPorts); | ||
750 | wusbhc->ports_max = USB_MAXCHILDREN; | ||
751 | } else { | ||
752 | wusbhc->ports_max = wa->wa_descr->bNumPorts; | ||
753 | } | ||
754 | wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs; | ||
755 | wusbhc->start = __hwahc_op_wusbhc_start; | ||
756 | wusbhc->stop = __hwahc_op_wusbhc_stop; | ||
757 | wusbhc->mmcie_add = __hwahc_op_mmcie_add; | ||
758 | wusbhc->mmcie_rm = __hwahc_op_mmcie_rm; | ||
759 | wusbhc->dev_info_set = __hwahc_op_dev_info_set; | ||
760 | wusbhc->bwa_set = __hwahc_op_bwa_set; | ||
761 | wusbhc->set_num_dnts = __hwahc_op_set_num_dnts; | ||
762 | wusbhc->set_ptk = __hwahc_op_set_ptk; | ||
763 | wusbhc->set_gtk = __hwahc_op_set_gtk; | ||
764 | result = hwahc_security_create(hwahc); | ||
765 | if (result < 0) { | ||
766 | dev_err(dev, "Can't initialize security: %d\n", result); | ||
767 | goto error_security_create; | ||
768 | } | ||
769 | wa->wusb = wusbhc; /* FIXME: ugly, need to fix */ | ||
770 | result = wusbhc_create(&hwahc->wusbhc); | ||
771 | if (result < 0) { | ||
772 | dev_err(dev, "Can't create WUSB HC structures: %d\n", result); | ||
773 | goto error_wusbhc_create; | ||
774 | } | ||
775 | result = wa_create(&hwahc->wa, iface); | ||
776 | if (result < 0) | ||
777 | goto error_wa_create; | ||
778 | return 0; | ||
779 | |||
780 | error_wa_create: | ||
781 | wusbhc_destroy(&hwahc->wusbhc); | ||
782 | error_wusbhc_create: | ||
783 | /* WA Descr fill allocs no resources */ | ||
784 | error_security_create: | ||
785 | error_fill_descriptor: | ||
786 | uwb_rc_put(wusbhc->uwb_rc); | ||
787 | error_rc_get: | ||
788 | usb_put_intf(iface); | ||
789 | usb_put_dev(usb_dev); | ||
790 | return result; | ||
791 | } | ||
792 | |||
/*
 * hwahc_destroy - undo hwahc_create(), releasing resources in reverse
 * order of acquisition.
 *
 * Tears down the Wire Adapter core, the WUSB HC structures and the
 * security context, then drops the references (UWB radio controller,
 * USB interface, USB device) taken at create time.  Done under
 * wusbhc->mutex to serialize against concurrent HC operations.
 */
static void hwahc_destroy(struct hwahc *hwahc)
{
	struct wusbhc *wusbhc = &hwahc->wusbhc;

	d_fnstart(1, NULL, "(hwahc %p)\n", hwahc);
	mutex_lock(&wusbhc->mutex);
	__wa_destroy(&hwahc->wa);
	wusbhc_destroy(&hwahc->wusbhc);
	hwahc_security_release(hwahc);
	hwahc->wusbhc.dev = NULL;
	uwb_rc_put(wusbhc->uwb_rc);
	usb_put_intf(hwahc->wa.usb_iface);
	usb_put_dev(hwahc->wa.usb_dev);
	mutex_unlock(&wusbhc->mutex);
	d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc);
}
809 | |||
/* Initialize the in-memory parts of the HWA HC (just the Wire Adapter). */
static void hwahc_init(struct hwahc *hwahc)
{
	wa_init(&hwahc->wa);
}
814 | |||
/*
 * hwahc_probe - USB probe entry point for a Host Wire Adapter.
 * @usb_iface: the HWA's wire-adapter USB interface
 * @id: the matched device ID
 *
 * Allocates a usb_hcd whose private area holds the whole struct hwahc,
 * initializes the HWA internals, registers the HCD with the USB core
 * and finally runs phase B of the WUSB HC setup.  Errors unwind in
 * strict reverse order through the goto ladder.
 *
 * Returns 0 on success or a negative errno.
 */
static int hwahc_probe(struct usb_interface *usb_iface,
		       const struct usb_device_id *id)
{
	int result;
	struct usb_hcd *usb_hcd;
	struct wusbhc *wusbhc;
	struct hwahc *hwahc;
	struct device *dev = &usb_iface->dev;

	d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id);
	result = -ENOMEM;
	usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
	if (usb_hcd == NULL) {
		dev_err(dev, "unable to allocate instance\n");
		goto error_alloc;
	}
	usb_hcd->wireless = 1;
	usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
	/* struct hwahc embeds the wusbhc which embeds the hcd private area */
	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	hwahc = container_of(wusbhc, struct hwahc, wusbhc);
	hwahc_init(hwahc);
	result = hwahc_create(hwahc, usb_iface);
	if (result < 0) {
		dev_err(dev, "Cannot initialize internals: %d\n", result);
		goto error_hwahc_create;
	}
	/* irq 0: no interrupt line, transfers arrive over the USB link */
	result = usb_add_hcd(usb_hcd, 0, 0);
	if (result < 0) {
		dev_err(dev, "Cannot add HCD: %d\n", result);
		goto error_add_hcd;
	}
	result = wusbhc_b_create(&hwahc->wusbhc);
	if (result < 0) {
		dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
		goto error_wusbhc_b_create;
	}
	d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id);
	return 0;

error_wusbhc_b_create:
	usb_remove_hcd(usb_hcd);
error_add_hcd:
	hwahc_destroy(hwahc);
error_hwahc_create:
	usb_put_hcd(usb_hcd);
error_alloc:
	d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result);
	return result;
}
864 | |||
/*
 * hwahc_disconnect - USB disconnect entry point; undo hwahc_probe().
 *
 * Tears down in exact reverse order of probe: phase B first, then the
 * HCD registration, then the internals, finally the hcd reference.
 */
static void hwahc_disconnect(struct usb_interface *usb_iface)
{
	struct usb_hcd *usb_hcd;
	struct wusbhc *wusbhc;
	struct hwahc *hwahc;

	usb_hcd = usb_get_intfdata(usb_iface);
	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	hwahc = container_of(wusbhc, struct hwahc, wusbhc);

	d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface);
	wusbhc_b_destroy(&hwahc->wusbhc);
	usb_remove_hcd(usb_hcd);
	hwahc_destroy(hwahc);
	usb_put_hcd(usb_hcd);
	d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc,
		usb_iface);
}
883 | |||
/** USB device ID's that we handle */
static struct usb_device_id hwahc_id_table[] = {
	/* FIXME: use class labels for this */
	/* class 0xe0/0x02/0x01 -- presumably the Wireless Controller /
	 * HWA host-controller interface triple; confirm against the
	 * WUSB 1.0 spec. */
	{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
	{},
};
MODULE_DEVICE_TABLE(usb, hwahc_id_table);
891 | |||
/* USB driver glue for the HWA host controller. */
static struct usb_driver hwahc_driver = {
	.name = "hwa-hc",
	.probe = hwahc_probe,
	.disconnect = hwahc_disconnect,
	.id_table = hwahc_id_table,
};
898 | |||
899 | static int __init hwahc_driver_init(void) | ||
900 | { | ||
901 | int result; | ||
902 | result = usb_register(&hwahc_driver); | ||
903 | if (result < 0) { | ||
904 | printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n", | ||
905 | result); | ||
906 | goto error_usb_register; | ||
907 | } | ||
908 | return 0; | ||
909 | |||
910 | error_usb_register: | ||
911 | return result; | ||
912 | |||
913 | } | ||
914 | module_init(hwahc_driver_init); | ||
915 | |||
/* Module exit point: unregister the USB driver. */
static void __exit hwahc_driver_exit(void)
{
	usb_deregister(&hwahc_driver);
}
module_exit(hwahc_driver_exit);
921 | |||
922 | |||
923 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
924 | MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver"); | ||
925 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild new file mode 100644 index 000000000000..26a3871ea0f9 --- /dev/null +++ b/drivers/usb/host/whci/Kbuild | |||
@@ -0,0 +1,11 @@ | |||
1 | obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o | ||
2 | |||
3 | whci-hcd-y := \ | ||
4 | asl.o \ | ||
5 | hcd.o \ | ||
6 | hw.o \ | ||
7 | init.o \ | ||
8 | int.o \ | ||
9 | pzl.o \ | ||
10 | qset.o \ | ||
11 | wusb.o | ||
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c new file mode 100644 index 000000000000..4d7078e50572 --- /dev/null +++ b/drivers/usb/host/whci/asl.c | |||
@@ -0,0 +1,367 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) asynchronous schedule management. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/dma-mapping.h> | ||
20 | #include <linux/uwb/umc.h> | ||
21 | #include <linux/usb.h> | ||
22 | #define D_LOCAL 0 | ||
23 | #include <linux/uwb/debug.h> | ||
24 | |||
25 | #include "../../wusbcore/wusbhc.h" | ||
26 | |||
27 | #include "whcd.h" | ||
28 | |||
#if D_LOCAL >= 4
/* Debug helper: dump every qset currently on the software ASL. */
static void dump_asl(struct whc *whc, const char *tag)
{
	struct device *dev = &whc->umc->dev;
	struct whc_qset *qset;

	d_printf(4, dev, "ASL %s\n", tag);

	list_for_each_entry(qset, &whc->async_list, list_node) {
		dump_qset(qset, dev);
	}
}
#else
/* Compiled out at lower debug levels. */
static inline void dump_asl(struct whc *whc, const char *tag)
{
}
#endif
46 | |||
47 | |||
48 | static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, | ||
49 | struct whc_qset **next, struct whc_qset **prev) | ||
50 | { | ||
51 | struct list_head *n, *p; | ||
52 | |||
53 | BUG_ON(list_empty(&whc->async_list)); | ||
54 | |||
55 | n = qset->list_node.next; | ||
56 | if (n == &whc->async_list) | ||
57 | n = n->next; | ||
58 | p = qset->list_node.prev; | ||
59 | if (p == &whc->async_list) | ||
60 | p = p->prev; | ||
61 | |||
62 | *next = container_of(n, struct whc_qset, list_node); | ||
63 | *prev = container_of(p, struct whc_qset, list_node); | ||
64 | |||
65 | } | ||
66 | |||
/*
 * Put @qset at the front of the software ASL; it is linked into the
 * hardware schedule later (see asl_qset_insert() / scan_async_work()).
 */
static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
{
	list_move(&qset->list_node, &whc->async_list);
	qset->in_sw_list = true;
}
72 | |||
/*
 * Link @qset into the hardware ASL between its software-list
 * neighbours.  The new qset's own link pointer is set before its
 * predecessor's is patched -- presumably so the hardware never follows
 * a stale link; preserve this ordering.
 */
static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
{
	struct whc_qset *next, *prev;

	qset_clear(whc, qset);

	/* Link into ASL. */
	qset_get_next_prev(whc, qset, &next, &prev);
	whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
	whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
	qset->in_hw_list = true;
}
85 | |||
/*
 * Unlink @qset from the software ASL (moving it to the removed list)
 * and from the hardware schedule by bypassing it in its predecessor's
 * link pointer.
 */
static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
{
	struct whc_qset *prev, *next;

	qset_get_next_prev(whc, qset, &next, &prev);

	list_move(&qset->list_node, &whc->async_removed_list);
	qset->in_sw_list = false;

	/*
	 * No more qsets in the ASL? The caller must stop the ASL as
	 * it's no longer valid.
	 */
	if (list_empty(&whc->async_list))
		return;

	/* Remove from ASL. */
	whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
	qset->in_hw_list = false;
}
106 | |||
107 | /** | ||
108 | * process_qset - process any recently inactivated or halted qTDs in a | ||
109 | * qset. | ||
110 | * | ||
111 | * After inactive qTDs are removed, new qTDs can be added if the | ||
112 | * urb queue still contains URBs. | ||
113 | * | ||
114 | * Returns any additional WUSBCMD bits for the ASL sync command (i.e., | ||
115 | * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed). | ||
116 | */ | ||
117 | static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) | ||
118 | { | ||
119 | enum whc_update update = 0; | ||
120 | uint32_t status = 0; | ||
121 | |||
122 | while (qset->ntds) { | ||
123 | struct whc_qtd *td; | ||
124 | int t; | ||
125 | |||
126 | t = qset->td_start; | ||
127 | td = &qset->qtd[qset->td_start]; | ||
128 | status = le32_to_cpu(td->status); | ||
129 | |||
130 | /* | ||
131 | * Nothing to do with a still active qTD. | ||
132 | */ | ||
133 | if (status & QTD_STS_ACTIVE) | ||
134 | break; | ||
135 | |||
136 | if (status & QTD_STS_HALTED) { | ||
137 | /* Ug, an error. */ | ||
138 | process_halted_qtd(whc, qset, td); | ||
139 | goto done; | ||
140 | } | ||
141 | |||
142 | /* Mmm, a completed qTD. */ | ||
143 | process_inactive_qtd(whc, qset, td); | ||
144 | } | ||
145 | |||
146 | update |= qset_add_qtds(whc, qset); | ||
147 | |||
148 | done: | ||
149 | /* | ||
150 | * Remove this qset from the ASL if requested, but only if has | ||
151 | * no qTDs. | ||
152 | */ | ||
153 | if (qset->remove && qset->ntds == 0) { | ||
154 | asl_qset_remove(whc, qset); | ||
155 | update |= WHC_UPDATE_REMOVED; | ||
156 | } | ||
157 | return update; | ||
158 | } | ||
159 | |||
/*
 * asl_start - point the hardware at the first qset and enable the
 * asynchronous schedule, waiting (up to 1s) for it to report running.
 */
void asl_start(struct whc *whc)
{
	struct whc_qset *qset;

	qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);

	/* QH_LINK_NTDS(8): encodes the qTD count in the link pointer --
	 * confirm against the WHCI register layout. */
	le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);

	whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
		      WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
		      1000, "start ASL");
}
173 | |||
/*
 * asl_stop - disable the asynchronous schedule and wait (up to 1s)
 * for the hardware to report it stopped.
 */
void asl_stop(struct whc *whc)
{
	whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
		      WUSBSTS_ASYNC_SCHED, 0,
		      1000, "stop ASL");
}
181 | |||
/*
 * asl_update - tell the hardware the ASL changed and sleep until it
 * acknowledges by clearing WUSBCMD_ASYNC_UPDATED (woken from the
 * interrupt path via async_list_wq).
 */
void asl_update(struct whc *whc, uint32_t wusbcmd)
{
	whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
	wait_event(whc->async_list_wq,
		   (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0);
}
188 | |||
/**
 * scan_async_work - scan the ASL for qsets to process.
 *
 * Process each qset in the ASL in turn and then signal the WHC that
 * the ASL has been updated.
 *
 * Then start, stop or update the asynchronous schedule as required.
 */
void scan_async_work(struct work_struct *work)
{
	struct whc *whc = container_of(work, struct whc, async_work);
	struct whc_qset *qset, *t;
	enum whc_update update = 0;

	spin_lock_irq(&whc->lock);

	dump_asl(whc, "before processing");

	/*
	 * Traverse the software list backwards so new qsets can be
	 * safely inserted into the ASL without making it non-circular.
	 */
	list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
		if (!qset->in_hw_list) {
			/* Newly enqueued qset: link it into the hardware list. */
			asl_qset_insert(whc, qset);
			update |= WHC_UPDATE_ADDED;
		}

		update |= process_qset(whc, qset);
	}

	dump_asl(whc, "after processing");

	spin_unlock_irq(&whc->lock);

	if (update) {
		uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
		if (update & WHC_UPDATE_REMOVED)
			wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
		asl_update(whc, wusbcmd);
	}

	/*
	 * Now that the ASL is updated, complete the removal of any
	 * removed qsets.
	 *
	 * NOTE(review): this section takes spin_lock() while the one
	 * above uses spin_lock_irq() -- confirm whc->lock is never
	 * taken from interrupt context, otherwise this is a deadlock
	 * hazard.
	 */
	spin_lock(&whc->lock);

	list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
		qset_remove_complete(whc, qset);
	}

	spin_unlock(&whc->lock);
}
243 | |||
244 | /** | ||
245 | * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL). | ||
246 | * @whc: the WHCI host controller | ||
247 | * @urb: the URB to enqueue | ||
248 | * @mem_flags: flags for any memory allocations | ||
249 | * | ||
250 | * The qset for the endpoint is obtained and the urb queued on to it. | ||
251 | * | ||
252 | * Work is scheduled to update the hardware's view of the ASL. | ||
253 | */ | ||
254 | int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) | ||
255 | { | ||
256 | struct whc_qset *qset; | ||
257 | int err; | ||
258 | unsigned long flags; | ||
259 | |||
260 | spin_lock_irqsave(&whc->lock, flags); | ||
261 | |||
262 | qset = get_qset(whc, urb, GFP_ATOMIC); | ||
263 | if (qset == NULL) | ||
264 | err = -ENOMEM; | ||
265 | else | ||
266 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); | ||
267 | if (!err) { | ||
268 | usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); | ||
269 | if (!qset->in_sw_list) | ||
270 | asl_qset_insert_begin(whc, qset); | ||
271 | } | ||
272 | |||
273 | spin_unlock_irqrestore(&whc->lock, flags); | ||
274 | |||
275 | if (!err) | ||
276 | queue_work(whc->workqueue, &whc->async_work); | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
/**
 * asl_urb_dequeue - remove an URB (qset) from the async list.
 * @whc: the WHCI host controller
 * @urb: the URB to dequeue
 * @status: the current status of the URB
 *
 * URBs that do not yet have qTDs can simply be removed from the
 * software queue, otherwise the qset must be removed from the ASL so
 * the qTDs can be removed.
 */
int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
	struct whc_urb *wurb = urb->hcpriv;
	struct whc_qset *qset = wurb->qset;
	struct whc_std *std, *t;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&whc->lock, flags);

	/* Let the core veto the unlink (e.g. already unlinked). */
	ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
	if (ret < 0)
		goto out;

	/* Drop this URB's sTDs; detach other URBs' sTDs from their qTDs. */
	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(whc, std);
		else
			std->qtd = NULL; /* so this std is re-added when the qset is */
	}

	/* Defer URB giveback to process context. */
	asl_qset_remove(whc, qset);
	wurb->status = status;
	wurb->is_async = true;
	queue_work(whc->workqueue, &wurb->dequeue_work);

out:
	spin_unlock_irqrestore(&whc->lock, flags);

	return ret;
}
322 | |||
/**
 * asl_qset_delete - delete a qset from the ASL
 *
 * Flag the qset for removal, kick the scan work that performs the
 * actual unlinking, then call qset_delete() -- which presumably waits
 * for the removal to complete before freeing; confirm in qset.c.
 */
void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 1;
	queue_work(whc->workqueue, &whc->async_work);
	qset_delete(whc, qset);
}
332 | |||
333 | /** | ||
334 | * asl_init - initialize the asynchronous schedule list | ||
335 | * | ||
336 | * A dummy qset with no qTDs is added to the ASL to simplify removing | ||
337 | * qsets (no need to stop the ASL when the last qset is removed). | ||
338 | */ | ||
339 | int asl_init(struct whc *whc) | ||
340 | { | ||
341 | struct whc_qset *qset; | ||
342 | |||
343 | qset = qset_alloc(whc, GFP_KERNEL); | ||
344 | if (qset == NULL) | ||
345 | return -ENOMEM; | ||
346 | |||
347 | asl_qset_insert_begin(whc, qset); | ||
348 | asl_qset_insert(whc, qset); | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | /** | ||
354 | * asl_clean_up - free ASL resources | ||
355 | * | ||
356 | * The ASL is stopped and empty except for the dummy qset. | ||
357 | */ | ||
358 | void asl_clean_up(struct whc *whc) | ||
359 | { | ||
360 | struct whc_qset *qset; | ||
361 | |||
362 | if (!list_empty(&whc->async_list)) { | ||
363 | qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); | ||
364 | list_del(&qset->list_node); | ||
365 | qset_free(whc, qset); | ||
366 | } | ||
367 | } | ||
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c new file mode 100644 index 000000000000..ef3ad4dca945 --- /dev/null +++ b/drivers/usb/host/whci/hcd.c | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) driver. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/uwb/umc.h> | ||
22 | |||
23 | #include "../../wusbcore/wusbhc.h" | ||
24 | |||
25 | #include "whcd.h" | ||
26 | |||
/*
 * One time initialization.
 *
 * Nothing to do here.
 */
static int whc_reset(struct usb_hcd *usb_hcd)
{
	return 0;
}
36 | |||
/*
 * Start the wireless host controller.
 *
 * Start device notification.
 *
 * Put hc into run state, set DNTS parameters.
 */
static int whc_start(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct whc *whc = wusbhc_to_whc(wusbhc);
	u8 bcid;
	int ret;

	mutex_lock(&wusbhc->mutex);

	/* Enable all interrupt sources we handle in whc_int_handler(). */
	le_writel(WUSBINTR_GEN_CMD_DONE
		  | WUSBINTR_HOST_ERR
		  | WUSBINTR_ASYNC_SCHED_SYNCED
		  | WUSBINTR_DNTS_INT
		  | WUSBINTR_ERR_INT
		  | WUSBINTR_INT,
		  whc->base + WUSBINTR);

	/* set cluster ID */
	bcid = wusb_cluster_id_get();
	ret = whc_set_cluster_id(whc, bcid);
	if (ret < 0)
		goto out; /* NOTE(review): bcid from wusb_cluster_id_get()
			   * is not put back on this failure path --
			   * confirm whether wusb_cluster_id_put(bcid)
			   * is needed here. */
	wusbhc->cluster_id = bcid;

	/* start HC */
	whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);

	usb_hcd->uses_new_polling = 1;
	usb_hcd->poll_rh = 1;
	usb_hcd->state = HC_STATE_RUNNING;

out:
	mutex_unlock(&wusbhc->mutex);
	return ret;
}
79 | |||
80 | |||
/*
 * Stop the wireless host controller.
 *
 * Stop device notification.
 *
 * Wait for pending transfer to stop? Put hc into stop state?
 */
static void whc_stop(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct whc *whc = wusbhc_to_whc(wusbhc);

	mutex_lock(&wusbhc->mutex);

	wusbhc_stop(wusbhc);

	/* stop HC: mask all interrupts, clear RUN, wait for halt. */
	le_writel(0, whc->base + WUSBINTR);
	whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
		      WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
		      100, "HC to halt");

	/* Release the cluster ID acquired in whc_start(). */
	wusb_cluster_id_put(wusbhc->cluster_id);

	mutex_unlock(&wusbhc->mutex);
}
108 | |||
static int whc_get_frame_number(struct usb_hcd *usb_hcd)
{
	/* Frame numbers are not applicable to WUSB. */
	return -ENOSYS;
}
114 | |||
115 | |||
116 | /* | ||
117 | * Queue an URB to the ASL or PZL | ||
118 | */ | ||
119 | static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, | ||
120 | gfp_t mem_flags) | ||
121 | { | ||
122 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
123 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
124 | int ret; | ||
125 | |||
126 | switch (usb_pipetype(urb->pipe)) { | ||
127 | case PIPE_INTERRUPT: | ||
128 | ret = pzl_urb_enqueue(whc, urb, mem_flags); | ||
129 | break; | ||
130 | case PIPE_ISOCHRONOUS: | ||
131 | dev_err(&whc->umc->dev, "isochronous transfers unsupported\n"); | ||
132 | ret = -ENOTSUPP; | ||
133 | break; | ||
134 | case PIPE_CONTROL: | ||
135 | case PIPE_BULK: | ||
136 | default: | ||
137 | ret = asl_urb_enqueue(whc, urb, mem_flags); | ||
138 | break; | ||
139 | }; | ||
140 | |||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Remove a queued URB from the ASL or PZL. | ||
146 | */ | ||
147 | static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status) | ||
148 | { | ||
149 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
150 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
151 | int ret; | ||
152 | |||
153 | switch (usb_pipetype(urb->pipe)) { | ||
154 | case PIPE_INTERRUPT: | ||
155 | ret = pzl_urb_dequeue(whc, urb, status); | ||
156 | break; | ||
157 | case PIPE_ISOCHRONOUS: | ||
158 | ret = -ENOTSUPP; | ||
159 | break; | ||
160 | case PIPE_CONTROL: | ||
161 | case PIPE_BULK: | ||
162 | default: | ||
163 | ret = asl_urb_dequeue(whc, urb, status); | ||
164 | break; | ||
165 | }; | ||
166 | |||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Wait for all URBs to the endpoint to be completed, then delete the | ||
172 | * qset. | ||
173 | */ | ||
174 | static void whc_endpoint_disable(struct usb_hcd *usb_hcd, | ||
175 | struct usb_host_endpoint *ep) | ||
176 | { | ||
177 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
178 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
179 | struct whc_qset *qset; | ||
180 | |||
181 | qset = ep->hcpriv; | ||
182 | if (qset) { | ||
183 | ep->hcpriv = NULL; | ||
184 | if (usb_endpoint_xfer_bulk(&ep->desc) | ||
185 | || usb_endpoint_xfer_control(&ep->desc)) | ||
186 | asl_qset_delete(whc, qset); | ||
187 | else | ||
188 | pzl_qset_delete(whc, qset); | ||
189 | } | ||
190 | } | ||
191 | |||
/*
 * hc_driver ops for the WHCI host controller; root-hub operations are
 * delegated to the shared wusbcore implementations.
 */
static struct hc_driver whc_hc_driver = {
	.description = "whci-hcd",
	.product_desc = "Wireless host controller",
	/* private area after the hcd holds the rest of struct whc */
	.hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
	.irq = whc_int_handler,
	.flags = HCD_USB2,

	.reset = whc_reset,
	.start = whc_start,
	.stop = whc_stop,
	.get_frame_number = whc_get_frame_number,
	.urb_enqueue = whc_urb_enqueue,
	.urb_dequeue = whc_urb_dequeue,
	.endpoint_disable = whc_endpoint_disable,

	.hub_status_data = wusbhc_rh_status_data,
	.hub_control = wusbhc_rh_control,
	.bus_suspend = wusbhc_rh_suspend,
	.bus_resume = wusbhc_rh_resume,
	.start_port_reset = wusbhc_rh_start_port_reset,
};
213 | |||
214 | static int whc_probe(struct umc_dev *umc) | ||
215 | { | ||
216 | int ret = -ENOMEM; | ||
217 | struct usb_hcd *usb_hcd; | ||
218 | struct wusbhc *wusbhc = NULL; | ||
219 | struct whc *whc = NULL; | ||
220 | struct device *dev = &umc->dev; | ||
221 | |||
222 | usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci"); | ||
223 | if (usb_hcd == NULL) { | ||
224 | dev_err(dev, "unable to create hcd\n"); | ||
225 | goto error; | ||
226 | } | ||
227 | |||
228 | usb_hcd->wireless = 1; | ||
229 | |||
230 | wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
231 | whc = wusbhc_to_whc(wusbhc); | ||
232 | whc->umc = umc; | ||
233 | |||
234 | ret = whc_init(whc); | ||
235 | if (ret) | ||
236 | goto error; | ||
237 | |||
238 | wusbhc->dev = dev; | ||
239 | wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent); | ||
240 | if (!wusbhc->uwb_rc) { | ||
241 | ret = -ENODEV; | ||
242 | dev_err(dev, "cannot get radio controller\n"); | ||
243 | goto error; | ||
244 | } | ||
245 | |||
246 | if (whc->n_devices > USB_MAXCHILDREN) { | ||
247 | dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n", | ||
248 | whc->n_devices); | ||
249 | wusbhc->ports_max = USB_MAXCHILDREN; | ||
250 | } else | ||
251 | wusbhc->ports_max = whc->n_devices; | ||
252 | wusbhc->mmcies_max = whc->n_mmc_ies; | ||
253 | wusbhc->start = whc_wusbhc_start; | ||
254 | wusbhc->stop = whc_wusbhc_stop; | ||
255 | wusbhc->mmcie_add = whc_mmcie_add; | ||
256 | wusbhc->mmcie_rm = whc_mmcie_rm; | ||
257 | wusbhc->dev_info_set = whc_dev_info_set; | ||
258 | wusbhc->bwa_set = whc_bwa_set; | ||
259 | wusbhc->set_num_dnts = whc_set_num_dnts; | ||
260 | wusbhc->set_ptk = whc_set_ptk; | ||
261 | wusbhc->set_gtk = whc_set_gtk; | ||
262 | |||
263 | ret = wusbhc_create(wusbhc); | ||
264 | if (ret) | ||
265 | goto error_wusbhc_create; | ||
266 | |||
267 | ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED); | ||
268 | if (ret) { | ||
269 | dev_err(dev, "cannot add HCD: %d\n", ret); | ||
270 | goto error_usb_add_hcd; | ||
271 | } | ||
272 | |||
273 | ret = wusbhc_b_create(wusbhc); | ||
274 | if (ret) { | ||
275 | dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret); | ||
276 | goto error_wusbhc_b_create; | ||
277 | } | ||
278 | |||
279 | return 0; | ||
280 | |||
281 | error_wusbhc_b_create: | ||
282 | usb_remove_hcd(usb_hcd); | ||
283 | error_usb_add_hcd: | ||
284 | wusbhc_destroy(wusbhc); | ||
285 | error_wusbhc_create: | ||
286 | uwb_rc_put(wusbhc->uwb_rc); | ||
287 | error: | ||
288 | whc_clean_up(whc); | ||
289 | if (usb_hcd) | ||
290 | usb_put_hcd(usb_hcd); | ||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | |||
/*
 * whc_remove - UMC remove entry point; undo whc_probe() in reverse
 * order (phase B, HCD, WUSB HC, radio controller ref, HW state, hcd).
 */
static void whc_remove(struct umc_dev *umc)
{
	struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct whc *whc = wusbhc_to_whc(wusbhc);

	if (usb_hcd) {
		wusbhc_b_destroy(wusbhc);
		usb_remove_hcd(usb_hcd);
		wusbhc_destroy(wusbhc);
		uwb_rc_put(wusbhc->uwb_rc);
		whc_clean_up(whc);
		usb_put_hcd(usb_hcd);
	}
}
310 | |||
/* UMC driver glue: bind to the WHCI WUSB host controller capability. */
static struct umc_driver whci_hc_driver = {
	.name = "whci-hcd",
	.cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
	.probe = whc_probe,
	.remove = whc_remove,
};
317 | |||
/* Module entry point: register the UMC driver. */
static int __init whci_hc_driver_init(void)
{
	return umc_driver_register(&whci_hc_driver);
}
module_init(whci_hc_driver_init);
323 | |||
/* Module exit point: unregister the UMC driver. */
static void __exit whci_hc_driver_exit(void)
{
	umc_driver_unregister(&whci_hc_driver);
}
module_exit(whci_hc_driver_exit);
329 | |||
/* PCI device ID's that we handle (so it gets loaded) */
/* Matches any device in the WHCI wireless class; actual binding is
 * done per-capability through the umc bus. */
static struct pci_device_id whci_hcd_id_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
	{ /* empty last entry */ }
};
MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
336 | |||
337 | MODULE_DESCRIPTION("WHCI Wireless USB host controller driver"); | ||
338 | MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); | ||
339 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c new file mode 100644 index 000000000000..ac86e59c1225 --- /dev/null +++ b/drivers/usb/host/whci/hw.c | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) hardware access helpers. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/dma-mapping.h> | ||
20 | #include <linux/uwb/umc.h> | ||
21 | |||
22 | #include "../../wusbcore/wusbhc.h" | ||
23 | |||
24 | #include "whcd.h" | ||
25 | |||
26 | void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val) | ||
27 | { | ||
28 | unsigned long flags; | ||
29 | u32 cmd; | ||
30 | |||
31 | spin_lock_irqsave(&whc->lock, flags); | ||
32 | |||
33 | cmd = le_readl(whc->base + WUSBCMD); | ||
34 | cmd = (cmd & ~mask) | val; | ||
35 | le_writel(cmd, whc->base + WUSBCMD); | ||
36 | |||
37 | spin_unlock_irqrestore(&whc->lock, flags); | ||
38 | } | ||
39 | |||
40 | /** | ||
41 | * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register | ||
42 | * @whc: the WHCI HC | ||
43 | * @cmd: command to start. | ||
44 | * @params: parameters for the command (the WUSBGENCMDPARAMS register value). | ||
45 | * @addr: pointer to any data for the command (may be NULL). | ||
46 | * @len: length of the data (if any). | ||
47 | */ | ||
48 | int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len) | ||
49 | { | ||
50 | unsigned long flags; | ||
51 | dma_addr_t dma_addr; | ||
52 | int t; | ||
53 | |||
54 | mutex_lock(&whc->mutex); | ||
55 | |||
56 | /* Wait for previous command to complete. */ | ||
57 | t = wait_event_timeout(whc->cmd_wq, | ||
58 | (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0, | ||
59 | WHC_GENCMD_TIMEOUT_MS); | ||
60 | if (t == 0) { | ||
61 | dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n", | ||
62 | le_readl(whc->base + WUSBGENCMDSTS), | ||
63 | le_readl(whc->base + WUSBGENCMDPARAMS)); | ||
64 | return -ETIMEDOUT; | ||
65 | } | ||
66 | |||
67 | if (addr) { | ||
68 | memcpy(whc->gen_cmd_buf, addr, len); | ||
69 | dma_addr = whc->gen_cmd_buf_dma; | ||
70 | } else | ||
71 | dma_addr = 0; | ||
72 | |||
73 | /* Poke registers to start cmd. */ | ||
74 | spin_lock_irqsave(&whc->lock, flags); | ||
75 | |||
76 | le_writel(params, whc->base + WUSBGENCMDPARAMS); | ||
77 | le_writeq(dma_addr, whc->base + WUSBGENADDR); | ||
78 | |||
79 | le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd, | ||
80 | whc->base + WUSBGENCMDSTS); | ||
81 | |||
82 | spin_unlock_irqrestore(&whc->lock, flags); | ||
83 | |||
84 | mutex_unlock(&whc->mutex); | ||
85 | |||
86 | return 0; | ||
87 | } | ||
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c new file mode 100644 index 000000000000..34a783cb0133 --- /dev/null +++ b/drivers/usb/host/whci/init.c | |||
@@ -0,0 +1,188 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) initialization. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/dma-mapping.h> | ||
20 | #include <linux/uwb/umc.h> | ||
21 | |||
22 | #include "../../wusbcore/wusbhc.h" | ||
23 | |||
24 | #include "whcd.h" | ||
25 | |||
26 | /* | ||
27 | * Reset the host controller. | ||
28 | */ | ||
29 | static void whc_hw_reset(struct whc *whc) | ||
30 | { | ||
31 | le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD); | ||
32 | whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0, | ||
33 | 100, "reset"); | ||
34 | } | ||
35 | |||
36 | static void whc_hw_init_di_buf(struct whc *whc) | ||
37 | { | ||
38 | int d; | ||
39 | |||
40 | /* Disable all entries in the Device Information buffer. */ | ||
41 | for (d = 0; d < whc->n_devices; d++) | ||
42 | whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE; | ||
43 | |||
44 | le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR); | ||
45 | } | ||
46 | |||
47 | static void whc_hw_init_dn_buf(struct whc *whc) | ||
48 | { | ||
49 | /* Clear the Device Notification buffer to ensure the V (valid) | ||
50 | * bits are clear. */ | ||
51 | memset(whc->dn_buf, 0, 4096); | ||
52 | |||
53 | le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR); | ||
54 | } | ||
55 | |||
/**
 * whc_init - one-time initialization of a WHCI host controller
 * @whc: the WHC to set up (its umc field must already be valid)
 *
 * Initializes locks, wait queues, work items and qset lists; maps the
 * controller's register space; resets the hardware; reads the
 * controller's capabilities from WHCSPARAMS; and allocates the
 * DMA-coherent buffers (qset pool, ASL, PZL, generic command buffer,
 * Device Information buffer, Device Notification buffer).
 *
 * Returns 0 on success or a negative errno.  On failure the partial
 * state is released via whc_clean_up(), which checks each resource
 * before freeing it (assumes struct whc was zero-initialized by the
 * caller's allocator — TODO confirm).
 */
int whc_init(struct whc *whc)
{
	u32 whcsparams;
	int ret, i;
	resource_size_t start, len;

	spin_lock_init(&whc->lock);
	mutex_init(&whc->mutex);
	init_waitqueue_head(&whc->cmd_wq);
	init_waitqueue_head(&whc->async_list_wq);
	init_waitqueue_head(&whc->periodic_list_wq);
	/* Single-threaded workqueue: presumably to serialize the DN,
	 * async and periodic scan work items — confirm. */
	whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev));
	if (whc->workqueue == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	INIT_WORK(&whc->dn_work, whc_dn_work);

	INIT_WORK(&whc->async_work, scan_async_work);
	INIT_LIST_HEAD(&whc->async_list);
	INIT_LIST_HEAD(&whc->async_removed_list);

	/* One software list per supported period (see pzl.c). */
	INIT_WORK(&whc->periodic_work, scan_periodic_work);
	for (i = 0; i < 5; i++)
		INIT_LIST_HEAD(&whc->periodic_list[i]);
	INIT_LIST_HEAD(&whc->periodic_removed_list);

	/* Map HC registers. */
	start = whc->umc->resource.start;
	len = whc->umc->resource.end - start + 1;
	if (!request_mem_region(start, len, "whci-hc")) {
		dev_err(&whc->umc->dev, "can't request HC region\n");
		ret = -EBUSY;
		goto error;
	}
	whc->base_phys = start;
	whc->base = ioremap(start, len);
	if (!whc->base) {
		dev_err(&whc->umc->dev, "ioremap\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Quiesce the hardware before touching any other registers. */
	whc_hw_reset(whc);

	/* Read maximum number of devices, keys and MMC IEs. */
	whcsparams = le_readl(whc->base + WHCSPARAMS);
	whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
	whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
	whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);

	dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
		whc->n_devices, whc->n_keys, whc->n_mmc_ies);

	/* 64-byte alignment: presumably a qset hardware alignment
	 * requirement from the WHCI spec — confirm. */
	whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
					 sizeof(struct whc_qset), 64, 0);
	if (whc->qset_pool == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	ret = asl_init(whc);
	if (ret < 0)
		goto error;
	ret = pzl_init(whc);
	if (ret < 0)
		goto error;

	/* Allocate and initialize a buffer for generic commands, the
	   Device Information buffer, and the Device Notification
	   buffer. */

	whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
					      &whc->gen_cmd_buf_dma, GFP_KERNEL);
	if (whc->gen_cmd_buf == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
					 sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
					 &whc->dn_buf_dma, GFP_KERNEL);
	if (!whc->dn_buf) {
		ret = -ENOMEM;
		goto error;
	}
	whc_hw_init_dn_buf(whc);

	/* Sized from the capability read above. */
	whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
					 sizeof(struct di_buf_entry) * whc->n_devices,
					 &whc->di_buf_dma, GFP_KERNEL);
	if (!whc->di_buf) {
		ret = -ENOMEM;
		goto error;
	}
	whc_hw_init_di_buf(whc);

	return 0;

error:
	whc_clean_up(whc);
	return ret;
}
159 | |||
/**
 * whc_clean_up - release every resource acquired by whc_init()
 * @whc: the WHC to tear down
 *
 * Frees resources in roughly the reverse order of acquisition.  Each
 * release is guarded by a NULL/zero check, so this is also safe to
 * call on a partially initialized controller from whc_init()'s error
 * path.
 */
void whc_clean_up(struct whc *whc)
{
	resource_size_t len;

	if (whc->di_buf)
		dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
				  whc->di_buf, whc->di_buf_dma);
	if (whc->dn_buf)
		dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
				  whc->dn_buf, whc->dn_buf_dma);
	if (whc->gen_cmd_buf)
		dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
				  whc->gen_cmd_buf, whc->gen_cmd_buf_dma);

	/* pzl_clean_up() checks its own pointer; presumably
	 * asl_clean_up() does likewise — confirm. */
	pzl_clean_up(whc);
	asl_clean_up(whc);

	if (whc->qset_pool)
		dma_pool_destroy(whc->qset_pool);

	/* Recompute the region length the same way whc_init() did. */
	len = whc->umc->resource.end - whc->umc->resource.start + 1;
	if (whc->base)
		iounmap(whc->base);
	if (whc->base_phys)
		release_mem_region(whc->base_phys, len);

	if (whc->workqueue)
		destroy_workqueue(whc->workqueue);
}
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c new file mode 100644 index 000000000000..fce01174aa9b --- /dev/null +++ b/drivers/usb/host/whci/int.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) interrupt handling. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/uwb/umc.h> | ||
22 | |||
23 | #include "../../wusbcore/wusbhc.h" | ||
24 | |||
25 | #include "whcd.h" | ||
26 | |||
/* A transfer completed or errored somewhere: kick both the async
 * (ASL) and periodic (PZL) scanners so the finished qTDs are found
 * and processed from process context. */
static void transfer_done(struct whc *whc)
{
	queue_work(whc->workqueue, &whc->async_work);
	queue_work(whc->workqueue, &whc->periodic_work);
}
32 | |||
/*
 * Top-half interrupt handler for the WHC.
 *
 * Reads the pending status bits and acknowledges them by writing the
 * same bits back (presumably write-1-to-clear — confirm against the
 * WHCI register spec), then wakes waiters or defers processing to the
 * driver workqueue.  Returns IRQ_NONE when none of our bits are set,
 * so the line can be shared.
 */
irqreturn_t whc_int_handler(struct usb_hcd *hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
	struct whc *whc = wusbhc_to_whc(wusbhc);
	u32 sts;

	sts = le_readl(whc->base + WUSBSTS);
	if (!(sts & WUSBSTS_INT_MASK))
		return IRQ_NONE;
	le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);

	/* whc_do_gencmd() sleeps on cmd_wq until the previous generic
	 * command finishes. */
	if (sts & WUSBSTS_GEN_CMD_DONE)
		wake_up(&whc->cmd_wq);

	if (sts & WUSBSTS_HOST_ERR)
		dev_err(&whc->umc->dev, "FIXME: host system error\n");

	/* asl_update()/pzl_update() sleep on these until the hardware
	 * acknowledges a schedule change. */
	if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
		wake_up(&whc->async_list_wq);

	if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
		wake_up(&whc->periodic_list_wq);

	/* Device notifications are parsed in process context. */
	if (sts & WUSBSTS_DNTS_INT)
		queue_work(whc->workqueue, &whc->dn_work);

	/*
	 * A transfer completed (see [WHCI] section 4.7.1.2 for when
	 * this occurs).
	 */
	if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
		transfer_done(whc);

	return IRQ_HANDLED;
}
68 | |||
69 | static int process_dn_buf(struct whc *whc) | ||
70 | { | ||
71 | struct wusbhc *wusbhc = &whc->wusbhc; | ||
72 | struct dn_buf_entry *dn; | ||
73 | int processed = 0; | ||
74 | |||
75 | for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) { | ||
76 | if (dn->status & WHC_DN_STATUS_VALID) { | ||
77 | wusbhc_handle_dn(wusbhc, dn->src_addr, | ||
78 | (struct wusb_dn_hdr *)dn->dn_data, | ||
79 | dn->msg_size); | ||
80 | dn->status &= ~WHC_DN_STATUS_VALID; | ||
81 | processed++; | ||
82 | } | ||
83 | } | ||
84 | return processed; | ||
85 | } | ||
86 | |||
87 | void whc_dn_work(struct work_struct *work) | ||
88 | { | ||
89 | struct whc *whc = container_of(work, struct whc, dn_work); | ||
90 | int processed; | ||
91 | |||
92 | do { | ||
93 | processed = process_dn_buf(whc); | ||
94 | } while (processed); | ||
95 | } | ||
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c new file mode 100644 index 000000000000..8d62df0c330b --- /dev/null +++ b/drivers/usb/host/whci/pzl.c | |||
@@ -0,0 +1,398 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) periodic schedule management. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/dma-mapping.h> | ||
20 | #include <linux/uwb/umc.h> | ||
21 | #include <linux/usb.h> | ||
22 | #define D_LOCAL 0 | ||
23 | #include <linux/uwb/debug.h> | ||
24 | |||
25 | #include "../../wusbcore/wusbhc.h" | ||
26 | |||
27 | #include "whcd.h" | ||
28 | |||
#if D_LOCAL >= 4
/* Debug-only: dump every qset on each of the five periodic lists.
 * Compiled in only when the local debug level is >= 4. */
static void dump_pzl(struct whc *whc, const char *tag)
{
	struct device *dev = &whc->umc->dev;
	struct whc_qset *qset;
	int period = 0;

	d_printf(4, dev, "PZL %s\n", tag);

	for (period = 0; period < 5; period++) {
		d_printf(4, dev, "Period %d\n", period);
		list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
			dump_qset(qset, dev);
		}
	}
}
#else
/* No-op stub so callers need no #ifdefs at the call sites. */
static inline void dump_pzl(struct whc *whc, const char *tag)
{
}
#endif
50 | |||
51 | static void update_pzl_pointers(struct whc *whc, int period, u64 addr) | ||
52 | { | ||
53 | switch (period) { | ||
54 | case 0: | ||
55 | whc_qset_set_link_ptr(&whc->pz_list[0], addr); | ||
56 | whc_qset_set_link_ptr(&whc->pz_list[2], addr); | ||
57 | whc_qset_set_link_ptr(&whc->pz_list[4], addr); | ||
58 | whc_qset_set_link_ptr(&whc->pz_list[6], addr); | ||
59 | whc_qset_set_link_ptr(&whc->pz_list[8], addr); | ||
60 | whc_qset_set_link_ptr(&whc->pz_list[10], addr); | ||
61 | whc_qset_set_link_ptr(&whc->pz_list[12], addr); | ||
62 | whc_qset_set_link_ptr(&whc->pz_list[14], addr); | ||
63 | break; | ||
64 | case 1: | ||
65 | whc_qset_set_link_ptr(&whc->pz_list[1], addr); | ||
66 | whc_qset_set_link_ptr(&whc->pz_list[5], addr); | ||
67 | whc_qset_set_link_ptr(&whc->pz_list[9], addr); | ||
68 | whc_qset_set_link_ptr(&whc->pz_list[13], addr); | ||
69 | break; | ||
70 | case 2: | ||
71 | whc_qset_set_link_ptr(&whc->pz_list[3], addr); | ||
72 | whc_qset_set_link_ptr(&whc->pz_list[11], addr); | ||
73 | break; | ||
74 | case 3: | ||
75 | whc_qset_set_link_ptr(&whc->pz_list[7], addr); | ||
76 | break; | ||
77 | case 4: | ||
78 | whc_qset_set_link_ptr(&whc->pz_list[15], addr); | ||
79 | break; | ||
80 | } | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Return the 'period' to use for this qset. The minimum interval for | ||
85 | * the endpoint is used so whatever urbs are submitted the device is | ||
86 | * polled often enough. | ||
87 | */ | ||
88 | static int qset_get_period(struct whc *whc, struct whc_qset *qset) | ||
89 | { | ||
90 | uint8_t bInterval = qset->ep->desc.bInterval; | ||
91 | |||
92 | if (bInterval < 6) | ||
93 | bInterval = 6; | ||
94 | if (bInterval > 10) | ||
95 | bInterval = 10; | ||
96 | return bInterval - 6; | ||
97 | } | ||
98 | |||
99 | static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset) | ||
100 | { | ||
101 | int period; | ||
102 | |||
103 | period = qset_get_period(whc, qset); | ||
104 | |||
105 | qset_clear(whc, qset); | ||
106 | list_move(&qset->list_node, &whc->periodic_list[period]); | ||
107 | qset->in_sw_list = true; | ||
108 | } | ||
109 | |||
110 | static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset) | ||
111 | { | ||
112 | list_move(&qset->list_node, &whc->periodic_removed_list); | ||
113 | qset->in_hw_list = false; | ||
114 | qset->in_sw_list = false; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * pzl_process_qset - process any recently inactivated or halted qTDs | ||
119 | * in a qset. | ||
120 | * | ||
121 | * After inactive qTDs are removed, new qTDs can be added if the | ||
122 | * urb queue still contains URBs. | ||
123 | * | ||
124 | * Returns the schedule updates required. | ||
125 | */ | ||
126 | static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset) | ||
127 | { | ||
128 | enum whc_update update = 0; | ||
129 | uint32_t status = 0; | ||
130 | |||
131 | while (qset->ntds) { | ||
132 | struct whc_qtd *td; | ||
133 | int t; | ||
134 | |||
135 | t = qset->td_start; | ||
136 | td = &qset->qtd[qset->td_start]; | ||
137 | status = le32_to_cpu(td->status); | ||
138 | |||
139 | /* | ||
140 | * Nothing to do with a still active qTD. | ||
141 | */ | ||
142 | if (status & QTD_STS_ACTIVE) | ||
143 | break; | ||
144 | |||
145 | if (status & QTD_STS_HALTED) { | ||
146 | /* Ug, an error. */ | ||
147 | process_halted_qtd(whc, qset, td); | ||
148 | goto done; | ||
149 | } | ||
150 | |||
151 | /* Mmm, a completed qTD. */ | ||
152 | process_inactive_qtd(whc, qset, td); | ||
153 | } | ||
154 | |||
155 | update |= qset_add_qtds(whc, qset); | ||
156 | |||
157 | done: | ||
158 | /* | ||
159 | * If there are no qTDs in this qset, remove it from the PZL. | ||
160 | */ | ||
161 | if (qset->remove && qset->ntds == 0) { | ||
162 | pzl_qset_remove(whc, qset); | ||
163 | update |= WHC_UPDATE_REMOVED; | ||
164 | } | ||
165 | |||
166 | return update; | ||
167 | } | ||
168 | |||
/**
 * pzl_start - start the periodic schedule
 * @whc: the WHCI host controller
 *
 * The PZL must be valid (e.g., all entries in the list should have
 * the T bit set).
 *
 * Programs the list base address, sets the enable bit and waits (up
 * to 1s) for the hardware to report the periodic schedule running.
 */
void pzl_start(struct whc *whc)
{
	le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);

	whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
		      WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
		      1000, "start PZL");
}
185 | |||
/**
 * pzl_stop - stop the periodic schedule
 * @whc: the WHCI host controller
 *
 * Clears the enable bit and waits (up to 1s) for the hardware to
 * report the periodic schedule stopped.
 */
void pzl_stop(struct whc *whc)
{
	whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
		      WUSBSTS_PERIODIC_SCHED, 0,
		      1000, "stop PZL");
}
197 | |||
/* Tell the hardware the PZL changed and sleep until it acknowledges:
 * the interrupt handler wakes periodic_list_wq when the hardware
 * clears WUSBCMD_PERIODIC_UPDATED (sync'd).  Must be called from a
 * context that may sleep. */
void pzl_update(struct whc *whc, uint32_t wusbcmd)
{
	whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
	wait_event(whc->periodic_list_wq,
		   (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0);
}
204 | |||
/*
 * Rebuild the hardware's view of the PZL from the software lists.
 *
 * Walks periods 0..4; each qset is linked to the previously visited
 * one and the period's root slots are then pointed at the chain head.
 *
 * NOTE(review): tmp_qh is deliberately not reset between periods, so
 * a longer period's chain continues into the shorter periods' qsets —
 * confirm this matches the WHCI PZL tree layout.
 */
static void update_pzl_hw_view(struct whc *whc)
{
	struct whc_qset *qset, *t;
	int period;
	u64 tmp_qh = 0;

	for (period = 0; period < 5; period++) {
		list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
			whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
			tmp_qh = qset->qset_dma;
			qset->in_hw_list = true;
		}
		update_pzl_pointers(whc, period, tmp_qh);
	}
}
220 | |||
221 | /** | ||
222 | * scan_periodic_work - scan the PZL for qsets to process. | ||
223 | * | ||
224 | * Process each qset in the PZL in turn and then signal the WHC that | ||
225 | * the PZL has been updated. | ||
226 | * | ||
227 | * Then start, stop or update the periodic schedule as required. | ||
228 | */ | ||
229 | void scan_periodic_work(struct work_struct *work) | ||
230 | { | ||
231 | struct whc *whc = container_of(work, struct whc, periodic_work); | ||
232 | struct whc_qset *qset, *t; | ||
233 | enum whc_update update = 0; | ||
234 | int period; | ||
235 | |||
236 | spin_lock_irq(&whc->lock); | ||
237 | |||
238 | dump_pzl(whc, "before processing"); | ||
239 | |||
240 | for (period = 4; period >= 0; period--) { | ||
241 | list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { | ||
242 | if (!qset->in_hw_list) | ||
243 | update |= WHC_UPDATE_ADDED; | ||
244 | update |= pzl_process_qset(whc, qset); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED)) | ||
249 | update_pzl_hw_view(whc); | ||
250 | |||
251 | dump_pzl(whc, "after processing"); | ||
252 | |||
253 | spin_unlock_irq(&whc->lock); | ||
254 | |||
255 | if (update) { | ||
256 | uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB; | ||
257 | if (update & WHC_UPDATE_REMOVED) | ||
258 | wusbcmd |= WUSBCMD_PERIODIC_QSET_RM; | ||
259 | pzl_update(whc, wusbcmd); | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * Now that the PZL is updated, complete the removal of any | ||
264 | * removed qsets. | ||
265 | */ | ||
266 | spin_lock(&whc->lock); | ||
267 | |||
268 | list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) { | ||
269 | qset_remove_complete(whc, qset); | ||
270 | } | ||
271 | |||
272 | spin_unlock(&whc->lock); | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * pzl_urb_enqueue - queue an URB onto the periodic list (PZL) | ||
277 | * @whc: the WHCI host controller | ||
278 | * @urb: the URB to enqueue | ||
279 | * @mem_flags: flags for any memory allocations | ||
280 | * | ||
281 | * The qset for the endpoint is obtained and the urb queued on to it. | ||
282 | * | ||
283 | * Work is scheduled to update the hardware's view of the PZL. | ||
284 | */ | ||
285 | int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) | ||
286 | { | ||
287 | struct whc_qset *qset; | ||
288 | int err; | ||
289 | unsigned long flags; | ||
290 | |||
291 | spin_lock_irqsave(&whc->lock, flags); | ||
292 | |||
293 | qset = get_qset(whc, urb, GFP_ATOMIC); | ||
294 | if (qset == NULL) | ||
295 | err = -ENOMEM; | ||
296 | else | ||
297 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); | ||
298 | if (!err) { | ||
299 | usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); | ||
300 | if (!qset->in_sw_list) | ||
301 | qset_insert_in_sw_list(whc, qset); | ||
302 | } | ||
303 | |||
304 | spin_unlock_irqrestore(&whc->lock, flags); | ||
305 | |||
306 | if (!err) | ||
307 | queue_work(whc->workqueue, &whc->periodic_work); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
/**
 * pzl_urb_dequeue - remove an URB (qset) from the periodic list
 * @whc: the WHCI host controller
 * @urb: the URB to dequeue
 * @status: the current status of the URB
 *
 * URBs that do yet have qTDs can simply be removed from the software
 * queue, otherwise the qset must be removed so the qTDs can be safely
 * removed.
 *
 * Returns 0 on success or the negative errno from
 * usb_hcd_check_unlink_urb() if the URB cannot be unlinked.
 */
int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
	struct whc_urb *wurb = urb->hcpriv;
	struct whc_qset *qset = wurb->qset;
	struct whc_std *std, *t;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&whc->lock, flags);

	/* Verify the core still considers this URB unlinked-able. */
	ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
	if (ret < 0)
		goto out;

	/* Free this URB's sTDs; sTDs belonging to other URBs on the
	 * qset keep their data but lose their qTD so they are
	 * re-submitted when the qset is re-added. */
	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(whc, std);
		else
			std->qtd = NULL; /* so this std is re-added when the qset is */
	}

	/* Completion is finished asynchronously by dequeue_work. */
	pzl_qset_remove(whc, qset);
	wurb->status = status;
	wurb->is_async = false;
	queue_work(whc->workqueue, &wurb->dequeue_work);

out:
	spin_unlock_irqrestore(&whc->lock, flags);

	return ret;
}
353 | |||
/**
 * pzl_qset_delete - delete a qset from the PZL
 *
 * Marks the qset for removal, kicks the periodic scan work to detach
 * it from the schedule, then hands off to qset_delete() (which
 * presumably waits on remove_complete before freeing — confirm).
 * The remove flag must be set before the work runs.
 */
void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 1;
	queue_work(whc->workqueue, &whc->periodic_work);
	qset_delete(whc, qset);
}
363 | |||
364 | |||
365 | /** | ||
366 | * pzl_init - initialize the periodic zone list | ||
367 | * @whc: the WHCI host controller | ||
368 | */ | ||
369 | int pzl_init(struct whc *whc) | ||
370 | { | ||
371 | int i; | ||
372 | |||
373 | whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16, | ||
374 | &whc->pz_list_dma, GFP_KERNEL); | ||
375 | if (whc->pz_list == NULL) | ||
376 | return -ENOMEM; | ||
377 | |||
378 | /* Set T bit on all elements in PZL. */ | ||
379 | for (i = 0; i < 16; i++) | ||
380 | whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); | ||
381 | |||
382 | le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE); | ||
383 | |||
384 | return 0; | ||
385 | } | ||
386 | |||
/**
 * pzl_clean_up - free PZL resources
 * @whc: the WHCI host controller
 *
 * The PZL is stopped and empty.
 *
 * Safe to call when pzl_init() never ran or failed: the pointer is
 * checked first.
 */
void pzl_clean_up(struct whc *whc)
{
	if (whc->pz_list)
		dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
				  whc->pz_list_dma);
}
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c new file mode 100644 index 000000000000..0420037d2e18 --- /dev/null +++ b/drivers/usb/host/whci/qset.c | |||
@@ -0,0 +1,567 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) qset management. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/dma-mapping.h> | ||
20 | #include <linux/uwb/umc.h> | ||
21 | #include <linux/usb.h> | ||
22 | |||
23 | #include "../../wusbcore/wusbhc.h" | ||
24 | |||
25 | #include "whcd.h" | ||
26 | |||
/* Debug helper: dump a qset's QHead, qTD ring (marking the start 'S'
 * and end 'E' slots) and the sTDs of each queued URB. */
void dump_qset(struct whc_qset *qset, struct device *dev)
{
	struct whc_std *std;
	struct urb *urb = NULL;
	int i;

	dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma);
	dev_dbg(dev, " -> %08x\n", (u32)qset->qh.link);
	dev_dbg(dev, " info: %08x %08x %08x\n",
		qset->qh.info1, qset->qh.info2, qset->qh.info3);
	dev_dbg(dev, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
	dev_dbg(dev, " TD: sts: %08x opts: %08x\n",
		qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);

	for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
		dev_dbg(dev, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
			i == qset->td_start ? 'S' : ' ',
			i == qset->td_end ? 'E' : ' ',
			i, qset->qtd[i].status, qset->qtd[i].options,
			(u32)qset->qtd[i].page_list_ptr);
	}
	dev_dbg(dev, " ntds: %d\n", qset->ntds);
	/* Print the URB header once per URB, then its sTDs. */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (urb != std->urb) {
			urb = std->urb;
			dev_dbg(dev, " urb %p transferred: %d bytes\n", urb,
				urb->actual_length);
		}
		if (std->qtd)
			dev_dbg(dev, "  sTD[%td]: %zu bytes @ %08x\n",
				std->qtd - &qset->qtd[0],
				std->len, std->num_pointers ?
				(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
		else
			dev_dbg(dev, "  sTD[-]: %zd bytes @ %08x\n",
				std->len, std->num_pointers ?
				(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
	}
}
66 | |||
67 | struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) | ||
68 | { | ||
69 | struct whc_qset *qset; | ||
70 | dma_addr_t dma; | ||
71 | |||
72 | qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma); | ||
73 | if (qset == NULL) | ||
74 | return NULL; | ||
75 | memset(qset, 0, sizeof(struct whc_qset)); | ||
76 | |||
77 | qset->qset_dma = dma; | ||
78 | qset->whc = whc; | ||
79 | |||
80 | INIT_LIST_HEAD(&qset->list_node); | ||
81 | INIT_LIST_HEAD(&qset->stds); | ||
82 | |||
83 | return qset; | ||
84 | } | ||
85 | |||
/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb: an urb for a transfer to this endpoint
 *
 * Reads the wireless endpoint companion descriptor (if present) for
 * the max burst/sequence values and encodes the endpoint's static
 * parameters into the three QH info words.
 */
static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;

	is_out = usb_pipeout(urb->pipe);

	/* NOTE(review): assumes any extra descriptor bytes start with
	 * the wireless companion descriptor — confirm. */
	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;

	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		/* Conservative defaults when no companion descriptor
		 * is available. */
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from?  Why
	 * doesn't the chip know what Tx power to use? It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE_53_3
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);
}
133 | |||
/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule
 *
 * Resets the qTD ring indices, the removal flag and the dynamic QH
 * state (link, status, error count, transfer window, scratch and
 * overlay areas) while leaving the static endpoint state written by
 * qset_fill_qh() intact.
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;
	qset->remove = 0;

	/* Terminate the link: the qset joins a schedule chain later. */
	qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
	qset->qh.err_count = 0;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}
155 | |||
156 | /** | ||
157 | * get_qset - get the qset for an async endpoint | ||
158 | * | ||
159 | * A new qset is created if one does not already exist. | ||
160 | */ | ||
161 | struct whc_qset *get_qset(struct whc *whc, struct urb *urb, | ||
162 | gfp_t mem_flags) | ||
163 | { | ||
164 | struct whc_qset *qset; | ||
165 | |||
166 | qset = urb->ep->hcpriv; | ||
167 | if (qset == NULL) { | ||
168 | qset = qset_alloc(whc, mem_flags); | ||
169 | if (qset == NULL) | ||
170 | return NULL; | ||
171 | |||
172 | qset->ep = urb->ep; | ||
173 | urb->ep->hcpriv = qset; | ||
174 | qset_fill_qh(qset, urb); | ||
175 | } | ||
176 | return qset; | ||
177 | } | ||
178 | |||
/* Finish removing a qset from the schedule: drop it from whichever
 * list it is on and wake anyone waiting in qset_delete(). */
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}
184 | |||
/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to be iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;

	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		/* Stop when the qTD ring is full, or when a pause is
		 * pending and this sTD belongs to a later URB (see
		 * the iAlt comment below). */
		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;

		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer. However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;

		/* QTD_OPT_SMALL marks a direct buffer pointer rather
		 * than a page list — presumably; confirm against the
		 * WHCI qTD options definition. */
		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		/* Write status last: this is what activates the qTD. */
		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		/* Advance the ring's end index with wrap-around. */
		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}
250 | |||
251 | /** | ||
252 | * qset_remove_qtd - remove the first qTD from a qset. | ||
253 | * | ||
254 | * The qTD might be still active (if it's part of a IN URB that | ||
255 | * resulted in a short read) so ensure it's deactivated. | ||
256 | */ | ||
257 | static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) | ||
258 | { | ||
259 | qset->qtd[qset->td_start].status = 0; | ||
260 | |||
261 | if (++qset->td_start >= WHCI_QSET_TD_MAX) | ||
262 | qset->td_start = 0; | ||
263 | qset->ntds--; | ||
264 | } | ||
265 | |||
/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 *
 * If the sTD has a page list, its DMA mapping is torn down and the
 * list memory is released along with the sTD itself.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->num_pointers) {
		/*
		 * When a page list exists, std->dma_addr maps the
		 * page list itself (see qset_fill_page_list()), not
		 * the data buffer.
		 */
		dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
				std->num_pointers * sizeof(struct whc_page_list_entry),
				DMA_TO_DEVICE);
		kfree(std->pl_virt);
	}

	kfree(std);
}
283 | |||
/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 * @whc:  the WHCI host controller
 * @qset: the qset the URB is queued on
 * @urb:  the URB whose TDs should be removed
 *
 * An URB's sTDs are queued contiguously (see qset_add_urb()), so the
 * walk stops at the first sTD belonging to a different URB.  sTDs
 * that already hold a hardware qTD have it popped from the TD ring.
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
		struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}
300 | |||
/**
 * qset_free_stds - free any remaining sTDs for an URB.
 * @qset: the qset to scan
 * @urb:  the URB whose sTDs should be freed
 *
 * Unlike qset_remove_qtds(), this scans the whole list and frees
 * every sTD belonging to @urb (used on the qset_add_urb() error
 * path).
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}
313 | |||
314 | static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags) | ||
315 | { | ||
316 | dma_addr_t dma_addr = std->dma_addr; | ||
317 | dma_addr_t sp, ep; | ||
318 | size_t std_len = std->len; | ||
319 | size_t pl_len; | ||
320 | int p; | ||
321 | |||
322 | sp = ALIGN(dma_addr, WHCI_PAGE_SIZE); | ||
323 | ep = dma_addr + std_len; | ||
324 | std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); | ||
325 | |||
326 | pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); | ||
327 | std->pl_virt = kmalloc(pl_len, mem_flags); | ||
328 | if (std->pl_virt == NULL) | ||
329 | return -ENOMEM; | ||
330 | std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE); | ||
331 | |||
332 | for (p = 0; p < std->num_pointers; p++) { | ||
333 | std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); | ||
334 | dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE); | ||
335 | } | ||
336 | |||
337 | return 0; | ||
338 | } | ||
339 | |||
/**
 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
 * @work: the dequeue_work member of the URB's struct whc_urb
 *
 * Runs from the workqueue so the ASL/PZL update can happen in
 * process context; the URB itself is then given back under
 * whc->lock.
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	/* Sync the hardware's view of whichever schedule held the URB. */
	if (wurb->is_async == true)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			| WUSBCMD_ASYNC_SYNCED_DB
			| WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			| WUSBCMD_PERIODIC_SYNCED_DB
			| WUSBCMD_PERIODIC_QSET_RM);

	/* qset_remove_urb() expects whc->lock to be held. */
	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}
363 | |||
364 | /** | ||
365 | * qset_add_urb - add an urb to the qset's queue. | ||
366 | * | ||
367 | * The URB is chopped into sTDs, one for each qTD that will required. | ||
368 | * At least one qTD (and sTD) is required even if the transfer has no | ||
369 | * data (e.g., for some control transfers). | ||
370 | */ | ||
371 | int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, | ||
372 | gfp_t mem_flags) | ||
373 | { | ||
374 | struct whc_urb *wurb; | ||
375 | int remaining = urb->transfer_buffer_length; | ||
376 | u64 transfer_dma = urb->transfer_dma; | ||
377 | int ntds_remaining; | ||
378 | |||
379 | ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); | ||
380 | if (ntds_remaining == 0) | ||
381 | ntds_remaining = 1; | ||
382 | |||
383 | wurb = kzalloc(sizeof(struct whc_urb), mem_flags); | ||
384 | if (wurb == NULL) | ||
385 | goto err_no_mem; | ||
386 | urb->hcpriv = wurb; | ||
387 | wurb->qset = qset; | ||
388 | wurb->urb = urb; | ||
389 | INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); | ||
390 | |||
391 | while (ntds_remaining) { | ||
392 | struct whc_std *std; | ||
393 | size_t std_len; | ||
394 | |||
395 | std = kmalloc(sizeof(struct whc_std), mem_flags); | ||
396 | if (std == NULL) | ||
397 | goto err_no_mem; | ||
398 | |||
399 | std_len = remaining; | ||
400 | if (std_len > QTD_MAX_XFER_SIZE) | ||
401 | std_len = QTD_MAX_XFER_SIZE; | ||
402 | |||
403 | std->urb = urb; | ||
404 | std->dma_addr = transfer_dma; | ||
405 | std->len = std_len; | ||
406 | std->ntds_remaining = ntds_remaining; | ||
407 | std->qtd = NULL; | ||
408 | |||
409 | INIT_LIST_HEAD(&std->list_node); | ||
410 | list_add_tail(&std->list_node, &qset->stds); | ||
411 | |||
412 | if (std_len > WHCI_PAGE_SIZE) { | ||
413 | if (qset_fill_page_list(whc, std, mem_flags) < 0) | ||
414 | goto err_no_mem; | ||
415 | } else | ||
416 | std->num_pointers = 0; | ||
417 | |||
418 | ntds_remaining--; | ||
419 | remaining -= std_len; | ||
420 | transfer_dma += std_len; | ||
421 | } | ||
422 | |||
423 | return 0; | ||
424 | |||
425 | err_no_mem: | ||
426 | qset_free_stds(qset, urb); | ||
427 | return -ENOMEM; | ||
428 | } | ||
429 | |||
/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem and its struct whc_urb is
 * freed.
 *
 * Called with whc->lock held; the lock is dropped around the
 * giveback because urb->complete() may enqueue another urb.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
		struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}
449 | |||
450 | /** | ||
451 | * get_urb_status_from_qtd - get the completed urb status from qTD status | ||
452 | * @urb: completed urb | ||
453 | * @status: qTD status | ||
454 | */ | ||
455 | static int get_urb_status_from_qtd(struct urb *urb, u32 status) | ||
456 | { | ||
457 | if (status & QTD_STS_HALTED) { | ||
458 | if (status & QTD_STS_DBE) | ||
459 | return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM; | ||
460 | else if (status & QTD_STS_BABBLE) | ||
461 | return -EOVERFLOW; | ||
462 | else if (status & QTD_STS_RCE) | ||
463 | return -ETIME; | ||
464 | return -EPIPE; | ||
465 | } | ||
466 | if (usb_pipein(urb->pipe) | ||
467 | && (urb->transfer_flags & URB_SHORT_NOT_OK) | ||
468 | && urb->actual_length < urb->transfer_buffer_length) | ||
469 | return -EREMOTEIO; | ||
470 | return 0; | ||
471 | } | ||
472 | |||
/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the transfer bytes from the qTD, if the urb is
 * completely transferred or (in the case of an IN only) the LPF is
 * set, then the transfer is complete and the urb should be returned
 * to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
		struct whc_qtd *qtd)
{
	/* The finished qTD belongs to the sTD at the head of the list. */
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

	/* The qTD status field holds the number of bytes NOT transferred. */
	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * Transfers for this URB are complete?  Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur. Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}
520 | |||
/**
 * process_halted_qtd - process a qset with a halted qtd
 * @whc:  the WHCI host controller
 * @qset: the qset containing the halted qTD
 * @qtd:  the halted qTD (at the head of the TD ring)
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem. Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
		struct whc_qtd *qtd)
{
	/* The halted qTD belongs to the sTD at the head of the list. */
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

	/* Strip the remaining URBs' qTDs so the qset becomes idle. */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	/* Mark the qset for removal from the schedule. */
	qset->remove = 1;
}
554 | |||
/**
 * qset_free - return a qset's memory to the DMA pool.
 * @whc:  the WHCI host controller
 * @qset: the qset to free
 *
 * The qset must already have been removed from the schedule (see
 * qset_delete(), which waits for this before freeing).
 */
void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}
559 | |||
/**
 * qset_delete - wait for a qset to be unused, then free it.
 * @whc:  the WHCI host controller
 * @qset: the qset to delete
 *
 * Sleeps until qset_remove_complete() signals remove_complete, then
 * releases the qset back to the DMA pool.  Must be called from a
 * context that may sleep.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h new file mode 100644 index 000000000000..1d2a53bd39fd --- /dev/null +++ b/drivers/usb/host/whci/whcd.h | |||
@@ -0,0 +1,197 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) private header. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
18 | * 02110-1301, USA. | ||
19 | */ | ||
20 | #ifndef __WHCD_H | ||
21 | #define __WHCD_H | ||
22 | |||
23 | #include <linux/uwb/whci.h> | ||
24 | #include <linux/workqueue.h> | ||
25 | |||
26 | #include "whci-hc.h" | ||
27 | |||
28 | /* Generic command timeout. */ | ||
29 | #define WHC_GENCMD_TIMEOUT_MS 100 | ||
30 | |||
31 | |||
/* Per-controller state for a WHCI host controller. */
struct whc {
	struct wusbhc wusbhc;	/* Wireless USB HC core state (see wusbhc_to_whc()) */
	struct umc_dev *umc;	/* the UMC capability device for this HC */

	resource_size_t base_phys;	/* physical address of the register region */
	void __iomem *base;		/* mapped registers */
	int irq;

	/* controller limits -- NOTE(review): presumably read from the
	 * hardware at init time; confirm in init.c */
	u8 n_devices;
	u8 n_keys;
	u8 n_mmc_ies;

	/* Buffers shared with the hardware (periodic zone list, device
	 * notification buffer, device info buffer) and their DMA
	 * addresses. */
	u64 *pz_list;
	struct dn_buf_entry *dn_buf;
	struct di_buf_entry *di_buf;
	dma_addr_t pz_list_dma;
	dma_addr_t dn_buf_dma;
	dma_addr_t di_buf_dma;

	spinlock_t lock;	/* held around URB queue/giveback (see qset_remove_urb()) */
	struct mutex mutex;

	void * gen_cmd_buf;	/* data buffer for generic commands */
	dma_addr_t gen_cmd_buf_dma;
	wait_queue_head_t cmd_wq;

	struct workqueue_struct *workqueue;
	struct work_struct dn_work;	/* device notification work (whc_dn_work()) */

	struct dma_pool *qset_pool;	/* pool qsets are allocated from (see qset_free()) */

	/* asynchronous schedule list (ASL) state */
	struct list_head async_list;
	struct list_head async_removed_list;
	wait_queue_head_t async_list_wq;
	struct work_struct async_work;

	/* periodic zone list (PZL) state */
	struct list_head periodic_list[5];
	struct list_head periodic_removed_list;
	wait_queue_head_t periodic_list_wq;
	struct work_struct periodic_work;
};
73 | |||
74 | #define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc)) | ||
75 | |||
/**
 * struct whc_std - a software TD.
 * @urb: the URB this sTD is for.
 * @len: the length of data in the associated TD.
 * @ntds_remaining: number of TDs (starting from this one) in this transfer.
 * @qtd: the hardware qTD currently assigned to this sTD (NULL if none).
 * @list_node: link in the qset's list of sTDs.
 * @num_pointers: number of entries in @pl_virt (0 if no page list).
 * @dma_addr: DMA address of the data buffer, or of the page list once
 *	one has been built (see qset_fill_page_list()).
 * @pl_virt: CPU address of the page list, if any.
 *
 * Queued URBs may require more TDs than are available in a qset so we
 * use a list of these "software TDs" (sTDs) to hold per-TD data.
 */
struct whc_std {
	struct urb *urb;
	size_t len;
	int ntds_remaining;
	struct whc_qtd *qtd;

	struct list_head list_node;
	int num_pointers;
	dma_addr_t dma_addr;
	struct whc_page_list_entry *pl_virt;
};
97 | |||
/**
 * struct whc_urb - per URB host controller structure.
 * @urb: the URB this struct is for.
 * @qset: the qset associated to the URB.
 * @dequeue_work: the work to remove the URB when dequeued.
 * @is_async: true if the URB belongs to the asynchronous scheduler.
 * @status: the status to be returned when calling wusbhc_giveback_urb.
 */
struct whc_urb {
	struct urb *urb;
	struct whc_qset *qset;
	struct work_struct dequeue_work;
	bool is_async;
	int status;
};
113 | |||
/**
 * whc_std_last - is this sTD the URB's last?
 * @std: the sTD to check.
 *
 * Returns true when no further sTDs follow this one in the same
 * transfer (ntds_remaining counts this sTD itself).
 */
static inline bool whc_std_last(struct whc_std *std)
{
	return std->ntds_remaining <= 1;
}
122 | |||
/*
 * Flags describing which kind of ASL/PZL change the hardware must be
 * told about (e.g. qset_add_qtds() returns WHC_UPDATE_UPDATED when a
 * qTD pointed to by iCur was activated).
 */
enum whc_update {
	WHC_UPDATE_ADDED = 0x01,
	WHC_UPDATE_REMOVED = 0x02,
	WHC_UPDATE_UPDATED = 0x04,
};
128 | |||
129 | /* init.c */ | ||
130 | int whc_init(struct whc *whc); | ||
131 | void whc_clean_up(struct whc *whc); | ||
132 | |||
133 | /* hw.c */ | ||
134 | void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val); | ||
135 | int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len); | ||
136 | |||
137 | /* wusb.c */ | ||
138 | int whc_wusbhc_start(struct wusbhc *wusbhc); | ||
139 | void whc_wusbhc_stop(struct wusbhc *wusbhc); | ||
140 | int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, | ||
141 | u8 handle, struct wuie_hdr *wuie); | ||
142 | int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle); | ||
143 | int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm); | ||
144 | int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev); | ||
145 | int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots); | ||
146 | int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, | ||
147 | const void *ptk, size_t key_size); | ||
148 | int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, | ||
149 | const void *gtk, size_t key_size); | ||
150 | int whc_set_cluster_id(struct whc *whc, u8 bcid); | ||
151 | |||
152 | /* int.c */ | ||
153 | irqreturn_t whc_int_handler(struct usb_hcd *hcd); | ||
154 | void whc_dn_work(struct work_struct *work); | ||
155 | |||
156 | /* asl.c */ | ||
157 | void asl_start(struct whc *whc); | ||
158 | void asl_stop(struct whc *whc); | ||
159 | int asl_init(struct whc *whc); | ||
160 | void asl_clean_up(struct whc *whc); | ||
161 | int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags); | ||
162 | int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status); | ||
163 | void asl_qset_delete(struct whc *whc, struct whc_qset *qset); | ||
164 | void scan_async_work(struct work_struct *work); | ||
165 | |||
166 | /* pzl.c */ | ||
167 | int pzl_init(struct whc *whc); | ||
168 | void pzl_clean_up(struct whc *whc); | ||
169 | void pzl_start(struct whc *whc); | ||
170 | void pzl_stop(struct whc *whc); | ||
171 | int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags); | ||
172 | int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status); | ||
173 | void pzl_qset_delete(struct whc *whc, struct whc_qset *qset); | ||
174 | void scan_periodic_work(struct work_struct *work); | ||
175 | |||
176 | /* qset.c */ | ||
177 | struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags); | ||
178 | void qset_free(struct whc *whc, struct whc_qset *qset); | ||
179 | struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags); | ||
180 | void qset_delete(struct whc *whc, struct whc_qset *qset); | ||
181 | void qset_clear(struct whc *whc, struct whc_qset *qset); | ||
182 | int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, | ||
183 | gfp_t mem_flags); | ||
184 | void qset_free_std(struct whc *whc, struct whc_std *std); | ||
185 | void qset_remove_urb(struct whc *whc, struct whc_qset *qset, | ||
186 | struct urb *urb, int status); | ||
187 | void process_halted_qtd(struct whc *whc, struct whc_qset *qset, | ||
188 | struct whc_qtd *qtd); | ||
189 | void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, | ||
190 | struct whc_qtd *qtd); | ||
191 | enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset); | ||
192 | void qset_remove_complete(struct whc *whc, struct whc_qset *qset); | ||
193 | void dump_qset(struct whc_qset *qset, struct device *dev); | ||
194 | void pzl_update(struct whc *whc, uint32_t wusbcmd); | ||
195 | void asl_update(struct whc *whc, uint32_t wusbcmd); | ||
196 | |||
197 | #endif /* #ifndef __WHCD_H */ | ||
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h new file mode 100644 index 000000000000..bff1eb7a35cf --- /dev/null +++ b/drivers/usb/host/whci/whci-hc.h | |||
@@ -0,0 +1,416 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) data structures. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
18 | * 02110-1301, USA. | ||
19 | */ | ||
20 | #ifndef _WHCI_WHCI_HC_H | ||
21 | #define _WHCI_WHCI_HC_H | ||
22 | |||
23 | #include <linux/list.h> | ||
24 | |||
25 | /** | ||
26 | * WHCI_PAGE_SIZE - page size use by WHCI | ||
27 | * | ||
28 | * WHCI assumes that host system uses pages of 4096 octets. | ||
29 | */ | ||
30 | #define WHCI_PAGE_SIZE 4096 | ||
31 | |||
32 | |||
33 | /** | ||
 * QTD_MAX_XFER_SIZE - max number of bytes to transfer with a single
35 | * qtd. | ||
36 | * | ||
37 | * This is 2^20 - 1. | ||
38 | */ | ||
39 | #define QTD_MAX_XFER_SIZE 1048575 | ||
40 | |||
41 | |||
/**
 * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
 *
 * This describes the data for a bulk, control or interrupt transfer.
 * All multi-byte fields are little-endian.
 *
 * [WHCI] section 3.2.4
 */
struct whc_qtd {
	__le32 status; /*< remaining transfer len and transfer status */
	__le32 options; /*< QTD_OPT_* flags */
	__le64 page_list_ptr; /*< physical pointer to data buffer page list */
	__u8 setup[8]; /*< setup data for control transfers */
} __attribute__((packed));
55 | |||
56 | #define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */ | ||
57 | #define QTD_STS_HALTED (1 << 30) /* transfer halted */ | ||
58 | #define QTD_STS_DBE (1 << 29) /* data buffer error */ | ||
59 | #define QTD_STS_BABBLE (1 << 28) /* babble detected */ | ||
60 | #define QTD_STS_RCE (1 << 27) /* retry count exceeded */ | ||
61 | #define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */ | ||
62 | #define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */ | ||
63 | #define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */ | ||
64 | #define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */ | ||
65 | #define QTD_STS_LEN(l) ((l) << 0) /* transfer length */ | ||
66 | #define QTD_STS_TO_LEN(s) ((s) & 0x000fffff) | ||
67 | |||
#define QTD_OPT_IOC      (1 << 1) /* interrupt on complete */
#define QTD_OPT_SMALL    (1 << 0) /* page_list_ptr points to buffer directly */
70 | |||
/**
 * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
 *
 * This describes the data and other parameters for an isochronous
 * transfer.  Note that, unlike a qTD, the status field is a single
 * byte (ITD_STS_* flags).
 *
 * [WHCI] section 3.2.5
 */
struct whc_itd {
	__le16 presentation_time;    /*< presentation time for OUT transfers */
	__u8 num_segments;           /*< number of data segments in segment list */
	__u8 status;                 /*< command execution status */
	__le32 options;              /*< misc transfer options */
	__le64 page_list_ptr;        /*< physical pointer to data buffer page list */
	__le64 seg_list_ptr;         /*< physical pointer to segment list */
} __attribute__((packed));
87 | |||
88 | #define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */ | ||
89 | #define ITD_STS_DBE (1 << 5) /* data buffer error */ | ||
90 | #define ITD_STS_BABBLE (1 << 4) /* babble detected */ | ||
91 | #define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */ | ||
92 | |||
93 | #define ITD_OPT_IOC (1 << 1) /* interrupt on complete */ | ||
94 | #define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */ | ||
95 | |||
/**
 * Page list entry.
 *
 * A TD's page list must contain sufficient page list entries for the
 * total data length in the TD (see qset_fill_page_list()).
 *
 * [WHCI] section 3.2.4.3
 */
struct whc_page_list_entry {
	__le64 buf_ptr; /*< physical pointer to buffer */
} __attribute__((packed));
107 | |||
/**
 * struct whc_seg_list_entry - Segment list entry.
 *
 * Describes a portion of the data buffer described in the containing
 * qTD's page list.
 *
 * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
 *           + qtd->seg_list_ptr[seg].offset;
 *
 * Segments can't cross page boundaries.
 *
 * [WHCI] section 3.2.5.5
 */
struct whc_seg_list_entry {
	__le16 len;    /*< segment length */
	__u8 idx;      /*< index into page list */
	__u8 status;   /*< segment status */
	__le16 offset; /*< 12 bit offset into page */
} __attribute__((packed));
127 | |||
/**
 * struct whc_qhead - endpoint and status information for a qset.
 *
 * Fields are little-endian; info1/info2/info3 and status are
 * built/read with the QH_INFO*_ and QH_STATUS_* macros below.
 *
 * [WHCI] section 3.2.6
 */
struct whc_qhead {
	__le64 link;       /*< next qset in list */
	__le32 info1;
	__le32 info2;
	__le32 info3;
	__le16 status;
	__le16 err_count;  /*< transaction error count */
	__le32 cur_window;
	__le32 scratch[3]; /*< h/w scratch area */
	union {
		struct whc_qtd qtd;
		struct whc_itd itd;
	} overlay;         /*< copy of the TD currently being executed */
} __attribute__((packed));
147 | |||
148 | #define QH_LINK_PTR_MASK (~0x03Full) | ||
149 | #define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK) | ||
150 | #define QH_LINK_IQS (1 << 4) /* isochronous queue set */ | ||
151 | #define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */ | ||
152 | #define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */ | ||
153 | |||
154 | #define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */ | ||
155 | #define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */ | ||
156 | #define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */ | ||
157 | #define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */ | ||
158 | #define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */ | ||
159 | #define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */ | ||
160 | #define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */ | ||
161 | #define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */ | ||
162 | #define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */ | ||
163 | #define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */ | ||
164 | #define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */ | ||
165 | |||
166 | #define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */ | ||
167 | #define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */ | ||
168 | #define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */ | ||
169 | #define QH_INFO2_RQS (1 << 15) /* reactivate queue set */ | ||
170 | #define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */ | ||
171 | #define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */ | ||
172 | #define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */ | ||
173 | #define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */ | ||
174 | |||
175 | #define QH_INFO3_TX_RATE_53_3 (0 << 24) | ||
176 | #define QH_INFO3_TX_RATE_80 (1 << 24) | ||
177 | #define QH_INFO3_TX_RATE_106_7 (2 << 24) | ||
178 | #define QH_INFO3_TX_RATE_160 (3 << 24) | ||
179 | #define QH_INFO3_TX_RATE_200 (4 << 24) | ||
180 | #define QH_INFO3_TX_RATE_320 (5 << 24) | ||
181 | #define QH_INFO3_TX_RATE_400 (6 << 24) | ||
182 | #define QH_INFO3_TX_RATE_480 (7 << 24) | ||
183 | #define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */ | ||
184 | |||
185 | #define QH_STATUS_FLOW_CTRL (1 << 15) | ||
186 | #define QH_STATUS_ICUR(i) ((i) << 5) | ||
187 | #define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7) | ||
188 | |||
189 | /** | ||
190 | * usb_pipe_to_qh_type - USB core pipe type to QH transfer type | ||
191 | * | ||
192 | * Returns the QH type field for a USB core pipe type. | ||
193 | */ | ||
194 | static inline unsigned usb_pipe_to_qh_type(unsigned pipe) | ||
195 | { | ||
196 | static const unsigned type[] = { | ||
197 | [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC, | ||
198 | [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT, | ||
199 | [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL, | ||
200 | [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK, | ||
201 | }; | ||
202 | return type[usb_pipetype(pipe)]; | ||
203 | } | ||
204 | |||
205 | /** | ||
 * Maximum number of TDs in a qset.
207 | */ | ||
208 | #define WHCI_QSET_TD_MAX 8 | ||
209 | |||
/**
 * struct whc_qset - WUSB data transfers to a specific endpoint
 * @qh: the QHead of this qset
 * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
 * transfers)
 * @itd: up to 8 iTDs (for qsets for isochronous transfers)
 * @qset_dma: DMA address for this qset
 * @whc: WHCI HC this qset is for
 * @ep: endpoint
 * @stds: list of sTDs queued to this qset
 * @ntds: number of qTDs queued (not necessarily the same as nTDs
 * field in the QH)
 * @td_start: index of the first qTD in the list
 * @td_end: index of next free qTD in the list (provided
 * ntds < WHCI_QSET_TD_MAX)
 * @list_node: link in the ASL/PZL (or a removed list)
 * @remove: set when the qset should be removed from the schedule
 * @pause_after_urb: don't queue further qTDs until this URB completes
 * (set for long IN transfers, see qset_add_qtds())
 * @remove_complete: signalled by qset_remove_complete() once removal
 * is done; qset_delete() waits on it
 *
 * Queue Sets (qsets) are added to the asynchronous schedule list
 * (ASL) or the periodic zone list (PZL).
 *
 * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
 * Each TD may refer to at most 1 MiB of data. If a single transfer
 * has > 8MiB of data, TDs can be reused as they are completed since
 * the TD list is used as a circular buffer. Similarly, several
 * (smaller) transfers may be queued in a qset.
 *
 * WHCI controllers may cache portions of the qsets in the ASL and
 * PZL, requiring the WHCD to inform the WHC that the lists have been
 * updated (fields changed or qsets inserted or removed). For safe
 * insertion and removal of qsets from the lists the schedule must be
 * stopped to avoid races in updating the QH link pointers.
 *
 * Since the HC is free to execute qsets in any order, all transfers
 * to an endpoint should use the same qset to ensure transfers are
 * executed in the order they're submitted.
 *
 * [WHCI] section 3.2.3
 */
struct whc_qset {
	struct whc_qhead qh;
	union {
		struct whc_qtd qtd[WHCI_QSET_TD_MAX];
		struct whc_itd itd[WHCI_QSET_TD_MAX];
	};

	/* private data for WHCD */
	dma_addr_t qset_dma;
	struct whc *whc;
	struct usb_host_endpoint *ep;
	struct list_head stds;
	int ntds;
	int td_start;
	int td_end;
	struct list_head list_node;
	/* NOTE(review): in_sw_list/in_hw_list are presumably maintained
	 * by the ASL/PZL code -- confirm in asl.c/pzl.c */
	unsigned in_sw_list:1;
	unsigned in_hw_list:1;
	unsigned remove:1;
	struct urb *pause_after_urb;
	struct completion remove_complete;
	/* NOTE(review): max_burst/max_seq presumably feed the QH info2
	 * fields -- confirm where qset_fill_qh() is defined */
	int max_burst;
	int max_seq;
};
271 | |||
272 | static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) | ||
273 | { | ||
274 | if (target) | ||
275 | *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target); | ||
276 | else | ||
277 | *ptr = QH_LINK_T; | ||
278 | } | ||
279 | |||
280 | /** | ||
281 | * struct di_buf_entry - Device Information (DI) buffer entry. | ||
282 | * | ||
283 | * There's one of these per connected device. | ||
284 | */ | ||
285 | struct di_buf_entry { | ||
286 | __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */ | ||
287 | __le32 addr_sec_info; /*< addressing and security info */ | ||
288 | __le32 reserved[7]; | ||
289 | } __attribute__((packed)); | ||
290 | |||
291 | #define WHC_DI_SECURE (1 << 31) | ||
292 | #define WHC_DI_DISABLE (1 << 30) | ||
293 | #define WHC_DI_KEY_IDX(k) ((k) << 8) | ||
294 | #define WHC_DI_KEY_IDX_MASK 0x0000ff00 | ||
295 | #define WHC_DI_DEV_ADDR(a) ((a) << 0) | ||
296 | #define WHC_DI_DEV_ADDR_MASK 0x000000ff | ||
297 | |||
298 | /** | ||
299 | * struct dn_buf_entry - Device Notification (DN) buffer entry. | ||
300 | * | ||
301 | * [WHCI] section 3.2.8 | ||
302 | */ | ||
303 | struct dn_buf_entry { | ||
304 | __u8 msg_size; /*< number of octets of valid DN data */ | ||
305 | __u8 reserved1; | ||
306 | __u8 src_addr; /*< source address */ | ||
307 | __u8 status; /*< buffer entry status */ | ||
308 | __le32 tkid; /*< TKID for source device, valid if secure bit is set */ | ||
309 | __u8 dn_data[56]; /*< up to 56 octets of DN data */ | ||
310 | } __attribute__((packed)); | ||
311 | |||
312 | #define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */ | ||
313 | #define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */ | ||
314 | |||
315 | #define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry)) | ||
316 | |||
317 | /* The Add MMC IE WUSB Generic Command may take up to 256 bytes of | ||
318 | data. [WHCI] section 2.4.7. */ | ||
319 | #define WHC_GEN_CMD_DATA_LEN 256 | ||
320 | |||
321 | /* | ||
322 | * HC registers. | ||
323 | * | ||
324 | * [WHCI] section 2.4 | ||
325 | */ | ||
326 | |||
327 | #define WHCIVERSION 0x00 | ||
328 | |||
329 | #define WHCSPARAMS 0x04 | ||
330 | # define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff) | ||
331 | # define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff) | ||
332 | # define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f) | ||
333 | |||
334 | #define WUSBCMD 0x08 | ||
335 | # define WUSBCMD_BCID(b) ((b) << 16) | ||
336 | # define WUSBCMD_BCID_MASK (0xff << 16) | ||
337 | # define WUSBCMD_ASYNC_QSET_RM (1 << 12) | ||
338 | # define WUSBCMD_PERIODIC_QSET_RM (1 << 11) | ||
339 | # define WUSBCMD_WUSBSI(s) ((s) << 8) | ||
340 | # define WUSBCMD_WUSBSI_MASK (0x7 << 8) | ||
341 | # define WUSBCMD_ASYNC_SYNCED_DB (1 << 7) | ||
342 | # define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6) | ||
343 | # define WUSBCMD_ASYNC_UPDATED (1 << 5) | ||
344 | # define WUSBCMD_PERIODIC_UPDATED (1 << 4) | ||
345 | # define WUSBCMD_ASYNC_EN (1 << 3) | ||
346 | # define WUSBCMD_PERIODIC_EN (1 << 2) | ||
347 | # define WUSBCMD_WHCRESET (1 << 1) | ||
348 | # define WUSBCMD_RUN (1 << 0) | ||
349 | |||
350 | #define WUSBSTS 0x0c | ||
351 | # define WUSBSTS_ASYNC_SCHED (1 << 15) | ||
352 | # define WUSBSTS_PERIODIC_SCHED (1 << 14) | ||
353 | # define WUSBSTS_DNTS_SCHED (1 << 13) | ||
354 | # define WUSBSTS_HCHALTED (1 << 12) | ||
355 | # define WUSBSTS_GEN_CMD_DONE (1 << 9) | ||
356 | # define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8) | ||
357 | # define WUSBSTS_DNTS_OVERFLOW (1 << 7) | ||
358 | # define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6) | ||
359 | # define WUSBSTS_HOST_ERR (1 << 5) | ||
360 | # define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4) | ||
361 | # define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3) | ||
362 | # define WUSBSTS_DNTS_INT (1 << 2) | ||
363 | # define WUSBSTS_ERR_INT (1 << 1) | ||
364 | # define WUSBSTS_INT (1 << 0) | ||
365 | # define WUSBSTS_INT_MASK 0x3ff | ||
366 | |||
367 | #define WUSBINTR 0x10 | ||
368 | # define WUSBINTR_GEN_CMD_DONE (1 << 9) | ||
369 | # define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8) | ||
370 | # define WUSBINTR_DNTS_OVERFLOW (1 << 7) | ||
371 | # define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6) | ||
372 | # define WUSBINTR_HOST_ERR (1 << 5) | ||
373 | # define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4) | ||
374 | # define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3) | ||
375 | # define WUSBINTR_DNTS_INT (1 << 2) | ||
376 | # define WUSBINTR_ERR_INT (1 << 1) | ||
377 | # define WUSBINTR_INT (1 << 0) | ||
378 | # define WUSBINTR_ALL 0x3ff | ||
379 | |||
380 | #define WUSBGENCMDSTS 0x14 | ||
381 | # define WUSBGENCMDSTS_ACTIVE (1 << 31) | ||
382 | # define WUSBGENCMDSTS_ERROR (1 << 24) | ||
383 | # define WUSBGENCMDSTS_IOC (1 << 23) | ||
384 | # define WUSBGENCMDSTS_MMCIE_ADD 0x01 | ||
385 | # define WUSBGENCMDSTS_MMCIE_RM 0x02 | ||
386 | # define WUSBGENCMDSTS_SET_MAS 0x03 | ||
387 | # define WUSBGENCMDSTS_CHAN_STOP 0x04 | ||
388 | # define WUSBGENCMDSTS_RWP_EN 0x05 | ||
389 | |||
390 | #define WUSBGENCMDPARAMS 0x18 | ||
391 | #define WUSBGENADDR 0x20 | ||
392 | #define WUSBASYNCLISTADDR 0x28 | ||
393 | #define WUSBDNTSBUFADDR 0x30 | ||
394 | #define WUSBDEVICEINFOADDR 0x38 | ||
395 | |||
396 | #define WUSBSETSECKEYCMD 0x40 | ||
397 | # define WUSBSETSECKEYCMD_SET (1 << 31) | ||
398 | # define WUSBSETSECKEYCMD_ERASE (1 << 30) | ||
399 | # define WUSBSETSECKEYCMD_GTK (1 << 8) | ||
400 | # define WUSBSETSECKEYCMD_IDX(i) ((i) << 0) | ||
401 | |||
402 | #define WUSBTKID 0x44 | ||
403 | #define WUSBSECKEY 0x48 | ||
404 | #define WUSBPERIODICLISTBASE 0x58 | ||
405 | #define WUSBMASINDEX 0x60 | ||
406 | |||
407 | #define WUSBDNTSCTRL 0x64 | ||
408 | # define WUSBDNTSCTRL_ACTIVE (1 << 31) | ||
409 | # define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8) | ||
410 | # define WUSBDNTSCTRL_SLOTS(s) ((s) << 0) | ||
411 | |||
412 | #define WUSBTIME 0x68 | ||
413 | #define WUSBBPST 0x6c | ||
414 | #define WUSBDIBUPDATED 0x70 | ||
415 | |||
416 | #endif /* #ifndef _WHCI_WHCI_HC_H */ | ||
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c new file mode 100644 index 000000000000..66e4ddcd961d --- /dev/null +++ b/drivers/usb/host/whci/wusb.c | |||
@@ -0,0 +1,241 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) WUSB operations. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/uwb/umc.h> | ||
22 | #define D_LOCAL 1 | ||
23 | #include <linux/uwb/debug.h> | ||
24 | |||
25 | #include "../../wusbcore/wusbhc.h" | ||
26 | |||
27 | #include "whcd.h" | ||
28 | |||
29 | #if D_LOCAL >= 1 | ||
30 | static void dump_di(struct whc *whc, int idx) | ||
31 | { | ||
32 | struct di_buf_entry *di = &whc->di_buf[idx]; | ||
33 | struct device *dev = &whc->umc->dev; | ||
34 | char buf[128]; | ||
35 | |||
36 | bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS); | ||
37 | |||
38 | d_printf(1, dev, "DI[%d]\n", idx); | ||
39 | d_printf(1, dev, " availability: %s\n", buf); | ||
40 | d_printf(1, dev, " %c%c key idx: %d dev addr: %d\n", | ||
41 | (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ', | ||
42 | (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ', | ||
43 | (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8, | ||
44 | (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK)); | ||
45 | } | ||
46 | #else | ||
47 | static inline void dump_di(struct whc *whc, int idx) | ||
48 | { | ||
49 | } | ||
50 | #endif | ||
51 | |||
52 | static int whc_update_di(struct whc *whc, int idx) | ||
53 | { | ||
54 | int offset = idx / 32; | ||
55 | u32 bit = 1 << (idx % 32); | ||
56 | |||
57 | dump_di(whc, idx); | ||
58 | |||
59 | le_writel(bit, whc->base + WUSBDIBUPDATED + offset); | ||
60 | |||
61 | return whci_wait_for(&whc->umc->dev, | ||
62 | whc->base + WUSBDIBUPDATED + offset, bit, 0, | ||
63 | 100, "DI update"); | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * WHCI starts and stops MMCs based on there being a valid GTK so | ||
68 | * these need only start/stop the asynchronous and periodic schedules. | ||
69 | */ | ||
70 | |||
71 | int whc_wusbhc_start(struct wusbhc *wusbhc) | ||
72 | { | ||
73 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
74 | |||
75 | asl_start(whc); | ||
76 | pzl_start(whc); | ||
77 | |||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | void whc_wusbhc_stop(struct wusbhc *wusbhc) | ||
82 | { | ||
83 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
84 | |||
85 | pzl_stop(whc); | ||
86 | asl_stop(whc); | ||
87 | } | ||
88 | |||
89 | int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, | ||
90 | u8 handle, struct wuie_hdr *wuie) | ||
91 | { | ||
92 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
93 | u32 params; | ||
94 | |||
95 | params = (interval << 24) | ||
96 | | (repeat_cnt << 16) | ||
97 | | (wuie->bLength << 8) | ||
98 | | handle; | ||
99 | |||
100 | return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength); | ||
101 | } | ||
102 | |||
103 | int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle) | ||
104 | { | ||
105 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
106 | u32 params; | ||
107 | |||
108 | params = handle; | ||
109 | |||
110 | return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0); | ||
111 | } | ||
112 | |||
113 | int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm) | ||
114 | { | ||
115 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
116 | |||
117 | if (stream_index >= 0) | ||
118 | whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index)); | ||
119 | |||
120 | return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm)); | ||
121 | } | ||
122 | |||
123 | int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | ||
124 | { | ||
125 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
126 | int idx = wusb_dev->port_idx; | ||
127 | struct di_buf_entry *di = &whc->di_buf[idx]; | ||
128 | int ret; | ||
129 | |||
130 | mutex_lock(&whc->mutex); | ||
131 | |||
132 | uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability); | ||
133 | di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK); | ||
134 | di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr); | ||
135 | |||
136 | ret = whc_update_di(whc, idx); | ||
137 | |||
138 | mutex_unlock(&whc->mutex); | ||
139 | |||
140 | return ret; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Set the number of Device Notification Time Slots (DNTS) and enable | ||
145 | * device notifications. | ||
146 | */ | ||
147 | int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) | ||
148 | { | ||
149 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
150 | u32 dntsctrl; | ||
151 | |||
152 | dntsctrl = WUSBDNTSCTRL_ACTIVE | ||
153 | | WUSBDNTSCTRL_INTERVAL(interval) | ||
154 | | WUSBDNTSCTRL_SLOTS(slots); | ||
155 | |||
156 | le_writel(dntsctrl, whc->base + WUSBDNTSCTRL); | ||
157 | |||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid, | ||
162 | const void *key, size_t key_size, bool is_gtk) | ||
163 | { | ||
164 | uint32_t setkeycmd; | ||
165 | uint32_t seckey[4]; | ||
166 | int i; | ||
167 | int ret; | ||
168 | |||
169 | memcpy(seckey, key, key_size); | ||
170 | setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index); | ||
171 | if (is_gtk) | ||
172 | setkeycmd |= WUSBSETSECKEYCMD_GTK; | ||
173 | |||
174 | le_writel(tkid, whc->base + WUSBTKID); | ||
175 | for (i = 0; i < 4; i++) | ||
176 | le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i); | ||
177 | le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD); | ||
178 | |||
179 | ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD, | ||
180 | WUSBSETSECKEYCMD_SET, 0, 100, "set key"); | ||
181 | |||
182 | return ret; | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * whc_set_ptk - set the PTK to use for a device. | ||
187 | * | ||
188 | * The index into the key table for this PTK is the same as the | ||
189 | * device's port index. | ||
190 | */ | ||
191 | int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, | ||
192 | const void *ptk, size_t key_size) | ||
193 | { | ||
194 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
195 | struct di_buf_entry *di = &whc->di_buf[port_idx]; | ||
196 | int ret; | ||
197 | |||
198 | mutex_lock(&whc->mutex); | ||
199 | |||
200 | if (ptk) { | ||
201 | ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false); | ||
202 | if (ret) | ||
203 | goto out; | ||
204 | |||
205 | di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK; | ||
206 | di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx); | ||
207 | } else | ||
208 | di->addr_sec_info &= ~WHC_DI_SECURE; | ||
209 | |||
210 | ret = whc_update_di(whc, port_idx); | ||
211 | out: | ||
212 | mutex_unlock(&whc->mutex); | ||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | /** | ||
217 | * whc_set_gtk - set the GTK for subsequent broadcast packets | ||
218 | * | ||
219 | * The GTK is stored in the last entry in the key table (the previous | ||
220 | * N_DEVICES entries are for the per-device PTKs). | ||
221 | */ | ||
222 | int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, | ||
223 | const void *gtk, size_t key_size) | ||
224 | { | ||
225 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
226 | int ret; | ||
227 | |||
228 | mutex_lock(&whc->mutex); | ||
229 | |||
230 | ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true); | ||
231 | |||
232 | mutex_unlock(&whc->mutex); | ||
233 | |||
234 | return ret; | ||
235 | } | ||
236 | |||
237 | int whc_set_cluster_id(struct whc *whc, u8 bcid) | ||
238 | { | ||
239 | whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid)); | ||
240 | return 0; | ||
241 | } | ||
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig new file mode 100644 index 000000000000..eb09a0a14a80 --- /dev/null +++ b/drivers/usb/wusbcore/Kconfig | |||
@@ -0,0 +1,41 @@ | |||
1 | # | ||
2 | # Wireless USB Core configuration | ||
3 | # | ||
4 | config USB_WUSB | ||
5 | tristate "Enable Wireless USB extensions (EXPERIMENTAL)" | ||
6 | depends on EXPERIMENTAL | ||
7 | depends on USB | ||
8 | select UWB | ||
9 | select CRYPTO | ||
10 | select CRYPTO_BLKCIPHER | ||
11 | select CRYPTO_CBC | ||
12 | select CRYPTO_MANAGER | ||
13 | select CRYPTO_AES | ||
14 | help | ||
15 | Enable the host-side support for Wireless USB. | ||
16 | |||
17 | To compile this support select Y (built in). It is safe to | ||
18 | select even if you don't have the hardware. | ||
19 | |||
20 | config USB_WUSB_CBAF | ||
21 | tristate "Support WUSB Cable Based Association (CBA)" | ||
22 | depends on USB | ||
23 | help | ||
24 | Some WUSB devices support Cable Based Association. It's used to | ||
25 | enable the secure communication between the host and the | ||
26 | device. | ||
27 | |||
28 | Enable this option if your WUSB device must be connected | ||
29 | via wired USB before establishing a wireless link. | ||
30 | |||
31 | It is safe to select even if you don't have compatible | ||
32 | hardware. | ||
33 | |||
34 | config USB_WUSB_CBAF_DEBUG | ||
35 | bool "Enable CBA debug messages" | ||
36 | depends on USB_WUSB_CBAF | ||
37 | help | ||
38 | Say Y here if you want the CBA to produce a bunch of debug messages | ||
39 | to the system log. Select this if you are having a problem with | ||
40 | CBA support and want to see more of what is going on. | ||
41 | |||
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile new file mode 100644 index 000000000000..75f1ade66258 --- /dev/null +++ b/drivers/usb/wusbcore/Makefile | |||
@@ -0,0 +1,26 @@ | |||
1 | obj-$(CONFIG_USB_WUSB) += wusbcore.o | ||
2 | obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o | ||
3 | obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o | ||
4 | |||
5 | |||
6 | wusbcore-objs := \ | ||
7 | crypto.o \ | ||
8 | devconnect.o \ | ||
9 | dev-sysfs.o \ | ||
10 | mmc.o \ | ||
11 | pal.o \ | ||
12 | rh.o \ | ||
13 | reservation.o \ | ||
14 | security.o \ | ||
15 | wusbhc.o | ||
16 | |||
17 | wusb-cbaf-objs := cbaf.o | ||
18 | |||
19 | wusb-wa-objs := wa-hc.o \ | ||
20 | wa-nep.o \ | ||
21 | wa-rpipe.o \ | ||
22 | wa-xfer.o | ||
23 | |||
24 | ifeq ($(CONFIG_USB_WUSB_CBAF_DEBUG),y) | ||
25 | EXTRA_CFLAGS += -DDEBUG | ||
26 | endif | ||
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c new file mode 100644 index 000000000000..ab4788d1785a --- /dev/null +++ b/drivers/usb/wusbcore/cbaf.c | |||
@@ -0,0 +1,673 @@ | |||
1 | /* | ||
2 | * Wireless USB - Cable Based Association | ||
3 | * | ||
4 | * | ||
5 | * Copyright (C) 2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * WUSB devices have to be paired (associated in WUSB lingo) so | ||
25 | * that they can connect to the system. | ||
26 | * | ||
27 | * One way of pairing is using CBA-Cable Based Association. First | ||
28 | * time you plug the device with a cable, association is done between | ||
29 | * host and device and subsequent times, you can connect wirelessly | ||
30 | * without having to associate again. That's the idea. | ||
31 | * | ||
32 | * This driver does nothing Earth shattering. It just provides an | ||
33 | * interface to chat with the wire-connected device so we can get a | ||
34 | * CDID (device ID) that might have been previously associated to a | ||
35 | * CHID (host ID) and to set up a new <CHID,CDID,CK> triplet | ||
36 | * (connection context), with the CK being the secret, or connection | ||
37 | * key. This is the pairing data. | ||
38 | * | ||
39 | * When a device with the CBA capability connects, the probe routine | ||
40 | * just creates a bunch of sysfs files that a user space enumeration | ||
41 | * manager uses to allow it to connect wirelessly to the system or not. | ||
42 | * | ||
43 | * The process goes like this: | ||
44 | * | ||
45 | * 1. Device plugs, cbaf is loaded, notifications happen. | ||
46 | * | ||
47 | * 2. The connection manager (CM) sees a device with CBAF capability | ||
48 | * (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE). | ||
49 | * | ||
50 | * 3. The CM writes the host name, supported band groups, and the CHID | ||
51 | * (host ID) into the wusb_host_name, wusb_host_band_groups and | ||
52 | * wusb_chid files. These get sent to the device and the CDID (if | ||
53 | * any) for this host is requested. | ||
54 | * | ||
55 | * 4. The CM can verify that the device's supported band groups | ||
56 | * (wusb_device_band_groups) are compatible with the host. | ||
57 | * | ||
58 | * 5. The CM reads the wusb_cdid file. | ||
59 | * | ||
60 | * 6. The CM looks up its database | ||
61 | * | ||
62 | * 6.1 If it has a matching CHID,CDID entry, the device has been | ||
63 | * authorized before (paired) and nothing further needs to be | ||
64 | * done. | ||
65 | * | ||
66 | * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in | ||
67 | * its database), the device is assumed to be not known. The CM | ||
68 | * may associate the host with device by: writing a randomly | ||
69 | * generated CDID to wusb_cdid and then a random CK to wusb_ck | ||
70 | * (this uploads the new CC to the device). | ||
71 | * | ||
72 | * The CM may choose to prompt the user before associating with a new | ||
73 | * device. | ||
74 | * | ||
75 | * 7. Device is unplugged. | ||
76 | * | ||
77 | * When the device tries to connect wirelessly, it will present its | ||
78 | * CDID to the WUSB host controller. The CM will query the | ||
79 | * database. If the CHID/CDID pair is found, it will (with a 4-way | ||
80 | * handshake) challenge the device to demonstrate it has the CK secret | ||
81 | * key (from our database) without actually exchanging it. Once | ||
82 | * satisfied, crypto keys are derived from the CK, the device is | ||
83 | * connected and all communication is encrypted. | ||
84 | * | ||
85 | * References: | ||
86 | * [WUSB-AM] Association Models Supplement to the Certified Wireless | ||
87 | * Universal Serial Bus Specification, version 1.0. | ||
88 | */ | ||
89 | #include <linux/module.h> | ||
90 | #include <linux/ctype.h> | ||
91 | #include <linux/version.h> | ||
92 | #include <linux/usb.h> | ||
93 | #include <linux/interrupt.h> | ||
94 | #include <linux/delay.h> | ||
95 | #include <linux/random.h> | ||
96 | #include <linux/mutex.h> | ||
97 | #include <linux/uwb.h> | ||
98 | #include <linux/usb/wusb.h> | ||
99 | #include <linux/usb/association.h> | ||
100 | |||
101 | #define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */ | ||
102 | |||
103 | /* An instance of a Cable-Based-Association-Framework device */ | ||
104 | struct cbaf { | ||
105 | struct usb_device *usb_dev; | ||
106 | struct usb_interface *usb_iface; | ||
107 | void *buffer; | ||
108 | size_t buffer_size; | ||
109 | |||
110 | struct wusb_ckhdid chid; | ||
111 | char host_name[CBA_NAME_LEN]; | ||
112 | u16 host_band_groups; | ||
113 | |||
114 | struct wusb_ckhdid cdid; | ||
115 | char device_name[CBA_NAME_LEN]; | ||
116 | u16 device_band_groups; | ||
117 | |||
118 | struct wusb_ckhdid ck; | ||
119 | }; | ||
120 | |||
121 | /* | ||
122 | * Verify that a CBAF USB-interface has what we need | ||
123 | * | ||
124 | * According to [WUSB-AM], CBA devices should provide at least two | ||
125 | * interfaces: | ||
126 | * - RETRIEVE_HOST_INFO | ||
127 | * - ASSOCIATE | ||
128 | * | ||
129 | * If the device doesn't provide these interfaces, we do not know how | ||
130 | * to deal with it. | ||
131 | */ | ||
132 | static int cbaf_check(struct cbaf *cbaf) | ||
133 | { | ||
134 | int result; | ||
135 | struct device *dev = &cbaf->usb_iface->dev; | ||
136 | struct wusb_cbaf_assoc_info *assoc_info; | ||
137 | struct wusb_cbaf_assoc_request *assoc_request; | ||
138 | size_t assoc_size; | ||
139 | void *itr, *top; | ||
140 | int ar_rhi = 0, ar_assoc = 0; | ||
141 | |||
142 | result = usb_control_msg( | ||
143 | cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), | ||
144 | CBAF_REQ_GET_ASSOCIATION_INFORMATION, | ||
145 | USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
146 | 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
147 | cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); | ||
148 | if (result < 0) { | ||
149 | dev_err(dev, "Cannot get available association types: %d\n", | ||
150 | result); | ||
151 | return result; | ||
152 | } | ||
153 | |||
154 | assoc_info = cbaf->buffer; | ||
155 | if (result < sizeof(*assoc_info)) { | ||
156 | dev_err(dev, "Not enough data to decode association info " | ||
157 | "header (%zu vs %zu bytes required)\n", | ||
158 | (size_t)result, sizeof(*assoc_info)); | ||
159 | return result; | ||
160 | } | ||
161 | |||
162 | assoc_size = le16_to_cpu(assoc_info->Length); | ||
163 | if (result < assoc_size) { | ||
164 | dev_err(dev, "Not enough data to decode association info " | ||
165 | "(%zu vs %zu bytes required)\n", | ||
166 | (size_t)assoc_size, sizeof(*assoc_info)); | ||
167 | return result; | ||
168 | } | ||
169 | /* | ||
170 | * From now on, we just verify, but won't error out unless we | ||
171 | * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE} | ||
172 | * types. | ||
173 | */ | ||
174 | itr = cbaf->buffer + sizeof(*assoc_info); | ||
175 | top = cbaf->buffer + assoc_size; | ||
176 | dev_dbg(dev, "Found %u association requests (%zu bytes)\n", | ||
177 | assoc_info->NumAssociationRequests, assoc_size); | ||
178 | |||
179 | while (itr < top) { | ||
180 | u16 ar_type, ar_subtype; | ||
181 | u32 ar_size; | ||
182 | const char *ar_name; | ||
183 | |||
184 | assoc_request = itr; | ||
185 | |||
186 | if (top - itr < sizeof(*assoc_request)) { | ||
187 | dev_err(dev, "Not enough data to decode associaton " | ||
188 | "request (%zu vs %zu bytes needed)\n", | ||
189 | top - itr, sizeof(*assoc_request)); | ||
190 | break; | ||
191 | } | ||
192 | |||
193 | ar_type = le16_to_cpu(assoc_request->AssociationTypeId); | ||
194 | ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId); | ||
195 | ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize); | ||
196 | ar_name = "unknown"; | ||
197 | |||
198 | switch (ar_type) { | ||
199 | case AR_TYPE_WUSB: | ||
200 | /* Verify we have what is mandated by [WUSB-AM]. */ | ||
201 | switch (ar_subtype) { | ||
202 | case AR_TYPE_WUSB_RETRIEVE_HOST_INFO: | ||
203 | ar_name = "RETRIEVE_HOST_INFO"; | ||
204 | ar_rhi = 1; | ||
205 | break; | ||
206 | case AR_TYPE_WUSB_ASSOCIATE: | ||
207 | /* send assoc data */ | ||
208 | ar_name = "ASSOCIATE"; | ||
209 | ar_assoc = 1; | ||
210 | break; | ||
211 | }; | ||
212 | break; | ||
213 | }; | ||
214 | |||
215 | dev_dbg(dev, "Association request #%02u: 0x%04x/%04x " | ||
216 | "(%zu bytes): %s\n", | ||
217 | assoc_request->AssociationDataIndex, ar_type, | ||
218 | ar_subtype, (size_t)ar_size, ar_name); | ||
219 | |||
220 | itr += sizeof(*assoc_request); | ||
221 | } | ||
222 | |||
223 | if (!ar_rhi) { | ||
224 | dev_err(dev, "Missing RETRIEVE_HOST_INFO association " | ||
225 | "request\n"); | ||
226 | return -EINVAL; | ||
227 | } | ||
228 | if (!ar_assoc) { | ||
229 | dev_err(dev, "Missing ASSOCIATE association request\n"); | ||
230 | return -EINVAL; | ||
231 | } | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | static const struct wusb_cbaf_host_info cbaf_host_info_defaults = { | ||
237 | .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId, | ||
238 | .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB), | ||
239 | .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId, | ||
240 | .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO), | ||
241 | .CHID_hdr = WUSB_AR_CHID, | ||
242 | .LangID_hdr = WUSB_AR_LangID, | ||
243 | .HostFriendlyName_hdr = WUSB_AR_HostFriendlyName, | ||
244 | }; | ||
245 | |||
246 | /* Send WUSB host information (CHID and name) to a CBAF device */ | ||
247 | static int cbaf_send_host_info(struct cbaf *cbaf) | ||
248 | { | ||
249 | struct wusb_cbaf_host_info *hi; | ||
250 | size_t name_len; | ||
251 | size_t hi_size; | ||
252 | |||
253 | hi = cbaf->buffer; | ||
254 | memset(hi, 0, sizeof(*hi)); | ||
255 | *hi = cbaf_host_info_defaults; | ||
256 | hi->CHID = cbaf->chid; | ||
257 | hi->LangID = 0; /* FIXME: I guess... */ | ||
258 | strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN); | ||
259 | name_len = strlen(cbaf->host_name); | ||
260 | hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len); | ||
261 | hi_size = sizeof(*hi) + name_len; | ||
262 | |||
263 | return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0), | ||
264 | CBAF_REQ_SET_ASSOCIATION_RESPONSE, | ||
265 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
266 | 0x0101, | ||
267 | cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
268 | hi, hi_size, 1000 /* FIXME: arbitrary */); | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * Get device's information (CDID) associated to CHID | ||
273 | * | ||
274 | * The device will return its information (CDID, name, bandgroups) | ||
275 | * associated to the CHID we have set before, or 0 CDID and default | ||
276 | * name and bandgroup if no CHID set or unknown. | ||
277 | */ | ||
278 | static int cbaf_cdid_get(struct cbaf *cbaf) | ||
279 | { | ||
280 | int result; | ||
281 | struct device *dev = &cbaf->usb_iface->dev; | ||
282 | struct wusb_cbaf_device_info *di; | ||
283 | size_t needed; | ||
284 | |||
285 | di = cbaf->buffer; | ||
286 | result = usb_control_msg( | ||
287 | cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), | ||
288 | CBAF_REQ_GET_ASSOCIATION_REQUEST, | ||
289 | USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
290 | 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
291 | di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); | ||
292 | if (result < 0) { | ||
293 | dev_err(dev, "Cannot request device information: %d\n", result); | ||
294 | return result; | ||
295 | } | ||
296 | |||
297 | needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length); | ||
298 | if (result < needed) { | ||
299 | dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs " | ||
300 | "%zu bytes needed)\n", (size_t)result, needed); | ||
301 | return result; | ||
302 | } | ||
303 | |||
304 | strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN); | ||
305 | cbaf->cdid = di->CDID; | ||
306 | cbaf->device_band_groups = le16_to_cpu(di->BandGroups); | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static ssize_t cbaf_wusb_chid_show(struct device *dev, | ||
312 | struct device_attribute *attr, | ||
313 | char *buf) | ||
314 | { | ||
315 | struct usb_interface *iface = to_usb_interface(dev); | ||
316 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
317 | char pr_chid[WUSB_CKHDID_STRSIZE]; | ||
318 | |||
319 | ckhdid_printf(pr_chid, sizeof(pr_chid), &cbaf->chid); | ||
320 | return scnprintf(buf, PAGE_SIZE, "%s\n", pr_chid); | ||
321 | } | ||
322 | |||
323 | static ssize_t cbaf_wusb_chid_store(struct device *dev, | ||
324 | struct device_attribute *attr, | ||
325 | const char *buf, size_t size) | ||
326 | { | ||
327 | ssize_t result; | ||
328 | struct usb_interface *iface = to_usb_interface(dev); | ||
329 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
330 | |||
331 | result = sscanf(buf, | ||
332 | "%02hhx %02hhx %02hhx %02hhx " | ||
333 | "%02hhx %02hhx %02hhx %02hhx " | ||
334 | "%02hhx %02hhx %02hhx %02hhx " | ||
335 | "%02hhx %02hhx %02hhx %02hhx", | ||
336 | &cbaf->chid.data[0] , &cbaf->chid.data[1], | ||
337 | &cbaf->chid.data[2] , &cbaf->chid.data[3], | ||
338 | &cbaf->chid.data[4] , &cbaf->chid.data[5], | ||
339 | &cbaf->chid.data[6] , &cbaf->chid.data[7], | ||
340 | &cbaf->chid.data[8] , &cbaf->chid.data[9], | ||
341 | &cbaf->chid.data[10], &cbaf->chid.data[11], | ||
342 | &cbaf->chid.data[12], &cbaf->chid.data[13], | ||
343 | &cbaf->chid.data[14], &cbaf->chid.data[15]); | ||
344 | |||
345 | if (result != 16) | ||
346 | return -EINVAL; | ||
347 | |||
348 | result = cbaf_send_host_info(cbaf); | ||
349 | if (result < 0) | ||
350 | return result; | ||
351 | result = cbaf_cdid_get(cbaf); | ||
352 | if (result < 0) | ||
353 | return -result; | ||
354 | return size; | ||
355 | } | ||
356 | static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store); | ||
357 | |||
358 | static ssize_t cbaf_wusb_host_name_show(struct device *dev, | ||
359 | struct device_attribute *attr, | ||
360 | char *buf) | ||
361 | { | ||
362 | struct usb_interface *iface = to_usb_interface(dev); | ||
363 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
364 | |||
365 | return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name); | ||
366 | } | ||
367 | |||
368 | static ssize_t cbaf_wusb_host_name_store(struct device *dev, | ||
369 | struct device_attribute *attr, | ||
370 | const char *buf, size_t size) | ||
371 | { | ||
372 | ssize_t result; | ||
373 | struct usb_interface *iface = to_usb_interface(dev); | ||
374 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
375 | |||
376 | result = sscanf(buf, "%63s", cbaf->host_name); | ||
377 | if (result != 1) | ||
378 | return -EINVAL; | ||
379 | |||
380 | return size; | ||
381 | } | ||
382 | static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show, | ||
383 | cbaf_wusb_host_name_store); | ||
384 | |||
385 | static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev, | ||
386 | struct device_attribute *attr, | ||
387 | char *buf) | ||
388 | { | ||
389 | struct usb_interface *iface = to_usb_interface(dev); | ||
390 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
391 | |||
392 | return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups); | ||
393 | } | ||
394 | |||
395 | static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev, | ||
396 | struct device_attribute *attr, | ||
397 | const char *buf, size_t size) | ||
398 | { | ||
399 | ssize_t result; | ||
400 | struct usb_interface *iface = to_usb_interface(dev); | ||
401 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
402 | u16 band_groups = 0; | ||
403 | |||
404 | result = sscanf(buf, "%04hx", &band_groups); | ||
405 | if (result != 1) | ||
406 | return -EINVAL; | ||
407 | |||
408 | cbaf->host_band_groups = band_groups; | ||
409 | |||
410 | return size; | ||
411 | } | ||
412 | |||
413 | static DEVICE_ATTR(wusb_host_band_groups, 0600, | ||
414 | cbaf_wusb_host_band_groups_show, | ||
415 | cbaf_wusb_host_band_groups_store); | ||
416 | |||
/*
 * Template for the DEVICE_INFO request buffer: only the attribute
 * headers are pre-filled here; the values are presumably filled in by
 * the device's reply (see cbaf_cdid_get() above -- TODO confirm).
 */
static const struct wusb_cbaf_device_info cbaf_device_info_defaults = {
	.Length_hdr = WUSB_AR_Length,
	.CDID_hdr = WUSB_AR_CDID,
	.BandGroups_hdr = WUSB_AR_BandGroups,
	.LangID_hdr = WUSB_AR_LangID,
	.DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName,
};
424 | |||
425 | static ssize_t cbaf_wusb_cdid_show(struct device *dev, | ||
426 | struct device_attribute *attr, char *buf) | ||
427 | { | ||
428 | struct usb_interface *iface = to_usb_interface(dev); | ||
429 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
430 | char pr_cdid[WUSB_CKHDID_STRSIZE]; | ||
431 | |||
432 | ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid); | ||
433 | return scnprintf(buf, PAGE_SIZE, "%s\n", pr_cdid); | ||
434 | } | ||
435 | |||
436 | static ssize_t cbaf_wusb_cdid_store(struct device *dev, | ||
437 | struct device_attribute *attr, | ||
438 | const char *buf, size_t size) | ||
439 | { | ||
440 | ssize_t result; | ||
441 | struct usb_interface *iface = to_usb_interface(dev); | ||
442 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
443 | struct wusb_ckhdid cdid; | ||
444 | |||
445 | result = sscanf(buf, | ||
446 | "%02hhx %02hhx %02hhx %02hhx " | ||
447 | "%02hhx %02hhx %02hhx %02hhx " | ||
448 | "%02hhx %02hhx %02hhx %02hhx " | ||
449 | "%02hhx %02hhx %02hhx %02hhx", | ||
450 | &cdid.data[0] , &cdid.data[1], | ||
451 | &cdid.data[2] , &cdid.data[3], | ||
452 | &cdid.data[4] , &cdid.data[5], | ||
453 | &cdid.data[6] , &cdid.data[7], | ||
454 | &cdid.data[8] , &cdid.data[9], | ||
455 | &cdid.data[10], &cdid.data[11], | ||
456 | &cdid.data[12], &cdid.data[13], | ||
457 | &cdid.data[14], &cdid.data[15]); | ||
458 | if (result != 16) | ||
459 | return -EINVAL; | ||
460 | |||
461 | cbaf->cdid = cdid; | ||
462 | |||
463 | return size; | ||
464 | } | ||
465 | static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store); | ||
466 | |||
467 | static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev, | ||
468 | struct device_attribute *attr, | ||
469 | char *buf) | ||
470 | { | ||
471 | struct usb_interface *iface = to_usb_interface(dev); | ||
472 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
473 | |||
474 | return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups); | ||
475 | } | ||
476 | |||
477 | static DEVICE_ATTR(wusb_device_band_groups, 0600, | ||
478 | cbaf_wusb_device_band_groups_show, | ||
479 | NULL); | ||
480 | |||
481 | static ssize_t cbaf_wusb_device_name_show(struct device *dev, | ||
482 | struct device_attribute *attr, | ||
483 | char *buf) | ||
484 | { | ||
485 | struct usb_interface *iface = to_usb_interface(dev); | ||
486 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
487 | |||
488 | return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name); | ||
489 | } | ||
490 | static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL); | ||
491 | |||
/*
 * Template for the Connection Context upload (see cbaf_cc_upload()
 * below): attribute headers and the fixed association type/subtype are
 * pre-filled; CHID/CDID/CK/BandGroups are filled in per association.
 */
static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
	.AssociationTypeId_hdr    = WUSB_AR_AssociationTypeId,
	.AssociationTypeId        = cpu_to_le16(AR_TYPE_WUSB),
	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
	.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
	.Length_hdr               = WUSB_AR_Length,
	.Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
	.ConnectionContext_hdr    = WUSB_AR_ConnectionContext,
	.BandGroups_hdr           = WUSB_AR_BandGroups,
};
502 | |||
/*
 * Header-only template for the failure variant of the Connection
 * Context message (association status reported instead of a CC).
 */
static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = {
	.AssociationTypeId_hdr    = WUSB_AR_AssociationTypeId,
	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
	.Length_hdr               = WUSB_AR_Length,
	.AssociationStatus_hdr    = WUSB_AR_AssociationStatus,
};
509 | |||
510 | /* | ||
511 | * Send a new CC to the device. | ||
512 | */ | ||
513 | static int cbaf_cc_upload(struct cbaf *cbaf) | ||
514 | { | ||
515 | int result; | ||
516 | struct device *dev = &cbaf->usb_iface->dev; | ||
517 | struct wusb_cbaf_cc_data *ccd; | ||
518 | char pr_cdid[WUSB_CKHDID_STRSIZE]; | ||
519 | |||
520 | ccd = cbaf->buffer; | ||
521 | *ccd = cbaf_cc_data_defaults; | ||
522 | ccd->CHID = cbaf->chid; | ||
523 | ccd->CDID = cbaf->cdid; | ||
524 | ccd->CK = cbaf->ck; | ||
525 | ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups); | ||
526 | |||
527 | dev_dbg(dev, "Trying to upload CC:\n"); | ||
528 | ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID); | ||
529 | dev_dbg(dev, " CHID %s\n", pr_cdid); | ||
530 | ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID); | ||
531 | dev_dbg(dev, " CDID %s\n", pr_cdid); | ||
532 | dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups); | ||
533 | |||
534 | result = usb_control_msg( | ||
535 | cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0), | ||
536 | CBAF_REQ_SET_ASSOCIATION_RESPONSE, | ||
537 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
538 | 0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
539 | ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */); | ||
540 | |||
541 | return result; | ||
542 | } | ||
543 | |||
544 | static ssize_t cbaf_wusb_ck_store(struct device *dev, | ||
545 | struct device_attribute *attr, | ||
546 | const char *buf, size_t size) | ||
547 | { | ||
548 | ssize_t result; | ||
549 | struct usb_interface *iface = to_usb_interface(dev); | ||
550 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
551 | |||
552 | result = sscanf(buf, | ||
553 | "%02hhx %02hhx %02hhx %02hhx " | ||
554 | "%02hhx %02hhx %02hhx %02hhx " | ||
555 | "%02hhx %02hhx %02hhx %02hhx " | ||
556 | "%02hhx %02hhx %02hhx %02hhx", | ||
557 | &cbaf->ck.data[0] , &cbaf->ck.data[1], | ||
558 | &cbaf->ck.data[2] , &cbaf->ck.data[3], | ||
559 | &cbaf->ck.data[4] , &cbaf->ck.data[5], | ||
560 | &cbaf->ck.data[6] , &cbaf->ck.data[7], | ||
561 | &cbaf->ck.data[8] , &cbaf->ck.data[9], | ||
562 | &cbaf->ck.data[10], &cbaf->ck.data[11], | ||
563 | &cbaf->ck.data[12], &cbaf->ck.data[13], | ||
564 | &cbaf->ck.data[14], &cbaf->ck.data[15]); | ||
565 | if (result != 16) | ||
566 | return -EINVAL; | ||
567 | |||
568 | result = cbaf_cc_upload(cbaf); | ||
569 | if (result < 0) | ||
570 | return result; | ||
571 | |||
572 | return size; | ||
573 | } | ||
574 | static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store); | ||
575 | |||
/* All CBA attributes, exposed flat in the interface's sysfs dir. */
static struct attribute *cbaf_dev_attrs[] = {
	&dev_attr_wusb_host_name.attr,
	&dev_attr_wusb_host_band_groups.attr,
	&dev_attr_wusb_chid.attr,
	&dev_attr_wusb_cdid.attr,
	&dev_attr_wusb_device_name.attr,
	&dev_attr_wusb_device_band_groups.attr,
	&dev_attr_wusb_ck.attr,
	NULL,
};
586 | |||
/* Registered in cbaf_probe(), removed in cbaf_disconnect(). */
static struct attribute_group cbaf_dev_attr_group = {
	.name = NULL,	/* we want them in the same directory */
	.attrs = cbaf_dev_attrs,
};
591 | |||
592 | static int cbaf_probe(struct usb_interface *iface, | ||
593 | const struct usb_device_id *id) | ||
594 | { | ||
595 | struct cbaf *cbaf; | ||
596 | struct device *dev = &iface->dev; | ||
597 | int result = -ENOMEM; | ||
598 | |||
599 | cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL); | ||
600 | if (cbaf == NULL) | ||
601 | goto error_kzalloc; | ||
602 | cbaf->buffer = kmalloc(512, GFP_KERNEL); | ||
603 | if (cbaf->buffer == NULL) | ||
604 | goto error_kmalloc_buffer; | ||
605 | |||
606 | cbaf->buffer_size = 512; | ||
607 | cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface)); | ||
608 | cbaf->usb_iface = usb_get_intf(iface); | ||
609 | result = cbaf_check(cbaf); | ||
610 | if (result < 0) { | ||
611 | dev_err(dev, "This device is not WUSB-CBAF compliant" | ||
612 | "and is not supported yet.\n"); | ||
613 | goto error_check; | ||
614 | } | ||
615 | |||
616 | result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group); | ||
617 | if (result < 0) { | ||
618 | dev_err(dev, "Can't register sysfs attr group: %d\n", result); | ||
619 | goto error_create_group; | ||
620 | } | ||
621 | usb_set_intfdata(iface, cbaf); | ||
622 | return 0; | ||
623 | |||
624 | error_create_group: | ||
625 | error_check: | ||
626 | kfree(cbaf->buffer); | ||
627 | error_kmalloc_buffer: | ||
628 | kfree(cbaf); | ||
629 | error_kzalloc: | ||
630 | return result; | ||
631 | } | ||
632 | |||
633 | static void cbaf_disconnect(struct usb_interface *iface) | ||
634 | { | ||
635 | struct cbaf *cbaf = usb_get_intfdata(iface); | ||
636 | struct device *dev = &iface->dev; | ||
637 | sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group); | ||
638 | usb_set_intfdata(iface, NULL); | ||
639 | usb_put_intf(iface); | ||
640 | kfree(cbaf->buffer); | ||
641 | /* paranoia: clean up crypto keys */ | ||
642 | memset(cbaf, 0, sizeof(*cbaf)); | ||
643 | kfree(cbaf); | ||
644 | } | ||
645 | |||
/* Interface class 0xef (misc) / 0x03 / 0x01 -- presumably the Cable
 * Based Association Framework interface triple; verify against the
 * WUSB CBA spec. */
static struct usb_device_id cbaf_id_table[] = {
	{ USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
	{ },
};
MODULE_DEVICE_TABLE(usb, cbaf_id_table);
651 | |||
/* USB driver glue; probe/disconnect defined above. */
static struct usb_driver cbaf_driver = {
	.name =		"wusb-cbaf",
	.id_table =	cbaf_id_table,
	.probe =	cbaf_probe,
	.disconnect =	cbaf_disconnect,
};
658 | |||
/* Module init: just register the USB driver. */
static int __init cbaf_driver_init(void)
{
	return usb_register(&cbaf_driver);
}
module_init(cbaf_driver_init);

/* Module exit: unregister; the USB core calls disconnect as needed. */
static void __exit cbaf_driver_exit(void)
{
	usb_deregister(&cbaf_driver);
}
module_exit(cbaf_driver_exit);

MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless USB Cable Based Association");
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c new file mode 100644 index 000000000000..c36c4389baae --- /dev/null +++ b/drivers/usb/wusbcore/crypto.c | |||
@@ -0,0 +1,538 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * AES-128 CCM Encryption | ||
4 | * | ||
5 | * Copyright (C) 2007 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * We don't do any encryption here; we use the Linux Kernel's AES-128 | ||
24 | * crypto modules to construct keys and payload blocks in a way | ||
 * defined by WUSB1.0[6]. Check the erratas, as typos are patched
26 | * there. | ||
27 | * | ||
28 | * Thanks a zillion to John Keys for his help and clarifications over | ||
29 | * the designed-by-a-committee text. | ||
30 | * | ||
31 | * So the idea is that there is this basic Pseudo-Random-Function | ||
32 | * defined in WUSB1.0[6.5] which is the core of everything. It works | ||
33 | * by tweaking some blocks, AES crypting them and then xoring | ||
34 | * something else with them (this seems to be called CBC(AES) -- can | ||
35 | * you tell I know jack about crypto?). So we just funnel it into the | ||
36 | * Linux Crypto API. | ||
37 | * | ||
38 | * We leave a crypto test module so we can verify that vectors match, | ||
39 | * every now and then. | ||
40 | * | ||
41 | * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I | ||
42 | * am learning a lot... | ||
43 | * | ||
44 | * Conveniently, some data structures that need to be | ||
45 | * funneled through AES are...16 bytes in size! | ||
46 | */ | ||
47 | |||
48 | #include <linux/crypto.h> | ||
49 | #include <linux/module.h> | ||
50 | #include <linux/err.h> | ||
51 | #include <linux/uwb.h> | ||
52 | #include <linux/usb/wusb.h> | ||
53 | #include <linux/scatterlist.h> | ||
54 | #define D_LOCAL 0 | ||
55 | #include <linux/uwb/debug.h> | ||
56 | |||
57 | |||
58 | /* | ||
59 | * Block of data, as understood by AES-CCM | ||
60 | * | ||
61 | * The code assumes this structure is nothing but a 16 byte array | ||
62 | * (packed in a struct to avoid common mess ups that I usually do with | ||
63 | * arrays and enforcing type checking). | ||
64 | */ | ||
struct aes_ccm_block {
	u8 data[16];		/* exactly one AES-128 block */
} __attribute__((packed));
68 | |||
69 | /* | ||
70 | * Counter-mode Blocks (WUSB1.0[6.4]) | ||
71 | * | ||
72 | * According to CCM (or so it seems), for the purpose of calculating | ||
73 | * the MIC, the message is broken in N counter-mode blocks, B0, B1, | ||
74 | * ... BN. | ||
75 | * | ||
76 | * B0 contains flags, the CCM nonce and l(m). | ||
77 | * | ||
78 | * B1 contains l(a), the MAC header, the encryption offset and padding. | ||
79 | * | ||
80 | * If EO is nonzero, additional blocks are built from payload bytes | ||
 * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
82 | * padding is not xmitted. | ||
83 | */ | ||
84 | |||
85 | /* WUSB1.0[T6.4] */ | ||
struct aes_ccm_b0 {
	u8 flags;	/* 0x59, per CCM spec */
	struct aes_ccm_nonce ccm_nonce;	/* SFN/TKID/addresses (see test vectors) */
	__be16 lm;	/* l(m); WUSB1.0[6.5] fixes it at 0 (MIC-only) */
} __attribute__((packed));
91 | |||
92 | /* WUSB1.0[T6.5] */ | ||
93 | struct aes_ccm_b1 { | ||
94 | __be16 la; | ||
95 | u8 mac_header[10]; | ||
96 | __le16 eo; | ||
97 | u8 security_reserved; /* This is always zero */ | ||
98 | u8 padding; /* 0 */ | ||
99 | } __attribute__((packed)); | ||
100 | |||
101 | /* | ||
102 | * Encryption Blocks (WUSB1.0[6.4.4]) | ||
103 | * | ||
104 | * CCM uses Ax blocks to generate a keystream with which the MIC and | ||
105 | * the message's payload are encoded. A0 always encrypts/decrypts the | ||
 * MIC. Ax (x>0) are used for the successive payload blocks.
107 | * | ||
108 | * The x is the counter, and is increased for each block. | ||
109 | */ | ||
struct aes_ccm_a {
	u8 flags;	/* 0x01, per CCM spec */
	struct aes_ccm_nonce ccm_nonce;
	__be16 counter;	/* Value of x; x == 0 encrypts the MIC */
} __attribute__((packed));
115 | |||
116 | static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2, | ||
117 | size_t size) | ||
118 | { | ||
119 | u8 *bo = _bo; | ||
120 | const u8 *bi1 = _bi1, *bi2 = _bi2; | ||
121 | size_t itr; | ||
122 | for (itr = 0; itr < size; itr++) | ||
123 | bo[itr] = bi1[itr] ^ bi2[itr]; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * CC-MAC function WUSB1.0[6.5] | ||
128 | * | ||
129 | * Take a data string and produce the encrypted CBC Counter-mode MIC | ||
130 | * | ||
131 | * Note the names for most function arguments are made to (more or | ||
132 | * less) match those used in the pseudo-function definition given in | ||
133 | * WUSB1.0[6.5]. | ||
134 | * | ||
135 | * @tfm_cbc: CBC(AES) blkcipher handle (initialized) | ||
136 | * | ||
137 | * @tfm_aes: AES cipher handle (initialized) | ||
138 | * | ||
139 | * @mic: buffer for placing the computed MIC (Message Integrity | ||
140 | * Code). This is exactly 8 bytes, and we expect the buffer to | ||
141 | * be at least eight bytes in length. | ||
142 | * | ||
143 | * @key: 128 bit symmetric key | ||
144 | * | ||
145 | * @n: CCM nonce | ||
146 | * | ||
147 | * @a: ASCII string, 14 bytes long (I guess zero padded if needed; | ||
148 | * we use exactly 14 bytes). | ||
149 | * | ||
150 | * @b: data stream to be processed; cannot be a global or const local | ||
151 | * (will confuse the scatterlists) | ||
152 | * | ||
153 | * @blen: size of b... | ||
154 | * | ||
155 | * Still not very clear how this is done, but looks like this: we | ||
156 | * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with | ||
157 | * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we | ||
158 | * take the payload and divide it in blocks (16 bytes), xor them with | ||
159 | * the previous crypto result (16 bytes) and crypt it, repeat the next | ||
160 | * block with the output of the previous one, rinse wash (I guess this | ||
161 | * is what AES CBC mode means...but I truly have no idea). So we use | ||
162 | * the CBC(AES) blkcipher, that does precisely that. The IV (Initial | ||
163 | * Vector) is 16 bytes and is set to zero, so | ||
164 | * | ||
165 | * See rfc3610. Linux crypto has a CBC implementation, but the | ||
166 | * documentation is scarce, to say the least, and the example code is | ||
 * so intricate that it is difficult to understand how things work. Most
168 | * of this is guess work -- bite me. | ||
169 | * | ||
170 | * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and | ||
171 | * using the 14 bytes of @a to fill up | ||
172 | * b1.{mac_header,e0,security_reserved,padding}. | ||
173 | * | ||
 * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
175 | * l(m) is orthogonal, they bear no relationship, so it is not | ||
176 | * in conflict with the parameter's relation that | ||
177 | * WUSB1.0[6.4.2]) defines. | ||
178 | * | ||
179 | * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in | ||
180 | * first errata released on 2005/07. | ||
181 | * | ||
182 | * NOTE: we need to clean IV to zero at each invocation to make sure | ||
183 | * we start with a fresh empty Initial Vector, so that the CBC | ||
184 | * works ok. | ||
185 | * | ||
186 | * NOTE: blen is not aligned to a block size, we'll pad zeros, that's | ||
187 | * what sg[4] is for. Maybe there is a smarter way to do this. | ||
188 | */ | ||
189 | static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc, | ||
190 | struct crypto_cipher *tfm_aes, void *mic, | ||
191 | const struct aes_ccm_nonce *n, | ||
192 | const struct aes_ccm_label *a, const void *b, | ||
193 | size_t blen) | ||
194 | { | ||
195 | int result = 0; | ||
196 | struct blkcipher_desc desc; | ||
197 | struct aes_ccm_b0 b0; | ||
198 | struct aes_ccm_b1 b1; | ||
199 | struct aes_ccm_a ax; | ||
200 | struct scatterlist sg[4], sg_dst; | ||
201 | void *iv, *dst_buf; | ||
202 | size_t ivsize, dst_size; | ||
203 | const u8 bzero[16] = { 0 }; | ||
204 | size_t zero_padding; | ||
205 | |||
206 | d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " | ||
207 | "n %p, a %p, b %p, blen %zu)\n", | ||
208 | tfm_cbc, tfm_aes, mic, n, a, b, blen); | ||
209 | /* | ||
210 | * These checks should be compile time optimized out | ||
211 | * ensure @a fills b1's mac_header and following fields | ||
212 | */ | ||
213 | WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la)); | ||
214 | WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block)); | ||
215 | WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block)); | ||
216 | WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block)); | ||
217 | |||
218 | result = -ENOMEM; | ||
219 | zero_padding = sizeof(struct aes_ccm_block) | ||
220 | - blen % sizeof(struct aes_ccm_block); | ||
221 | zero_padding = blen % sizeof(struct aes_ccm_block); | ||
222 | if (zero_padding) | ||
223 | zero_padding = sizeof(struct aes_ccm_block) - zero_padding; | ||
224 | dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding; | ||
225 | dst_buf = kzalloc(dst_size, GFP_KERNEL); | ||
226 | if (dst_buf == NULL) { | ||
227 | printk(KERN_ERR "E: can't alloc destination buffer\n"); | ||
228 | goto error_dst_buf; | ||
229 | } | ||
230 | |||
231 | iv = crypto_blkcipher_crt(tfm_cbc)->iv; | ||
232 | ivsize = crypto_blkcipher_ivsize(tfm_cbc); | ||
233 | memset(iv, 0, ivsize); | ||
234 | |||
235 | /* Setup B0 */ | ||
236 | b0.flags = 0x59; /* Format B0 */ | ||
237 | b0.ccm_nonce = *n; | ||
238 | b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */ | ||
239 | |||
240 | /* Setup B1 | ||
241 | * | ||
242 | * The WUSB spec is anything but clear! WUSB1.0[6.5] | ||
243 | * says that to initialize B1 from A with 'l(a) = blen + | ||
244 | * 14'--after clarification, it means to use A's contents | ||
245 | * for MAC Header, EO, sec reserved and padding. | ||
246 | */ | ||
247 | b1.la = cpu_to_be16(blen + 14); | ||
248 | memcpy(&b1.mac_header, a, sizeof(*a)); | ||
249 | |||
250 | d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0)); | ||
251 | d_dump(4, NULL, &b0, sizeof(b0)); | ||
252 | d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1)); | ||
253 | d_dump(4, NULL, &b1, sizeof(b1)); | ||
254 | d_printf(4, NULL, "I: B (%zu bytes)\n", blen); | ||
255 | d_dump(4, NULL, b, blen); | ||
256 | d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding); | ||
257 | d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize); | ||
258 | d_dump(4, NULL, iv, ivsize); | ||
259 | |||
260 | sg_init_table(sg, ARRAY_SIZE(sg)); | ||
261 | sg_set_buf(&sg[0], &b0, sizeof(b0)); | ||
262 | sg_set_buf(&sg[1], &b1, sizeof(b1)); | ||
263 | sg_set_buf(&sg[2], b, blen); | ||
264 | /* 0 if well behaved :) */ | ||
265 | sg_set_buf(&sg[3], bzero, zero_padding); | ||
266 | sg_init_one(&sg_dst, dst_buf, dst_size); | ||
267 | |||
268 | desc.tfm = tfm_cbc; | ||
269 | desc.flags = 0; | ||
270 | result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size); | ||
271 | if (result < 0) { | ||
272 | printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n", | ||
273 | result); | ||
274 | goto error_cbc_crypt; | ||
275 | } | ||
276 | d_printf(4, NULL, "D: MIC tag\n"); | ||
277 | d_dump(4, NULL, iv, ivsize); | ||
278 | |||
279 | /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5] | ||
280 | * The procedure is to AES crypt the A0 block and XOR the MIC | ||
281 | * Tag agains it; we only do the first 8 bytes and place it | ||
282 | * directly in the destination buffer. | ||
283 | * | ||
284 | * POS Crypto API: size is assumed to be AES's block size. | ||
285 | * Thanks for documenting it -- tip taken from airo.c | ||
286 | */ | ||
287 | ax.flags = 0x01; /* as per WUSB 1.0 spec */ | ||
288 | ax.ccm_nonce = *n; | ||
289 | ax.counter = 0; | ||
290 | crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax); | ||
291 | bytewise_xor(mic, &ax, iv, 8); | ||
292 | d_printf(4, NULL, "D: CTR[MIC]\n"); | ||
293 | d_dump(4, NULL, &ax, 8); | ||
294 | d_printf(4, NULL, "D: CCM-MIC tag\n"); | ||
295 | d_dump(4, NULL, mic, 8); | ||
296 | result = 8; | ||
297 | error_cbc_crypt: | ||
298 | kfree(dst_buf); | ||
299 | error_dst_buf: | ||
300 | d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " | ||
301 | "n %p, a %p, b %p, blen %zu)\n", | ||
302 | tfm_cbc, tfm_aes, mic, n, a, b, blen); | ||
303 | return result; | ||
304 | } | ||
305 | |||
/*
 * WUSB Pseudo Random Function (WUSB1.0[6.5])
 *
 * Derives (len + 63) / 64 blocks of 8 bytes each by running
 * wusb_ccm_mac() with an incrementing SFN in the nonce, writing the
 * output consecutively into @out.  Presumably @len is a bit count
 * (e.g. 256 -> 32 bytes of output) -- TODO confirm against callers.
 *
 * NOTE(review): @out_size is accepted but never checked against the
 * number of bytes produced; callers must size @out for ceil(len/64)*8
 * bytes -- confirm or add a bounds check.
 *
 * @b: buffer to the source data; cannot be a global or const local
 *     (will confuse the scatterlists)
 *
 * Returns: number of bytes written to @out, or negative errno.
 */
ssize_t wusb_prf(void *out, size_t out_size,
		 const u8 key[16], const struct aes_ccm_nonce *_n,
		 const struct aes_ccm_label *a,
		 const void *b, size_t blen, size_t len)
{
	ssize_t result, bytes = 0, bitr;
	struct aes_ccm_nonce n = *_n;	/* local copy: we mutate the SFN */
	struct crypto_blkcipher *tfm_cbc;
	struct crypto_cipher *tfm_aes;
	u64 sfn = 0;
	__le64 sfn_le;

	d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
		  "a %p, b %p, blen %zu, len %zu)\n", out, out_size,
		  key, _n, a, b, blen, len);

	/* Allocate and key both transforms up front; errors unwind via
	 * the goto chain below. */
	tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_cbc)) {
		result = PTR_ERR(tfm_cbc);
		printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
		goto error_alloc_cbc;
	}
	result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
		goto error_setkey_cbc;
	}

	tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_aes)) {
		result = PTR_ERR(tfm_aes);
		printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
		goto error_alloc_aes;
	}
	result = crypto_cipher_setkey(tfm_aes, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
		goto error_setkey_aes;
	}

	/* One 8-byte MIC per 64 units of @len, SFN incremented each
	 * round so every block of keystream differs. */
	for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
		sfn_le = cpu_to_le64(sfn++);
		memcpy(&n.sfn, &sfn_le, sizeof(n.sfn));	/* n.sfn++... */
		result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
				      &n, a, b, blen);
		if (result < 0)
			goto error_ccm_mac;
		bytes += result;
	}
	result = bytes;
error_ccm_mac:
error_setkey_aes:
	crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
	crypto_free_blkcipher(tfm_cbc);
error_alloc_cbc:
	d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
		"a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size,
		key, _n, a, b, blen, len, (int)bytes);
	return result;
}
374 | |||
375 | /* WUSB1.0[A.2] test vectors */ | ||
/* KCK used for the OOB MIC verification (WUSB1.0[A.2]). */
static const u8 stv_hsmic_key[16] = {
	0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
	0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
};

/* CCM nonce for the OOB MIC verification (WUSB1.0[A.2]). */
static const struct aes_ccm_nonce stv_hsmic_n = {
	.sfn = { 0 },
	.tkid = { 0x76, 0x98, 0x01, },
	.dest_addr = { .data = { 0xbe, 0x00 } },
	.src_addr = { .data = { 0x76, 0x98 } },
};
387 | |||
/*
 * Out-of-band MIC Generation verification code
 *
 * Runs wusb_oob_mic() over the WUSB1.0[A.2] handshake test vector and
 * compares the result against the published MIC; dumps all inputs and
 * outputs on mismatch.  Returns 0 on match, negative errno otherwise.
 */
static int wusb_oob_mic_verify(void)
{
	int result;
	u8 mic[8];
	/* WUSB1.0[A.2] test vectors
	 *
	 * Need to keep it in the local stack as GCC 4.1.3something
	 * messes up and generates noise.
	 */
	struct usb_handshake stv_hsmic_hs = {
		.bMessageNumber = 2,
		.bStatus 	= 00,
		.tTKID 		= { 0x76, 0x98, 0x01 },
		.bReserved 	= 00,
		.CDID 		= { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
				    0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
				    0x3c, 0x3d, 0x3e, 0x3f },
		.nonce	 	= { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
				    0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
				    0x2c, 0x2d, 0x2e, 0x2f },
		.MIC	 	= { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
				    0x14, 0x7b } ,
	};
	size_t hs_size;

	result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs);
	if (result < 0)
		printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result);
	else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) {
		printk(KERN_ERR "E: OOB MIC test: "
		       "mismatch between MIC result and WUSB1.0[A2]\n");
		/* The MIC covers the handshake minus the MIC field. */
		hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
		printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
		dump_bytes(NULL, &stv_hsmic_hs, hs_size);
		printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
		       sizeof(stv_hsmic_n));
		dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n));
		printk(KERN_ERR "E: MIC out:\n");
		dump_bytes(NULL, mic, sizeof(mic));
		printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
		dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
		result = -EINVAL;
	} else
		result = 0;
	return result;
}
438 | |||
439 | /* | ||
440 | * Test vectors for Key derivation | ||
441 | * | ||
442 | * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1] | ||
443 | * (errata corrected in 2005/07). | ||
444 | */ | ||
/* Master key input for the key-derivation self-test (WUSB1.0[A.1]). */
static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = {
	0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
	0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f
};

/* CCM nonce input for the key-derivation self-test (WUSB1.0[A.1]). */
static const struct aes_ccm_nonce stv_keydvt_n_a1 = {
	.sfn = { 0 },
	.tkid = { 0x76, 0x98, 0x01, },
	.dest_addr = { .data = { 0xbe, 0x00 } },
	.src_addr = { .data = { 0x76, 0x98 } },
};

/* Expected KCK/PTK output per WUSB1.0[A.1] + 2006/12 errata. */
static const struct wusb_keydvt_out stv_keydvt_out_a1 = {
	.kck = {
		0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
		0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
	},
	.ptk = {
		0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06,
		0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d
	}
};
467 | |||
468 | /* | ||
469 | * Performa a test to make sure we match the vectors defined in | ||
470 | * WUSB1.0[A.1](Errata2006/12) | ||
471 | */ | ||
472 | static int wusb_key_derive_verify(void) | ||
473 | { | ||
474 | int result = 0; | ||
475 | struct wusb_keydvt_out keydvt_out; | ||
476 | /* These come from WUSB1.0[A.1] + 2006/12 errata | ||
477 | * NOTE: can't make this const or global -- somehow it seems | ||
478 | * the scatterlists for crypto get confused and we get | ||
479 | * bad data. There is no doc on this... */ | ||
480 | struct wusb_keydvt_in stv_keydvt_in_a1 = { | ||
481 | .hnonce = { | ||
482 | 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, | ||
483 | 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f | ||
484 | }, | ||
485 | .dnonce = { | ||
486 | 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, | ||
487 | 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f | ||
488 | } | ||
489 | }; | ||
490 | |||
491 | result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1, | ||
492 | &stv_keydvt_in_a1); | ||
493 | if (result < 0) | ||
494 | printk(KERN_ERR "E: WUSB key derivation test: " | ||
495 | "derivation failed: %d\n", result); | ||
496 | if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) { | ||
497 | printk(KERN_ERR "E: WUSB key derivation test: " | ||
498 | "mismatch between key derivation result " | ||
499 | "and WUSB1.0[A1] Errata 2006/12\n"); | ||
500 | printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n", | ||
501 | sizeof(stv_key_a1)); | ||
502 | dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1)); | ||
503 | printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n", | ||
504 | sizeof(stv_keydvt_n_a1)); | ||
505 | dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1)); | ||
506 | printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n", | ||
507 | sizeof(stv_keydvt_in_a1)); | ||
508 | dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1)); | ||
509 | printk(KERN_ERR "E: keydvt out: KCK\n"); | ||
510 | dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck)); | ||
511 | printk(KERN_ERR "E: keydvt out: PTK\n"); | ||
512 | dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk)); | ||
513 | result = -EINVAL; | ||
514 | } else | ||
515 | result = 0; | ||
516 | return result; | ||
517 | } | ||
518 | |||
519 | /* | ||
520 | * Initialize crypto system | ||
521 | * | ||
522 | * FIXME: we do nothing now, other than verifying. Later on we'll | ||
523 | * cache the encryption stuff, so that's why we have a separate init. | ||
524 | */ | ||
/*
 * Initialize crypto system
 *
 * FIXME: we do nothing now, other than verifying. Later on we'll
 * cache the encryption stuff, so that's why we have a separate init.
 */
int wusb_crypto_init(void)
{
	int result = wusb_key_derive_verify();

	if (result < 0)
		return result;
	return wusb_oob_mic_verify();
}
534 | |||
/* Teardown counterpart of wusb_crypto_init(); currently a no-op. */
void wusb_crypto_exit(void)
{
	/* FIXME: free cached crypto transforms */
}
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c new file mode 100644 index 000000000000..7897a19652e5 --- /dev/null +++ b/drivers/usb/wusbcore/dev-sysfs.c | |||
@@ -0,0 +1,143 @@ | |||
1 | /* | ||
2 | * WUSB devices | ||
3 | * sysfs bindings | ||
4 | * | ||
5 | * Copyright (C) 2007 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * Get them out of the way... | ||
24 | */ | ||
25 | |||
26 | #include <linux/jiffies.h> | ||
27 | #include <linux/ctype.h> | ||
28 | #include <linux/workqueue.h> | ||
29 | #include "wusbhc.h" | ||
30 | |||
31 | #undef D_LOCAL | ||
32 | #define D_LOCAL 4 | ||
33 | #include <linux/uwb/debug.h> | ||
34 | |||
35 | static ssize_t wusb_disconnect_store(struct device *dev, | ||
36 | struct device_attribute *attr, | ||
37 | const char *buf, size_t size) | ||
38 | { | ||
39 | struct usb_device *usb_dev; | ||
40 | struct wusbhc *wusbhc; | ||
41 | unsigned command; | ||
42 | u8 port_idx; | ||
43 | |||
44 | if (sscanf(buf, "%u", &command) != 1) | ||
45 | return -EINVAL; | ||
46 | if (command == 0) | ||
47 | return size; | ||
48 | usb_dev = to_usb_device(dev); | ||
49 | wusbhc = wusbhc_get_by_usb_dev(usb_dev); | ||
50 | if (wusbhc == NULL) | ||
51 | return -ENODEV; | ||
52 | |||
53 | mutex_lock(&wusbhc->mutex); | ||
54 | port_idx = wusb_port_no_to_idx(usb_dev->portnum); | ||
55 | __wusbhc_dev_disable(wusbhc, port_idx); | ||
56 | mutex_unlock(&wusbhc->mutex); | ||
57 | wusbhc_put(wusbhc); | ||
58 | return size; | ||
59 | } | ||
60 | static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store); | ||
61 | |||
62 | static ssize_t wusb_cdid_show(struct device *dev, | ||
63 | struct device_attribute *attr, char *buf) | ||
64 | { | ||
65 | ssize_t result; | ||
66 | struct wusb_dev *wusb_dev; | ||
67 | |||
68 | wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev)); | ||
69 | if (wusb_dev == NULL) | ||
70 | return -ENODEV; | ||
71 | result = ckhdid_printf(buf, PAGE_SIZE, &wusb_dev->cdid); | ||
72 | strcat(buf, "\n"); | ||
73 | wusb_dev_put(wusb_dev); | ||
74 | return result + 1; | ||
75 | } | ||
76 | static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL); | ||
77 | |||
78 | static ssize_t wusb_ck_store(struct device *dev, | ||
79 | struct device_attribute *attr, | ||
80 | const char *buf, size_t size) | ||
81 | { | ||
82 | int result; | ||
83 | struct usb_device *usb_dev; | ||
84 | struct wusbhc *wusbhc; | ||
85 | struct wusb_ckhdid ck; | ||
86 | |||
87 | result = sscanf(buf, | ||
88 | "%02hhx %02hhx %02hhx %02hhx " | ||
89 | "%02hhx %02hhx %02hhx %02hhx " | ||
90 | "%02hhx %02hhx %02hhx %02hhx " | ||
91 | "%02hhx %02hhx %02hhx %02hhx\n", | ||
92 | &ck.data[0] , &ck.data[1], | ||
93 | &ck.data[2] , &ck.data[3], | ||
94 | &ck.data[4] , &ck.data[5], | ||
95 | &ck.data[6] , &ck.data[7], | ||
96 | &ck.data[8] , &ck.data[9], | ||
97 | &ck.data[10], &ck.data[11], | ||
98 | &ck.data[12], &ck.data[13], | ||
99 | &ck.data[14], &ck.data[15]); | ||
100 | if (result != 16) | ||
101 | return -EINVAL; | ||
102 | |||
103 | usb_dev = to_usb_device(dev); | ||
104 | wusbhc = wusbhc_get_by_usb_dev(usb_dev); | ||
105 | if (wusbhc == NULL) | ||
106 | return -ENODEV; | ||
107 | result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck); | ||
108 | memset(&ck, 0, sizeof(ck)); | ||
109 | wusbhc_put(wusbhc); | ||
110 | return result < 0 ? result : size; | ||
111 | } | ||
112 | static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store); | ||
113 | |||
/* sysfs attributes attached to each WUSB device (see handlers above). */
static struct attribute *wusb_dev_attrs[] = {
	&dev_attr_wusb_disconnect.attr,
	&dev_attr_wusb_cdid.attr,
	&dev_attr_wusb_ck.attr,
	NULL,
};

/* NULL group name: attributes go straight into the device's directory. */
static struct attribute_group wusb_dev_attr_group = {
	.name = NULL,	/* we want them in the same directory */
	.attrs = wusb_dev_attrs,
};
125 | |||
126 | int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev, | ||
127 | struct wusb_dev *wusb_dev) | ||
128 | { | ||
129 | int result = sysfs_create_group(&usb_dev->dev.kobj, | ||
130 | &wusb_dev_attr_group); | ||
131 | struct device *dev = &usb_dev->dev; | ||
132 | if (result < 0) | ||
133 | dev_err(dev, "Cannot register WUSB-dev attributes: %d\n", | ||
134 | result); | ||
135 | return result; | ||
136 | } | ||
137 | |||
138 | void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev) | ||
139 | { | ||
140 | struct usb_device *usb_dev = wusb_dev->usb_dev; | ||
141 | if (usb_dev) | ||
142 | sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group); | ||
143 | } | ||
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c new file mode 100644 index 000000000000..f45d777bef34 --- /dev/null +++ b/drivers/usb/wusbcore/devconnect.c | |||
@@ -0,0 +1,1297 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) | ||
3 | * Device Connect handling | ||
4 | * | ||
5 | * Copyright (C) 2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | * FIXME: this file needs to be broken up, it's grown too big | ||
25 | * | ||
26 | * | ||
27 | * WUSB1.0[7.1, 7.5.1, ] | ||
28 | * | ||
29 | * WUSB device connection is kind of messy. Some background: | ||
30 | * | ||
31 | * When a device wants to connect it scans the UWB radio channels | ||
32 | * looking for a WUSB Channel; a WUSB channel is defined by MMCs | ||
33 | * (Micro Managed Commands or something like that) [see | ||
34 | * Design-overview for more on this] . | ||
35 | * | ||
36 | * So, device scans the radio, finds MMCs and thus a host and checks | ||
37 | * when the next DNTS is. It sends a Device Notification Connect | ||
38 | * (DN_Connect); the host picks it up (through nep.c and notif.c, ends | ||
39 | * up in wusb_devconnect_ack(), which creates a wusb_dev structure in | ||
40 | * wusbhc->port[port_number].wusb_dev), assigns an unauth address | ||
41 | * to the device (this means from 0x80 to 0xfe) and sends, in the MMC | ||
42 | * a Connect Ack Information Element (ConnAck IE). | ||
43 | * | ||
44 | * So now the device now has a WUSB address. From now on, we use | ||
45 | * that to talk to it in the RPipes. | ||
46 | * | ||
47 | * ASSUMPTIONS: | ||
48 | * | ||
 * - We use as device address the port number where it is
50 | * connected (port 0 doesn't exist). For unauth, it is 128 + that. | ||
51 | * | ||
52 | * ROADMAP: | ||
53 | * | ||
54 | * This file contains the logic for doing that--entry points: | ||
55 | * | ||
56 | * wusb_devconnect_ack() Ack a device until _acked() called. | ||
57 | * Called by notif.c:wusb_handle_dn_connect() | ||
58 | * when a DN_Connect is received. | ||
59 | * | ||
60 | * wusbhc_devconnect_auth() Called by rh.c:wusbhc_rh_port_reset() when | ||
61 | * doing the device connect sequence. | ||
62 | * | ||
63 | * wusb_devconnect_acked() Ack done, release resources. | ||
64 | * | ||
65 | * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() | ||
66 | * for processing a DN_Alive pong from a device. | ||
67 | * | ||
68 | * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to | ||
 * process a disconnect request from a
70 | * device. | ||
71 | * | ||
72 | * wusb_dev_reset() Called by rh.c:wusbhc_rh_port_reset() when | ||
73 | * resetting a device. | ||
74 | * | ||
75 | * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when | ||
76 | * disabling a port. | ||
77 | * | ||
78 | * wusb_devconnect_create() Called when creating the host by | ||
79 | * lc.c:wusbhc_create(). | ||
80 | * | ||
81 | * wusb_devconnect_destroy() Cleanup called removing the host. Called | ||
82 | * by lc.c:wusbhc_destroy(). | ||
83 | * | ||
84 | * Each Wireless USB host maintains a list of DN_Connect requests | ||
85 | * (actually we maintain a list of pending Connect Acks, the | ||
86 | * wusbhc->ca_list). | ||
87 | * | ||
88 | * LIFE CYCLE OF port->wusb_dev | ||
89 | * | ||
90 | * Before the @wusbhc structure put()s the reference it owns for | ||
91 | * port->wusb_dev [and clean the wusb_dev pointer], it needs to | ||
92 | * lock @wusbhc->mutex. | ||
93 | */ | ||
94 | |||
95 | #include <linux/jiffies.h> | ||
96 | #include <linux/ctype.h> | ||
97 | #include <linux/workqueue.h> | ||
98 | #include "wusbhc.h" | ||
99 | |||
100 | #undef D_LOCAL | ||
101 | #define D_LOCAL 1 | ||
102 | #include <linux/uwb/debug.h> | ||
103 | |||
104 | static void wusbhc_devconnect_acked_work(struct work_struct *work); | ||
105 | |||
106 | static void wusb_dev_free(struct wusb_dev *wusb_dev) | ||
107 | { | ||
108 | if (wusb_dev) { | ||
109 | kfree(wusb_dev->set_gtk_req); | ||
110 | usb_free_urb(wusb_dev->set_gtk_urb); | ||
111 | kfree(wusb_dev); | ||
112 | } | ||
113 | } | ||
114 | |||
115 | static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) | ||
116 | { | ||
117 | struct wusb_dev *wusb_dev; | ||
118 | struct urb *urb; | ||
119 | struct usb_ctrlrequest *req; | ||
120 | |||
121 | wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); | ||
122 | if (wusb_dev == NULL) | ||
123 | goto err; | ||
124 | |||
125 | wusb_dev->wusbhc = wusbhc; | ||
126 | |||
127 | INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); | ||
128 | |||
129 | urb = usb_alloc_urb(0, GFP_KERNEL); | ||
130 | if (urb == NULL) | ||
131 | goto err; | ||
132 | |||
133 | req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); | ||
134 | if (req == NULL) | ||
135 | goto err; | ||
136 | |||
137 | req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; | ||
138 | req->bRequest = USB_REQ_SET_DESCRIPTOR; | ||
139 | req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index); | ||
140 | req->wIndex = 0; | ||
141 | req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength); | ||
142 | |||
143 | wusb_dev->set_gtk_urb = urb; | ||
144 | wusb_dev->set_gtk_req = req; | ||
145 | |||
146 | return wusb_dev; | ||
147 | err: | ||
148 | wusb_dev_free(wusb_dev); | ||
149 | return NULL; | ||
150 | } | ||
151 | |||
152 | |||
153 | /* | ||
154 | * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE | ||
155 | * properly so that it can be added to the MMC. | ||
156 | * | ||
157 | * We just get the @wusbhc->ca_list and fill out the first four ones or | ||
158 | * less (per-spec WUSB1.0[7.5, before T7-38). If the ConnectAck WUSB | ||
159 | * IE is not allocated, we alloc it. | ||
160 | * | ||
161 | * @wusbhc->mutex must be taken | ||
162 | */ | ||
163 | static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc) | ||
164 | { | ||
165 | unsigned cnt; | ||
166 | struct wusb_dev *dev_itr; | ||
167 | struct wuie_connect_ack *cack_ie; | ||
168 | |||
169 | cack_ie = &wusbhc->cack_ie; | ||
170 | cnt = 0; | ||
171 | list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) { | ||
172 | cack_ie->blk[cnt].CDID = dev_itr->cdid; | ||
173 | cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr; | ||
174 | if (++cnt >= WUIE_ELT_MAX) | ||
175 | break; | ||
176 | } | ||
177 | cack_ie->hdr.bLength = sizeof(cack_ie->hdr) | ||
178 | + cnt * sizeof(cack_ie->blk[0]); | ||
179 | } | ||
180 | |||
/*
 * Register a new device that wants to connect
 *
 * A new device wants to connect, so we add it to the Connect-Ack
 * list. We give it an address in the unauthorized range (bit 8 set);
 * user space will have to drive authorization further on.
 *
 * @dnc:      DN_Connect notification that triggered this; its CDID
 *            identifies the device.
 * @pr_cdid:  printable (hex) form of the CDID, for log messages.
 * @port_idx: zero-based fake-port index; the unauth device address is
 *            derived from it (port_idx + 2, with the unauth bit set).
 *
 * Returns the (new or already-registered) wusb_dev, or NULL on error.
 *
 * @wusbhc->mutex must be taken
 */
static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc,
					struct wusb_dn_connect *dnc,
					const char *pr_cdid, u8 port_idx)
{
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev;
	int new_connection = wusb_dn_connect_new_connection(dnc);
	u8 dev_addr;
	int result;

	/* Is it registered already? If so, reuse that entry. */
	list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node)
		if (!memcmp(&wusb_dev->cdid, &dnc->CDID,
			    sizeof(wusb_dev->cdid)))
			return wusb_dev;
	/* We don't have it, create an entry, register it */
	wusb_dev = wusb_dev_alloc(wusbhc);
	if (wusb_dev == NULL)
		return NULL;
	wusb_dev_init(wusb_dev);
	wusb_dev->cdid = dnc->CDID;
	wusb_dev->port_idx = port_idx;

	/*
	 * Devices are always available within the cluster reservation
	 * and since the hardware will take the intersection of the
	 * per-device availability and the cluster reservation, the
	 * per-device availability can simply be set to always
	 * available.
	 */
	bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS);

	/* FIXME: handle reconnects instead of assuming connects are
	   always new. */
	if (1 && new_connection == 0)
		new_connection = 1;
	if (new_connection) {
		dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;

		dev_info(dev, "Connecting new WUSB device to address %u, "
			"port %u\n", dev_addr, port_idx);

		/* NOTE(review): on failure the wusb_dev allocated above
		 * is not released before returning -- looks like a
		 * leak; confirm whether wusb_dev_put()/wusb_dev_free()
		 * belongs here. */
		result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr);
		if (result < 0)
			return NULL;
	}
	/* entry_ts is the keep-alive baseline used by __wusbhc_keep_alive() */
	wusb_dev->entry_ts = jiffies;
	list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list);
	wusbhc->cack_count++;
	wusbhc_fill_cack_ie(wusbhc);
	return wusb_dev;
}
245 | |||
/*
 * Remove a Connect-Ack context entry from the HCs view
 *
 * Takes the device off @wusbhc->cack_list and rebuilds the
 * Connect-Ack IE to match the shortened list.
 *
 * @wusbhc->mutex must be taken
 */
static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
	struct device *dev = wusbhc->dev;
	d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
	list_del_init(&wusb_dev->cack_node);
	wusbhc->cack_count--;
	wusbhc_fill_cack_ie(wusbhc);
	d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
}
260 | |||
261 | /* | ||
262 | * @wusbhc->mutex must be taken */ | ||
263 | static | ||
264 | void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | ||
265 | { | ||
266 | struct device *dev = wusbhc->dev; | ||
267 | d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev); | ||
268 | wusbhc_cack_rm(wusbhc, wusb_dev); | ||
269 | if (wusbhc->cack_count) | ||
270 | wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); | ||
271 | else | ||
272 | wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); | ||
273 | d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev); | ||
274 | } | ||
275 | |||
/*
 * Work item wrapper: grabs @wusbhc->mutex and completes the
 * connect-ack processing via wusbhc_devconnect_acked().
 */
static void wusbhc_devconnect_acked_work(struct work_struct *work)
{
	struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev,
						 devconnect_acked_work);
	struct wusbhc *wusbhc = wusb_dev->wusbhc;

	mutex_lock(&wusbhc->mutex);
	wusbhc_devconnect_acked(wusbhc, wusb_dev);
	mutex_unlock(&wusbhc->mutex);
}
286 | |||
287 | /* | ||
288 | * Ack a device for connection | ||
289 | * | ||
290 | * FIXME: docs | ||
291 | * | ||
292 | * @pr_cdid: Printable CDID...hex Use @dnc->cdid for the real deal. | ||
293 | * | ||
294 | * So we get the connect ack IE (may have been allocated already), | ||
295 | * find an empty connect block, an empty virtual port, create an | ||
296 | * address with it (see below), make it an unauth addr [bit 7 set] and | ||
297 | * set the MMC. | ||
298 | * | ||
299 | * Addresses: because WUSB hosts have no downstream hubs, we can do a | ||
300 | * 1:1 mapping between 'port number' and device | ||
301 | * address. This simplifies many things, as during this | ||
 * initial connect phase the USB stack has no knowledge of
 * the device and hasn't assigned an address yet--we know
 * USB's choose_address() will use the same heuristics we
305 | * use here, so we can assume which address will be assigned. | ||
306 | * | ||
307 | * USB stack always assigns address 1 to the root hub, so | ||
308 | * to the port number we add 2 (thus virtual port #0 is | ||
309 | * addr #2). | ||
310 | * | ||
311 | * @wusbhc shall be referenced | ||
312 | */ | ||
313 | static | ||
314 | void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, | ||
315 | const char *pr_cdid) | ||
316 | { | ||
317 | int result; | ||
318 | struct device *dev = wusbhc->dev; | ||
319 | struct wusb_dev *wusb_dev; | ||
320 | struct wusb_port *port; | ||
321 | unsigned idx, devnum; | ||
322 | |||
323 | d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid); | ||
324 | mutex_lock(&wusbhc->mutex); | ||
325 | |||
326 | /* Check we are not handling it already */ | ||
327 | for (idx = 0; idx < wusbhc->ports_max; idx++) { | ||
328 | port = wusb_port_by_idx(wusbhc, idx); | ||
329 | if (port->wusb_dev | ||
330 | && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0) | ||
331 | goto error_unlock; | ||
332 | } | ||
333 | /* Look up those fake ports we have for a free one */ | ||
334 | for (idx = 0; idx < wusbhc->ports_max; idx++) { | ||
335 | port = wusb_port_by_idx(wusbhc, idx); | ||
336 | if ((port->status & USB_PORT_STAT_POWER) | ||
337 | && !(port->status & USB_PORT_STAT_CONNECTION)) | ||
338 | break; | ||
339 | } | ||
340 | if (idx >= wusbhc->ports_max) { | ||
341 | dev_err(dev, "Host controller can't connect more devices " | ||
342 | "(%u already connected); device %s rejected\n", | ||
343 | wusbhc->ports_max, pr_cdid); | ||
344 | /* NOTE: we could send a WUIE_Disconnect here, but we haven't | ||
345 | * event acked, so the device will eventually timeout the | ||
346 | * connection, right? */ | ||
347 | goto error_unlock; | ||
348 | } | ||
349 | |||
350 | devnum = idx + 2; | ||
351 | |||
352 | /* Make sure we are using no crypto on that "virtual port" */ | ||
353 | wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0); | ||
354 | |||
355 | /* Grab a filled in Connect-Ack context, fill out the | ||
356 | * Connect-Ack Wireless USB IE, set the MMC */ | ||
357 | wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx); | ||
358 | if (wusb_dev == NULL) | ||
359 | goto error_unlock; | ||
360 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); | ||
361 | if (result < 0) | ||
362 | goto error_unlock; | ||
363 | /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do | ||
364 | * three for a good measure */ | ||
365 | msleep(3); | ||
366 | port->wusb_dev = wusb_dev; | ||
367 | port->status |= USB_PORT_STAT_CONNECTION; | ||
368 | port->change |= USB_PORT_STAT_C_CONNECTION; | ||
369 | port->reset_count = 0; | ||
370 | /* Now the port status changed to connected; khubd will | ||
371 | * pick the change up and try to reset the port to bring it to | ||
372 | * the enabled state--so this process returns up to the stack | ||
373 | * and it calls back into wusbhc_rh_port_reset() who will call | ||
374 | * devconnect_auth(). | ||
375 | */ | ||
376 | error_unlock: | ||
377 | mutex_unlock(&wusbhc->mutex); | ||
378 | d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid); | ||
379 | return; | ||
380 | |||
381 | } | ||
382 | |||
/*
 * Disconnect a Wireless USB device from its fake port
 *
 * Marks the port as disconnected so that khubd can pick up the change
 * and drops our knowledge about the device.
 *
 * Assumes there is a device connected
 *
 * @port: the fake port the device sits on (not a port index/number).
 *
 * NOTE: @wusbhc->mutex is locked
 *
 * WARNING: From here it is not very safe to access anything hanging off
 *	    wusb_dev
 */
static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
				    struct wusb_port *port)
{
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev = port->wusb_dev;

	d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port);
	/* Clear every "usable" status bit; the connection-change flag is
	 * what khubd reacts to. */
	port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE
			  | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET
			  | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
	port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE;
	if (wusb_dev) {
		if (!list_empty(&wusb_dev->cack_node))
			list_del_init(&wusb_dev->cack_node);
		/* For the one in cack_add() */
		wusb_dev_put(wusb_dev);
	}
	port->wusb_dev = NULL;
	/* don't reset the reset_count to zero or wusbhc_rh_port_reset will get
	 * confused! We only reset to zero when we connect a new device.
	 */

	/* After a device disconnects, change the GTK (see [WUSB]
	 * section 6.2.11.2). */
	wusbhc_gtk_rekey(wusbhc);

	d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port);
	/* The Wireless USB part has forgotten about the device already; now
	 * khubd's timer will pick up the disconnection and remove the USB
	 * device from the system
	 */
}
430 | |||
/*
 * Authenticate a device into the WUSB Cluster
 *
 * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when
 * asking for a reset on a port that is not enabled (ie: first connect
 * on the port).
 *
 * Performs the 4way handshake to allow the device to communicate w/ the
 * WUSB Cluster securely; once done, issue a request to the device for
 * it to change to address 0.
 *
 * NOTE(review): the body below only flips the port status bits to
 * "reset done, enabled"; the 4-way handshake itself appears to be
 * driven elsewhere (user space via the wusb_ck sysfs attribute) --
 * confirm before relying on the description above.
 *
 * This mimics the reset step of Wired USB that once resetting a
 * device, leaves the port in enabled state and the dev with the
 * default address (0).
 *
 * WUSB1.0[7.1.2]
 *
 * @port_idx: port where the change happened--This is the index into
 *            the wusbhc port array, not the USB port number.
 */
int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx)
{
	struct device *dev = wusbhc->dev;
	struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);

	d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
	port->status &= ~USB_PORT_STAT_RESET;
	port->status |= USB_PORT_STAT_ENABLE;
	port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
	d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx);
	return 0;
}
463 | |||
464 | /* | ||
465 | * Refresh the list of keep alives to emit in the MMC | ||
466 | * | ||
467 | * Some devices don't respond to keep alives unless they've been | ||
468 | * authenticated, so skip unauthenticated devices. | ||
469 | * | ||
470 | * We only publish the first four devices that have a coming timeout | ||
471 | * condition. Then when we are done processing those, we go for the | ||
472 | * next ones. We ignore the ones that have timed out already (they'll | ||
473 | * be purged). | ||
474 | * | ||
475 | * This might cause the first devices to timeout the last devices in | ||
476 | * the port array...FIXME: come up with a better algorithm? | ||
477 | * | ||
478 | * Note we can't do much about MMC's ops errors; we hope next refresh | ||
479 | * will kind of handle it. | ||
480 | * | ||
481 | * NOTE: @wusbhc->mutex is locked | ||
482 | */ | ||
483 | static void __wusbhc_keep_alive(struct wusbhc *wusbhc) | ||
484 | { | ||
485 | struct device *dev = wusbhc->dev; | ||
486 | unsigned cnt; | ||
487 | struct wusb_dev *wusb_dev; | ||
488 | struct wusb_port *wusb_port; | ||
489 | struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie; | ||
490 | unsigned keep_alives, old_keep_alives; | ||
491 | |||
492 | old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); | ||
493 | keep_alives = 0; | ||
494 | for (cnt = 0; | ||
495 | keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max; | ||
496 | cnt++) { | ||
497 | unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); | ||
498 | |||
499 | wusb_port = wusb_port_by_idx(wusbhc, cnt); | ||
500 | wusb_dev = wusb_port->wusb_dev; | ||
501 | |||
502 | if (wusb_dev == NULL) | ||
503 | continue; | ||
504 | if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) | ||
505 | continue; | ||
506 | |||
507 | if (time_after(jiffies, wusb_dev->entry_ts + tt)) { | ||
508 | dev_err(dev, "KEEPALIVE: device %u timed out\n", | ||
509 | wusb_dev->addr); | ||
510 | __wusbhc_dev_disconnect(wusbhc, wusb_port); | ||
511 | } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) { | ||
512 | /* Approaching timeout cut out, need to refresh */ | ||
513 | ie->bDeviceAddress[keep_alives++] = wusb_dev->addr; | ||
514 | } | ||
515 | } | ||
516 | if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */ | ||
517 | ie->bDeviceAddress[keep_alives++] = 0x7f; | ||
518 | ie->hdr.bLength = sizeof(ie->hdr) + | ||
519 | keep_alives*sizeof(ie->bDeviceAddress[0]); | ||
520 | if (keep_alives > 0) | ||
521 | wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr); | ||
522 | else if (old_keep_alives != 0) | ||
523 | wusbhc_mmcie_rm(wusbhc, &ie->hdr); | ||
524 | } | ||
525 | |||
526 | /* | ||
527 | * Do a run through all devices checking for timeouts | ||
528 | */ | ||
529 | static void wusbhc_keep_alive_run(struct work_struct *ws) | ||
530 | { | ||
531 | struct delayed_work *dw = | ||
532 | container_of(ws, struct delayed_work, work); | ||
533 | struct wusbhc *wusbhc = | ||
534 | container_of(dw, struct wusbhc, keep_alive_timer); | ||
535 | |||
536 | d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc); | ||
537 | if (wusbhc->active) { | ||
538 | mutex_lock(&wusbhc->mutex); | ||
539 | __wusbhc_keep_alive(wusbhc); | ||
540 | mutex_unlock(&wusbhc->mutex); | ||
541 | queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, | ||
542 | (wusbhc->trust_timeout * CONFIG_HZ)/1000/2); | ||
543 | } | ||
544 | d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); | ||
545 | return; | ||
546 | } | ||
547 | |||
548 | /* | ||
549 | * Find the wusb_dev from its device address. | ||
550 | * | ||
551 | * The device can be found directly from the address (see | ||
552 | * wusb_cack_add() for where the device address is set to port_idx | ||
553 | * +2), except when the address is zero. | ||
554 | */ | ||
555 | static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr) | ||
556 | { | ||
557 | int p; | ||
558 | |||
559 | if (addr == 0xff) /* unconnected */ | ||
560 | return NULL; | ||
561 | |||
562 | if (addr > 0) { | ||
563 | int port = (addr & ~0x80) - 2; | ||
564 | if (port < 0 || port >= wusbhc->ports_max) | ||
565 | return NULL; | ||
566 | return wusb_port_by_idx(wusbhc, port)->wusb_dev; | ||
567 | } | ||
568 | |||
569 | /* Look for the device with address 0. */ | ||
570 | for (p = 0; p < wusbhc->ports_max; p++) { | ||
571 | struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev; | ||
572 | if (wusb_dev && wusb_dev->addr == addr) | ||
573 | return wusb_dev; | ||
574 | } | ||
575 | return NULL; | ||
576 | } | ||
577 | |||
/*
 * Handle a DN_Alive notification (WUSB1.0[7.6.1])
 *
 * This just updates the device activity timestamp and then refreshes
 * the keep alive IE (so a device that just answered drops out of the
 * keep-alive list until it next approaches its timeout).
 *
 * @wusb_dev: device that sent the DN_Alive.
 *
 * @wusbhc shall be referenced and unlocked
 */
static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
	struct device *dev = wusbhc->dev;

	d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr);

	mutex_lock(&wusbhc->mutex);
	/* entry_ts is the baseline __wusbhc_keep_alive() measures against */
	wusb_dev->entry_ts = jiffies;
	__wusbhc_keep_alive(wusbhc);
	mutex_unlock(&wusbhc->mutex);
}
597 | |||
598 | /* | ||
599 | * Handle a DN_Connect notification (WUSB1.0[7.6.1]) | ||
600 | * | ||
601 | * @wusbhc | ||
602 | * @pkt_hdr | ||
603 | * @size: Size of the buffer where the notification resides; if the | ||
604 | * notification data suggests there should be more data than | ||
605 | * available, an error will be signaled and the whole buffer | ||
606 | * consumed. | ||
607 | * | ||
608 | * @wusbhc->mutex shall be held | ||
609 | */ | ||
610 | static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc, | ||
611 | struct wusb_dn_hdr *dn_hdr, | ||
612 | size_t size) | ||
613 | { | ||
614 | struct device *dev = wusbhc->dev; | ||
615 | struct wusb_dn_connect *dnc; | ||
616 | char pr_cdid[WUSB_CKHDID_STRSIZE]; | ||
617 | static const char *beacon_behaviour[] = { | ||
618 | "reserved", | ||
619 | "self-beacon", | ||
620 | "directed-beacon", | ||
621 | "no-beacon" | ||
622 | }; | ||
623 | |||
624 | d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size); | ||
625 | if (size < sizeof(*dnc)) { | ||
626 | dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n", | ||
627 | size, sizeof(*dnc)); | ||
628 | goto out; | ||
629 | } | ||
630 | |||
631 | dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr); | ||
632 | ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID); | ||
633 | dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n", | ||
634 | pr_cdid, | ||
635 | wusb_dn_connect_prev_dev_addr(dnc), | ||
636 | beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)], | ||
637 | wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect"); | ||
638 | /* ACK the connect */ | ||
639 | wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid); | ||
640 | out: | ||
641 | d_fnend(3, dev, "(%p, %p, %zu) = void\n", | ||
642 | wusbhc, dn_hdr, size); | ||
643 | return; | ||
644 | } | ||
645 | |||
/*
 * Handle a DN_Disconnect notification (WUSB1.0[7.6.1])
 *
 * Device is going down -- do the disconnect.
 *
 * @wusb_dev: device that requested the disconnect; its fake port is
 *            looked up from @wusb_dev->port_idx.
 *
 * @wusbhc shall be referenced and unlocked
 */
static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
	struct device *dev = wusbhc->dev;

	dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);

	mutex_lock(&wusbhc->mutex);
	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx));
	mutex_unlock(&wusbhc->mutex);
}
663 | |||
664 | /* | ||
665 | * Reset a WUSB device on a HWA | ||
666 | * | ||
667 | * @wusbhc | ||
668 | * @port_idx Index of the port where the device is | ||
669 | * | ||
670 | * In Wireless USB, a reset is more or less equivalent to a full | ||
671 | * disconnect; so we just do a full disconnect and send the device a | ||
672 | * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs). | ||
673 | * | ||
674 | * @wusbhc should be refcounted and unlocked | ||
675 | */ | ||
676 | int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx) | ||
677 | { | ||
678 | int result; | ||
679 | struct device *dev = wusbhc->dev; | ||
680 | struct wusb_dev *wusb_dev; | ||
681 | struct wuie_reset *ie; | ||
682 | |||
683 | d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); | ||
684 | mutex_lock(&wusbhc->mutex); | ||
685 | result = 0; | ||
686 | wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; | ||
687 | if (wusb_dev == NULL) { | ||
688 | /* reset no device? ignore */ | ||
689 | dev_dbg(dev, "RESET: no device at port %u, ignoring\n", | ||
690 | port_idx); | ||
691 | goto error_unlock; | ||
692 | } | ||
693 | result = -ENOMEM; | ||
694 | ie = kzalloc(sizeof(*ie), GFP_KERNEL); | ||
695 | if (ie == NULL) | ||
696 | goto error_unlock; | ||
697 | ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID); | ||
698 | ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE; | ||
699 | ie->CDID = wusb_dev->cdid; | ||
700 | result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr); | ||
701 | if (result < 0) { | ||
702 | dev_err(dev, "RESET: cant's set MMC: %d\n", result); | ||
703 | goto error_kfree; | ||
704 | } | ||
705 | __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); | ||
706 | |||
707 | /* 120ms, hopefully 6 MMCs (FIXME) */ | ||
708 | msleep(120); | ||
709 | wusbhc_mmcie_rm(wusbhc, &ie->hdr); | ||
710 | error_kfree: | ||
711 | kfree(ie); | ||
712 | error_unlock: | ||
713 | mutex_unlock(&wusbhc->mutex); | ||
714 | d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result); | ||
715 | return result; | ||
716 | } | ||
717 | |||
718 | /* | ||
719 | * Handle a Device Notification coming from a host | ||
720 | * | ||
721 | * The Device Notification comes from a host (HWA, DWA or WHCI) | ||
722 | * wrapped in a set of headers. Somebody else has peeled off those | ||
723 | * headers for us and we just get one Device Notifications. | ||
724 | * | ||
725 | * Invalid DNs (e.g., too short) are discarded. | ||
726 | * | ||
727 | * @wusbhc shall be referenced | ||
728 | * | ||
729 | * FIXMES: | ||
730 | * - implement priorities as in WUSB1.0[Table 7-55]? | ||
731 | */ | ||
void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
		      struct wusb_dn_hdr *dn_hdr, size_t size)
{
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev;

	d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr);

	/* A DN must carry at least a full header; anything shorter is junk. */
	if (size < sizeof(struct wusb_dn_hdr)) {
		dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
			(int)size, (int)sizeof(struct wusb_dn_hdr));
		goto out;
	}

	/*
	 * Only DN_Connect may come from a device we don't know yet
	 * (it is the connect request itself); every other DN type from
	 * an unknown source address is dropped.
	 */
	wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
	if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
		dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
			dn_hdr->bType, srcaddr);
		goto out;
	}

	/* Dispatch per WUSB1.0[7.6] notification type. */
	switch (dn_hdr->bType) {
	case WUSB_DN_CONNECT:
		wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
		break;
	case WUSB_DN_ALIVE:
		/* wusb_dev is non-NULL here (checked above) */
		wusbhc_handle_dn_alive(wusbhc, wusb_dev);
		break;
	case WUSB_DN_DISCONNECT:
		wusbhc_handle_dn_disconnect(wusbhc, wusb_dev);
		break;
	case WUSB_DN_MASAVAILCHANGED:
	case WUSB_DN_RWAKE:
	case WUSB_DN_SLEEP:
		/* FIXME: handle these DNs. */
		break;
	case WUSB_DN_EPRDY:
		/* The hardware handles these. */
		break;
	default:
		dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
			 dn_hdr->bType, (int)size, srcaddr);
	}
out:
	d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr);
	return;
}
EXPORT_SYMBOL_GPL(wusbhc_handle_dn);
780 | |||
781 | /* | ||
782 | * Disconnect a WUSB device from the cluster | ||
783 | * | ||
784 | * @wusbhc | ||
785 | * @port Fake port where the device is (wusbhc index, not USB port number). | ||
786 | * | ||
787 | * In Wireless USB, a disconnect is basically telling the device he is | ||
788 | * being disconnected and forgetting about him. | ||
789 | * | ||
790 | * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100 | ||
791 | * ms and then keep going. | ||
792 | * | ||
793 | * We don't do much in case of error; we always pretend we disabled | ||
794 | * the port and disconnected the device. If physically the request | ||
795 | * didn't get there (many things can fail in the way there), the stack | ||
796 | * will reject the device's communication attempts. | ||
797 | * | ||
798 | * @wusbhc should be refcounted and locked | ||
799 | */ | ||
void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx)
{
	int result;
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev;
	struct wuie_disconnect *ie;

	d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
	result = 0;
	wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
	if (wusb_dev == NULL) {
		/* reset no device? ignore */
		dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
			port_idx);
		goto error;
	}
	/* Drop the device first; the IE below only tells it to go away. */
	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));

	result = -ENOMEM;
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (ie == NULL)
		goto error;
	ie->hdr.bLength = sizeof(*ie);
	ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
	ie->bDeviceAddress = wusb_dev->addr;
	result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
	if (result < 0) {
		/* Best effort: see function comment, errors are ignored. */
		dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
		goto error_kfree;
	}

	/* 100ms, hopefully a few MMCs (the header comment says 100 ms) */
	msleep(100);
	wusbhc_mmcie_rm(wusbhc, &ie->hdr);
error_kfree:
	kfree(ie);
error:
	d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
	return;
}
840 | |||
841 | static void wusb_cap_descr_printf(const unsigned level, struct device *dev, | ||
842 | const struct usb_wireless_cap_descriptor *wcd) | ||
843 | { | ||
844 | d_printf(level, dev, | ||
845 | "WUSB Capability Descriptor\n" | ||
846 | " bDevCapabilityType 0x%02x\n" | ||
847 | " bmAttributes 0x%02x\n" | ||
848 | " wPhyRates 0x%04x\n" | ||
849 | " bmTFITXPowerInfo 0x%02x\n" | ||
850 | " bmFFITXPowerInfo 0x%02x\n" | ||
851 | " bmBandGroup 0x%04x\n" | ||
852 | " bReserved 0x%02x\n", | ||
853 | wcd->bDevCapabilityType, | ||
854 | wcd->bmAttributes, | ||
855 | le16_to_cpu(wcd->wPHYRates), | ||
856 | wcd->bmTFITXPowerInfo, | ||
857 | wcd->bmFFITXPowerInfo, | ||
858 | wcd->bmBandGroup, | ||
859 | wcd->bReserved); | ||
860 | } | ||
861 | |||
862 | /* | ||
863 | * Walk over the BOS descriptor, verify and grok it | ||
864 | * | ||
865 | * @usb_dev: referenced | ||
866 | * @wusb_dev: referenced and unlocked | ||
867 | * | ||
868 | * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a | ||
869 | * "flexible" way to wrap all kinds of descriptors inside an standard | ||
870 | * descriptor (wonder why they didn't use normal descriptors, | ||
871 | * btw). Not like they lack code. | ||
872 | * | ||
873 | * At the end we go to look for the WUSB Device Capabilities | ||
874 | * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor | ||
875 | * that is part of the BOS descriptor set. That tells us what does the | ||
876 | * device support (dual role, beacon type, UWB PHY rates). | ||
877 | */ | ||
static int wusb_dev_bos_grok(struct usb_device *usb_dev,
			     struct wusb_dev *wusb_dev,
			     struct usb_bos_descriptor *bos, size_t desc_size)
{
	ssize_t result;
	struct device *dev = &usb_dev->dev;
	void *itr, *top;	/* cursor / one-past-end of the cap area */

	/* Walk over BOS capabilities, verify them */
	itr = (void *)bos + sizeof(*bos);
	top = itr + desc_size - sizeof(*bos);
	while (itr < top) {
		struct usb_dev_cap_header *cap_hdr = itr;
		size_t cap_size;
		u8 cap_type;
		/*
		 * NOTE(review): top - itr is ptrdiff_t (signed) compared
		 * against size_t; safe only because itr < top holds here.
		 */
		if (top - itr < sizeof(*cap_hdr)) {
			dev_err(dev, "Device BUG? premature end of BOS header "
				"data [offset 0x%02x]: only %zu bytes left\n",
				(int)(itr - (void *)bos), top - itr);
			result = -ENOSPC;
			goto error_bad_cap;
		}
		cap_size = cap_hdr->bLength;
		cap_type = cap_hdr->bDevCapabilityType;
		d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n",
			 cap_type, cap_size);
		/* A zero-length capability would loop forever; stop here. */
		if (cap_size == 0)
			break;
		if (cap_size > top - itr) {
			dev_err(dev, "Device BUG? premature end of BOS data "
				"[offset 0x%02x cap %02x %zu bytes]: "
				"only %zu bytes left\n",
				(int)(itr - (void *)bos),
				cap_type, cap_size, top - itr);
			result = -EBADF;
			goto error_bad_cap;
		}
		d_dump(3, dev, itr, cap_size);
		switch (cap_type) {
		case USB_CAP_TYPE_WIRELESS_USB:
			/*
			 * Remember where the WUSB capability lives; it
			 * points INTO the bos buffer, so it is only valid
			 * while wusb_dev->bos is (see wusb_dev_bos_rm()).
			 */
			if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
				dev_err(dev, "Device BUG? WUSB Capability "
					"descriptor is %zu bytes vs %zu "
					"needed\n", cap_size,
					sizeof(*wusb_dev->wusb_cap_descr));
			else {
				wusb_dev->wusb_cap_descr = itr;
				wusb_cap_descr_printf(3, dev, itr);
			}
			break;
		default:
			/* Unknown caps are logged but not fatal. */
			dev_err(dev, "BUG? Unknown BOS capability 0x%02x "
				"(%zu bytes) at offset 0x%02x\n", cap_type,
				cap_size, (int)(itr - (void *)bos));
		}
		itr += cap_size;
	}
	result = 0;
error_bad_cap:
	return result;
}
939 | |||
940 | /* | ||
941 | * Add information from the BOS descriptors to the device | ||
942 | * | ||
943 | * @usb_dev: referenced | ||
944 | * @wusb_dev: referenced and unlocked | ||
945 | * | ||
946 | * So what we do is we alloc a space for the BOS descriptor of 32 | ||
947 | * bytes; read the first four bytes which include the wTotalLength | ||
948 | * field (WUSB1.0[T7-26]) and if it fits in those 32 bytes, read the | ||
949 | * whole thing. If not we realloc to that size. | ||
950 | * | ||
951 | * Then we call the groking function, that will fill up | ||
952 | * wusb_dev->wusb_cap_descr, which is what we'll need later on. | ||
953 | */ | ||
static int wusb_dev_bos_add(struct usb_device *usb_dev,
			    struct wusb_dev *wusb_dev)
{
	ssize_t result;
	struct device *dev = &usb_dev->dev;
	struct usb_bos_descriptor *bos;
	size_t alloc_size = 32, desc_size = 4;	/* 4 = up to wTotalLength */

	bos = kmalloc(alloc_size, GFP_KERNEL);
	if (bos == NULL)
		return -ENOMEM;
	/* First read just enough to learn the full length (wTotalLength). */
	result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
	if (result < 4) {
		dev_err(dev, "Can't get BOS descriptor or too short: %zd\n",
			result);
		goto error_get_descriptor;
	}
	desc_size = le16_to_cpu(bos->wTotalLength);
	/* Grow the buffer if the full descriptor set doesn't fit. */
	if (desc_size >= alloc_size) {
		kfree(bos);
		alloc_size = desc_size;
		bos = kmalloc(alloc_size, GFP_KERNEL);
		if (bos == NULL)
			return -ENOMEM;
	}
	/* Second read: the whole BOS descriptor set. */
	result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
	if (result < 0 || result != desc_size) {
		dev_err(dev, "Can't get BOS descriptor or too short (need "
			"%zu bytes): %zd\n", desc_size, result);
		goto error_get_descriptor;
	}
	/* Sanity: device must not change its mind about the total length. */
	if (result < sizeof(*bos)
	    || le16_to_cpu(bos->wTotalLength) != desc_size) {
		dev_err(dev, "Can't get BOS descriptor or too short (need "
			"%zu bytes): %zd\n", desc_size, result);
		goto error_get_descriptor;
	}
	d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n",
		 result, bos->bNumDeviceCaps);
	d_dump(2, dev, bos, result);
	/* Parse it; on success wusb_dev->wusb_cap_descr points into bos. */
	result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
	if (result < 0)
		goto error_bad_bos;
	wusb_dev->bos = bos;	/* ownership passes to wusb_dev */
	return 0;

error_bad_bos:
error_get_descriptor:
	kfree(bos);
	wusb_dev->wusb_cap_descr = NULL;
	return result;
}
1006 | |||
1007 | static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev) | ||
1008 | { | ||
1009 | kfree(wusb_dev->bos); | ||
1010 | wusb_dev->wusb_cap_descr = NULL; | ||
1011 | }; | ||
1012 | |||
/*
 * Template WUSB capability descriptor: no beaconing, lowest PHY rate,
 * band group 1.  Presumably used as a fallback for devices whose BOS
 * set lacks a valid WUSB capability — confirm with callers outside
 * this chunk.
 */
static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
	.bLength = sizeof(wusb_cap_descr_default),
	.bDescriptorType = USB_DT_DEVICE_CAPABILITY,
	.bDevCapabilityType = USB_CAP_TYPE_WIRELESS_USB,

	.bmAttributes = USB_WIRELESS_BEACON_NONE,
	.wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53),
	.bmTFITXPowerInfo = 0,
	.bmFFITXPowerInfo = 0,
	.bmBandGroup = cpu_to_le16(0x0001),	/* WUSB1.0[7.4.1] bottom */
	.bReserved = 0
};
1025 | |||
1026 | /* | ||
1027 | * USB stack's device addition Notifier Callback | ||
1028 | * | ||
1029 | * Called from drivers/usb/core/hub.c when a new device is added; we | ||
1030 | * use this hook to perform certain WUSB specific setup work on the | ||
1031 | * new device. As well, it is the first time we can connect the | ||
1032 | * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a | ||
1033 | * reference that we'll drop. | ||
1034 | * | ||
1035 | * First we need to determine if the device is a WUSB device (else we | ||
1036 | * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE) | ||
1037 | * [FIXME: maybe we'd need something more definitive]. If so, we track | ||
1038 | * it's usb_busd and from there, the WUSB HC. | ||
1039 | * | ||
1040 | * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we | ||
1041 | * get the wusbhc for the device. | ||
1042 | * | ||
1043 | * We have a reference on @usb_dev (as we are called at the end of its | ||
1044 | * enumeration). | ||
1045 | * | ||
1046 | * NOTE: @usb_dev locked | ||
1047 | */ | ||
1048 | static void wusb_dev_add_ncb(struct usb_device *usb_dev) | ||
1049 | { | ||
1050 | int result = 0; | ||
1051 | struct wusb_dev *wusb_dev; | ||
1052 | struct wusbhc *wusbhc; | ||
1053 | struct device *dev = &usb_dev->dev; | ||
1054 | u8 port_idx; | ||
1055 | |||
1056 | if (usb_dev->wusb == 0 || usb_dev->devnum == 1) | ||
1057 | return; /* skip non wusb and wusb RHs */ | ||
1058 | |||
1059 | d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev); | ||
1060 | |||
1061 | wusbhc = wusbhc_get_by_usb_dev(usb_dev); | ||
1062 | if (wusbhc == NULL) | ||
1063 | goto error_nodev; | ||
1064 | mutex_lock(&wusbhc->mutex); | ||
1065 | wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev); | ||
1066 | port_idx = wusb_port_no_to_idx(usb_dev->portnum); | ||
1067 | mutex_unlock(&wusbhc->mutex); | ||
1068 | if (wusb_dev == NULL) | ||
1069 | goto error_nodev; | ||
1070 | wusb_dev->usb_dev = usb_get_dev(usb_dev); | ||
1071 | usb_dev->wusb_dev = wusb_dev_get(wusb_dev); | ||
1072 | result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev); | ||
1073 | if (result < 0) { | ||
1074 | dev_err(dev, "Cannot enable security: %d\n", result); | ||
1075 | goto error_sec_add; | ||
1076 | } | ||
1077 | /* Now query the device for it's BOS and attach it to wusb_dev */ | ||
1078 | result = wusb_dev_bos_add(usb_dev, wusb_dev); | ||
1079 | if (result < 0) { | ||
1080 | dev_err(dev, "Cannot get BOS descriptors: %d\n", result); | ||
1081 | goto error_bos_add; | ||
1082 | } | ||
1083 | result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev); | ||
1084 | if (result < 0) | ||
1085 | goto error_add_sysfs; | ||
1086 | out: | ||
1087 | wusb_dev_put(wusb_dev); | ||
1088 | wusbhc_put(wusbhc); | ||
1089 | error_nodev: | ||
1090 | d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev); | ||
1091 | return; | ||
1092 | |||
1093 | wusb_dev_sysfs_rm(wusb_dev); | ||
1094 | error_add_sysfs: | ||
1095 | wusb_dev_bos_rm(wusb_dev); | ||
1096 | error_bos_add: | ||
1097 | wusb_dev_sec_rm(wusb_dev); | ||
1098 | error_sec_add: | ||
1099 | mutex_lock(&wusbhc->mutex); | ||
1100 | __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); | ||
1101 | mutex_unlock(&wusbhc->mutex); | ||
1102 | goto out; | ||
1103 | } | ||
1104 | |||
1105 | /* | ||
1106 | * Undo all the steps done at connection by the notifier callback | ||
1107 | * | ||
1108 | * NOTE: @usb_dev locked | ||
1109 | */ | ||
static void wusb_dev_rm_ncb(struct usb_device *usb_dev)
{
	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;

	if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
		return;		/* skip non wusb and wusb RHs */

	/* Teardown in strict reverse order of wusb_dev_add_ncb(). */
	wusb_dev_sysfs_rm(wusb_dev);
	wusb_dev_bos_rm(wusb_dev);
	wusb_dev_sec_rm(wusb_dev);
	/* Break the cross-links before dropping the references taken
	 * at add time (wusb_dev_get()/usb_get_dev()). */
	wusb_dev->usb_dev = NULL;
	usb_dev->wusb_dev = NULL;
	wusb_dev_put(wusb_dev);
	usb_put_dev(usb_dev);
}
1125 | |||
1126 | /* | ||
1127 | * Handle notifications from the USB stack (notifier call back) | ||
1128 | * | ||
1129 | * This is called when the USB stack does a | ||
1130 | * usb_{bus,device}_{add,remove}() so we can do WUSB specific | ||
1131 | * handling. For the case of USB_DEVICE_{ADD,REMOVE}, it is | ||
1132 | * called with the usb_dev locked. | ||
1133 | */ | ||
1134 | int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, | ||
1135 | void *priv) | ||
1136 | { | ||
1137 | int result = NOTIFY_OK; | ||
1138 | |||
1139 | switch (val) { | ||
1140 | case USB_DEVICE_ADD: | ||
1141 | wusb_dev_add_ncb(priv); | ||
1142 | break; | ||
1143 | case USB_DEVICE_REMOVE: | ||
1144 | wusb_dev_rm_ncb(priv); | ||
1145 | break; | ||
1146 | case USB_BUS_ADD: | ||
1147 | /* ignore (for now) */ | ||
1148 | case USB_BUS_REMOVE: | ||
1149 | break; | ||
1150 | default: | ||
1151 | WARN_ON(1); | ||
1152 | result = NOTIFY_BAD; | ||
1153 | }; | ||
1154 | return result; | ||
1155 | } | ||
1156 | |||
1157 | /* | ||
1158 | * Return a referenced wusb_dev given a @wusbhc and @usb_dev | ||
1159 | */ | ||
1160 | struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc, | ||
1161 | struct usb_device *usb_dev) | ||
1162 | { | ||
1163 | struct wusb_dev *wusb_dev; | ||
1164 | u8 port_idx; | ||
1165 | |||
1166 | port_idx = wusb_port_no_to_idx(usb_dev->portnum); | ||
1167 | BUG_ON(port_idx > wusbhc->ports_max); | ||
1168 | wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; | ||
1169 | if (wusb_dev != NULL) /* ops, device is gone */ | ||
1170 | wusb_dev_get(wusb_dev); | ||
1171 | return wusb_dev; | ||
1172 | } | ||
1173 | EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev); | ||
1174 | |||
/* kref release callback: final teardown of a wusb_dev. */
void wusb_dev_destroy(struct kref *_wusb_dev)
{
	struct wusb_dev *wusb_dev
		= container_of(_wusb_dev, struct wusb_dev, refcnt);
	/* Unhook from the connect-ack list before freeing. */
	list_del_init(&wusb_dev->cack_node);
	wusb_dev_free(wusb_dev);
	/* NOTE: wusb_dev is freed; only its pointer VALUE is logged. */
	d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev);
}
EXPORT_SYMBOL_GPL(wusb_dev_destroy);
1184 | |||
1185 | /* | ||
1186 | * Create all the device connect handling infrastructure | ||
1187 | * | ||
1188 | * This is basically the device info array, Connect Acknowledgement | ||
1189 | * (cack) lists, keep-alive timers (and delayed work thread). | ||
1190 | */ | ||
1191 | int wusbhc_devconnect_create(struct wusbhc *wusbhc) | ||
1192 | { | ||
1193 | d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc); | ||
1194 | |||
1195 | wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE; | ||
1196 | wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr); | ||
1197 | INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run); | ||
1198 | |||
1199 | wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK; | ||
1200 | wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr); | ||
1201 | INIT_LIST_HEAD(&wusbhc->cack_list); | ||
1202 | |||
1203 | d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); | ||
1204 | return 0; | ||
1205 | } | ||
1206 | |||
1207 | /* | ||
1208 | * Release all resources taken by the devconnect stuff | ||
1209 | */ | ||
/*
 * Counterpart of wusbhc_devconnect_create(); currently nothing to
 * release (create only initializes embedded fields), so this is just
 * the debug trace.
 */
void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
{
	d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
	d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
}
1215 | |||
1216 | /* | ||
1217 | * wusbhc_devconnect_start - start accepting device connections | ||
1218 | * @wusbhc: the WUSB HC | ||
1219 | * | ||
1220 | * Sets the Host Info IE to accept all new connections. | ||
1221 | * | ||
1222 | * FIXME: This also enables the keep alives but this is not necessary | ||
1223 | * until there are connected and authenticated devices. | ||
1224 | */ | ||
1225 | int wusbhc_devconnect_start(struct wusbhc *wusbhc, | ||
1226 | const struct wusb_ckhdid *chid) | ||
1227 | { | ||
1228 | struct device *dev = wusbhc->dev; | ||
1229 | struct wuie_host_info *hi; | ||
1230 | int result; | ||
1231 | |||
1232 | hi = kzalloc(sizeof(*hi), GFP_KERNEL); | ||
1233 | if (hi == NULL) | ||
1234 | return -ENOMEM; | ||
1235 | |||
1236 | hi->hdr.bLength = sizeof(*hi); | ||
1237 | hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO; | ||
1238 | hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL); | ||
1239 | hi->CHID = *chid; | ||
1240 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr); | ||
1241 | if (result < 0) { | ||
1242 | dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result); | ||
1243 | goto error_mmcie_set; | ||
1244 | } | ||
1245 | wusbhc->wuie_host_info = hi; | ||
1246 | |||
1247 | queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, | ||
1248 | (wusbhc->trust_timeout*CONFIG_HZ)/1000/2); | ||
1249 | |||
1250 | return 0; | ||
1251 | |||
1252 | error_mmcie_set: | ||
1253 | kfree(hi); | ||
1254 | return result; | ||
1255 | } | ||
1256 | |||
1257 | /* | ||
1258 | * wusbhc_devconnect_stop - stop managing connected devices | ||
1259 | * @wusbhc: the WUSB HC | ||
1260 | * | ||
1261 | * Removes the Host Info IE and stops the keep alives. | ||
1262 | * | ||
1263 | * FIXME: should this disconnect all devices? | ||
1264 | */ | ||
void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
{
	/* Stop keep-alives first so nothing requeues while we tear down. */
	cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
	/* No device should still be awaiting a connect ACK by now. */
	WARN_ON(!list_empty(&wusbhc->cack_list));

	/* Withdraw the Host Info IE set by wusbhc_devconnect_start()
	 * and release its buffer (we own it). */
	wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
	kfree(wusbhc->wuie_host_info);
	wusbhc->wuie_host_info = NULL;
}
1274 | |||
1275 | /* | ||
1276 | * wusb_set_dev_addr - set the WUSB device address used by the host | ||
1277 | * @wusbhc: the WUSB HC the device is connect to | ||
1278 | * @wusb_dev: the WUSB device | ||
1279 | * @addr: new device address | ||
1280 | */ | ||
1281 | int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr) | ||
1282 | { | ||
1283 | int result; | ||
1284 | |||
1285 | wusb_dev->addr = addr; | ||
1286 | result = wusbhc->dev_info_set(wusbhc, wusb_dev); | ||
1287 | if (result < 0) | ||
1288 | dev_err(wusbhc->dev, "device %d: failed to set device " | ||
1289 | "address\n", wusb_dev->port_idx); | ||
1290 | else | ||
1291 | dev_info(wusbhc->dev, "device %d: %s addr %u\n", | ||
1292 | wusb_dev->port_idx, | ||
1293 | (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth", | ||
1294 | wusb_dev->addr); | ||
1295 | |||
1296 | return result; | ||
1297 | } | ||
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c new file mode 100644 index 000000000000..cfa77a01cebd --- /dev/null +++ b/drivers/usb/wusbcore/mmc.c | |||
@@ -0,0 +1,321 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) | ||
3 | * MMC (Microscheduled Management Command) handling | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * WUIEs and MMC IEs...well, they are almost the same at the end. MMC | ||
24 | * IEs are Wireless USB IEs that go into the MMC period...[what is | ||
25 | * that? look in Design-overview.txt]. | ||
26 | * | ||
27 | * | ||
28 | * This is a simple subsystem to keep track of which IEs are being | ||
29 | * sent by the host in the MMC period. | ||
30 | * | ||
31 | * For each WUIE we ask to send, we keep it in an array, so we can | ||
32 | * request its removal later, or replace the content. They are tracked | ||
33 | * by pointer, so be sure to use the same pointer if you want to | ||
34 | * remove it or update the contents. | ||
35 | * | ||
36 | * FIXME: | ||
37 | * - add timers that autoremove intervalled IEs? | ||
38 | */ | ||
39 | #include <linux/usb/wusb.h> | ||
40 | #include "wusbhc.h" | ||
41 | |||
42 | /* Initialize the MMCIEs handling mechanism */ | ||
43 | int wusbhc_mmcie_create(struct wusbhc *wusbhc) | ||
44 | { | ||
45 | u8 mmcies = wusbhc->mmcies_max; | ||
46 | wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL); | ||
47 | if (wusbhc->mmcie == NULL) | ||
48 | return -ENOMEM; | ||
49 | mutex_init(&wusbhc->mmcie_mutex); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | /* Release resources used by the MMCIEs handling mechanism */ | ||
/* Release resources used by the MMCIEs handling mechanism.
 * Only the slot array is ours; the IEs themselves belong to their
 * callers (see wusbhc_mmcie_set()). */
void wusbhc_mmcie_destroy(struct wusbhc *wusbhc)
{
	kfree(wusbhc->mmcie);
}
58 | |||
59 | /* | ||
60 | * Add or replace an MMC Wireless USB IE. | ||
61 | * | ||
62 | * @interval: See WUSB1.0[8.5.3.1] | ||
63 | * @repeat_cnt: See WUSB1.0[8.5.3.1] | ||
64 | * @handle: See WUSB1.0[8.5.3.1] | ||
65 | * @wuie: Pointer to the header of the WUSB IE data to add. | ||
66 | * MUST BE allocated in a kmalloc buffer (no stack or | ||
67 | * vmalloc). | ||
68 | * THE CALLER ALWAYS OWNS THE POINTER (we don't free it | ||
69 | * on remove, we just forget about it). | ||
70 | * @returns: 0 if ok, < 0 errno code on error. | ||
71 | * | ||
72 | * Goes over the *whole* @wusbhc->mmcie array looking for (a) the | ||
73 | * first free spot and (b) if @wuie is already in the array (aka: | ||
74 | * transmitted in the MMCs) the spot were it is. | ||
75 | * | ||
76 | * If present, we "overwrite it" (update). | ||
77 | * | ||
78 | * | ||
79 | * NOTE: Need special ordering rules -- see below WUSB1.0 Table 7-38. | ||
80 | * The host uses the handle as the 'sort' index. We | ||
81 | * allocate the last one always for the WUIE_ID_HOST_INFO, and | ||
82 | * the rest, first come first serve in inverse order. | ||
83 | * | ||
84 | * Host software must make sure that it adds the other IEs in | ||
85 | * the right order... the host hardware is responsible for | ||
86 | * placing the WCTA IEs in the right place with the other IEs | ||
87 | * set by host software. | ||
88 | * | ||
89 | * NOTE: we can access wusbhc->wa_descr without locking because it is | ||
90 | * read only. | ||
91 | */ | ||
int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
		     struct wuie_hdr *wuie)
{
	int result = -ENOBUFS;
	unsigned handle, itr;

	/* Search a handle, taking into account the ordering */
	mutex_lock(&wusbhc->mmcie_mutex);
	switch (wuie->bIEIdentifier) {
	case WUIE_ID_HOST_INFO:
		/* Always last */
		handle = wusbhc->mmcies_max - 1;
		break;
	case WUIE_ID_ISOCH_DISCARD:
		dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x "
			"unimplemented\n", wuie->bIEIdentifier);
		result = -ENOSYS;
		goto error_unlock;
	default:
		/* search for it or find the last empty slot */
		/* ~0 is the "no slot found" sentinel; handle is
		 * unsigned so the == ~0 test below compares against
		 * UINT_MAX after the usual conversions. */
		handle = ~0;
		/* Note the loop stops at mmcies_max - 1: the last slot
		 * is reserved for WUIE_ID_HOST_INFO (case above).  An
		 * existing entry for @wuie wins (break); otherwise the
		 * LAST empty slot seen is kept (no break on NULL). */
		for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) {
			if (wusbhc->mmcie[itr] == wuie) {
				handle = itr;
				break;
			}
			if (wusbhc->mmcie[itr] == NULL)
				handle = itr;
		}
		if (handle == ~0)
			goto error_unlock;	/* all slots taken: -ENOBUFS */
	}
	/* Hand the IE to the HC driver; only record it on success. */
	result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle,
				     wuie);
	if (result >= 0)
		wusbhc->mmcie[handle] = wuie;
error_unlock:
	mutex_unlock(&wusbhc->mmcie_mutex);
	return result;
}
EXPORT_SYMBOL_GPL(wusbhc_mmcie_set);
133 | |||
134 | /* | ||
135 | * Remove an MMC IE previously added with wusbhc_mmcie_set() | ||
136 | * | ||
137 | * @wuie Pointer used to add the WUIE | ||
138 | */ | ||
139 | void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie) | ||
140 | { | ||
141 | int result; | ||
142 | unsigned handle, itr; | ||
143 | |||
144 | mutex_lock(&wusbhc->mmcie_mutex); | ||
145 | for (itr = 0; itr < wusbhc->mmcies_max; itr++) { | ||
146 | if (wusbhc->mmcie[itr] == wuie) { | ||
147 | handle = itr; | ||
148 | goto found; | ||
149 | } | ||
150 | } | ||
151 | mutex_unlock(&wusbhc->mmcie_mutex); | ||
152 | return; | ||
153 | |||
154 | found: | ||
155 | result = (wusbhc->mmcie_rm)(wusbhc, handle); | ||
156 | if (result == 0) | ||
157 | wusbhc->mmcie[itr] = NULL; | ||
158 | mutex_unlock(&wusbhc->mmcie_mutex); | ||
159 | } | ||
160 | EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm); | ||
161 | |||
162 | /* | ||
163 | * wusbhc_start - start transmitting MMCs and accepting connections | ||
164 | * @wusbhc: the HC to start | ||
165 | * @chid: the CHID to use for this host | ||
166 | * | ||
167 | * Establishes a cluster reservation, enables device connections, and | ||
168 | * starts MMCs with appropriate DNTS parameters. | ||
169 | */ | ||
int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
{
	int result;
	struct device *dev = wusbhc->dev;

	/* Starting twice would leak the Host Info IE. */
	WARN_ON(wusbhc->wuie_host_info != NULL);

	result = wusbhc_rsv_establish(wusbhc);
	if (result < 0) {
		dev_err(dev, "cannot establish cluster reservation: %d\n",
			result);
		goto error_rsv_establish;
	}

	result = wusbhc_devconnect_start(wusbhc, chid);
	if (result < 0) {
		dev_err(dev, "error enabling device connections: %d\n", result);
		goto error_devconnect_start;
	}

	result = wusbhc_sec_start(wusbhc);
	if (result < 0) {
		dev_err(dev, "error starting security in the HC: %d\n", result);
		goto error_sec_start;
	}
	/* FIXME: the choice of the DNTS parameters is somewhat
	 * arbitrary */
	result = wusbhc->set_num_dnts(wusbhc, 0, 15);
	if (result < 0) {
		dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
		goto error_set_num_dnts;
	}
	result = wusbhc->start(wusbhc);
	if (result < 0) {
		dev_err(dev, "error starting wusbch: %d\n", result);
		goto error_wusbhc_start;
	}
	wusbhc->active = 1;
	return 0;

	/* Unwind in strict reverse order of the steps above.
	 * NOTE: set_num_dnts has no undo, hence the shared label. */
error_wusbhc_start:
	wusbhc_sec_stop(wusbhc);
error_set_num_dnts:
error_sec_start:
	wusbhc_devconnect_stop(wusbhc);
error_devconnect_start:
	wusbhc_rsv_terminate(wusbhc);
error_rsv_establish:
	return result;
}
220 | |||
221 | /* | ||
222 | * Disconnect all from the WUSB Channel | ||
223 | * | ||
224 | * Send a Host Disconnect IE in the MMC, wait, don't send it any more | ||
225 | */ | ||
226 | static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc) | ||
227 | { | ||
228 | int result = -ENOMEM; | ||
229 | struct wuie_host_disconnect *host_disconnect_ie; | ||
230 | might_sleep(); | ||
231 | host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL); | ||
232 | if (host_disconnect_ie == NULL) | ||
233 | goto error_alloc; | ||
234 | host_disconnect_ie->hdr.bLength = sizeof(*host_disconnect_ie); | ||
235 | host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT; | ||
236 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr); | ||
237 | if (result < 0) | ||
238 | goto error_mmcie_set; | ||
239 | |||
240 | /* WUSB1.0[8.5.3.1 & 7.5.2] */ | ||
241 | msleep(100); | ||
242 | wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr); | ||
243 | error_mmcie_set: | ||
244 | kfree(host_disconnect_ie); | ||
245 | error_alloc: | ||
246 | return result; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * wusbhc_stop - stop transmitting MMCs | ||
251 | * @wusbhc: the HC to stop | ||
252 | * | ||
253 | * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs). | ||
254 | * | ||
255 | * If we can't allocate a Host Stop IE, screw it, we don't notify the | ||
256 | * devices we are disconnecting... | ||
257 | */ | ||
258 | void wusbhc_stop(struct wusbhc *wusbhc) | ||
259 | { | ||
260 | if (wusbhc->active) { | ||
261 | wusbhc->active = 0; | ||
262 | wusbhc->stop(wusbhc); | ||
263 | wusbhc_sec_stop(wusbhc); | ||
264 | __wusbhc_host_disconnect_ie(wusbhc); | ||
265 | wusbhc_devconnect_stop(wusbhc); | ||
266 | wusbhc_rsv_terminate(wusbhc); | ||
267 | } | ||
268 | } | ||
269 | EXPORT_SYMBOL_GPL(wusbhc_stop); | ||
270 | |||
271 | /* | ||
272 | * Change the CHID in a WUSB Channel | ||
273 | * | ||
274 | * If it is just a new CHID, send a Host Disconnect IE and then change | ||
275 | * the CHID IE. | ||
276 | */ | ||
277 | static int __wusbhc_chid_change(struct wusbhc *wusbhc, | ||
278 | const struct wusb_ckhdid *chid) | ||
279 | { | ||
280 | int result = -ENOSYS; | ||
281 | struct device *dev = wusbhc->dev; | ||
282 | dev_err(dev, "%s() not implemented yet\n", __func__); | ||
283 | return result; | ||
284 | |||
285 | BUG_ON(wusbhc->wuie_host_info == NULL); | ||
286 | __wusbhc_host_disconnect_ie(wusbhc); | ||
287 | wusbhc->wuie_host_info->CHID = *chid; | ||
288 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr); | ||
289 | if (result < 0) | ||
290 | dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result); | ||
291 | return result; | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * Set/reset/update a new CHID | ||
296 | * | ||
297 | * Depending on the previous state of the MMCs, start, stop or change | ||
298 | * the sent MMC. This effectively switches the host controller on and | ||
299 | * off (radio wise). | ||
300 | */ | ||
301 | int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) | ||
302 | { | ||
303 | int result = 0; | ||
304 | |||
305 | if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0) | ||
306 | chid = NULL; | ||
307 | |||
308 | mutex_lock(&wusbhc->mutex); | ||
309 | if (wusbhc->active) { | ||
310 | if (chid) | ||
311 | result = __wusbhc_chid_change(wusbhc, chid); | ||
312 | else | ||
313 | wusbhc_stop(wusbhc); | ||
314 | } else { | ||
315 | if (chid) | ||
316 | wusbhc_start(wusbhc, chid); | ||
317 | } | ||
318 | mutex_unlock(&wusbhc->mutex); | ||
319 | return result; | ||
320 | } | ||
321 | EXPORT_SYMBOL_GPL(wusbhc_chid_set); | ||
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c new file mode 100644 index 000000000000..7cc51e9905cf --- /dev/null +++ b/drivers/usb/wusbcore/pal.c | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Wireless USB Host Controller | ||
3 | * UWB Protocol Adaptation Layer (PAL) glue. | ||
4 | * | ||
5 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #include "wusbhc.h" | ||
20 | |||
21 | /** | ||
22 | * wusbhc_pal_register - register the WUSB HC as a UWB PAL | ||
23 | * @wusbhc: the WUSB HC | ||
24 | */ | ||
25 | int wusbhc_pal_register(struct wusbhc *wusbhc) | ||
26 | { | ||
27 | uwb_pal_init(&wusbhc->pal); | ||
28 | |||
29 | wusbhc->pal.name = "wusbhc"; | ||
30 | wusbhc->pal.device = wusbhc->usb_hcd.self.controller; | ||
31 | |||
32 | return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal); | ||
33 | } | ||
34 | |||
35 | /** | ||
36 | * wusbhc_pal_register - unregister the WUSB HC as a UWB PAL | ||
37 | * @wusbhc: the WUSB HC | ||
38 | */ | ||
39 | void wusbhc_pal_unregister(struct wusbhc *wusbhc) | ||
40 | { | ||
41 | uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal); | ||
42 | } | ||
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c new file mode 100644 index 000000000000..fc63e77ded2d --- /dev/null +++ b/drivers/usb/wusbcore/reservation.c | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * WUSB cluster reservation management | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/uwb.h> | ||
20 | |||
21 | #include "wusbhc.h" | ||
22 | |||
23 | /* | ||
24 | * WUSB cluster reservations are multicast reservations with the | ||
25 | * broadcast cluster ID (BCID) as the target DevAddr. | ||
26 | * | ||
27 | * FIXME: consider adjusting the reservation depending on what devices | ||
28 | * are attached. | ||
29 | */ | ||
30 | |||
31 | static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream, | ||
32 | const struct uwb_mas_bm *mas) | ||
33 | { | ||
34 | if (mas == NULL) | ||
35 | mas = &uwb_mas_bm_zero; | ||
36 | return wusbhc->bwa_set(wusbhc, stream, mas); | ||
37 | } | ||
38 | |||
39 | /** | ||
40 | * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback | ||
41 | * @rsv: the reservation | ||
42 | * | ||
43 | * Either set or clear the HC's view of the reservation. | ||
44 | * | ||
45 | * FIXME: when a reservation is denied the HC should be stopped. | ||
46 | */ | ||
47 | static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv) | ||
48 | { | ||
49 | struct wusbhc *wusbhc = rsv->pal_priv; | ||
50 | struct device *dev = wusbhc->dev; | ||
51 | char buf[72]; | ||
52 | |||
53 | switch (rsv->state) { | ||
54 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
55 | bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); | ||
56 | dev_dbg(dev, "established reservation: %s\n", buf); | ||
57 | wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas); | ||
58 | break; | ||
59 | case UWB_RSV_STATE_NONE: | ||
60 | dev_dbg(dev, "removed reservation\n"); | ||
61 | wusbhc_bwa_set(wusbhc, 0, NULL); | ||
62 | wusbhc->rsv = NULL; | ||
63 | break; | ||
64 | default: | ||
65 | dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state); | ||
66 | break; | ||
67 | } | ||
68 | } | ||
69 | |||
70 | |||
71 | /** | ||
72 | * wusbhc_rsv_establish - establish a reservation for the cluster | ||
73 | * @wusbhc: the WUSB HC requesting a bandwidth reservation | ||
74 | */ | ||
75 | int wusbhc_rsv_establish(struct wusbhc *wusbhc) | ||
76 | { | ||
77 | struct uwb_rc *rc = wusbhc->uwb_rc; | ||
78 | struct uwb_rsv *rsv; | ||
79 | struct uwb_dev_addr bcid; | ||
80 | int ret; | ||
81 | |||
82 | rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc); | ||
83 | if (rsv == NULL) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | bcid.data[0] = wusbhc->cluster_id; | ||
87 | bcid.data[1] = 0; | ||
88 | |||
89 | rsv->owner = &rc->uwb_dev; | ||
90 | rsv->target.type = UWB_RSV_TARGET_DEVADDR; | ||
91 | rsv->target.devaddr = bcid; | ||
92 | rsv->type = UWB_DRP_TYPE_PRIVATE; | ||
93 | rsv->max_mas = 256; | ||
94 | rsv->min_mas = 16; /* one MAS per zone? */ | ||
95 | rsv->sparsity = 16; /* at least one MAS in each zone? */ | ||
96 | rsv->is_multicast = true; | ||
97 | |||
98 | ret = uwb_rsv_establish(rsv); | ||
99 | if (ret == 0) | ||
100 | wusbhc->rsv = rsv; | ||
101 | else | ||
102 | uwb_rsv_destroy(rsv); | ||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | |||
107 | /** | ||
108 | * wusbhc_rsv_terminate - terminate any cluster reservation | ||
109 | * @wusbhc: the WUSB host whose reservation is to be terminated | ||
110 | */ | ||
111 | void wusbhc_rsv_terminate(struct wusbhc *wusbhc) | ||
112 | { | ||
113 | if (wusbhc->rsv) | ||
114 | uwb_rsv_terminate(wusbhc->rsv); | ||
115 | } | ||
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c new file mode 100644 index 000000000000..267a64325106 --- /dev/null +++ b/drivers/usb/wusbcore/rh.c | |||
@@ -0,0 +1,477 @@ | |||
1 | /* | ||
2 | * Wireless USB Host Controller | ||
3 | * Root Hub operations | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * We fake a root hub that has fake ports (as many as simultaneous | ||
25 | * devices the Wireless USB Host Controller can deal with). For each | ||
26 | * port we keep a state in @wusbhc->port[index] identical to the one | ||
27 | * specified in the USB2.0[ch11] spec and some extra device | ||
28 | * information that complements the one in 'struct usb_device' (as | ||
29 | * this lacks a hcpriv pointer). | ||
30 | * | ||
31 | * Note this is common to WHCI and HWA host controllers. | ||
32 | * | ||
33 | * Through here we enable most of the state changes that the USB stack | ||
34 | * will use to connect or disconnect devices. We need to do some | ||
35 | * forced adaptation of Wireless USB device states vs. wired: | ||
36 | * | ||
37 | * USB: WUSB: | ||
38 | * | ||
39 | * Port Powered-off port slot n/a | ||
40 | * Powered-on port slot available | ||
41 | * Disconnected port slot available | ||
42 | * Connected port slot assigned device | ||
43 | * device sent DN_Connect | ||
44 | * device was authenticated | ||
45 | * Enabled device is authenticated, transitioned | ||
46 | * from unauth -> auth -> default address | ||
47 | * -> enabled | ||
48 | * Reset disconnect | ||
49 | * Disable disconnect | ||
50 | * | ||
51 | * This maps the standard USB port states with the WUSB device states | ||
52 | * so we can fake ports without having to modify the USB stack. | ||
53 | * | ||
54 | * FIXME: this process will change in the future | ||
55 | * | ||
56 | * | ||
57 | * ENTRY POINTS | ||
58 | * | ||
59 | * Our entry points into here are, as in hcd.c, the USB stack root hub | ||
60 | * ops defined in the usb_hcd struct: | ||
61 | * | ||
62 | * wusbhc_rh_status_data() Provide hub and port status data bitmap | ||
63 | * | ||
64 | * wusbhc_rh_control() Execution of all the major requests | ||
65 | * you can do to a hub (Set|Clear | ||
66 | * features, get descriptors, status, etc). | ||
67 | * | ||
68 | * wusbhc_rh_[suspend|resume]() That | ||
69 | * | ||
70 | * wusbhc_rh_start_port_reset() ??? unimplemented | ||
71 | */ | ||
72 | #include "wusbhc.h" | ||
73 | |||
74 | #define D_LOCAL 0 | ||
75 | #include <linux/uwb/debug.h> | ||
76 | |||
77 | /* | ||
78 | * Reset a fake port | ||
79 | * | ||
80 | * This can be called to reset a port from any other state or to reset | ||
81 | * it when connecting. In Wireless USB they are different; when doing | ||
82 | * a new connect that involves going over the authentication. When | ||
83 | * just resetting, it's a different story. | ||
84 | * | ||
85 | * The Linux USB stack resets a port twice before it considers it | ||
86 | * enabled, so we have to detect and ignore that. | ||
87 | * | ||
88 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | ||
89 | * | ||
90 | * Supposedly we are the only thread accessing @wusbhc->port; in any | ||
91 | * case, maybe we should move the mutex locking from | ||
92 | * wusbhc_devconnect_auth() to here. | ||
93 | * | ||
94 | * @port_idx refers to the wusbhc's port index, not the USB port number | ||
95 | */ | ||
96 | static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx) | ||
97 | { | ||
98 | int result = 0; | ||
99 | struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); | ||
100 | |||
101 | d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n", | ||
102 | wusbhc, port_idx); | ||
103 | if (port->reset_count == 0) { | ||
104 | wusbhc_devconnect_auth(wusbhc, port_idx); | ||
105 | port->reset_count++; | ||
106 | } else if (port->reset_count == 1) | ||
107 | /* see header */ | ||
108 | d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx " | ||
109 | "%u\n", port_idx); | ||
110 | else | ||
111 | result = wusbhc_dev_reset(wusbhc, port_idx); | ||
112 | d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n", | ||
113 | wusbhc, port_idx, result); | ||
114 | return result; | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Return the hub change status bitmap | ||
119 | * | ||
120 | * The bits in the change status bitmap are cleared when a | ||
121 | * ClearPortFeature request is issued (USB2.0[11.12.3,11.12.4]. | ||
122 | * | ||
123 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | ||
124 | * | ||
125 | * WARNING!! This gets called from atomic context; we cannot get the | ||
126 | * mutex--the only race condition we can find is some bit | ||
127 | * changing just after we copy it, which shouldn't be too | ||
128 | * big of a problem [and we can't make it an spinlock | ||
129 | * because other parts need to take it and sleep] . | ||
130 | * | ||
131 | * @usb_hcd is refcounted, so it won't disappear under us | ||
132 | * and before killing a host, the polling of the root hub | ||
133 | * would be stopped anyway. | ||
134 | */ | ||
135 | int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf) | ||
136 | { | ||
137 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
138 | size_t cnt, size; | ||
139 | unsigned long *buf = (unsigned long *) _buf; | ||
140 | |||
141 | d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc); | ||
142 | /* WE DON'T LOCK, see comment */ | ||
143 | size = wusbhc->ports_max + 1 /* hub bit */; | ||
144 | size = (size + 8 - 1) / 8; /* round to bytes */ | ||
145 | for (cnt = 0; cnt < wusbhc->ports_max; cnt++) | ||
146 | if (wusb_port_by_idx(wusbhc, cnt)->change) | ||
147 | set_bit(cnt + 1, buf); | ||
148 | else | ||
149 | clear_bit(cnt + 1, buf); | ||
150 | d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size); | ||
151 | d_dump(1, wusbhc->dev, _buf, size); | ||
152 | return size; | ||
153 | } | ||
154 | EXPORT_SYMBOL_GPL(wusbhc_rh_status_data); | ||
155 | |||
156 | /* | ||
157 | * Return the hub's descriptor | ||
158 | * | ||
159 | * NOTE: almost cut and paste from ehci-hub.c | ||
160 | * | ||
161 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked | ||
162 | */ | ||
163 | static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue, | ||
164 | u16 wIndex, | ||
165 | struct usb_hub_descriptor *descr, | ||
166 | u16 wLength) | ||
167 | { | ||
168 | u16 temp = 1 + (wusbhc->ports_max / 8); | ||
169 | u8 length = 7 + 2 * temp; | ||
170 | |||
171 | if (wLength < length) | ||
172 | return -ENOSPC; | ||
173 | descr->bDescLength = 7 + 2 * temp; | ||
174 | descr->bDescriptorType = 0x29; /* HUB type */ | ||
175 | descr->bNbrPorts = wusbhc->ports_max; | ||
176 | descr->wHubCharacteristics = cpu_to_le16( | ||
177 | 0x00 /* All ports power at once */ | ||
178 | | 0x00 /* not part of compound device */ | ||
179 | | 0x10 /* No overcurrent protection */ | ||
180 | | 0x00 /* 8 FS think time FIXME ?? */ | ||
181 | | 0x00); /* No port indicators */ | ||
182 | descr->bPwrOn2PwrGood = 0; | ||
183 | descr->bHubContrCurrent = 0; | ||
184 | /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ | ||
185 | memset(&descr->bitmap[0], 0, temp); | ||
186 | memset(&descr->bitmap[temp], 0xff, temp); | ||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Clear a hub feature | ||
192 | * | ||
193 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | ||
194 | * | ||
195 | * Nothing to do, so no locking needed ;) | ||
196 | */ | ||
197 | static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) | ||
198 | { | ||
199 | int result; | ||
200 | struct device *dev = wusbhc->dev; | ||
201 | |||
202 | d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature); | ||
203 | switch (feature) { | ||
204 | case C_HUB_LOCAL_POWER: | ||
205 | /* FIXME: maybe plug bit 0 to the power input status, | ||
206 | * if any? | ||
207 | * see wusbhc_rh_get_hub_status() */ | ||
208 | case C_HUB_OVER_CURRENT: | ||
209 | result = 0; | ||
210 | break; | ||
211 | default: | ||
212 | result = -EPIPE; | ||
213 | } | ||
214 | d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result); | ||
215 | return result; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Return hub status (it is always zero...) | ||
220 | * | ||
221 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | ||
222 | * | ||
223 | * Nothing to do, so no locking needed ;) | ||
224 | */ | ||
225 | static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf, | ||
226 | u16 wLength) | ||
227 | { | ||
228 | /* FIXME: maybe plug bit 0 to the power input status (if any)? */ | ||
229 | *buf = 0; | ||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | /* | ||
234 | * Set a port feature | ||
235 | * | ||
236 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | ||
237 | */ | ||
238 | static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, | ||
239 | u8 selector, u8 port_idx) | ||
240 | { | ||
241 | int result = -EINVAL; | ||
242 | struct device *dev = wusbhc->dev; | ||
243 | |||
244 | d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n", | ||
245 | feature, selector, port_idx); | ||
246 | |||
247 | if (port_idx > wusbhc->ports_max) | ||
248 | goto error; | ||
249 | |||
250 | switch (feature) { | ||
251 | /* According to USB2.0[11.24.2.13]p2, these features | ||
252 | * are not required to be implemented. */ | ||
253 | case USB_PORT_FEAT_C_OVER_CURRENT: | ||
254 | case USB_PORT_FEAT_C_ENABLE: | ||
255 | case USB_PORT_FEAT_C_SUSPEND: | ||
256 | case USB_PORT_FEAT_C_CONNECTION: | ||
257 | case USB_PORT_FEAT_C_RESET: | ||
258 | result = 0; | ||
259 | break; | ||
260 | |||
261 | case USB_PORT_FEAT_POWER: | ||
262 | /* No such thing, but we fake it works */ | ||
263 | mutex_lock(&wusbhc->mutex); | ||
264 | wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER; | ||
265 | mutex_unlock(&wusbhc->mutex); | ||
266 | result = 0; | ||
267 | break; | ||
268 | case USB_PORT_FEAT_RESET: | ||
269 | result = wusbhc_rh_port_reset(wusbhc, port_idx); | ||
270 | break; | ||
271 | case USB_PORT_FEAT_ENABLE: | ||
272 | case USB_PORT_FEAT_SUSPEND: | ||
273 | dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n", | ||
274 | port_idx, feature, selector); | ||
275 | result = -ENOSYS; | ||
276 | break; | ||
277 | default: | ||
278 | dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n", | ||
279 | port_idx, feature, selector); | ||
280 | result = -EPIPE; | ||
281 | break; | ||
282 | } | ||
283 | error: | ||
284 | d_fnend(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n", | ||
285 | feature, selector, port_idx, result); | ||
286 | return result; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * Clear a port feature... | ||
291 | * | ||
292 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | ||
293 | */ | ||
294 | static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, | ||
295 | u8 selector, u8 port_idx) | ||
296 | { | ||
297 | int result = -EINVAL; | ||
298 | struct device *dev = wusbhc->dev; | ||
299 | |||
300 | d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n", | ||
301 | wusbhc, feature, selector, port_idx); | ||
302 | |||
303 | if (port_idx > wusbhc->ports_max) | ||
304 | goto error; | ||
305 | |||
306 | mutex_lock(&wusbhc->mutex); | ||
307 | result = 0; | ||
308 | switch (feature) { | ||
309 | case USB_PORT_FEAT_POWER: /* fake port always on */ | ||
310 | /* According to USB2.0[11.24.2.7.1.4], no need to implement? */ | ||
311 | case USB_PORT_FEAT_C_OVER_CURRENT: | ||
312 | break; | ||
313 | case USB_PORT_FEAT_C_RESET: | ||
314 | wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET; | ||
315 | break; | ||
316 | case USB_PORT_FEAT_C_CONNECTION: | ||
317 | wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION; | ||
318 | break; | ||
319 | case USB_PORT_FEAT_ENABLE: | ||
320 | __wusbhc_dev_disable(wusbhc, port_idx); | ||
321 | break; | ||
322 | case USB_PORT_FEAT_C_ENABLE: | ||
323 | wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE; | ||
324 | break; | ||
325 | case USB_PORT_FEAT_SUSPEND: | ||
326 | case USB_PORT_FEAT_C_SUSPEND: | ||
327 | case 0xffff: /* ??? FIXME */ | ||
328 | dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n", | ||
329 | port_idx, feature, selector); | ||
330 | /* dump_stack(); */ | ||
331 | result = -ENOSYS; | ||
332 | break; | ||
333 | default: | ||
334 | dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n", | ||
335 | port_idx, feature, selector); | ||
336 | result = -EPIPE; | ||
337 | break; | ||
338 | } | ||
339 | mutex_unlock(&wusbhc->mutex); | ||
340 | error: | ||
341 | d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = " | ||
342 | "%d\n", wusbhc, feature, selector, port_idx, result); | ||
343 | return result; | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * Return the port's status | ||
348 | * | ||
349 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | ||
350 | */ | ||
351 | static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx, | ||
352 | u32 *_buf, u16 wLength) | ||
353 | { | ||
354 | int result = -EINVAL; | ||
355 | u16 *buf = (u16 *) _buf; | ||
356 | |||
357 | d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n", | ||
358 | wusbhc, port_idx, wLength); | ||
359 | if (port_idx > wusbhc->ports_max) | ||
360 | goto error; | ||
361 | mutex_lock(&wusbhc->mutex); | ||
362 | buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status); | ||
363 | buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change); | ||
364 | result = 0; | ||
365 | mutex_unlock(&wusbhc->mutex); | ||
366 | error: | ||
367 | d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result); | ||
368 | d_dump(1, wusbhc->dev, _buf, wLength); | ||
369 | return result; | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * Entry point for Root Hub operations | ||
374 | * | ||
375 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | ||
376 | */ | ||
377 | int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue, | ||
378 | u16 wIndex, char *buf, u16 wLength) | ||
379 | { | ||
380 | int result = -ENOSYS; | ||
381 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
382 | |||
383 | switch (reqntype) { | ||
384 | case GetHubDescriptor: | ||
385 | result = wusbhc_rh_get_hub_descr( | ||
386 | wusbhc, wValue, wIndex, | ||
387 | (struct usb_hub_descriptor *) buf, wLength); | ||
388 | break; | ||
389 | case ClearHubFeature: | ||
390 | result = wusbhc_rh_clear_hub_feat(wusbhc, wValue); | ||
391 | break; | ||
392 | case GetHubStatus: | ||
393 | result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength); | ||
394 | break; | ||
395 | |||
396 | case SetPortFeature: | ||
397 | result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8, | ||
398 | (wIndex & 0xff) - 1); | ||
399 | break; | ||
400 | case ClearPortFeature: | ||
401 | result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8, | ||
402 | (wIndex & 0xff) - 1); | ||
403 | break; | ||
404 | case GetPortStatus: | ||
405 | result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1, | ||
406 | (u32 *)buf, wLength); | ||
407 | break; | ||
408 | |||
409 | case SetHubFeature: | ||
410 | default: | ||
411 | dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) " | ||
412 | "UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype, | ||
413 | wValue, wIndex, buf, wLength); | ||
414 | /* dump_stack(); */ | ||
415 | result = -ENOSYS; | ||
416 | } | ||
417 | return result; | ||
418 | } | ||
419 | EXPORT_SYMBOL_GPL(wusbhc_rh_control); | ||
420 | |||
421 | int wusbhc_rh_suspend(struct usb_hcd *usb_hcd) | ||
422 | { | ||
423 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
424 | dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__, | ||
425 | usb_hcd, wusbhc); | ||
426 | /* dump_stack(); */ | ||
427 | return -ENOSYS; | ||
428 | } | ||
429 | EXPORT_SYMBOL_GPL(wusbhc_rh_suspend); | ||
430 | |||
431 | int wusbhc_rh_resume(struct usb_hcd *usb_hcd) | ||
432 | { | ||
433 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
434 | dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__, | ||
435 | usb_hcd, wusbhc); | ||
436 | /* dump_stack(); */ | ||
437 | return -ENOSYS; | ||
438 | } | ||
439 | EXPORT_SYMBOL_GPL(wusbhc_rh_resume); | ||
440 | |||
441 | int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx) | ||
442 | { | ||
443 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
444 | dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n", | ||
445 | __func__, usb_hcd, wusbhc, port_idx); | ||
446 | WARN_ON(1); | ||
447 | return -ENOSYS; | ||
448 | } | ||
449 | EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset); | ||
450 | |||
451 | static void wusb_port_init(struct wusb_port *port) | ||
452 | { | ||
453 | port->status |= USB_PORT_STAT_HIGH_SPEED; | ||
454 | } | ||
455 | |||
456 | /* | ||
457 | * Alloc fake port specific fields and status. | ||
458 | */ | ||
459 | int wusbhc_rh_create(struct wusbhc *wusbhc) | ||
460 | { | ||
461 | int result = -ENOMEM; | ||
462 | size_t port_size, itr; | ||
463 | port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]); | ||
464 | wusbhc->port = kzalloc(port_size, GFP_KERNEL); | ||
465 | if (wusbhc->port == NULL) | ||
466 | goto error_port_alloc; | ||
467 | for (itr = 0; itr < wusbhc->ports_max; itr++) | ||
468 | wusb_port_init(&wusbhc->port[itr]); | ||
469 | result = 0; | ||
470 | error_port_alloc: | ||
471 | return result; | ||
472 | } | ||
473 | |||
474 | void wusbhc_rh_destroy(struct wusbhc *wusbhc) | ||
475 | { | ||
476 | kfree(wusbhc->port); | ||
477 | } | ||
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c new file mode 100644 index 000000000000..a101cad6a8d4 --- /dev/null +++ b/drivers/usb/wusbcore/security.c | |||
@@ -0,0 +1,642 @@ | |||
1 | /* | ||
2 | * Wireless USB Host Controller | ||
3 | * Security support: encryption enablement, etc | ||
4 | * | ||
5 | * Copyright (C) 2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | */ | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/usb/ch9.h> | ||
27 | #include <linux/random.h> | ||
28 | #include "wusbhc.h" | ||
29 | |||
30 | /* | ||
31 | * DEBUG & SECURITY WARNING!!!! | ||
32 | * | ||
33 | * If you enable this past 1, the debug code will weaken the | ||
34 | * cryptographic safety of the system (on purpose, for debugging). | ||
35 | * | ||
36 | * Weaken means: | ||
37 | * we print secret keys and intermediate values all the way, | ||
38 | */ | ||
39 | #undef D_LOCAL | ||
40 | #define D_LOCAL 2 | ||
41 | #include <linux/uwb/debug.h> | ||
42 | |||
43 | static void wusbhc_set_gtk_callback(struct urb *urb); | ||
44 | static void wusbhc_gtk_rekey_done_work(struct work_struct *work); | ||
45 | |||
46 | int wusbhc_sec_create(struct wusbhc *wusbhc) | ||
47 | { | ||
48 | wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); | ||
49 | wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; | ||
50 | wusbhc->gtk.descr.bReserved = 0; | ||
51 | |||
52 | wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, | ||
53 | WUSB_KEY_INDEX_ORIGINATOR_HOST); | ||
54 | |||
55 | INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work); | ||
56 | |||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | |||
/* Called when the HC is destroyed */
void wusbhc_sec_destroy(struct wusbhc *wusbhc)
{
	/* Nothing to release: wusbhc_sec_create() allocates no resources. */
}
65 | |||
66 | |||
67 | /** | ||
68 | * wusbhc_next_tkid - generate a new, currently unused, TKID | ||
69 | * @wusbhc: the WUSB host controller | ||
70 | * @wusb_dev: the device whose PTK the TKID is for | ||
71 | * (or NULL for a TKID for a GTK) | ||
72 | * | ||
73 | * The generated TKID consist of two parts: the device's authenicated | ||
74 | * address (or 0 or a GTK); and an incrementing number. This ensures | ||
75 | * that TKIDs cannot be shared between devices and by the time the | ||
76 | * incrementing number wraps around the older TKIDs will no longer be | ||
77 | * in use (a maximum of two keys may be active at any one time). | ||
78 | */ | ||
79 | static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | ||
80 | { | ||
81 | u32 *tkid; | ||
82 | u32 addr; | ||
83 | |||
84 | if (wusb_dev == NULL) { | ||
85 | tkid = &wusbhc->gtk_tkid; | ||
86 | addr = 0; | ||
87 | } else { | ||
88 | tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid; | ||
89 | addr = wusb_dev->addr & 0x7f; | ||
90 | } | ||
91 | |||
92 | *tkid = (addr << 8) | ((*tkid + 1) & 0xff); | ||
93 | |||
94 | return *tkid; | ||
95 | } | ||
96 | |||
97 | static void wusbhc_generate_gtk(struct wusbhc *wusbhc) | ||
98 | { | ||
99 | const size_t key_size = sizeof(wusbhc->gtk.data); | ||
100 | u32 tkid; | ||
101 | |||
102 | tkid = wusbhc_next_tkid(wusbhc, NULL); | ||
103 | |||
104 | wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff; | ||
105 | wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff; | ||
106 | wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff; | ||
107 | |||
108 | get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size); | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * wusbhc_sec_start - start the security management process | ||
113 | * @wusbhc: the WUSB host controller | ||
114 | * | ||
115 | * Generate and set an initial GTK on the host controller. | ||
116 | * | ||
117 | * Called when the HC is started. | ||
118 | */ | ||
119 | int wusbhc_sec_start(struct wusbhc *wusbhc) | ||
120 | { | ||
121 | const size_t key_size = sizeof(wusbhc->gtk.data); | ||
122 | int result; | ||
123 | |||
124 | wusbhc_generate_gtk(wusbhc); | ||
125 | |||
126 | result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, | ||
127 | &wusbhc->gtk.descr.bKeyData, key_size); | ||
128 | if (result < 0) | ||
129 | dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", | ||
130 | result); | ||
131 | |||
132 | return result; | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * wusbhc_sec_stop - stop the security management process | ||
137 | * @wusbhc: the WUSB host controller | ||
138 | * | ||
139 | * Wait for any pending GTK rekeys to stop. | ||
140 | */ | ||
void wusbhc_sec_stop(struct wusbhc *wusbhc)
{
	/* Flush any in-flight GTK-rekey completion work before teardown. */
	cancel_work_sync(&wusbhc->gtk_rekey_done_work);
}
145 | |||
146 | |||
147 | /** @returns encryption type name */ | ||
148 | const char *wusb_et_name(u8 x) | ||
149 | { | ||
150 | switch (x) { | ||
151 | case USB_ENC_TYPE_UNSECURE: return "unsecure"; | ||
152 | case USB_ENC_TYPE_WIRED: return "wired"; | ||
153 | case USB_ENC_TYPE_CCM_1: return "CCM-1"; | ||
154 | case USB_ENC_TYPE_RSA_1: return "RSA-1"; | ||
155 | default: return "unknown"; | ||
156 | } | ||
157 | } | ||
158 | EXPORT_SYMBOL_GPL(wusb_et_name); | ||
159 | |||
160 | /* | ||
161 | * Set the device encryption method | ||
162 | * | ||
163 | * We tell the device which encryption method to use; we do this when | ||
164 | * setting up the device's security. | ||
165 | */ | ||
166 | static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value) | ||
167 | { | ||
168 | int result; | ||
169 | struct device *dev = &usb_dev->dev; | ||
170 | struct wusb_dev *wusb_dev = usb_dev->wusb_dev; | ||
171 | |||
172 | if (value) { | ||
173 | value = wusb_dev->ccm1_etd.bEncryptionValue; | ||
174 | } else { | ||
175 | /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */ | ||
176 | value = 0; | ||
177 | } | ||
178 | /* Set device's */ | ||
179 | result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), | ||
180 | USB_REQ_SET_ENCRYPTION, | ||
181 | USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, | ||
182 | value, 0, NULL, 0, 1000 /* FIXME: arbitrary */); | ||
183 | if (result < 0) | ||
184 | dev_err(dev, "Can't set device's WUSB encryption to " | ||
185 | "%s (value %d): %d\n", | ||
186 | wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType), | ||
187 | wusb_dev->ccm1_etd.bEncryptionValue, result); | ||
188 | return result; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Set the GTK to be used by a device. | ||
193 | * | ||
194 | * The device must be authenticated. | ||
195 | */ | ||
196 | static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | ||
197 | { | ||
198 | struct usb_device *usb_dev = wusb_dev->usb_dev; | ||
199 | |||
200 | return usb_control_msg( | ||
201 | usb_dev, usb_sndctrlpipe(usb_dev, 0), | ||
202 | USB_REQ_SET_DESCRIPTOR, | ||
203 | USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, | ||
204 | USB_DT_KEY << 8 | wusbhc->gtk_index, 0, | ||
205 | &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, | ||
206 | 1000); | ||
207 | } | ||
208 | |||
209 | |||
210 | /* FIXME: prototype for adding security */ | ||
211 | int wusb_dev_sec_add(struct wusbhc *wusbhc, | ||
212 | struct usb_device *usb_dev, struct wusb_dev *wusb_dev) | ||
213 | { | ||
214 | int result, bytes, secd_size; | ||
215 | struct device *dev = &usb_dev->dev; | ||
216 | struct usb_security_descriptor secd; | ||
217 | const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL; | ||
218 | void *secd_buf; | ||
219 | const void *itr, *top; | ||
220 | char buf[64]; | ||
221 | |||
222 | d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev); | ||
223 | result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, | ||
224 | 0, &secd, sizeof(secd)); | ||
225 | if (result < sizeof(secd)) { | ||
226 | dev_err(dev, "Can't read security descriptor or " | ||
227 | "not enough data: %d\n", result); | ||
228 | goto error_secd; | ||
229 | } | ||
230 | secd_size = le16_to_cpu(secd.wTotalLength); | ||
231 | d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n", | ||
232 | result, secd_size); | ||
233 | secd_buf = kmalloc(secd_size, GFP_KERNEL); | ||
234 | if (secd_buf == NULL) { | ||
235 | dev_err(dev, "Can't allocate space for security descriptors\n"); | ||
236 | goto error_secd_alloc; | ||
237 | } | ||
238 | result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, | ||
239 | 0, secd_buf, secd_size); | ||
240 | if (result < secd_size) { | ||
241 | dev_err(dev, "Can't read security descriptor or " | ||
242 | "not enough data: %d\n", result); | ||
243 | goto error_secd_all; | ||
244 | } | ||
245 | d_printf(5, dev, "got %d bytes of sec descriptors\n", result); | ||
246 | bytes = 0; | ||
247 | itr = secd_buf + sizeof(secd); | ||
248 | top = secd_buf + result; | ||
249 | while (itr < top) { | ||
250 | etd = itr; | ||
251 | if (top - itr < sizeof(*etd)) { | ||
252 | dev_err(dev, "BUG: bad device security descriptor; " | ||
253 | "not enough data (%zu vs %zu bytes left)\n", | ||
254 | top - itr, sizeof(*etd)); | ||
255 | break; | ||
256 | } | ||
257 | if (etd->bLength < sizeof(*etd)) { | ||
258 | dev_err(dev, "BUG: bad device encryption descriptor; " | ||
259 | "descriptor is too short " | ||
260 | "(%u vs %zu needed)\n", | ||
261 | etd->bLength, sizeof(*etd)); | ||
262 | break; | ||
263 | } | ||
264 | itr += etd->bLength; | ||
265 | bytes += snprintf(buf + bytes, sizeof(buf) - bytes, | ||
266 | "%s (0x%02x/%02x) ", | ||
267 | wusb_et_name(etd->bEncryptionType), | ||
268 | etd->bEncryptionValue, etd->bAuthKeyIndex); | ||
269 | if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1) | ||
270 | ccm1_etd = etd; | ||
271 | } | ||
272 | /* This code only supports CCM1 as of now. */ | ||
273 | /* FIXME: user has to choose which sec mode to use? | ||
274 | * In theory we want CCM */ | ||
275 | if (ccm1_etd == NULL) { | ||
276 | dev_err(dev, "WUSB device doesn't support CCM1 encryption, " | ||
277 | "can't use!\n"); | ||
278 | result = -EINVAL; | ||
279 | goto error_no_ccm1; | ||
280 | } | ||
281 | wusb_dev->ccm1_etd = *ccm1_etd; | ||
282 | dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", | ||
283 | buf, wusb_et_name(ccm1_etd->bEncryptionType), | ||
284 | ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); | ||
285 | result = 0; | ||
286 | kfree(secd_buf); | ||
287 | out: | ||
288 | d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n", | ||
289 | usb_dev, wusb_dev, result); | ||
290 | return result; | ||
291 | |||
292 | |||
293 | error_no_ccm1: | ||
294 | error_secd_all: | ||
295 | kfree(secd_buf); | ||
296 | error_secd_alloc: | ||
297 | error_secd: | ||
298 | goto out; | ||
299 | } | ||
300 | |||
/* Undo wusb_dev_sec_add(); currently that caches no resources. */
void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
{
	/* Nothing so far */
}
305 | |||
/*
 * Dump every field of a handshake message at the given debug level.
 * Note tTKID is printed most-significant byte first (indices 2,1,0);
 * all other arrays are printed in index order.
 */
static void hs_printk(unsigned level, struct device *dev,
		      struct usb_handshake *hs)
{
	d_printf(level, dev,
		 "  bMessageNumber: %u\n"
		 "  bStatus:        %u\n"
		 "  tTKID:          %02x %02x %02x\n"
		 "  CDID:           %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 "                  %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 "  nonce:          %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 "                  %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 "  MIC:            %02x %02x %02x %02x %02x %02x %02x %02x\n",
		 hs->bMessageNumber, hs->bStatus,
		 hs->tTKID[2], hs->tTKID[1], hs->tTKID[0],
		 hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3],
		 hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7],
		 hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11],
		 hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15],
		 hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3],
		 hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7],
		 hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11],
		 hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15],
		 hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3],
		 hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]);
}
331 | |||
332 | /** | ||
333 | * Update the address of an unauthenticated WUSB device | ||
334 | * | ||
335 | * Once we have successfully authenticated, we take it to addr0 state | ||
336 | * and then to a normal address. | ||
337 | * | ||
338 | * Before the device's address (as known by it) was usb_dev->devnum | | ||
339 | * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum. | ||
340 | */ | ||
341 | static int wusb_dev_update_address(struct wusbhc *wusbhc, | ||
342 | struct wusb_dev *wusb_dev) | ||
343 | { | ||
344 | int result = -ENOMEM; | ||
345 | struct usb_device *usb_dev = wusb_dev->usb_dev; | ||
346 | struct device *dev = &usb_dev->dev; | ||
347 | u8 new_address = wusb_dev->addr & 0x7F; | ||
348 | |||
349 | /* Set address 0 */ | ||
350 | result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), | ||
351 | USB_REQ_SET_ADDRESS, 0, | ||
352 | 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */); | ||
353 | if (result < 0) { | ||
354 | dev_err(dev, "auth failed: can't set address 0: %d\n", | ||
355 | result); | ||
356 | goto error_addr0; | ||
357 | } | ||
358 | result = wusb_set_dev_addr(wusbhc, wusb_dev, 0); | ||
359 | if (result < 0) | ||
360 | goto error_addr0; | ||
361 | usb_ep0_reinit(usb_dev); | ||
362 | |||
363 | /* Set new (authenticated) address. */ | ||
364 | result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), | ||
365 | USB_REQ_SET_ADDRESS, 0, | ||
366 | new_address, 0, NULL, 0, | ||
367 | 1000 /* FIXME: arbitrary */); | ||
368 | if (result < 0) { | ||
369 | dev_err(dev, "auth failed: can't set address %u: %d\n", | ||
370 | new_address, result); | ||
371 | goto error_addr; | ||
372 | } | ||
373 | result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address); | ||
374 | if (result < 0) | ||
375 | goto error_addr; | ||
376 | usb_ep0_reinit(usb_dev); | ||
377 | usb_dev->authenticated = 1; | ||
378 | error_addr: | ||
379 | error_addr0: | ||
380 | return result; | ||
381 | } | ||
382 | |||
/*
 * Perform the WUSB 4-way handshake with a device.
 *
 * @wusbhc:   the WUSB host controller
 * @wusb_dev: the device being authenticated
 * @ck:       the connection key shared with the device out-of-band
 *
 * Sends HS1 (host nonce), reads and verifies HS2 (device nonce +
 * MIC), derives the KCK and PTK from the CK and both nonces, sends
 * HS3, installs the PTK on the host, pushes the current GTK to the
 * device and finally moves it to its authenticated address.
 *
 * Returns 0 on success, negative errno on failure; on failure the
 * device's encryption is turned back off.  All local key material is
 * zeroed before returning on every path.
 */
/* FIXME: split and cleanup */
int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
			    struct wusb_ckhdid *ck)
{
	int result = -ENOMEM;
	struct usb_device *usb_dev = wusb_dev->usb_dev;
	struct device *dev = &usb_dev->dev;
	u32 tkid;
	__le32 tkid_le;
	struct usb_handshake *hs;	/* hs[0]=HS1, hs[1]=HS2, hs[2]=HS3 */
	struct aes_ccm_nonce ccm_n;
	u8 mic[8];
	struct wusb_keydvt_in keydvt_in;
	struct wusb_keydvt_out keydvt_out;

	hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
	if (hs == NULL) {
		dev_err(dev, "can't allocate handshake data\n");
		goto error_kzalloc;
	}

	/* We need to turn on encryption before beginning the 4-way
	 * handshake (WUSB1.0[.3.2.2]) */
	result = wusb_dev_set_encryption(usb_dev, 1);
	if (result < 0)
		goto error_dev_set_encryption;

	tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
	tkid_le = cpu_to_le32(tkid);

	/* Handshake 1, host -> device: our nonce, MIC zeroed. */
	hs[0].bMessageNumber = 1;
	hs[0].bStatus = 0;
	memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID));
	hs[0].bReserved = 0;
	memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
	get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
	memset(hs[0].MIC, 0, sizeof(hs[0].MIC));	/* Per WUSB1.0[T7-22] */

	d_printf(1, dev, "I: sending hs1:\n");
	hs_printk(2, dev, &hs[0]);

	result = usb_control_msg(
		usb_dev, usb_sndctrlpipe(usb_dev, 0),
		USB_REQ_SET_HANDSHAKE,
		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake1: request failed: %d\n", result);
		goto error_hs1;
	}

	/* Handshake 2, from the device -- need to verify fields */
	result = usb_control_msg(
		usb_dev, usb_rcvctrlpipe(usb_dev, 0),
		USB_REQ_GET_HANDSHAKE,
		USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake2: request failed: %d\n", result);
		goto error_hs2;
	}
	d_printf(1, dev, "got HS2:\n");
	hs_printk(2, dev, &hs[1]);

	/* HS2 must carry message number 2, status 0, and echo our
	 * TKID and CDID exactly. */
	result = -EINVAL;
	if (hs[1].bMessageNumber != 2) {
		dev_err(dev, "Handshake2 failed: bad message number %u\n",
			hs[1].bMessageNumber);
		goto error_hs2;
	}
	if (hs[1].bStatus != 0) {
		dev_err(dev, "Handshake2 failed: bad status %u\n",
			hs[1].bStatus);
		goto error_hs2;
	}
	if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
		dev_err(dev, "Handshake2 failed: TKID mismatch "
			"(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
			hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
			hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
		goto error_hs2;
	}
	if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
		dev_err(dev, "Handshake2 failed: CDID mismatch\n");
		goto error_hs2;
	}

	/* Setup the CCM nonce */
	memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn));	/* Per WUSB1.0[6.5.2] */
	memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
	ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
	ccm_n.dest_addr.data[0] = wusb_dev->addr;
	ccm_n.dest_addr.data[1] = 0;

	/* Derive the KCK and PTK from CK, the CCM, H and D nonces */
	memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
	memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
	result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
	if (result < 0) {
		dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
			result);
		goto error_hs2;
	}
	d_printf(2, dev, "KCK:\n");
	d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck));
	d_printf(2, dev, "PTK:\n");
	d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));

	/* Compute MIC over HS2 with our derived KCK and verify it
	 * matches the MIC the device sent. */
	result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
	if (result < 0) {
		dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
			result);
		goto error_hs2;
	}

	d_printf(2, dev, "MIC:\n");
	d_dump(2, dev, mic, sizeof(mic));
	if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
		dev_err(dev, "Handshake2 failed: MIC mismatch\n");
		goto error_hs2;
	}

	/* Send Handshake3: echo of HS1 fields, MIC keyed with the KCK. */
	hs[2].bMessageNumber = 3;
	hs[2].bStatus = 0;
	memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID));
	hs[2].bReserved = 0;
	memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
	memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
	result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
	if (result < 0) {
		dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
			result);
		goto error_hs2;
	}

	d_printf(1, dev, "I: sending hs3:\n");
	hs_printk(2, dev, &hs[2]);

	result = usb_control_msg(
		usb_dev, usb_sndctrlpipe(usb_dev, 0),
		USB_REQ_SET_HANDSHAKE,
		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake3: request failed: %d\n", result);
		goto error_hs3;
	}

	d_printf(1, dev, "I: turning on encryption on host for device\n");
	d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));
	result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
				 keydvt_out.ptk, sizeof(keydvt_out.ptk));
	if (result < 0)
		goto error_wusbhc_set_ptk;

	d_printf(1, dev, "I: setting a GTK\n");
	result = wusb_dev_set_gtk(wusbhc, wusb_dev);
	if (result < 0) {
		dev_err(dev, "Set GTK for device: request failed: %d\n",
			result);
		goto error_wusbhc_set_gtk;
	}

	/* Update the device's address from unauth to auth */
	if (usb_dev->authenticated == 0) {
		d_printf(1, dev, "I: updating addres to auth from non-auth\n");
		result = wusb_dev_update_address(wusbhc, wusb_dev);
		if (result < 0)
			goto error_dev_update_address;
	}
	result = 0;
	d_printf(1, dev, "I: 4way handshke done, device authenticated\n");

	/* Shared exit: wipe all key material regardless of outcome. */
error_dev_update_address:
error_wusbhc_set_gtk:
error_wusbhc_set_ptk:
error_hs3:
error_hs2:
error_hs1:
	memset(hs, 0, 3*sizeof(hs[0]));
	memset(&keydvt_out, 0, sizeof(keydvt_out));
	memset(&keydvt_in, 0, sizeof(keydvt_in));
	memset(&ccm_n, 0, sizeof(ccm_n));
	memset(mic, 0, sizeof(mic));
	if (result < 0) {
		/* error path: leave the device unencrypted again */
		wusb_dev_set_encryption(usb_dev, 0);
	}
error_dev_set_encryption:
	kfree(hs);
error_kzalloc:
	return result;
}
582 | |||
583 | /* | ||
584 | * Once all connected and authenticated devices have received the new | ||
585 | * GTK, switch the host to using it. | ||
586 | */ | ||
587 | static void wusbhc_gtk_rekey_done_work(struct work_struct *work) | ||
588 | { | ||
589 | struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work); | ||
590 | size_t key_size = sizeof(wusbhc->gtk.data); | ||
591 | |||
592 | mutex_lock(&wusbhc->mutex); | ||
593 | |||
594 | if (--wusbhc->pending_set_gtks == 0) | ||
595 | wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); | ||
596 | |||
597 | mutex_unlock(&wusbhc->mutex); | ||
598 | } | ||
599 | |||
/* URB completion for a per-device set-GTK request: defer the
 * accounting to the wusbd workqueue (completion callbacks may run in
 * atomic context).  NOTE(review): urb->status is not checked here —
 * a failed transfer still counts as "done"; confirm intended. */
static void wusbhc_set_gtk_callback(struct urb *urb)
{
	struct wusbhc *wusbhc = urb->context;

	queue_work(wusbd, &wusbhc->gtk_rekey_done_work);
}
606 | |||
607 | /** | ||
608 | * wusbhc_gtk_rekey - generate and distribute a new GTK | ||
609 | * @wusbhc: the WUSB host controller | ||
610 | * | ||
611 | * Generate a new GTK and distribute it to all connected and | ||
612 | * authenticated devices. When all devices have the new GTK, the host | ||
613 | * starts using it. | ||
614 | * | ||
615 | * This must be called after every device disconnect (see [WUSB] | ||
616 | * section 6.2.11.2). | ||
617 | */ | ||
618 | void wusbhc_gtk_rekey(struct wusbhc *wusbhc) | ||
619 | { | ||
620 | static const size_t key_size = sizeof(wusbhc->gtk.data); | ||
621 | int p; | ||
622 | |||
623 | wusbhc_generate_gtk(wusbhc); | ||
624 | |||
625 | for (p = 0; p < wusbhc->ports_max; p++) { | ||
626 | struct wusb_dev *wusb_dev; | ||
627 | |||
628 | wusb_dev = wusbhc->port[p].wusb_dev; | ||
629 | if (!wusb_dev || !wusb_dev->usb_dev | !wusb_dev->usb_dev->authenticated) | ||
630 | continue; | ||
631 | |||
632 | usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev, | ||
633 | usb_sndctrlpipe(wusb_dev->usb_dev, 0), | ||
634 | (void *)wusb_dev->set_gtk_req, | ||
635 | &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, | ||
636 | wusbhc_set_gtk_callback, wusbhc); | ||
637 | if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0) | ||
638 | wusbhc->pending_set_gtks++; | ||
639 | } | ||
640 | if (wusbhc->pending_set_gtks == 0) | ||
641 | wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); | ||
642 | } | ||
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c new file mode 100644 index 000000000000..9d04722415bb --- /dev/null +++ b/drivers/usb/wusbcore/wa-hc.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Wire Adapter Host Controller Driver | ||
3 | * Common items to HWA and DWA based HCDs | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | */ | ||
25 | #include "wusbhc.h" | ||
26 | #include "wa-hc.h" | ||
27 | |||
28 | /** | ||
29 | * Assumes | ||
30 | * | ||
31 | * wa->usb_dev and wa->usb_iface initialized and refcounted, | ||
32 | * wa->wa_descr initialized. | ||
33 | */ | ||
34 | int wa_create(struct wahc *wa, struct usb_interface *iface) | ||
35 | { | ||
36 | int result; | ||
37 | struct device *dev = &iface->dev; | ||
38 | |||
39 | result = wa_rpipes_create(wa); | ||
40 | if (result < 0) | ||
41 | goto error_rpipes_create; | ||
42 | /* Fill up Data Transfer EP pointers */ | ||
43 | wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc; | ||
44 | wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc; | ||
45 | wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize); | ||
46 | wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL); | ||
47 | if (wa->xfer_result == NULL) | ||
48 | goto error_xfer_result_alloc; | ||
49 | result = wa_nep_create(wa, iface); | ||
50 | if (result < 0) { | ||
51 | dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n", | ||
52 | result); | ||
53 | goto error_nep_create; | ||
54 | } | ||
55 | return 0; | ||
56 | |||
57 | error_nep_create: | ||
58 | kfree(wa->xfer_result); | ||
59 | error_xfer_result_alloc: | ||
60 | wa_rpipes_destroy(wa); | ||
61 | error_rpipes_create: | ||
62 | return result; | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(wa_create); | ||
65 | |||
66 | |||
/* Tear down a WA instance: kill/drop the data-transfer URBs (only if
 * the DTI machinery was started — dti_urb is used as the flag; assumes
 * buf_in_urb is allocated whenever dti_urb is — TODO confirm), then
 * free the transfer-result buffer and the NEP/rpipe state. */
void __wa_destroy(struct wahc *wa)
{
	if (wa->dti_urb) {
		usb_kill_urb(wa->dti_urb);
		usb_put_urb(wa->dti_urb);
		usb_kill_urb(wa->buf_in_urb);
		usb_put_urb(wa->buf_in_urb);
	}
	kfree(wa->xfer_result);
	wa_nep_destroy(wa);
	wa_rpipes_destroy(wa);
}
EXPORT_SYMBOL_GPL(__wa_destroy);
80 | |||
/**
 * wa_reset_all - reset the WA device
 * @wa: the WA to be reset
 *
 * For HWAs the radio controller and all other PALs are also reset.
 */
void wa_reset_all(struct wahc *wa)
{
	/* FIXME: assuming HWA; a DWA would need its own reset path. */
	wusbhc_reset_all(wa->wusb);
}
92 | |||
93 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
94 | MODULE_DESCRIPTION("Wireless USB Wire Adapter core"); | ||
95 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h new file mode 100644 index 000000000000..586d350cdb4d --- /dev/null +++ b/drivers/usb/wusbcore/wa-hc.h | |||
@@ -0,0 +1,417 @@ | |||
1 | /* | ||
2 | * HWA Host Controller Driver | ||
3 | * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8]) | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * This driver implements a USB Host Controller (struct usb_hcd) for a | ||
24 | * Wireless USB Host Controller based on the Wireless USB 1.0 | ||
25 | * Host-Wire-Adapter specification (in layman terms, a USB-dongle that | ||
26 | * implements a Wireless USB host). | ||
27 | * | ||
28 | * Check out the Design-overview.txt file in the source documentation | ||
29 | * for other details on the implementation. | ||
30 | * | ||
31 | * Main blocks: | ||
32 | * | ||
33 | * driver glue with the driver API, workqueue daemon | ||
34 | * | ||
35 | * lc RC instance life cycle management (create, destroy...) | ||
36 | * | ||
37 | * hcd glue with the USB API Host Controller Interface API. | ||
38 | * | ||
 * nep	 Notification EndPoint management: collect notifications
40 | * and queue them with the workqueue daemon. | ||
41 | * | ||
42 | * Handle notifications as coming from the NEP. Sends them | ||
43 | * off others to their respective modules (eg: connect, | ||
44 | * disconnect and reset go to devconnect). | ||
45 | * | ||
46 | * rpipe Remote Pipe management; rpipe is what we use to write | ||
47 | * to an endpoint on a WUSB device that is connected to a | ||
48 | * HWA RC. | ||
49 | * | ||
 * xfer	 Transfer management -- this is all the code that gets a
 *		 buffer and pushes it to a device (or vice versa).
52 | * | ||
53 | * Some day a lot of this code will be shared between this driver and | ||
54 | * the drivers for DWA (xfer, rpipe). | ||
55 | * | ||
56 | * All starts at driver.c:hwahc_probe(), when one of this guys is | ||
57 | * connected. hwahc_disconnect() stops it. | ||
58 | * | ||
59 | * During operation, the main driver is devices connecting or | ||
60 | * disconnecting. They cause the HWA RC to send notifications into | ||
61 | * nep.c:hwahc_nep_cb() that will dispatch them to | ||
62 | * notif.c:wa_notif_dispatch(). From there they will fan to cause | ||
63 | * device connects, disconnects, etc. | ||
64 | * | ||
65 | * Note much of the activity is difficult to follow. For example a | ||
66 | * device connect goes to devconnect, which will cause the "fake" root | ||
67 | * hub port to show a connect and stop there. Then khubd will notice | ||
68 | * and call into the rh.c:hwahc_rc_port_reset() code to authenticate | ||
69 | * the device (and this might require user intervention) and enable | ||
70 | * the port. | ||
71 | * | ||
72 | * We also have a timer workqueue going from devconnect.c that | ||
73 | * schedules in hwahc_devconnect_create(). | ||
74 | * | ||
75 | * The rest of the traffic is in the usual entry points of a USB HCD, | ||
76 | * which are hooked up in driver.c:hwahc_rc_driver, and defined in | ||
77 | * hcd.c. | ||
78 | */ | ||
79 | |||
80 | #ifndef __HWAHC_INTERNAL_H__ | ||
81 | #define __HWAHC_INTERNAL_H__ | ||
82 | |||
83 | #include <linux/completion.h> | ||
84 | #include <linux/usb.h> | ||
85 | #include <linux/mutex.h> | ||
86 | #include <linux/spinlock.h> | ||
87 | #include <linux/uwb.h> | ||
88 | #include <linux/usb/wusb.h> | ||
89 | #include <linux/usb/wusb-wa.h> | ||
90 | |||
91 | struct wusbhc; | ||
92 | struct wahc; | ||
93 | extern void wa_urb_enqueue_run(struct work_struct *ws); | ||
94 | |||
/**
 * RPipe instance
 *
 * @descr's fields are kept in little endian, as the descriptor is
 * sent back and forth to the device as-is.
 *
 * @wa is referenced when set.
 *
 * @segs_available is the number of request segments that can still
 * be submitted to the controller without overloading it. It is
 * initialized from descr->wRequests when aiming.
 *
 * An rpipe supports a max of descr->wRequests segments in flight at
 * the same time; before submitting, seg_lock has to be taken. If
 * segs_available > 0, then we can submit; if not, segments have to
 * be queued (on @seg_list) until slots free up.
 */
struct wa_rpipe {
	struct kref refcnt;		/* last put runs rpipe_destroy() */
	struct usb_rpipe_descriptor descr;	/* kept in LE */
	struct usb_host_endpoint *ep;	/* endpoint this rpipe serves */
	struct wahc *wa;		/* owning WA; referenced when set */
	spinlock_t seg_lock;		/* protects @seg_list */
	struct list_head seg_list;	/* segments waiting for a free slot */
	atomic_t segs_available;	/* free request slots on the device */
	u8 buffer[1];	/* For reads/writes on USB; trailing-array idiom,
			 * real size depends on the allocation */
};
122 | |||
123 | |||
/**
 * Instance of a HWA Host Controller
 *
 * Except where a more specific lock/mutex applies or the field is
 * atomic, all fields are protected by @mutex.
 *
 * @wa_descr Can be accessed without locking because it is in
 *           the same area where the device descriptors were
 *           read, so it is guaranteed to exist unmodified while
 *           the device exists.
 *
 *           Endianness has been converted to CPU's.
 *
 * @nep_* can be accessed without locking as its processing is
 *        serialized; we submit a NEP URB and it comes to
 *        hwahc_nep_cb(), which won't issue another URB until it is
 *        done processing it.
 *
 * @xfer_list:
 *
 *   List of active transfers to verify existence from a xfer id
 *   gotten from the xfer result message. Can't use urb->list because
 *   it goes by endpoint, and we don't know the endpoint at the time
 *   when we get the xfer result message. We can't really rely on the
 *   pointer (will have to change for 64 bits) as the xfer id is 32 bits.
 *
 * @xfer_delayed_list: List of transfers that need to be started
 *                     (with a workqueue, because they were
 *                     submitted from an atomic context).
 *
 * FIXME: this needs to be layered up: a wusbhc layer (for sharing
 *        commonalities with WHCI), a wa layer (for sharing
 *        commonalities with DWA-RC).
 */
struct wahc {
	struct usb_device *usb_dev;
	struct usb_interface *usb_iface;

	/* HC to deliver notifications */
	union {
		struct wusbhc *wusb;	/* HWA flavour */
		struct dwahc *dwa;	/* DWA flavour */
	};

	const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
	const struct usb_wa_descriptor *wa_descr;

	struct urb *nep_urb;		/* Notification EndPoint [lockless] */
	struct edc nep_edc;		/* NEP error density counter */
	void *nep_buffer;		/* NEP read buffer */
	size_t nep_buffer_size;

	atomic_t notifs_queued;		/* notifications pending dispatch */

	u16 rpipes;
	unsigned long *rpipe_bm;	/* rpipe usage bitmap */
	spinlock_t rpipe_bm_lock;	/* protect rpipe_bm */
	struct mutex rpipe_mutex;	/* assigning resources to endpoints */

	struct urb *dti_urb;		/* URB for reading xfer results */
	struct urb *buf_in_urb;		/* URB for reading data in */
	struct edc dti_edc;		/* DTI error density counter */
	struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */
	size_t xfer_result_size;

	s32 status;			/* For reading status; also serves
					 * as the (DMA-able) USB buffer */

	struct list_head xfer_list;
	struct list_head xfer_delayed_list;
	spinlock_t xfer_list_lock;	/* protects both xfer lists */
	struct work_struct xfer_work;	/* runs wa_urb_enqueue_run() */
	atomic_t xfer_id_count;
};
197 | |||
198 | |||
199 | extern int wa_create(struct wahc *wa, struct usb_interface *iface); | ||
200 | extern void __wa_destroy(struct wahc *wa); | ||
201 | void wa_reset_all(struct wahc *wa); | ||
202 | |||
203 | |||
204 | /* Miscellaneous constants */ | ||
205 | enum { | ||
206 | /** Max number of EPROTO errors we tolerate on the NEP in a | ||
207 | * period of time */ | ||
208 | HWAHC_EPROTO_MAX = 16, | ||
209 | /** Period of time for EPROTO errors (in jiffies) */ | ||
210 | HWAHC_EPROTO_PERIOD = 4 * HZ, | ||
211 | }; | ||
212 | |||
213 | |||
214 | /* Notification endpoint handling */ | ||
215 | extern int wa_nep_create(struct wahc *, struct usb_interface *); | ||
216 | extern void wa_nep_destroy(struct wahc *); | ||
217 | |||
218 | static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask) | ||
219 | { | ||
220 | struct urb *urb = wa->nep_urb; | ||
221 | urb->transfer_buffer = wa->nep_buffer; | ||
222 | urb->transfer_buffer_length = wa->nep_buffer_size; | ||
223 | return usb_submit_urb(urb, gfp_mask); | ||
224 | } | ||
225 | |||
/* Cancel an in-flight notification URB; usb_kill_urb() sleeps until
 * the URB's completion callback has finished. */
static inline void wa_nep_disarm(struct wahc *wa)
{
	usb_kill_urb(wa->nep_urb);
}
230 | |||
231 | |||
232 | /* RPipes */ | ||
/* Initialize the rpipe bookkeeping locks of @wa; the rpipe bitmap
 * itself is set up later by wa_rpipes_create(). */
static inline void wa_rpipe_init(struct wahc *wa)
{
	spin_lock_init(&wa->rpipe_bm_lock);
	mutex_init(&wa->rpipe_mutex);
}
238 | |||
/* One-time initialization of @wa's locks, lists, work items and
 * counters; no resources are allocated here. */
static inline void wa_init(struct wahc *wa)
{
	edc_init(&wa->nep_edc);
	atomic_set(&wa->notifs_queued, 0);
	wa_rpipe_init(wa);
	edc_init(&wa->dti_edc);
	INIT_LIST_HEAD(&wa->xfer_list);
	INIT_LIST_HEAD(&wa->xfer_delayed_list);
	spin_lock_init(&wa->xfer_list_lock);
	INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
	/* starts at 1 — presumably so 0 is never a valid xfer id;
	 * confirm against the xfer-id users in xfer code */
	atomic_set(&wa->xfer_id_count, 1);
}
251 | |||
252 | /** | ||
253 | * Destroy a pipe (when refcount drops to zero) | ||
254 | * | ||
255 | * Assumes it has been moved to the "QUIESCING" state. | ||
256 | */ | ||
257 | struct wa_xfer; | ||
258 | extern void rpipe_destroy(struct kref *_rpipe); | ||
/* Take an additional reference on @rpipe (caller must already hold
 * one). Dropped with rpipe_put(). */
static inline
void __rpipe_get(struct wa_rpipe *rpipe)
{
	kref_get(&rpipe->refcnt);
}
264 | extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *, | ||
265 | struct urb *, gfp_t); | ||
266 | static inline void rpipe_put(struct wa_rpipe *rpipe) | ||
267 | { | ||
268 | kref_put(&rpipe->refcnt, rpipe_destroy); | ||
269 | |||
270 | } | ||
271 | extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *); | ||
272 | extern int wa_rpipes_create(struct wahc *); | ||
273 | extern void wa_rpipes_destroy(struct wahc *); | ||
/* Consume one of @rpipe's free request slots (caller is about to
 * submit a segment to the device). */
static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
{
	atomic_dec(&rpipe->segs_available);
}
278 | |||
/**
 * Release one of @rpipe's request slots.
 *
 * Returns non-zero when the rpipe has slots available again AND
 * there are segments queued on @seg_list waiting for one — i.e. the
 * caller should kick the delayed-submission machinery.
 */
static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
{
	return atomic_inc_return(&rpipe->segs_available) > 0
		&& !list_empty(&rpipe->seg_list);
}
287 | |||
288 | |||
289 | /* Transferring data */ | ||
290 | extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *, | ||
291 | struct urb *, gfp_t); | ||
292 | extern int wa_urb_dequeue(struct wahc *, struct urb *); | ||
293 | extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *); | ||
294 | |||
295 | |||
296 | /* Misc | ||
297 | * | ||
298 | * FIXME: Refcounting for the actual @hwahc object is not correct; I | ||
299 | * mean, this should be refcounting on the HCD underneath, but | ||
300 | * it is not. In any case, the semantics for HCD refcounting | ||
301 | * are *weird*...on refcount reaching zero it just frees | ||
302 | * it...no RC specific function is called...unless I miss | ||
303 | * something. | ||
304 | * | ||
 * FIXME: has to go away in favour of a 'struct hcd'-based solution
306 | */ | ||
/* Reference @wa; refcounting piggybacks on the USB interface's
 * refcount (see the FIXME above about proper HCD refcounting). */
static inline struct wahc *wa_get(struct wahc *wa)
{
	usb_get_intf(wa->usb_iface);
	return wa;
}
312 | |||
/* Drop a reference taken with wa_get(). */
static inline void wa_put(struct wahc *wa)
{
	usb_put_intf(wa->usb_iface);
}
317 | |||
318 | |||
/* Set (@op != 0) or clear (@op == 0) class feature @feature on the
 * WA's interface; returns usb_control_msg()'s result (< 0 errno on
 * error). */
static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
{
	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
			op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			feature,
			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
			NULL, 0, 1000 /* FIXME: arbitrary */);
}
328 | |||
329 | |||
/* Set WA feature @feature (e.g. WA_ENABLE); < 0 errno on error. */
static inline int __wa_set_feature(struct wahc *wa, u16 feature)
{
	return __wa_feature(wa, 1, feature);
}
334 | |||
335 | |||
/* Clear WA feature @feature (e.g. WA_ENABLE); < 0 errno on error. */
static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
{
	return __wa_feature(wa, 0, feature);
}
340 | |||
341 | |||
/**
 * Return the status of a Wire Adapter
 *
 * @wa: Wire Adapter instance
 * @returns < 0 errno code on error, or status bitmap as described
 *          in WUSB1.0[8.3.1.6].
 *
 * NOTE: the transfer buffer must be heap-resident (some arches can't
 *       do USB DMA from the stack), which is why the status word
 *       lives in @wa->status instead of a local.
 */
static inline
s32 __wa_get_status(struct wahc *wa)
{
	s32 result;
	result = usb_control_msg(
		wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
		USB_REQ_GET_STATUS,
		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
		&wa->status, sizeof(wa->status),
		1000 /* FIXME: arbitrary */);
	if (result >= 0)	/* on success, hand back the status word */
		result = wa->status;
	return result;
}
366 | |||
367 | |||
368 | /** | ||
369 | * Waits until the Wire Adapter's status matches @mask/@value | ||
370 | * | ||
371 | * @wa: Wire Adapter instance. | ||
372 | * @returns < 0 errno code on error, otherwise status. | ||
373 | * | ||
374 | * Loop until the WAs status matches the mask and value (status & mask | ||
375 | * == value). Timeout if it doesn't happen. | ||
376 | * | ||
377 | * FIXME: is there an official specification on how long status | ||
378 | * changes can take? | ||
379 | */ | ||
380 | static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value) | ||
381 | { | ||
382 | s32 result; | ||
383 | unsigned loops = 10; | ||
384 | do { | ||
385 | msleep(50); | ||
386 | result = __wa_get_status(wa); | ||
387 | if ((result & mask) == value) | ||
388 | break; | ||
389 | if (loops-- == 0) { | ||
390 | result = -ETIMEDOUT; | ||
391 | break; | ||
392 | } | ||
393 | } while (result >= 0); | ||
394 | return result; | ||
395 | } | ||
396 | |||
397 | |||
398 | /** Command @hwahc to stop, @returns 0 if ok, < 0 errno code on error */ | ||
399 | static inline int __wa_stop(struct wahc *wa) | ||
400 | { | ||
401 | int result; | ||
402 | struct device *dev = &wa->usb_iface->dev; | ||
403 | |||
404 | result = __wa_clear_feature(wa, WA_ENABLE); | ||
405 | if (result < 0 && result != -ENODEV) { | ||
406 | dev_err(dev, "error commanding HC to stop: %d\n", result); | ||
407 | goto out; | ||
408 | } | ||
409 | result = __wa_wait_status(wa, WA_ENABLE, 0); | ||
410 | if (result < 0 && result != -ENODEV) | ||
411 | dev_err(dev, "error waiting for HC to stop: %d\n", result); | ||
412 | out: | ||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | |||
417 | #endif /* #ifndef __HWAHC_INTERNAL_H__ */ | ||
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c new file mode 100644 index 000000000000..3f542990c73f --- /dev/null +++ b/drivers/usb/wusbcore/wa-nep.c | |||
@@ -0,0 +1,310 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) | ||
3 | * Notification EndPoint support | ||
4 | * | ||
5 | * Copyright (C) 2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * This part takes care of getting the notification from the hw | ||
24 | * only and dispatching through wusbwad into | ||
25 | * wa_notif_dispatch. Handling is done there. | ||
26 | * | ||
27 | * WA notifications are limited in size; most of them are three or | ||
28 | * four bytes long, and the longest is the HWA Device Notification, | ||
29 | * which would not exceed 38 bytes (DNs are limited in payload to 32 | ||
30 | * bytes plus 3 bytes header (WUSB1.0[7.6p2]), plus 3 bytes HWA | ||
31 | * header (WUSB1.0[8.5.4.2]). | ||
32 | * | ||
33 | * It is not clear if more than one Device Notification can be packed | ||
34 | * in a HWA Notification, I assume no because of the wording in | ||
35 | * WUSB1.0[8.5.4.2]. In any case, the bigger any notification could | ||
36 | * get is 256 bytes (as the bLength field is a byte). | ||
37 | * | ||
38 | * So what we do is we have this buffer and read into it; when a | ||
39 | * notification arrives we schedule work to a specific, single thread | ||
40 | * workqueue (so notifications are serialized) and copy the | ||
41 | * notification data. After scheduling the work, we rearm the read from | ||
42 | * the notification endpoint. | ||
43 | * | ||
44 | * Entry points here are: | ||
45 | * | ||
46 | * wa_nep_[create|destroy]() To initialize/release this subsystem | ||
47 | * | ||
48 | * wa_nep_cb() Callback for the notification | ||
49 | * endpoint; when data is ready, this | ||
50 | * does the dispatching. | ||
51 | */ | ||
52 | #include <linux/workqueue.h> | ||
53 | #include <linux/ctype.h> | ||
54 | #include <linux/uwb/debug.h> | ||
55 | #include "wa-hc.h" | ||
56 | #include "wusbhc.h" | ||
57 | |||
/* Structure for queueing notifications to the workqueue */
struct wa_notif_work {
	struct work_struct work;	/* runs wa_notif_dispatch() */
	struct wahc *wa;		/* referenced via wa_get() */
	size_t size;			/* bytes valid in @data */
	u8 data[];			/* copy of the NEP read buffer */
};
65 | |||
66 | /* | ||
67 | * Process incoming notifications from the WA's Notification EndPoint | ||
68 | * [the wuswad daemon, basically] | ||
69 | * | ||
70 | * @_nw: Pointer to a descriptor which has the pointer to the | ||
71 | * @wa, the size of the buffer and the work queue | ||
72 | * structure (so we can free all when done). | ||
73 | * @returns 0 if ok, < 0 errno code on error. | ||
74 | * | ||
75 | * All notifications follow the same format; they need to start with a | ||
76 | * 'struct wa_notif_hdr' header, so it is easy to parse through | ||
77 | * them. We just break the buffer in individual notifications (the | ||
78 | * standard doesn't say if it can be done or is forbidden, so we are | ||
79 | * cautious) and dispatch each. | ||
80 | * | ||
81 | * So the handling layers are is: | ||
82 | * | ||
83 | * WA specific notification (from NEP) | ||
84 | * Device Notification Received -> wa_handle_notif_dn() | ||
85 | * WUSB Device notification generic handling | ||
86 | * BPST Adjustment -> wa_handle_notif_bpst_adj() | ||
87 | * ... -> ... | ||
88 | * | ||
89 | * @wa has to be referenced | ||
90 | */ | ||
91 | static void wa_notif_dispatch(struct work_struct *ws) | ||
92 | { | ||
93 | void *itr; | ||
94 | u8 missing = 0; | ||
95 | struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work); | ||
96 | struct wahc *wa = nw->wa; | ||
97 | struct wa_notif_hdr *notif_hdr; | ||
98 | size_t size; | ||
99 | |||
100 | struct device *dev = &wa->usb_iface->dev; | ||
101 | |||
102 | #if 0 | ||
103 | /* FIXME: need to check for this??? */ | ||
104 | if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */ | ||
105 | goto out; /* screw it */ | ||
106 | #endif | ||
107 | atomic_dec(&wa->notifs_queued); /* Throttling ctl */ | ||
108 | dev = &wa->usb_iface->dev; | ||
109 | size = nw->size; | ||
110 | itr = nw->data; | ||
111 | |||
112 | while (size) { | ||
113 | if (size < sizeof(*notif_hdr)) { | ||
114 | missing = sizeof(*notif_hdr) - size; | ||
115 | goto exhausted_buffer; | ||
116 | } | ||
117 | notif_hdr = itr; | ||
118 | if (size < notif_hdr->bLength) | ||
119 | goto exhausted_buffer; | ||
120 | itr += notif_hdr->bLength; | ||
121 | size -= notif_hdr->bLength; | ||
122 | /* Dispatch the notification [don't use itr or size!] */ | ||
123 | switch (notif_hdr->bNotifyType) { | ||
124 | case HWA_NOTIF_DN: { | ||
125 | struct hwa_notif_dn *hwa_dn; | ||
126 | hwa_dn = container_of(notif_hdr, struct hwa_notif_dn, | ||
127 | hdr); | ||
128 | wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr, | ||
129 | hwa_dn->dndata, | ||
130 | notif_hdr->bLength - sizeof(*hwa_dn)); | ||
131 | break; | ||
132 | } | ||
133 | case WA_NOTIF_TRANSFER: | ||
134 | wa_handle_notif_xfer(wa, notif_hdr); | ||
135 | break; | ||
136 | case DWA_NOTIF_RWAKE: | ||
137 | case DWA_NOTIF_PORTSTATUS: | ||
138 | case HWA_NOTIF_BPST_ADJ: | ||
139 | /* FIXME: unimplemented WA NOTIFs */ | ||
140 | /* fallthru */ | ||
141 | default: | ||
142 | if (printk_ratelimit()) { | ||
143 | dev_err(dev, "HWA: unknown notification 0x%x, " | ||
144 | "%zu bytes; discarding\n", | ||
145 | notif_hdr->bNotifyType, | ||
146 | (size_t)notif_hdr->bLength); | ||
147 | dump_bytes(dev, notif_hdr, 16); | ||
148 | } | ||
149 | break; | ||
150 | } | ||
151 | } | ||
152 | out: | ||
153 | wa_put(wa); | ||
154 | kfree(nw); | ||
155 | return; | ||
156 | |||
157 | /* THIS SHOULD NOT HAPPEN | ||
158 | * | ||
159 | * Buffer exahusted with partial data remaining; just warn and | ||
160 | * discard the data, as this should not happen. | ||
161 | */ | ||
162 | exhausted_buffer: | ||
163 | if (!printk_ratelimit()) | ||
164 | goto out; | ||
165 | dev_warn(dev, "HWA: device sent short notification, " | ||
166 | "%d bytes missing; discarding %d bytes.\n", | ||
167 | missing, (int)size); | ||
168 | dump_bytes(dev, itr, size); | ||
169 | goto out; | ||
170 | } | ||
171 | |||
/*
 * Deliver incoming WA notifications to the wusbwa workqueue
 *
 * @wa:	Pointer the Wire Adapter Controller Data Streaming
 *	instance (part of an 'struct usb_hcd').
 * @size:	Size of the received buffer
 * @returns	0 if ok, < 0 errno code on error.
 *
 * The input buffer is @wa->nep_buffer, with @size bytes
 * (guaranteed to fit in the allocated space,
 * @wa->nep_buffer_size).
 *
 * NOTE: deliberately best-effort — when throttling (too many
 *       notifications already queued) or out of memory, the
 *       notification is dropped with a rate-limited error message
 *       and 0 is still returned.
 */
static int wa_nep_queue(struct wahc *wa, size_t size)
{
	int result = 0;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_work *nw;

	/* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
	BUG_ON(size > wa->nep_buffer_size);
	if (size == 0)
		goto out;
	if (atomic_read(&wa->notifs_queued) > 200) {	/* throttle limit */
		if (printk_ratelimit())
			dev_err(dev, "Too many notifications queued, "
				"throttling back\n");
		goto out;
	}
	nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
	if (nw == NULL) {
		if (printk_ratelimit())
			dev_err(dev, "No memory to queue notification\n");
		goto out;
	}
	INIT_WORK(&nw->work, wa_notif_dispatch);
	nw->wa = wa_get(wa);	/* ref dropped by wa_notif_dispatch() */
	nw->size = size;
	memcpy(nw->data, wa->nep_buffer, size);
	atomic_inc(&wa->notifs_queued);		/* Throttling ctl */
	queue_work(wusbd, &nw->work);
out:
	/* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
	return result;
}
216 | |||
/*
 * Callback for the notification event endpoint
 *
 * Checks that everything is fine and then passes the data to be
 * queued to the workqueue. Unless we are being torn down (or too
 * many errors accumulated), the URB is rearmed so the next
 * notification can be received.
 */
static void wa_nep_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;

	switch (result = urb->status) {
	case 0:
		result = wa_nep_queue(wa, urb->actual_length);
		if (result < 0)
			dev_err(dev, "NEP: unable to process notification(s): "
				"%d\n", result);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:
		dev_dbg(dev, "NEP: going down %d\n", urb->status);
		goto out;	/* don't rearm: we are being torn down */
	default:	/* On general errors, we retry unless it gets ugly */
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "NEP: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		dev_err(dev, "NEP: URB error %d\n", urb->status);
	}
	result = wa_nep_arm(wa, GFP_ATOMIC);	/* atomic: URB callback ctx */
	if (result < 0) {
		dev_err(dev, "NEP: cannot submit URB: %d\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}
259 | |||
260 | /* | ||
261 | * Initialize @wa's notification and event's endpoint stuff | ||
262 | * | ||
263 | * This includes the allocating the read buffer, the context ID | ||
264 | * allocation bitmap, the URB and submitting the URB. | ||
265 | */ | ||
266 | int wa_nep_create(struct wahc *wa, struct usb_interface *iface) | ||
267 | { | ||
268 | int result; | ||
269 | struct usb_endpoint_descriptor *epd; | ||
270 | struct usb_device *usb_dev = interface_to_usbdev(iface); | ||
271 | struct device *dev = &iface->dev; | ||
272 | |||
273 | edc_init(&wa->nep_edc); | ||
274 | epd = &iface->cur_altsetting->endpoint[0].desc; | ||
275 | wa->nep_buffer_size = 1024; | ||
276 | wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL); | ||
277 | if (wa->nep_buffer == NULL) { | ||
278 | dev_err(dev, "Unable to allocate notification's read buffer\n"); | ||
279 | goto error_nep_buffer; | ||
280 | } | ||
281 | wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
282 | if (wa->nep_urb == NULL) { | ||
283 | dev_err(dev, "Unable to allocate notification URB\n"); | ||
284 | goto error_urb_alloc; | ||
285 | } | ||
286 | usb_fill_int_urb(wa->nep_urb, usb_dev, | ||
287 | usb_rcvintpipe(usb_dev, epd->bEndpointAddress), | ||
288 | wa->nep_buffer, wa->nep_buffer_size, | ||
289 | wa_nep_cb, wa, epd->bInterval); | ||
290 | result = wa_nep_arm(wa, GFP_KERNEL); | ||
291 | if (result < 0) { | ||
292 | dev_err(dev, "Cannot submit notification URB: %d\n", result); | ||
293 | goto error_nep_arm; | ||
294 | } | ||
295 | return 0; | ||
296 | |||
297 | error_nep_arm: | ||
298 | usb_free_urb(wa->nep_urb); | ||
299 | error_urb_alloc: | ||
300 | kfree(wa->nep_buffer); | ||
301 | error_nep_buffer: | ||
302 | return -ENOMEM; | ||
303 | } | ||
304 | |||
/* Tear down the notification endpoint: cancel any in-flight URB
 * first, then release the URB and the read buffer (reverse of
 * wa_nep_create()). */
void wa_nep_destroy(struct wahc *wa)
{
	wa_nep_disarm(wa);
	usb_free_urb(wa->nep_urb);
	kfree(wa->nep_buffer);
}
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c new file mode 100644 index 000000000000..f18e4aae66e9 --- /dev/null +++ b/drivers/usb/wusbcore/wa-rpipe.c | |||
@@ -0,0 +1,562 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter | ||
3 | * rpipe management | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | * | ||
25 | * RPIPE | ||
26 | * | ||
27 | * Targetted at different downstream endpoints | ||
28 | * | ||
29 | * Descriptor: use to config the remote pipe. | ||
30 | * | ||
31 | * The number of blocks could be dynamic (wBlocks in descriptor is | ||
32 | * 0)--need to schedule them then. | ||
33 | * | ||
34 | * Each bit in wa->rpipe_bm represents if an rpipe is being used or | ||
35 | * not. Rpipes are represented with a 'struct wa_rpipe' that is | ||
36 | * attached to the hcpriv member of a 'struct usb_host_endpoint'. | ||
37 | * | ||
38 | * When you need to xfer data to an endpoint, you get an rpipe for it | ||
39 | * with wa_ep_rpipe_get(), which gives you a reference to the rpipe | ||
40 | * and keeps a single one (the first one) with the endpoint. When you | ||
41 | * are done transferring, you drop that reference. At the end the | ||
42 | * rpipe is always allocated and bound to the endpoint. There it might | ||
43 | * be recycled when not used. | ||
44 | * | ||
45 | * Addresses: | ||
46 | * | ||
47 | * We use a 1:1 mapping mechanism between port address (0 based | ||
48 | * index, actually) and the address. The USB stack knows about this. | ||
49 | * | ||
50 | * USB Stack port number 4 (1 based) | ||
51 | * WUSB code port index 3 (0 based) | ||
52 | * USB Addresss 5 (2 based -- 0 is for default, 1 for root hub) | ||
53 | * | ||
54 | * Now, because we don't use the concept as default address exactly | ||
55 | * like the (wired) USB code does, we need to kind of skip it. So we | ||
56 | * never take addresses from the urb->pipe, but from the | ||
57 | * urb->dev->devnum, to make sure that we always have the right | ||
58 | * destination address. | ||
59 | */ | ||
60 | #include <linux/init.h> | ||
61 | #include <asm/atomic.h> | ||
62 | #include <linux/bitmap.h> | ||
63 | #include "wusbhc.h" | ||
64 | #include "wa-hc.h" | ||
65 | |||
66 | #define D_LOCAL 0 | ||
67 | #include <linux/uwb/debug.h> | ||
68 | |||
69 | |||
70 | static int __rpipe_get_descr(struct wahc *wa, | ||
71 | struct usb_rpipe_descriptor *descr, u16 index) | ||
72 | { | ||
73 | ssize_t result; | ||
74 | struct device *dev = &wa->usb_iface->dev; | ||
75 | |||
76 | /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor() | ||
77 | * function because the arguments are different. | ||
78 | */ | ||
79 | d_printf(1, dev, "rpipe %u: get descr\n", index); | ||
80 | result = usb_control_msg( | ||
81 | wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), | ||
82 | USB_REQ_GET_DESCRIPTOR, | ||
83 | USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE, | ||
84 | USB_DT_RPIPE<<8, index, descr, sizeof(*descr), | ||
85 | 1000 /* FIXME: arbitrary */); | ||
86 | if (result < 0) { | ||
87 | dev_err(dev, "rpipe %u: get descriptor failed: %d\n", | ||
88 | index, (int)result); | ||
89 | goto error; | ||
90 | } | ||
91 | if (result < sizeof(*descr)) { | ||
92 | dev_err(dev, "rpipe %u: got short descriptor " | ||
93 | "(%zd vs %zd bytes needed)\n", | ||
94 | index, result, sizeof(*descr)); | ||
95 | result = -EINVAL; | ||
96 | goto error; | ||
97 | } | ||
98 | result = 0; | ||
99 | |||
100 | error: | ||
101 | return result; | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * | ||
106 | * The descriptor is assumed to be properly initialized (ie: you got | ||
107 | * it through __rpipe_get_descr()). | ||
108 | */ | ||
109 | static int __rpipe_set_descr(struct wahc *wa, | ||
110 | struct usb_rpipe_descriptor *descr, u16 index) | ||
111 | { | ||
112 | ssize_t result; | ||
113 | struct device *dev = &wa->usb_iface->dev; | ||
114 | |||
115 | /* we cannot use the usb_get_descriptor() function because the | ||
116 | * arguments are different. | ||
117 | */ | ||
118 | d_printf(1, dev, "rpipe %u: set descr\n", index); | ||
119 | result = usb_control_msg( | ||
120 | wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
121 | USB_REQ_SET_DESCRIPTOR, | ||
122 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, | ||
123 | USB_DT_RPIPE<<8, index, descr, sizeof(*descr), | ||
124 | HZ / 10); | ||
125 | if (result < 0) { | ||
126 | dev_err(dev, "rpipe %u: set descriptor failed: %d\n", | ||
127 | index, (int)result); | ||
128 | goto error; | ||
129 | } | ||
130 | if (result < sizeof(*descr)) { | ||
131 | dev_err(dev, "rpipe %u: sent short descriptor " | ||
132 | "(%zd vs %zd bytes required)\n", | ||
133 | index, result, sizeof(*descr)); | ||
134 | result = -EINVAL; | ||
135 | goto error; | ||
136 | } | ||
137 | result = 0; | ||
138 | |||
139 | error: | ||
140 | return result; | ||
141 | |||
142 | } | ||
143 | |||
144 | static void rpipe_init(struct wa_rpipe *rpipe) | ||
145 | { | ||
146 | kref_init(&rpipe->refcnt); | ||
147 | spin_lock_init(&rpipe->seg_lock); | ||
148 | INIT_LIST_HEAD(&rpipe->seg_list); | ||
149 | } | ||
150 | |||
151 | static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx) | ||
152 | { | ||
153 | unsigned long flags; | ||
154 | |||
155 | spin_lock_irqsave(&wa->rpipe_bm_lock, flags); | ||
156 | rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx); | ||
157 | if (rpipe_idx < wa->rpipes) | ||
158 | set_bit(rpipe_idx, wa->rpipe_bm); | ||
159 | spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags); | ||
160 | |||
161 | return rpipe_idx; | ||
162 | } | ||
163 | |||
/*
 * Return an rpipe index to the free pool (clears its bit in the
 * adapter's rpipe bitmap under the bitmap lock).
 */
static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
{
	unsigned long flags;

	spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
	clear_bit(rpipe_idx, wa->rpipe_bm);
	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
}
172 | |||
173 | void rpipe_destroy(struct kref *_rpipe) | ||
174 | { | ||
175 | struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt); | ||
176 | u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex); | ||
177 | d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index); | ||
178 | if (rpipe->ep) | ||
179 | rpipe->ep->hcpriv = NULL; | ||
180 | rpipe_put_idx(rpipe->wa, index); | ||
181 | wa_put(rpipe->wa); | ||
182 | kfree(rpipe); | ||
183 | d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index); | ||
184 | } | ||
185 | EXPORT_SYMBOL_GPL(rpipe_destroy); | ||
186 | |||
187 | /* | ||
188 | * Locate an idle rpipe, create an structure for it and return it | ||
189 | * | ||
190 | * @wa is referenced and unlocked | ||
191 | * @crs enum rpipe_attr, required endpoint characteristics | ||
192 | * | ||
193 | * The rpipe can be used only sequentially (not in parallel). | ||
194 | * | ||
195 | * The rpipe is moved into the "ready" state. | ||
196 | */ | ||
197 | static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs, | ||
198 | gfp_t gfp) | ||
199 | { | ||
200 | int result; | ||
201 | unsigned rpipe_idx; | ||
202 | struct wa_rpipe *rpipe; | ||
203 | struct device *dev = &wa->usb_iface->dev; | ||
204 | |||
205 | d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs); | ||
206 | rpipe = kzalloc(sizeof(*rpipe), gfp); | ||
207 | if (rpipe == NULL) | ||
208 | return -ENOMEM; | ||
209 | rpipe_init(rpipe); | ||
210 | |||
211 | /* Look for an idle pipe */ | ||
212 | for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) { | ||
213 | rpipe_idx = rpipe_get_idx(wa, rpipe_idx); | ||
214 | if (rpipe_idx >= wa->rpipes) /* no more pipes :( */ | ||
215 | break; | ||
216 | result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx); | ||
217 | if (result < 0) | ||
218 | dev_err(dev, "Can't get descriptor for rpipe %u: %d\n", | ||
219 | rpipe_idx, result); | ||
220 | else if ((rpipe->descr.bmCharacteristics & crs) != 0) | ||
221 | goto found; | ||
222 | rpipe_put_idx(wa, rpipe_idx); | ||
223 | } | ||
224 | *prpipe = NULL; | ||
225 | kfree(rpipe); | ||
226 | d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs); | ||
227 | return -ENXIO; | ||
228 | |||
229 | found: | ||
230 | set_bit(rpipe_idx, wa->rpipe_bm); | ||
231 | rpipe->wa = wa_get(wa); | ||
232 | *prpipe = rpipe; | ||
233 | d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs); | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static int __rpipe_reset(struct wahc *wa, unsigned index) | ||
238 | { | ||
239 | int result; | ||
240 | struct device *dev = &wa->usb_iface->dev; | ||
241 | |||
242 | d_printf(1, dev, "rpipe %u: reset\n", index); | ||
243 | result = usb_control_msg( | ||
244 | wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
245 | USB_REQ_RPIPE_RESET, | ||
246 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, | ||
247 | 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); | ||
248 | if (result < 0) | ||
249 | dev_err(dev, "rpipe %u: reset failed: %d\n", | ||
250 | index, result); | ||
251 | return result; | ||
252 | } | ||
253 | |||
/*
 * Fake companion descriptor for ep0
 *
 * See WUSB1.0[7.4.4]; most fields are zero for bulk/int/ctl
 * endpoints, so only bMaxSequence carries a real value. Returned by
 * rpipe_epc_find() for the default endpoint, which has no extra
 * descriptors of its own.
 */
static struct usb_wireless_ep_comp_descriptor epc0 = {
	.bLength = sizeof(epc0),
	.bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
	/* .bMaxBurst = 1, */
	.bMaxSequence = 31,
};
265 | |||
266 | /* | ||
267 | * Look for EP companion descriptor | ||
268 | * | ||
269 | * Get there, look for Inara in the endpoint's extra descriptors | ||
270 | */ | ||
271 | static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find( | ||
272 | struct device *dev, struct usb_host_endpoint *ep) | ||
273 | { | ||
274 | void *itr; | ||
275 | size_t itr_size; | ||
276 | struct usb_descriptor_header *hdr; | ||
277 | struct usb_wireless_ep_comp_descriptor *epcd; | ||
278 | |||
279 | d_fnstart(3, dev, "(ep %p)\n", ep); | ||
280 | if (ep->desc.bEndpointAddress == 0) { | ||
281 | epcd = &epc0; | ||
282 | goto out; | ||
283 | } | ||
284 | itr = ep->extra; | ||
285 | itr_size = ep->extralen; | ||
286 | epcd = NULL; | ||
287 | while (itr_size > 0) { | ||
288 | if (itr_size < sizeof(*hdr)) { | ||
289 | dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors " | ||
290 | "at offset %zu: only %zu bytes left\n", | ||
291 | ep->desc.bEndpointAddress, | ||
292 | itr - (void *) ep->extra, itr_size); | ||
293 | break; | ||
294 | } | ||
295 | hdr = itr; | ||
296 | if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) { | ||
297 | epcd = itr; | ||
298 | break; | ||
299 | } | ||
300 | if (hdr->bLength > itr_size) { | ||
301 | dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor " | ||
302 | "at offset %zu (type 0x%02x) " | ||
303 | "length %d but only %zu bytes left\n", | ||
304 | ep->desc.bEndpointAddress, | ||
305 | itr - (void *) ep->extra, hdr->bDescriptorType, | ||
306 | hdr->bLength, itr_size); | ||
307 | break; | ||
308 | } | ||
309 | itr += hdr->bLength; | ||
310 | itr_size -= hdr->bDescriptorType; | ||
311 | } | ||
312 | out: | ||
313 | d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd); | ||
314 | return epcd; | ||
315 | } | ||
316 | |||
/*
 * Aim an rpipe to its device & endpoint destination
 *
 * Fills in the rpipe descriptor from the endpoint (and its WUSB
 * companion descriptor) and the target device, then writes it back
 * to the wire adapter with __rpipe_set_descr().
 *
 * Make sure we change the address to unauthenticated if the device
 * is WUSB and it is not authenticated: in that case the 0x80 bit is
 * ORed into the device address.
 *
 * Returns 0 on success, -ENOMSG if the endpoint has no companion
 * descriptor, or the error from __rpipe_set_descr().
 */
static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
		     struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
{
	int result = -ENOMSG;	/* better code for lack of companion? */
	struct device *dev = &wa->usb_iface->dev;
	struct usb_device *usb_dev = urb->dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	u8 unauth;

	d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
		  rpipe, wa, ep, urb);
	epcd = rpipe_epc_find(dev, ep);
	if (epcd == NULL) {
		dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
			ep->desc.bEndpointAddress);
		goto error;
	}
	unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
	__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
	atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
	/* FIXME: block allocation system; request with queuing and timeout */
	/* FIXME: compute so seg_size > ep->maxpktsize */
	rpipe->descr.wBlocks = cpu_to_le16(16);		/* given */
	/* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
	rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
	rpipe->descr.bHSHubAddress = 0;			/* reserved: zero */
	rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
	/* FIXME: use maximum speed as supported or recommended by device */
	rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
		UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
	d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
		 urb->dev->devnum, urb->dev->devnum | unauth,
		 le16_to_cpu(rpipe->descr.wRPipeIndex),
		 usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
	/* see security.c:wusb_update_address() */
	if (unlikely(urb->dev->devnum == 0x80))
		rpipe->descr.bDeviceAddress = 0;
	else
		rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
	rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
	/* FIXME: bDataSequence */
	rpipe->descr.bDataSequence = 0;
	/* FIXME: dwCurrentWindow */
	rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
	/* FIXME: bMaxDataSequence */
	rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
	rpipe->descr.bInterval = ep->desc.bInterval;
	/* FIXME: bOverTheAirInterval */
	rpipe->descr.bOverTheAirInterval = 0;	/* 0 if not isoc */
	/* FIXME: xmit power & preamble blah blah */
	rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
	/* rpipe->descr.bmCharacteristics RO */
	/* FIXME: bmRetryOptions */
	rpipe->descr.bmRetryOptions = 15;
	/* FIXME: use for assessing link quality? */
	rpipe->descr.wNumTransactionErrors = 0;
	result = __rpipe_set_descr(wa, &rpipe->descr,
				   le16_to_cpu(rpipe->descr.wRPipeIndex));
	if (result < 0) {
		dev_err(dev, "Cannot aim rpipe: %d\n", result);
		goto error;
	}
	result = 0;
error:
	d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n",
		rpipe, wa, ep, urb, result);
	return result;
}
391 | |||
/*
 * Check an aimed rpipe to make sure it points to where we want
 *
 * Verifies the cached rpipe descriptor still matches the endpoint,
 * device and urb it is about to be used for; any mismatch is
 * reported (dev_err + WARN_ON) and the function returns -EINVAL.
 *
 * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
 * space; when it is like that, we or 0x80 to make an unauth address.
 *
 * NOTE(review): @gfp is unused, and the d_fnstart() has no matching
 * d_fnend() on return -- harmless, but worth confirming upstream.
 */
static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
			   const struct usb_host_endpoint *ep,
			   const struct urb *urb, gfp_t gfp)
{
	int result = 0;	/* better code for lack of companion? */
	struct device *dev = &wa->usb_iface->dev;
	struct usb_device *usb_dev = urb->dev;
	u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
	u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);

	d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
		  rpipe, wa, ep, urb);
/* Compare one descriptor field against its expected value; on
 * mismatch log it, warn and force a -EINVAL result. */
#define AIM_CHECK(rdf, val, text)					\
	do {								\
		if (rpipe->descr.rdf != (val)) {			\
			dev_err(dev,					\
				"rpipe aim discrepancy: " #rdf " " text "\n", \
				rpipe->descr.rdf, (val));		\
			result = -EINVAL;				\
			WARN_ON(1);					\
		}							\
	} while (0)
	AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize),
		  "(%u vs %u)");
	AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
	AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
		  UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
		  "(%u vs %u)");
	AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
	AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
	AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
	AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
#undef AIM_CHECK
	return result;
}
433 | |||
434 | #ifndef CONFIG_BUG | ||
435 | #define CONFIG_BUG 0 | ||
436 | #endif | ||
437 | |||
438 | /* | ||
439 | * Make sure there is an rpipe allocated for an endpoint | ||
440 | * | ||
441 | * If already allocated, we just refcount it; if not, we get an | ||
442 | * idle one, aim it to the right location and take it. | ||
443 | * | ||
444 | * Attaches to ep->hcpriv and rpipe->ep to ep. | ||
445 | */ | ||
446 | int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep, | ||
447 | struct urb *urb, gfp_t gfp) | ||
448 | { | ||
449 | int result = 0; | ||
450 | struct device *dev = &wa->usb_iface->dev; | ||
451 | struct wa_rpipe *rpipe; | ||
452 | u8 eptype; | ||
453 | |||
454 | d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, | ||
455 | gfp); | ||
456 | mutex_lock(&wa->rpipe_mutex); | ||
457 | rpipe = ep->hcpriv; | ||
458 | if (rpipe != NULL) { | ||
459 | if (CONFIG_BUG == 1) { | ||
460 | result = rpipe_check_aim(rpipe, wa, ep, urb, gfp); | ||
461 | if (result < 0) | ||
462 | goto error; | ||
463 | } | ||
464 | __rpipe_get(rpipe); | ||
465 | d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n", | ||
466 | ep->desc.bEndpointAddress, | ||
467 | le16_to_cpu(rpipe->descr.wRPipeIndex)); | ||
468 | } else { | ||
469 | /* hmm, assign idle rpipe, aim it */ | ||
470 | result = -ENOBUFS; | ||
471 | eptype = ep->desc.bmAttributes & 0x03; | ||
472 | result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp); | ||
473 | if (result < 0) | ||
474 | goto error; | ||
475 | result = rpipe_aim(rpipe, wa, ep, urb, gfp); | ||
476 | if (result < 0) { | ||
477 | rpipe_put(rpipe); | ||
478 | goto error; | ||
479 | } | ||
480 | ep->hcpriv = rpipe; | ||
481 | rpipe->ep = ep; | ||
482 | __rpipe_get(rpipe); /* for caching into ep->hcpriv */ | ||
483 | d_printf(2, dev, "ep 0x%02x: using rpipe %u\n", | ||
484 | ep->desc.bEndpointAddress, | ||
485 | le16_to_cpu(rpipe->descr.wRPipeIndex)); | ||
486 | } | ||
487 | d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr)); | ||
488 | error: | ||
489 | mutex_unlock(&wa->rpipe_mutex); | ||
490 | d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp); | ||
491 | return result; | ||
492 | } | ||
493 | |||
494 | /* | ||
495 | * Allocate the bitmap for each rpipe. | ||
496 | */ | ||
497 | int wa_rpipes_create(struct wahc *wa) | ||
498 | { | ||
499 | wa->rpipes = wa->wa_descr->wNumRPipes; | ||
500 | wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long), | ||
501 | GFP_KERNEL); | ||
502 | if (wa->rpipe_bm == NULL) | ||
503 | return -ENOMEM; | ||
504 | return 0; | ||
505 | } | ||
506 | |||
/*
 * Free the rpipe bitmap, complaining loudly (WARN + dev_err with the
 * bitmap contents) if any rpipe is still marked in use at teardown.
 */
void wa_rpipes_destroy(struct wahc *wa)
{
	struct device *dev = &wa->usb_iface->dev;
	d_fnstart(3, dev, "(wa %p)\n", wa);
	if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
		char buf[256];
		WARN_ON(1);
		bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
		dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
	}
	kfree(wa->rpipe_bm);
	d_fnend(3, dev, "(wa %p)\n", wa);
}
520 | |||
/*
 * Release resources allocated for an endpoint
 *
 * If there is an associated rpipe to this endpoint, Abort any pending
 * transfers and put it. If the rpipe ends up being destroyed,
 * __rpipe_destroy() will cleanup ep->hcpriv.
 *
 * This is called before calling hcd->stop(), so you don't need to do
 * anything else in there.
 */
void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_rpipe *rpipe;
	d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep);
	mutex_lock(&wa->rpipe_mutex);
	rpipe = ep->hcpriv;
	if (rpipe != NULL) {
		/* peek at the refcount for debug reporting only */
		unsigned rc = atomic_read(&rpipe->refcnt.refcount);
		int result;
		u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);

		if (rc != 1)
			d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n",
				 wa, ep, rpipe, rc);

		/* Tell the adapter to abort anything pending on this rpipe.
		 * NOTE(review): usb_rcvctrlpipe() is paired here with a
		 * USB_DIR_OUT request type; the other RPIPE requests in
		 * this file use usb_sndctrlpipe() -- confirm which is
		 * intended. */
		d_printf(1, dev, "rpipe %u: abort\n", index);
		result = usb_control_msg(
			wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
			USB_REQ_RPIPE_ABORT,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
			0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
		if (result < 0 && result != -ENODEV /* dev is gone */)
			d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n",
				 wa, index, result);
		rpipe_put(rpipe);
	}
	mutex_unlock(&wa->rpipe_mutex);
	d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep);
	return;
}
EXPORT_SYMBOL_GPL(rpipe_ep_disable);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c new file mode 100644 index 000000000000..c038635d1c64 --- /dev/null +++ b/drivers/usb/wusbcore/wa-xfer.c | |||
@@ -0,0 +1,1709 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter | ||
3 | * Data transfer and URB enqueing | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * How transfers work: get a buffer, break it up in segments (segment | ||
24 | * size is a multiple of the maxpacket size). For each segment issue a | ||
25 | * segment request (struct wa_xfer_*), then send the data buffer if | ||
26 | * out or nothing if in (all over the DTO endpoint). | ||
27 | * | ||
28 | * For each submitted segment request, a notification will come over | ||
29 | * the NEP endpoint and a transfer result (struct xfer_result) will | ||
30 | * arrive in the DTI URB. Read it, get the xfer ID, see if there is | ||
31 | * data coming (inbound transfer), schedule a read and handle it. | ||
32 | * | ||
33 | * Sounds simple, it is a pain to implement. | ||
34 | * | ||
35 | * | ||
36 | * ENTRY POINTS | ||
37 | * | ||
38 | * FIXME | ||
39 | * | ||
40 | * LIFE CYCLE / STATE DIAGRAM | ||
41 | * | ||
42 | * FIXME | ||
43 | * | ||
44 | * THIS CODE IS DISGUSTING | ||
45 | * | ||
46 | * Warned you are; it's my second try and still not happy with it. | ||
47 | * | ||
48 | * NOTES: | ||
49 | * | ||
50 | * - No iso | ||
51 | * | ||
52 | * - Supports DMA xfers, control, bulk and maybe interrupt | ||
53 | * | ||
54 | * - Does not recycle unused rpipes | ||
55 | * | ||
56 | * An rpipe is assigned to an endpoint the first time it is used, | ||
57 | * and then it's there, assigned, until the endpoint is disabled | ||
58 | * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the | ||
59 | * rpipe to the endpoint is done under the wa->rpipe_sem semaphore | ||
60 | * (should be a mutex). | ||
61 | * | ||
62 | * Two methods it could be done: | ||
63 | * | ||
64 | * (a) set up a timer everytime an rpipe's use count drops to 1 | ||
65 | * (which means unused) or when a transfer ends. Reset the | ||
66 | * timer when a xfer is queued. If the timer expires, release | ||
67 | * the rpipe [see rpipe_ep_disable()]. | ||
68 | * | ||
69 | * (b) when looking for free rpipes to attach [rpipe_get_by_ep()], | ||
70 | * when none are found go over the list, check their endpoint | ||
71 | * and their activity record (if no last-xfer-done-ts in the | ||
72 | * last x seconds) take it | ||
73 | * | ||
74 | * However, due to the fact that we have a set of limited | ||
75 | * resources (max-segments-at-the-same-time per xfer, | ||
76 | * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end | ||
77 | * we are going to have to rebuild all this based on an scheduler, | ||
78 | * to where we have a list of transactions to do and based on the | ||
79 | * availability of the different requried components (blocks, | ||
80 | * rpipes, segment slots, etc), we go scheduling them. Painful. | ||
81 | */ | ||
82 | #include <linux/init.h> | ||
83 | #include <linux/spinlock.h> | ||
84 | #include <linux/hash.h> | ||
85 | #include "wa-hc.h" | ||
86 | #include "wusbhc.h" | ||
87 | |||
88 | #undef D_LOCAL | ||
89 | #define D_LOCAL 0 /* 0 disabled, > 0 different levels... */ | ||
90 | #include <linux/uwb/debug.h> | ||
91 | |||
/* Hard limit on segments per transfer; the per-xfer segment counters
 * (struct wa_xfer: segs, segs_submitted, segs_done) are u8. */
enum {
	WA_SEGS_MAX = 255,
};

/*
 * States a transfer segment (struct wa_seg) goes through.
 *
 * __wa_xfer_is_done() treats DONE, ERROR and ABORTED as terminal;
 * the remaining states track the segment through submission and
 * result collection (exact transitions live elsewhere in this file).
 */
enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};
107 | |||
108 | static void wa_xfer_delayed_run(struct wa_rpipe *); | ||
109 | |||
/*
 * One segment of a transfer
 *
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb urb;		/* embedded URB; its kref owns this struct */
	struct urb *dto_urb;	/* for data output? */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;	/* out xfer */
	u8 index;		/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;		/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
	u8 xfer_extra[];	/* xtra space for xfer_hdr_ctl */
};
126 | |||
/* Minimal initialization of a segment's embedded URB refcount. */
static void wa_seg_init(struct wa_seg *seg)
{
	/* usb_init_urb() repeats a lot of work, so we do it here */
	kref_init(&seg->urb.kref);
}
132 | |||
/*
 * A whole transfer: one URB broken into segments
 *
 * Protected by xfer->lock
 */
struct wa_xfer {
	struct kref refcnt;		/* released via wa_xfer_destroy() */
	struct list_head list_node;	/* adapter's xfer list; removed in
					 * wa_xfer_giveback() */
	spinlock_t lock;
	u32 id;				/* sequential ID, see wa_xfer_id_init() */

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transfering for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;			/* aggregate status given back to the
					 * USB stack (wa_xfer_giveback()) */

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};
157 | |||
158 | static inline void wa_xfer_init(struct wa_xfer *xfer) | ||
159 | { | ||
160 | kref_init(&xfer->refcnt); | ||
161 | INIT_LIST_HEAD(&xfer->list_node); | ||
162 | spin_lock_init(&xfer->lock); | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * Destory a transfer structure | ||
167 | * | ||
168 | * Note that the xfer->seg[index] thingies follow the URB life cycle, | ||
169 | * so we need to put them, not free them. | ||
170 | */ | ||
171 | static void wa_xfer_destroy(struct kref *_xfer) | ||
172 | { | ||
173 | struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt); | ||
174 | if (xfer->seg) { | ||
175 | unsigned cnt; | ||
176 | for (cnt = 0; cnt < xfer->segs; cnt++) { | ||
177 | if (xfer->is_inbound) | ||
178 | usb_put_urb(xfer->seg[cnt]->dto_urb); | ||
179 | usb_put_urb(&xfer->seg[cnt]->urb); | ||
180 | } | ||
181 | } | ||
182 | kfree(xfer); | ||
183 | d_printf(2, NULL, "xfer %p destroyed\n", xfer); | ||
184 | } | ||
185 | |||
/* Take a reference on a transfer. */
static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}
190 | |||
/* Drop a reference; the last put frees the xfer via wa_xfer_destroy(). */
static void wa_xfer_put(struct wa_xfer *xfer)
{
	d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n",
		  xfer, atomic_read(&xfer->refcnt.refcount));
	kref_put(&xfer->refcnt, wa_xfer_destroy);
	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
}
198 | |||
/*
 * Remove the transfer from the adapter's list and hand the URB (with
 * xfer->result as status) back to the USB stack
 *
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;
	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	/* drop the wa and xfer references held for this transfer */
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
}
222 | |||
/*
 * Release the per-transfer resources (wusb device reference, rpipe
 * reference) and give the URB back to the USB stack
 *
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
	return;
}
238 | |||
/*
 * If the transfer is done, wrap it up and return true.
 *
 * "Done" means every submitted segment has reached a terminal state.
 * Walks all segments accumulating urb->actual_length for DONE ones and
 * propagating the first ERROR/ABORTED result into xfer->result.  A
 * short segment is only legal as the last one (WUSB semantics); a
 * short segment followed by one that carried data is flagged -EINVAL.
 *
 * xfer->lock must be held.
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
	/* not done until every submitted segment has completed */
	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			/* data after an earlier short segment is bogus */
			if (found_short && seg->result > 0) {
				if (printk_ratelimit())
					printk(KERN_ERR "xfer %p#%u: bad short "
						"segments (%zu)\n", xfer, cnt,
						seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			d_printf(2, NULL, "xfer %p#%u: DONE short %d "
				 "result %zu urb->actual_length %d\n",
				 xfer, seg->index, found_short, seg->result,
				 urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n",
				 xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			/* dequeue should have set one of these first */
			WARN_ON(urb->status != -ECONNRESET
				&& urb->status != -ENOENT);
			d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n",
				 xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			/* if (printk_ratelimit()) */
			printk(KERN_ERR "xfer %p#%u: "
			       "is_done bad state %d\n",
			       xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			WARN_ON(1);
			goto out;
		}
	}
	xfer->result = 0;	/* all segments completed cleanly */
out:
	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
	return result;
}
304 | |||
305 | /* | ||
306 | * Initialize a transfer's ID | ||
307 | * | ||
308 | * We need to use a sequential number; if we use the pointer or the | ||
309 | * hash of the pointer, it can repeat over sequential transfers and | ||
310 | * then it will confuse the HWA....wonder why in hell they put a 32 | ||
311 | * bit handle in there then. | ||
312 | */ | ||
313 | static void wa_xfer_id_init(struct wa_xfer *xfer) | ||
314 | { | ||
315 | xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); | ||
316 | } | ||
317 | |||
/*
 * Return the ID previously assigned to @xfer by wa_xfer_id_init().
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}
327 | |||
328 | /* | ||
329 | * Search for a transfer list ID on the HCD's URB list | ||
330 | * | ||
331 | * For 32 bit architectures, we use the pointer itself; for 64 bits, a | ||
332 | * 32-bit hash of the pointer. | ||
333 | * | ||
334 | * @returns NULL if not found. | ||
335 | */ | ||
336 | static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id) | ||
337 | { | ||
338 | unsigned long flags; | ||
339 | struct wa_xfer *xfer_itr; | ||
340 | spin_lock_irqsave(&wa->xfer_list_lock, flags); | ||
341 | list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) { | ||
342 | if (id == xfer_itr->id) { | ||
343 | wa_xfer_get(xfer_itr); | ||
344 | goto out; | ||
345 | } | ||
346 | } | ||
347 | xfer_itr = NULL; | ||
348 | out: | ||
349 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags); | ||
350 | return xfer_itr; | ||
351 | } | ||
352 | |||
/*
 * One-shot buffer for an asynchronous transfer-abort request.
 *
 * The urb member sits at the head of the struct on purpose: when the
 * urb's last reference is dropped the whole allocation is released
 * with it (see __wa_xfer_abort()).
 */
struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};
357 | |||
/* Abort-request completion: drop the last urb ref, freeing the buffer. */
static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}
363 | |||
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, that we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 *
 * Best-effort: a failed kmalloc or submit is logged and swallowed;
 * there is nothing more we can do from this context.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	/* build the WA_XFER_ABORT request for this rpipe/transfer ID */
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;	/* callback frees! */


error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;

}
413 | |||
/*
 * Compute the transfer type, direction, segment size and segment count
 * for @xfer from its rpipe descriptor and URB.
 *
 * Fills *pxfer_type and xfer->{is_inbound,is_dma,seg_size,segs}.
 *
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
		  xfer, rpipe, urb);
	/* low two bits of bmAttribute select the endpoint transfer type */
	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	};
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	/* segment size = rpipe blocks x adapter block size */
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
	 * a check (FIXME) */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	/* round the segment size down to a maxpktsize multiple */
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
		/ xfer->seg_size;
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	/* zero-length control transfers still need one segment */
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
		xfer, rpipe, urb, (int)result);
	return result;
}
479 | |||
/** Fill in the common request header and xfer-type specific data.
 *
 * NOTE(review): the xfer_hdr0 parameter is immediately overwritten
 * with &xfer->seg[0]->xfer_hdr (the caller passes the same pointer, so
 * this is harmless but the parameter is effectively unused as input).
 */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;	/* segment number, filled later */
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		/* a control xfer must carry its setup packet */
		BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
		       && xfer->urb->setup_packet == NULL);
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
	default:
		BUG();
	};
}
513 | |||
/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
	switch (urb->status) {
	case 0:
		/* data phase OK: record how much went out */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n",
			 xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data out error %d\n",
				xfer, seg->index, urb->status);
		/* NOTE(review): counts against nep_edc although this is the
		 * DTO path -- confirm a dedicated dto edc isn't intended */
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		/* only fail the segment once; wa_seg_cb() may have already */
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		/* completion and delayed-run run unlocked, see their docs */
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
}
583 | |||
/*
 * Callback for the segment request
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		d_printf(2, dev, "xfer %p#%u: request done\n",
			 xfer, seg->index);
		/* outbound segments go PENDING in wa_seg_dto_cb() instead */
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		/* kill the pending OUT data phase for this segment, if any */
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		/* completion and delayed-run must run without xfer->lock */
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
}
657 | |||
658 | /* | ||
659 | * Allocate the segs array and initialize each of them | ||
660 | * | ||
661 | * The segments are freed by wa_xfer_destroy() when the xfer use count | ||
662 | * drops to zero; however, because each segment is given the same life | ||
663 | * cycle as the USB URB it contains, it is actually freed by | ||
664 | * usb_put_urb() on the contained USB URB (twisted, eh?). | ||
665 | */ | ||
666 | static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size) | ||
667 | { | ||
668 | int result, cnt; | ||
669 | size_t alloc_size = sizeof(*xfer->seg[0]) | ||
670 | - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size; | ||
671 | struct usb_device *usb_dev = xfer->wa->usb_dev; | ||
672 | const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd; | ||
673 | struct wa_seg *seg; | ||
674 | size_t buf_itr, buf_size, buf_itr_size; | ||
675 | |||
676 | result = -ENOMEM; | ||
677 | xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC); | ||
678 | if (xfer->seg == NULL) | ||
679 | goto error_segs_kzalloc; | ||
680 | buf_itr = 0; | ||
681 | buf_size = xfer->urb->transfer_buffer_length; | ||
682 | for (cnt = 0; cnt < xfer->segs; cnt++) { | ||
683 | seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC); | ||
684 | if (seg == NULL) | ||
685 | goto error_seg_kzalloc; | ||
686 | wa_seg_init(seg); | ||
687 | seg->xfer = xfer; | ||
688 | seg->index = cnt; | ||
689 | usb_fill_bulk_urb(&seg->urb, usb_dev, | ||
690 | usb_sndbulkpipe(usb_dev, | ||
691 | dto_epd->bEndpointAddress), | ||
692 | &seg->xfer_hdr, xfer_hdr_size, | ||
693 | wa_seg_cb, seg); | ||
694 | buf_itr_size = buf_size > xfer->seg_size ? | ||
695 | xfer->seg_size : buf_size; | ||
696 | if (xfer->is_inbound == 0 && buf_size > 0) { | ||
697 | seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
698 | if (seg->dto_urb == NULL) | ||
699 | goto error_dto_alloc; | ||
700 | usb_fill_bulk_urb( | ||
701 | seg->dto_urb, usb_dev, | ||
702 | usb_sndbulkpipe(usb_dev, | ||
703 | dto_epd->bEndpointAddress), | ||
704 | NULL, 0, wa_seg_dto_cb, seg); | ||
705 | if (xfer->is_dma) { | ||
706 | seg->dto_urb->transfer_dma = | ||
707 | xfer->urb->transfer_dma + buf_itr; | ||
708 | seg->dto_urb->transfer_flags |= | ||
709 | URB_NO_TRANSFER_DMA_MAP; | ||
710 | } else | ||
711 | seg->dto_urb->transfer_buffer = | ||
712 | xfer->urb->transfer_buffer + buf_itr; | ||
713 | seg->dto_urb->transfer_buffer_length = buf_itr_size; | ||
714 | } | ||
715 | seg->status = WA_SEG_READY; | ||
716 | buf_itr += buf_itr_size; | ||
717 | buf_size -= buf_itr_size; | ||
718 | } | ||
719 | return 0; | ||
720 | |||
721 | error_dto_alloc: | ||
722 | kfree(xfer->seg[cnt]); | ||
723 | cnt--; | ||
724 | error_seg_kzalloc: | ||
725 | /* use the fact that cnt is left at were it failed */ | ||
726 | for (; cnt > 0; cnt--) { | ||
727 | if (xfer->is_inbound == 0) | ||
728 | kfree(xfer->seg[cnt]->dto_urb); | ||
729 | kfree(xfer->seg[cnt]); | ||
730 | } | ||
731 | error_segs_kzalloc: | ||
732 | return result; | ||
733 | } | ||
734 | |||
735 | /* | ||
736 | * Allocates all the stuff needed to submit a transfer | ||
737 | * | ||
738 | * Breaks the whole data buffer in a list of segments, each one has a | ||
739 | * structure allocated to it and linked in xfer->seg[index] | ||
740 | * | ||
741 | * FIXME: merge setup_segs() and the last part of this function, no | ||
742 | * need to do two for loops when we could run everything in a | ||
743 | * single one | ||
744 | */ | ||
745 | static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) | ||
746 | { | ||
747 | int result; | ||
748 | struct device *dev = &xfer->wa->usb_iface->dev; | ||
749 | enum wa_xfer_type xfer_type = 0; /* shut up GCC */ | ||
750 | size_t xfer_hdr_size, cnt, transfer_size; | ||
751 | struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; | ||
752 | |||
753 | d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", | ||
754 | xfer, xfer->ep->hcpriv, urb); | ||
755 | |||
756 | result = __wa_xfer_setup_sizes(xfer, &xfer_type); | ||
757 | if (result < 0) | ||
758 | goto error_setup_sizes; | ||
759 | xfer_hdr_size = result; | ||
760 | result = __wa_xfer_setup_segs(xfer, xfer_hdr_size); | ||
761 | if (result < 0) { | ||
762 | dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n", | ||
763 | xfer, xfer->segs, result); | ||
764 | goto error_setup_segs; | ||
765 | } | ||
766 | /* Fill the first header */ | ||
767 | xfer_hdr0 = &xfer->seg[0]->xfer_hdr; | ||
768 | wa_xfer_id_init(xfer); | ||
769 | __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); | ||
770 | |||
771 | /* Fill remainig headers */ | ||
772 | xfer_hdr = xfer_hdr0; | ||
773 | transfer_size = urb->transfer_buffer_length; | ||
774 | xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? | ||
775 | xfer->seg_size : transfer_size; | ||
776 | transfer_size -= xfer->seg_size; | ||
777 | for (cnt = 1; cnt < xfer->segs; cnt++) { | ||
778 | xfer_hdr = &xfer->seg[cnt]->xfer_hdr; | ||
779 | memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); | ||
780 | xfer_hdr->bTransferSegment = cnt; | ||
781 | xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ? | ||
782 | cpu_to_le32(xfer->seg_size) | ||
783 | : cpu_to_le32(transfer_size); | ||
784 | xfer->seg[cnt]->status = WA_SEG_READY; | ||
785 | transfer_size -= xfer->seg_size; | ||
786 | } | ||
787 | xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */ | ||
788 | result = 0; | ||
789 | error_setup_segs: | ||
790 | error_setup_sizes: | ||
791 | d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n", | ||
792 | xfer, xfer->ep->hcpriv, urb, result); | ||
793 | return result; | ||
794 | } | ||
795 | |||
/*
 * Submit one segment to the wire adapter: first the request urb, then
 * the optional OUT data-phase urb, and account for the rpipe slot.
 *
 * On failure the segment is marked WA_SEG_ERROR with the submit error
 * in seg->result; a failed data phase unlinks the already-submitted
 * request urb.
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;
	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
	if (result < 0) {
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	}
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
			       xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);	/* one fewer concurrent slot on the rpipe */
	return 0;

error_dto_submit:
	usb_unlink_urb(&seg->urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}
830 | |||
/*
 * Execute more queued request segments until the maximum concurrent allowed
 *
 * Pops delayed segments off rpipe->seg_list while the rpipe has free
 * slots and submits them.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not viceversa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	d_fnstart(1, dev, "(rpipe #%d) %d segments available\n",
		  le16_to_cpu(rpipe->descr.wRPipeIndex),
		  atomic_read(&rpipe->segs_available));
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	      && !list_empty(&rpipe->seg_list)) {
		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		d_printf(1, dev, "xfer %p#%u submitted from delayed "
			 "[%d segments available] %d\n",
			 xfer, seg->index,
			 atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			/* drop seg_lock before taking xfer->lock: the
			 * normal nesting order is xfer->lock -> seg_lock */
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n",
		le16_to_cpu(rpipe->descr.wRPipeIndex),
		atomic_read(&rpipe->segs_available));

}
876 | |||
/*
 * Submit every segment of the transfer, queueing for later those that
 * don't fit in the rpipe's concurrency window.
 *
 * Adds the xfer to the wire adapter's pending list, then either
 * submits each segment directly or, when no rpipe slots are free (or
 * earlier segments are already queued, to keep ordering), appends it
 * to rpipe->seg_list for wa_xfer_delayed_run().
 *
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	d_fnstart(3, dev, "(xfer %p [rpipe %p])\n",
		  xfer, xfer->ep->hcpriv);

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n",
			 xfer, cnt, available, empty,
			 available == 0 || !empty ? "delayed" : "submitted");
		/* delay if no slot free OR other segments already queued
		 * (preserves submission order across transfers) */
		if (available == 0 || !empty) {
			d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0)
				goto error_seg_submit;
		}
		xfer->segs_submitted++;	/* delayed segs count as submitted */
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
		xfer->ep->hcpriv);
	return result;

error_seg_submit:
	__wa_xfer_abort(xfer);
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
		xfer->ep->hcpriv);
	return result;
}
937 | |||
/*
 * Second part of a URB/transfer enqueuement
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *              delayed == 0
 * xfer->urb 	filled and refcounted (this is the case when called
 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 *              and when called by wa_urb_enqueue_run(), as we took an
 *              extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct device *dev = &wa->usb_iface->dev;
	struct wusb_dev *wusb_dev;
	unsigned done;

	d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb);
	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0)
		goto error_rpipe_get;
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL)
		goto error_dev_gone;
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	/* the URB may have been cancelled while we slept above */
	result = urb->status;
	if (urb->status != -EINPROGRESS)
		goto error_dequeued;

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0)
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
	if (result < 0)
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);
	d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb);
	return;

	/* this is basically wa_xfer_completion() broken up: wa_xfer_giveback()
	 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
	 * up/undo setup().
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	wa_xfer_giveback(xfer);
	d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
	return;

	/* submit failed: some segments may still be in flight, so only
	 * complete if __wa_xfer_is_done() says everything has settled */
error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
	return;
}
1030 | |||
/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we just drop the lock once we have the xfer and reacquire it
 * later.
 *
 * Workqueue handler for transfers enqueued from atomic context by
 * wa_urb_enqueue().
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer, *next;
	struct urb *urb;

	d_fnstart(3, dev, "(wa %p)\n", wa);
	spin_lock_irq(&wa->xfer_list_lock);
	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
				 list_node) {
		list_del_init(&xfer->list_node);
		/* drop the lock: enqueue_b() may sleep (mutex, rpipe_get) */
		spin_unlock_irq(&wa->xfer_list_lock);

		urb = xfer->urb;
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */

		spin_lock_irq(&wa->xfer_list_lock);
	}
	spin_unlock_irq(&wa->xfer_list_lock);
	d_fnend(3, dev, "(wa %p) = void\n", wa);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1065 | |||
1066 | /* | ||
1067 | * Submit a transfer to the Wire Adapter in a delayed way | ||
1068 | * | ||
1069 | * The process of enqueuing involves possible sleeps() [see | ||
1070 | * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are | ||
1071 | * in an atomic section, we defer the enqueue_b() call--else we call direct. | ||
1072 | * | ||
1073 | * @urb: We own a reference to it done by the HCI Linux USB stack that | ||
1074 | * will be given up by calling usb_hcd_giveback_urb() or by | ||
1075 | * returning error from this function -> ergo we don't have to | ||
1076 | * refcount it. | ||
1077 | */ | ||
1078 | int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, | ||
1079 | struct urb *urb, gfp_t gfp) | ||
1080 | { | ||
1081 | int result; | ||
1082 | struct device *dev = &wa->usb_iface->dev; | ||
1083 | struct wa_xfer *xfer; | ||
1084 | unsigned long my_flags; | ||
1085 | unsigned cant_sleep = irqs_disabled() | in_atomic(); | ||
1086 | |||
1087 | d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n", | ||
1088 | wa, ep, urb, urb->transfer_buffer_length, gfp); | ||
1089 | |||
1090 | if (urb->transfer_buffer == NULL | ||
1091 | && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) | ||
1092 | && urb->transfer_buffer_length != 0) { | ||
1093 | dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb); | ||
1094 | dump_stack(); | ||
1095 | } | ||
1096 | |||
1097 | result = -ENOMEM; | ||
1098 | xfer = kzalloc(sizeof(*xfer), gfp); | ||
1099 | if (xfer == NULL) | ||
1100 | goto error_kmalloc; | ||
1101 | |||
1102 | result = -ENOENT; | ||
1103 | if (urb->status != -EINPROGRESS) /* cancelled */ | ||
1104 | goto error_dequeued; /* before starting? */ | ||
1105 | wa_xfer_init(xfer); | ||
1106 | xfer->wa = wa_get(wa); | ||
1107 | xfer->urb = urb; | ||
1108 | xfer->gfp = gfp; | ||
1109 | xfer->ep = ep; | ||
1110 | urb->hcpriv = xfer; | ||
1111 | d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", | ||
1112 | xfer, urb, urb->pipe, urb->transfer_buffer_length, | ||
1113 | urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", | ||
1114 | urb->pipe & USB_DIR_IN ? "inbound" : "outbound", | ||
1115 | cant_sleep ? "deferred" : "inline"); | ||
1116 | if (cant_sleep) { | ||
1117 | usb_get_urb(urb); | ||
1118 | spin_lock_irqsave(&wa->xfer_list_lock, my_flags); | ||
1119 | list_add_tail(&xfer->list_node, &wa->xfer_delayed_list); | ||
1120 | spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); | ||
1121 | queue_work(wusbd, &wa->xfer_work); | ||
1122 | } else { | ||
1123 | wa_urb_enqueue_b(xfer); | ||
1124 | } | ||
1125 | d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n", | ||
1126 | wa, ep, urb, urb->transfer_buffer_length, gfp); | ||
1127 | return 0; | ||
1128 | |||
1129 | error_dequeued: | ||
1130 | kfree(xfer); | ||
1131 | error_kmalloc: | ||
1132 | d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n", | ||
1133 | wa, ep, urb, urb->transfer_buffer_length, gfp, result); | ||
1134 | return result; | ||
1135 | } | ||
1136 | EXPORT_SYMBOL_GPL(wa_urb_enqueue); | ||
1137 | |||
1138 | /* | ||
1139 | * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion | ||
1140 | * handler] is called. | ||
1141 | * | ||
1142 | * Until a transfer goes successfully through wa_urb_enqueue() it | ||
1143 | * needs to be dequeued with completion calling; when stuck in delayed | ||
1144 | * or before wa_xfer_setup() is called, we need to do completion. | ||
1145 | * | ||
1146 | * not setup If there is no hcpriv yet, that means that that enqueue | ||
1147 | * still had no time to set the xfer up. Because | ||
1148 | * urb->status should be other than -EINPROGRESS, | ||
1149 | * enqueue() will catch that and bail out. | ||
1150 | * | ||
1151 | * If the transfer has gone through setup, we just need to clean it | ||
1152 | * up. If it has gone through submit(), we have to abort it [with an | ||
1153 | * asynch request] and then make sure we cancel each segment. | ||
1154 | * | ||
1155 | */ | ||
1156 | int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | ||
1157 | { | ||
1158 | struct device *dev = &wa->usb_iface->dev; | ||
1159 | unsigned long flags, flags2; | ||
1160 | struct wa_xfer *xfer; | ||
1161 | struct wa_seg *seg; | ||
1162 | struct wa_rpipe *rpipe; | ||
1163 | unsigned cnt; | ||
1164 | unsigned rpipe_ready = 0; | ||
1165 | |||
1166 | d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb); | ||
1167 | |||
1168 | d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb); | ||
1169 | xfer = urb->hcpriv; | ||
1170 | if (xfer == NULL) { | ||
1171 | /* NOthing setup yet enqueue will see urb->status != | ||
1172 | * -EINPROGRESS (by hcd layer) and bail out with | ||
1173 | * error, no need to do completion | ||
1174 | */ | ||
1175 | BUG_ON(urb->status == -EINPROGRESS); | ||
1176 | goto out; | ||
1177 | } | ||
1178 | spin_lock_irqsave(&xfer->lock, flags); | ||
1179 | rpipe = xfer->ep->hcpriv; | ||
1180 | /* Check the delayed list -> if there, release and complete */ | ||
1181 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); | ||
1182 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) | ||
1183 | goto dequeue_delayed; | ||
1184 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); | ||
1185 | if (xfer->seg == NULL) /* still hasn't reached */ | ||
1186 | goto out_unlock; /* setup(), enqueue_b() completes */ | ||
1187 | /* Ok, the xfer is in flight already, it's been setup and submitted.*/ | ||
1188 | __wa_xfer_abort(xfer); | ||
1189 | for (cnt = 0; cnt < xfer->segs; cnt++) { | ||
1190 | seg = xfer->seg[cnt]; | ||
1191 | switch (seg->status) { | ||
1192 | case WA_SEG_NOTREADY: | ||
1193 | case WA_SEG_READY: | ||
1194 | printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n", | ||
1195 | xfer, cnt, seg->status); | ||
1196 | WARN_ON(1); | ||
1197 | break; | ||
1198 | case WA_SEG_DELAYED: | ||
1199 | seg->status = WA_SEG_ABORTED; | ||
1200 | spin_lock_irqsave(&rpipe->seg_lock, flags2); | ||
1201 | list_del(&seg->list_node); | ||
1202 | xfer->segs_done++; | ||
1203 | rpipe_ready = rpipe_avail_inc(rpipe); | ||
1204 | spin_unlock_irqrestore(&rpipe->seg_lock, flags2); | ||
1205 | break; | ||
1206 | case WA_SEG_SUBMITTED: | ||
1207 | seg->status = WA_SEG_ABORTED; | ||
1208 | usb_unlink_urb(&seg->urb); | ||
1209 | if (xfer->is_inbound == 0) | ||
1210 | usb_unlink_urb(seg->dto_urb); | ||
1211 | xfer->segs_done++; | ||
1212 | rpipe_ready = rpipe_avail_inc(rpipe); | ||
1213 | break; | ||
1214 | case WA_SEG_PENDING: | ||
1215 | seg->status = WA_SEG_ABORTED; | ||
1216 | xfer->segs_done++; | ||
1217 | rpipe_ready = rpipe_avail_inc(rpipe); | ||
1218 | break; | ||
1219 | case WA_SEG_DTI_PENDING: | ||
1220 | usb_unlink_urb(wa->dti_urb); | ||
1221 | seg->status = WA_SEG_ABORTED; | ||
1222 | xfer->segs_done++; | ||
1223 | rpipe_ready = rpipe_avail_inc(rpipe); | ||
1224 | break; | ||
1225 | case WA_SEG_DONE: | ||
1226 | case WA_SEG_ERROR: | ||
1227 | case WA_SEG_ABORTED: | ||
1228 | break; | ||
1229 | } | ||
1230 | } | ||
1231 | xfer->result = urb->status; /* -ENOENT or -ECONNRESET */ | ||
1232 | __wa_xfer_is_done(xfer); | ||
1233 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1234 | wa_xfer_completion(xfer); | ||
1235 | if (rpipe_ready) | ||
1236 | wa_xfer_delayed_run(rpipe); | ||
1237 | d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); | ||
1238 | return 0; | ||
1239 | |||
1240 | out_unlock: | ||
1241 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1242 | out: | ||
1243 | d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); | ||
1244 | return 0; | ||
1245 | |||
1246 | dequeue_delayed: | ||
1247 | list_del_init(&xfer->list_node); | ||
1248 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); | ||
1249 | xfer->result = urb->status; | ||
1250 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1251 | wa_xfer_giveback(xfer); | ||
1252 | usb_put_urb(urb); /* we got a ref in enqueue() */ | ||
1253 | d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); | ||
1254 | return 0; | ||
1255 | } | ||
1256 | EXPORT_SYMBOL_GPL(wa_urb_dequeue); | ||
1257 | |||
1258 | /* | ||
1259 | * Translation from WA status codes (WUSB1.0 Table 8.15) to errno | ||
1260 | * codes | ||
1261 | * | ||
1262 | * Positive errno values are internal inconsistencies and should be | ||
1263 | * flagged louder. Negative are to be passed up to the user in the | ||
1264 | * normal way. | ||
1265 | * | ||
1266 | * @status: USB WA status code -- high two bits are stripped. | ||
1267 | */ | ||
1268 | static int wa_xfer_status_to_errno(u8 status) | ||
1269 | { | ||
1270 | int errno; | ||
1271 | u8 real_status = status; | ||
1272 | static int xlat[] = { | ||
1273 | [WA_XFER_STATUS_SUCCESS] = 0, | ||
1274 | [WA_XFER_STATUS_HALTED] = -EPIPE, | ||
1275 | [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS, | ||
1276 | [WA_XFER_STATUS_BABBLE] = -EOVERFLOW, | ||
1277 | [WA_XFER_RESERVED] = EINVAL, | ||
1278 | [WA_XFER_STATUS_NOT_FOUND] = 0, | ||
1279 | [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM, | ||
1280 | [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ, | ||
1281 | [WA_XFER_STATUS_ABORTED] = -EINTR, | ||
1282 | [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL, | ||
1283 | [WA_XFER_INVALID_FORMAT] = EINVAL, | ||
1284 | [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL, | ||
1285 | [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL, | ||
1286 | }; | ||
1287 | status &= 0x3f; | ||
1288 | |||
1289 | if (status == 0) | ||
1290 | return 0; | ||
1291 | if (status >= ARRAY_SIZE(xlat)) { | ||
1292 | if (printk_ratelimit()) | ||
1293 | printk(KERN_ERR "%s(): BUG? " | ||
1294 | "Unknown WA transfer status 0x%02x\n", | ||
1295 | __func__, real_status); | ||
1296 | return -EINVAL; | ||
1297 | } | ||
1298 | errno = xlat[status]; | ||
1299 | if (unlikely(errno > 0)) { | ||
1300 | if (printk_ratelimit()) | ||
1301 | printk(KERN_ERR "%s(): BUG? " | ||
1302 | "Inconsistent WA status: 0x%02x\n", | ||
1303 | __func__, real_status); | ||
1304 | errno = -errno; | ||
1305 | } | ||
1306 | return errno; | ||
1307 | } | ||
1308 | |||
1309 | /* | ||
1310 | * Process a xfer result completion message | ||
1311 | * | ||
1312 | * inbound transfers: need to schedule a DTI read | ||
1313 | * | ||
1314 | * FIXME: this functio needs to be broken up in parts | ||
1315 | */ | ||
1316 | static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer) | ||
1317 | { | ||
1318 | int result; | ||
1319 | struct device *dev = &wa->usb_iface->dev; | ||
1320 | unsigned long flags; | ||
1321 | u8 seg_idx; | ||
1322 | struct wa_seg *seg; | ||
1323 | struct wa_rpipe *rpipe; | ||
1324 | struct wa_xfer_result *xfer_result = wa->xfer_result; | ||
1325 | u8 done = 0; | ||
1326 | u8 usb_status; | ||
1327 | unsigned rpipe_ready = 0; | ||
1328 | |||
1329 | d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer); | ||
1330 | spin_lock_irqsave(&xfer->lock, flags); | ||
1331 | seg_idx = xfer_result->bTransferSegment & 0x7f; | ||
1332 | if (unlikely(seg_idx >= xfer->segs)) | ||
1333 | goto error_bad_seg; | ||
1334 | seg = xfer->seg[seg_idx]; | ||
1335 | rpipe = xfer->ep->hcpriv; | ||
1336 | usb_status = xfer_result->bTransferStatus; | ||
1337 | d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", | ||
1338 | xfer, seg_idx, usb_status, seg->status); | ||
1339 | if (seg->status == WA_SEG_ABORTED | ||
1340 | || seg->status == WA_SEG_ERROR) /* already handled */ | ||
1341 | goto segment_aborted; | ||
1342 | if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */ | ||
1343 | seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */ | ||
1344 | if (seg->status != WA_SEG_PENDING) { | ||
1345 | if (printk_ratelimit()) | ||
1346 | dev_err(dev, "xfer %p#%u: Bad segment state %u\n", | ||
1347 | xfer, seg_idx, seg->status); | ||
1348 | seg->status = WA_SEG_PENDING; /* workaround/"fix" it */ | ||
1349 | } | ||
1350 | if (usb_status & 0x80) { | ||
1351 | seg->result = wa_xfer_status_to_errno(usb_status); | ||
1352 | dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n", | ||
1353 | xfer, seg->index, usb_status); | ||
1354 | goto error_complete; | ||
1355 | } | ||
1356 | /* FIXME: we ignore warnings, tally them for stats */ | ||
1357 | if (usb_status & 0x40) /* Warning?... */ | ||
1358 | usb_status = 0; /* ... pass */ | ||
1359 | if (xfer->is_inbound) { /* IN data phase: read to buffer */ | ||
1360 | seg->status = WA_SEG_DTI_PENDING; | ||
1361 | BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); | ||
1362 | if (xfer->is_dma) { | ||
1363 | wa->buf_in_urb->transfer_dma = | ||
1364 | xfer->urb->transfer_dma | ||
1365 | + seg_idx * xfer->seg_size; | ||
1366 | wa->buf_in_urb->transfer_flags | ||
1367 | |= URB_NO_TRANSFER_DMA_MAP; | ||
1368 | } else { | ||
1369 | wa->buf_in_urb->transfer_buffer = | ||
1370 | xfer->urb->transfer_buffer | ||
1371 | + seg_idx * xfer->seg_size; | ||
1372 | wa->buf_in_urb->transfer_flags | ||
1373 | &= ~URB_NO_TRANSFER_DMA_MAP; | ||
1374 | } | ||
1375 | wa->buf_in_urb->transfer_buffer_length = | ||
1376 | le32_to_cpu(xfer_result->dwTransferLength); | ||
1377 | wa->buf_in_urb->context = seg; | ||
1378 | result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC); | ||
1379 | if (result < 0) | ||
1380 | goto error_submit_buf_in; | ||
1381 | } else { | ||
1382 | /* OUT data phase, complete it -- */ | ||
1383 | seg->status = WA_SEG_DONE; | ||
1384 | seg->result = le32_to_cpu(xfer_result->dwTransferLength); | ||
1385 | xfer->segs_done++; | ||
1386 | rpipe_ready = rpipe_avail_inc(rpipe); | ||
1387 | done = __wa_xfer_is_done(xfer); | ||
1388 | } | ||
1389 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1390 | if (done) | ||
1391 | wa_xfer_completion(xfer); | ||
1392 | if (rpipe_ready) | ||
1393 | wa_xfer_delayed_run(rpipe); | ||
1394 | d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer); | ||
1395 | return; | ||
1396 | |||
1397 | |||
1398 | error_submit_buf_in: | ||
1399 | if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { | ||
1400 | dev_err(dev, "DTI: URB max acceptable errors " | ||
1401 | "exceeded, resetting device\n"); | ||
1402 | wa_reset_all(wa); | ||
1403 | } | ||
1404 | if (printk_ratelimit()) | ||
1405 | dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n", | ||
1406 | xfer, seg_idx, result); | ||
1407 | seg->result = result; | ||
1408 | error_complete: | ||
1409 | seg->status = WA_SEG_ERROR; | ||
1410 | xfer->segs_done++; | ||
1411 | rpipe_ready = rpipe_avail_inc(rpipe); | ||
1412 | __wa_xfer_abort(xfer); | ||
1413 | done = __wa_xfer_is_done(xfer); | ||
1414 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1415 | if (done) | ||
1416 | wa_xfer_completion(xfer); | ||
1417 | if (rpipe_ready) | ||
1418 | wa_xfer_delayed_run(rpipe); | ||
1419 | d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n", | ||
1420 | wa, xfer); | ||
1421 | return; | ||
1422 | |||
1423 | |||
1424 | error_bad_seg: | ||
1425 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1426 | wa_urb_dequeue(wa, xfer->urb); | ||
1427 | if (printk_ratelimit()) | ||
1428 | dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx); | ||
1429 | if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { | ||
1430 | dev_err(dev, "DTI: URB max acceptable errors " | ||
1431 | "exceeded, resetting device\n"); | ||
1432 | wa_reset_all(wa); | ||
1433 | } | ||
1434 | d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer); | ||
1435 | return; | ||
1436 | |||
1437 | |||
1438 | segment_aborted: | ||
1439 | /* nothing to do, as the aborter did the completion */ | ||
1440 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1441 | d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n", | ||
1442 | wa, xfer); | ||
1443 | return; | ||
1444 | |||
1445 | } | ||
1446 | |||
1447 | /* | ||
1448 | * Callback for the IN data phase | ||
1449 | * | ||
1450 | * If succesful transition state; otherwise, take a note of the | ||
1451 | * error, mark this segment done and try completion. | ||
1452 | * | ||
1453 | * Note we don't access until we are sure that the transfer hasn't | ||
1454 | * been cancelled (ECONNRESET, ENOENT), which could mean that | ||
1455 | * seg->xfer could be already gone. | ||
1456 | */ | ||
1457 | static void wa_buf_in_cb(struct urb *urb) | ||
1458 | { | ||
1459 | struct wa_seg *seg = urb->context; | ||
1460 | struct wa_xfer *xfer = seg->xfer; | ||
1461 | struct wahc *wa; | ||
1462 | struct device *dev; | ||
1463 | struct wa_rpipe *rpipe; | ||
1464 | unsigned rpipe_ready; | ||
1465 | unsigned long flags; | ||
1466 | u8 done = 0; | ||
1467 | |||
1468 | d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); | ||
1469 | switch (urb->status) { | ||
1470 | case 0: | ||
1471 | spin_lock_irqsave(&xfer->lock, flags); | ||
1472 | wa = xfer->wa; | ||
1473 | dev = &wa->usb_iface->dev; | ||
1474 | rpipe = xfer->ep->hcpriv; | ||
1475 | d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n", | ||
1476 | xfer, seg->index, (size_t)urb->actual_length); | ||
1477 | seg->status = WA_SEG_DONE; | ||
1478 | seg->result = urb->actual_length; | ||
1479 | xfer->segs_done++; | ||
1480 | rpipe_ready = rpipe_avail_inc(rpipe); | ||
1481 | done = __wa_xfer_is_done(xfer); | ||
1482 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1483 | if (done) | ||
1484 | wa_xfer_completion(xfer); | ||
1485 | if (rpipe_ready) | ||
1486 | wa_xfer_delayed_run(rpipe); | ||
1487 | break; | ||
1488 | case -ECONNRESET: /* URB unlinked; no need to do anything */ | ||
1489 | case -ENOENT: /* as it was done by the who unlinked us */ | ||
1490 | break; | ||
1491 | default: /* Other errors ... */ | ||
1492 | spin_lock_irqsave(&xfer->lock, flags); | ||
1493 | wa = xfer->wa; | ||
1494 | dev = &wa->usb_iface->dev; | ||
1495 | rpipe = xfer->ep->hcpriv; | ||
1496 | if (printk_ratelimit()) | ||
1497 | dev_err(dev, "xfer %p#%u: data in error %d\n", | ||
1498 | xfer, seg->index, urb->status); | ||
1499 | if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, | ||
1500 | EDC_ERROR_TIMEFRAME)){ | ||
1501 | dev_err(dev, "DTO: URB max acceptable errors " | ||
1502 | "exceeded, resetting device\n"); | ||
1503 | wa_reset_all(wa); | ||
1504 | } | ||
1505 | seg->status = WA_SEG_ERROR; | ||
1506 | seg->result = urb->status; | ||
1507 | xfer->segs_done++; | ||
1508 | rpipe_ready = rpipe_avail_inc(rpipe); | ||
1509 | __wa_xfer_abort(xfer); | ||
1510 | done = __wa_xfer_is_done(xfer); | ||
1511 | spin_unlock_irqrestore(&xfer->lock, flags); | ||
1512 | if (done) | ||
1513 | wa_xfer_completion(xfer); | ||
1514 | if (rpipe_ready) | ||
1515 | wa_xfer_delayed_run(rpipe); | ||
1516 | } | ||
1517 | d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); | ||
1518 | } | ||
1519 | |||
1520 | /* | ||
1521 | * Handle an incoming transfer result buffer | ||
1522 | * | ||
1523 | * Given a transfer result buffer, it completes the transfer (possibly | ||
1524 | * scheduling and buffer in read) and then resubmits the DTI URB for a | ||
1525 | * new transfer result read. | ||
1526 | * | ||
1527 | * | ||
1528 | * The xfer_result DTI URB state machine | ||
1529 | * | ||
1530 | * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In) | ||
1531 | * | ||
1532 | * We start in OFF mode, the first xfer_result notification [through | ||
1533 | * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to | ||
1534 | * read. | ||
1535 | * | ||
1536 | * We receive a buffer -- if it is not a xfer_result, we complain and | ||
1537 | * repost the DTI-URB. If it is a xfer_result then do the xfer seg | ||
1538 | * request accounting. If it is an IN segment, we move to RBI and post | ||
1539 | * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will | ||
1540 | * repost the DTI-URB and move to RXR state. if there was no IN | ||
1541 | * segment, it will repost the DTI-URB. | ||
1542 | * | ||
1543 | * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many | ||
1544 | * errors) in the URBs. | ||
1545 | */ | ||
1546 | static void wa_xfer_result_cb(struct urb *urb) | ||
1547 | { | ||
1548 | int result; | ||
1549 | struct wahc *wa = urb->context; | ||
1550 | struct device *dev = &wa->usb_iface->dev; | ||
1551 | struct wa_xfer_result *xfer_result; | ||
1552 | u32 xfer_id; | ||
1553 | struct wa_xfer *xfer; | ||
1554 | u8 usb_status; | ||
1555 | |||
1556 | d_fnstart(3, dev, "(%p)\n", wa); | ||
1557 | BUG_ON(wa->dti_urb != urb); | ||
1558 | switch (wa->dti_urb->status) { | ||
1559 | case 0: | ||
1560 | /* We have a xfer result buffer; check it */ | ||
1561 | d_printf(2, dev, "DTI: xfer result %d bytes at %p\n", | ||
1562 | urb->actual_length, urb->transfer_buffer); | ||
1563 | d_dump(3, dev, urb->transfer_buffer, urb->actual_length); | ||
1564 | if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { | ||
1565 | dev_err(dev, "DTI Error: xfer result--bad size " | ||
1566 | "xfer result (%d bytes vs %zu needed)\n", | ||
1567 | urb->actual_length, sizeof(*xfer_result)); | ||
1568 | break; | ||
1569 | } | ||
1570 | xfer_result = wa->xfer_result; | ||
1571 | if (xfer_result->hdr.bLength != sizeof(*xfer_result)) { | ||
1572 | dev_err(dev, "DTI Error: xfer result--" | ||
1573 | "bad header length %u\n", | ||
1574 | xfer_result->hdr.bLength); | ||
1575 | break; | ||
1576 | } | ||
1577 | if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) { | ||
1578 | dev_err(dev, "DTI Error: xfer result--" | ||
1579 | "bad header type 0x%02x\n", | ||
1580 | xfer_result->hdr.bNotifyType); | ||
1581 | break; | ||
1582 | } | ||
1583 | usb_status = xfer_result->bTransferStatus & 0x3f; | ||
1584 | if (usb_status == WA_XFER_STATUS_ABORTED | ||
1585 | || usb_status == WA_XFER_STATUS_NOT_FOUND) | ||
1586 | /* taken care of already */ | ||
1587 | break; | ||
1588 | xfer_id = xfer_result->dwTransferID; | ||
1589 | xfer = wa_xfer_get_by_id(wa, xfer_id); | ||
1590 | if (xfer == NULL) { | ||
1591 | /* FIXME: transaction might have been cancelled */ | ||
1592 | dev_err(dev, "DTI Error: xfer result--" | ||
1593 | "unknown xfer 0x%08x (status 0x%02x)\n", | ||
1594 | xfer_id, usb_status); | ||
1595 | break; | ||
1596 | } | ||
1597 | wa_xfer_result_chew(wa, xfer); | ||
1598 | wa_xfer_put(xfer); | ||
1599 | break; | ||
1600 | case -ENOENT: /* (we killed the URB)...so, no broadcast */ | ||
1601 | case -ESHUTDOWN: /* going away! */ | ||
1602 | dev_dbg(dev, "DTI: going down! %d\n", urb->status); | ||
1603 | goto out; | ||
1604 | default: | ||
1605 | /* Unknown error */ | ||
1606 | if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, | ||
1607 | EDC_ERROR_TIMEFRAME)) { | ||
1608 | dev_err(dev, "DTI: URB max acceptable errors " | ||
1609 | "exceeded, resetting device\n"); | ||
1610 | wa_reset_all(wa); | ||
1611 | goto out; | ||
1612 | } | ||
1613 | if (printk_ratelimit()) | ||
1614 | dev_err(dev, "DTI: URB error %d\n", urb->status); | ||
1615 | break; | ||
1616 | } | ||
1617 | /* Resubmit the DTI URB */ | ||
1618 | result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); | ||
1619 | if (result < 0) { | ||
1620 | dev_err(dev, "DTI Error: Could not submit DTI URB (%d), " | ||
1621 | "resetting\n", result); | ||
1622 | wa_reset_all(wa); | ||
1623 | } | ||
1624 | out: | ||
1625 | d_fnend(3, dev, "(%p) = void\n", wa); | ||
1626 | return; | ||
1627 | } | ||
1628 | |||
1629 | /* | ||
1630 | * Transfer complete notification | ||
1631 | * | ||
1632 | * Called from the notif.c code. We get a notification on EP2 saying | ||
1633 | * that some endpoint has some transfer result data available. We are | ||
1634 | * about to read it. | ||
1635 | * | ||
1636 | * To speed up things, we always have a URB reading the DTI URB; we | ||
1637 | * don't really set it up and start it until the first xfer complete | ||
1638 | * notification arrives, which is what we do here. | ||
1639 | * | ||
1640 | * Follow up in wa_xfer_result_cb(), as that's where the whole state | ||
1641 | * machine starts. | ||
1642 | * | ||
1643 | * So here we just initialize the DTI URB for reading transfer result | ||
1644 | * notifications and also the buffer-in URB, for reading buffers. Then | ||
1645 | * we just submit the DTI URB. | ||
1646 | * | ||
1647 | * @wa shall be referenced | ||
1648 | */ | ||
1649 | void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr) | ||
1650 | { | ||
1651 | int result; | ||
1652 | struct device *dev = &wa->usb_iface->dev; | ||
1653 | struct wa_notif_xfer *notif_xfer; | ||
1654 | const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; | ||
1655 | |||
1656 | d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr); | ||
1657 | notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); | ||
1658 | BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); | ||
1659 | |||
1660 | if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) { | ||
1661 | /* FIXME: hardcoded limitation, adapt */ | ||
1662 | dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n", | ||
1663 | notif_xfer->bEndpoint, dti_epd->bEndpointAddress); | ||
1664 | goto error; | ||
1665 | } | ||
1666 | if (wa->dti_urb != NULL) /* DTI URB already started */ | ||
1667 | goto out; | ||
1668 | |||
1669 | wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
1670 | if (wa->dti_urb == NULL) { | ||
1671 | dev_err(dev, "Can't allocate DTI URB\n"); | ||
1672 | goto error_dti_urb_alloc; | ||
1673 | } | ||
1674 | usb_fill_bulk_urb( | ||
1675 | wa->dti_urb, wa->usb_dev, | ||
1676 | usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), | ||
1677 | wa->xfer_result, wa->xfer_result_size, | ||
1678 | wa_xfer_result_cb, wa); | ||
1679 | |||
1680 | wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
1681 | if (wa->buf_in_urb == NULL) { | ||
1682 | dev_err(dev, "Can't allocate BUF-IN URB\n"); | ||
1683 | goto error_buf_in_urb_alloc; | ||
1684 | } | ||
1685 | usb_fill_bulk_urb( | ||
1686 | wa->buf_in_urb, wa->usb_dev, | ||
1687 | usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), | ||
1688 | NULL, 0, wa_buf_in_cb, wa); | ||
1689 | result = usb_submit_urb(wa->dti_urb, GFP_KERNEL); | ||
1690 | if (result < 0) { | ||
1691 | dev_err(dev, "DTI Error: Could not submit DTI URB (%d), " | ||
1692 | "resetting\n", result); | ||
1693 | goto error_dti_urb_submit; | ||
1694 | } | ||
1695 | out: | ||
1696 | d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); | ||
1697 | return; | ||
1698 | |||
1699 | error_dti_urb_submit: | ||
1700 | usb_put_urb(wa->buf_in_urb); | ||
1701 | error_buf_in_urb_alloc: | ||
1702 | usb_put_urb(wa->dti_urb); | ||
1703 | wa->dti_urb = NULL; | ||
1704 | error_dti_urb_alloc: | ||
1705 | error: | ||
1706 | wa_reset_all(wa); | ||
1707 | d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); | ||
1708 | return; | ||
1709 | } | ||
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c new file mode 100644 index 000000000000..07c63a31c799 --- /dev/null +++ b/drivers/usb/wusbcore/wusbhc.c | |||
@@ -0,0 +1,418 @@ | |||
1 | /* | ||
2 | * Wireless USB Host Controller | ||
3 | * sysfs glue, wusbcore module support and life cycle management | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * Creation/destruction of wusbhc is split in two parts; that that | ||
25 | * doesn't require the HCD to be added (wusbhc_{create,destroy}) and | ||
26 | * the one that requires (phase B, wusbhc_b_{create,destroy}). | ||
27 | * | ||
28 | * This is so because usb_add_hcd() will start the HC, and thus, all | ||
29 | * the HC specific stuff has to be already initialiazed (like sysfs | ||
30 | * thingies). | ||
31 | */ | ||
32 | #include <linux/device.h> | ||
33 | #include <linux/module.h> | ||
34 | #include "wusbhc.h" | ||
35 | |||
36 | /** | ||
37 | * Extract the wusbhc that corresponds to a USB Host Controller class device | ||
38 | * | ||
39 | * WARNING! Apply only if @dev is that of a | ||
40 | * wusbhc.usb_hcd.self->class_dev; otherwise, you loose. | ||
41 | */ | ||
42 | static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev) | ||
43 | { | ||
44 | struct usb_bus *usb_bus = dev_get_drvdata(dev); | ||
45 | struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus); | ||
46 | return usb_hcd_to_wusbhc(usb_hcd); | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * Show & store the current WUSB trust timeout | ||
51 | * | ||
52 | * We don't do locking--it is an 'atomic' value. | ||
53 | * | ||
54 | * The units that we store/show are always MILLISECONDS. However, the | ||
55 | * value of trust_timeout is jiffies. | ||
56 | */ | ||
57 | static ssize_t wusb_trust_timeout_show(struct device *dev, | ||
58 | struct device_attribute *attr, char *buf) | ||
59 | { | ||
60 | struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); | ||
61 | |||
62 | return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout); | ||
63 | } | ||
64 | |||
65 | static ssize_t wusb_trust_timeout_store(struct device *dev, | ||
66 | struct device_attribute *attr, | ||
67 | const char *buf, size_t size) | ||
68 | { | ||
69 | struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); | ||
70 | ssize_t result = -ENOSYS; | ||
71 | unsigned trust_timeout; | ||
72 | |||
73 | result = sscanf(buf, "%u", &trust_timeout); | ||
74 | if (result != 1) { | ||
75 | result = -EINVAL; | ||
76 | goto out; | ||
77 | } | ||
78 | /* FIXME: maybe we should check for range validity? */ | ||
79 | wusbhc->trust_timeout = trust_timeout; | ||
80 | cancel_delayed_work(&wusbhc->keep_alive_timer); | ||
81 | flush_workqueue(wusbd); | ||
82 | queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, | ||
83 | (trust_timeout * CONFIG_HZ)/1000/2); | ||
84 | out: | ||
85 | return result < 0 ? result : size; | ||
86 | } | ||
87 | static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show, | ||
88 | wusb_trust_timeout_store); | ||
89 | |||
90 | /* | ||
91 | * Show & store the current WUSB CHID | ||
92 | */ | ||
93 | static ssize_t wusb_chid_show(struct device *dev, | ||
94 | struct device_attribute *attr, char *buf) | ||
95 | { | ||
96 | struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); | ||
97 | ssize_t result = 0; | ||
98 | |||
99 | if (wusbhc->wuie_host_info != NULL) | ||
100 | result += ckhdid_printf(buf, PAGE_SIZE, | ||
101 | &wusbhc->wuie_host_info->CHID); | ||
102 | return result; | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Store a new CHID | ||
107 | * | ||
108 | * This will (FIXME) trigger many changes. | ||
109 | * | ||
110 | * - Send an all zeros CHID and it will stop the controller | ||
111 | * - Send a non-zero CHID and it will start it | ||
112 | * (unless it was started, it will just change the CHID, | ||
113 | * diconnecting all devices first). | ||
114 | * | ||
115 | * So first we scan the MMC we are sent and then we act on it. We | ||
116 | * read it in the same format as we print it, an ASCII string of 16 | ||
117 | * hex bytes. | ||
118 | * | ||
119 | * See wusbhc_chid_set() for more info. | ||
120 | */ | ||
121 | static ssize_t wusb_chid_store(struct device *dev, | ||
122 | struct device_attribute *attr, | ||
123 | const char *buf, size_t size) | ||
124 | { | ||
125 | struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); | ||
126 | struct wusb_ckhdid chid; | ||
127 | ssize_t result; | ||
128 | |||
129 | result = sscanf(buf, | ||
130 | "%02hhx %02hhx %02hhx %02hhx " | ||
131 | "%02hhx %02hhx %02hhx %02hhx " | ||
132 | "%02hhx %02hhx %02hhx %02hhx " | ||
133 | "%02hhx %02hhx %02hhx %02hhx\n", | ||
134 | &chid.data[0] , &chid.data[1] , | ||
135 | &chid.data[2] , &chid.data[3] , | ||
136 | &chid.data[4] , &chid.data[5] , | ||
137 | &chid.data[6] , &chid.data[7] , | ||
138 | &chid.data[8] , &chid.data[9] , | ||
139 | &chid.data[10], &chid.data[11], | ||
140 | &chid.data[12], &chid.data[13], | ||
141 | &chid.data[14], &chid.data[15]); | ||
142 | if (result != 16) { | ||
143 | dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): " | ||
144 | "%d\n", (int)result); | ||
145 | return -EINVAL; | ||
146 | } | ||
147 | result = wusbhc_chid_set(wusbhc, &chid); | ||
148 | return result < 0 ? result : size; | ||
149 | } | ||
150 | static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store); | ||
151 | |||
152 | /* Group all the WUSBHC attributes */ | ||
153 | static struct attribute *wusbhc_attrs[] = { | ||
154 | &dev_attr_wusb_trust_timeout.attr, | ||
155 | &dev_attr_wusb_chid.attr, | ||
156 | NULL, | ||
157 | }; | ||
158 | |||
159 | static struct attribute_group wusbhc_attr_group = { | ||
160 | .name = NULL, /* we want them in the same directory */ | ||
161 | .attrs = wusbhc_attrs, | ||
162 | }; | ||
163 | |||
164 | /* | ||
165 | * Create a wusbhc instance | ||
166 | * | ||
167 | * NOTEs: | ||
168 | * | ||
169 | * - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been | ||
170 | * initialized but not added. | ||
171 | * | ||
172 | * - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling. | ||
173 | * | ||
174 | * - fill out wusbhc->uwb_rc and refcount it before calling | ||
175 | * - fill out the wusbhc->sec_modes array | ||
176 | */ | ||
177 | int wusbhc_create(struct wusbhc *wusbhc) | ||
178 | { | ||
179 | int result = 0; | ||
180 | |||
181 | wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS; | ||
182 | mutex_init(&wusbhc->mutex); | ||
183 | result = wusbhc_mmcie_create(wusbhc); | ||
184 | if (result < 0) | ||
185 | goto error_mmcie_create; | ||
186 | result = wusbhc_devconnect_create(wusbhc); | ||
187 | if (result < 0) | ||
188 | goto error_devconnect_create; | ||
189 | result = wusbhc_rh_create(wusbhc); | ||
190 | if (result < 0) | ||
191 | goto error_rh_create; | ||
192 | result = wusbhc_sec_create(wusbhc); | ||
193 | if (result < 0) | ||
194 | goto error_sec_create; | ||
195 | return 0; | ||
196 | |||
197 | error_sec_create: | ||
198 | wusbhc_rh_destroy(wusbhc); | ||
199 | error_rh_create: | ||
200 | wusbhc_devconnect_destroy(wusbhc); | ||
201 | error_devconnect_create: | ||
202 | wusbhc_mmcie_destroy(wusbhc); | ||
203 | error_mmcie_create: | ||
204 | return result; | ||
205 | } | ||
206 | EXPORT_SYMBOL_GPL(wusbhc_create); | ||
207 | |||
208 | static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc) | ||
209 | { | ||
210 | return &wusbhc->usb_hcd.self.controller->kobj; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * Phase B of a wusbhc instance creation | ||
215 | * | ||
216 | * Creates fields that depend on wusbhc->usb_hcd having been | ||
217 | * added. This is where we create the sysfs files in | ||
218 | * /sys/class/usb_host/usb_hostX/. | ||
219 | * | ||
220 | * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper | ||
221 | * layer (hwahc or whci) | ||
222 | */ | ||
223 | int wusbhc_b_create(struct wusbhc *wusbhc) | ||
224 | { | ||
225 | int result = 0; | ||
226 | struct device *dev = wusbhc->usb_hcd.self.controller; | ||
227 | |||
228 | result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); | ||
229 | if (result < 0) { | ||
230 | dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result); | ||
231 | goto error_create_attr_group; | ||
232 | } | ||
233 | |||
234 | result = wusbhc_pal_register(wusbhc); | ||
235 | if (result < 0) | ||
236 | goto error_pal_register; | ||
237 | return 0; | ||
238 | |||
239 | error_pal_register: | ||
240 | sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); | ||
241 | error_create_attr_group: | ||
242 | return result; | ||
243 | } | ||
244 | EXPORT_SYMBOL_GPL(wusbhc_b_create); | ||
245 | |||
246 | void wusbhc_b_destroy(struct wusbhc *wusbhc) | ||
247 | { | ||
248 | wusbhc_pal_unregister(wusbhc); | ||
249 | sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); | ||
250 | } | ||
251 | EXPORT_SYMBOL_GPL(wusbhc_b_destroy); | ||
252 | |||
253 | void wusbhc_destroy(struct wusbhc *wusbhc) | ||
254 | { | ||
255 | wusbhc_sec_destroy(wusbhc); | ||
256 | wusbhc_rh_destroy(wusbhc); | ||
257 | wusbhc_devconnect_destroy(wusbhc); | ||
258 | wusbhc_mmcie_destroy(wusbhc); | ||
259 | } | ||
260 | EXPORT_SYMBOL_GPL(wusbhc_destroy); | ||
261 | |||
262 | struct workqueue_struct *wusbd; | ||
263 | EXPORT_SYMBOL_GPL(wusbd); | ||
264 | |||
265 | /* | ||
266 | * WUSB Cluster ID allocation map | ||
267 | * | ||
268 | * Each WUSB bus in a channel is identified with a Cluster Id in the | ||
269 | * unauth address pace (WUSB1.0[4.3]). We take the range 0xe0 to 0xff | ||
270 | * (that's space for 31 WUSB controllers, as 0xff can't be taken). We | ||
271 | * start taking from 0xff, 0xfe, 0xfd... (hence the += or -= 0xff). | ||
272 | * | ||
273 | * For each one we taken, we pin it in the bitap | ||
274 | */ | ||
275 | #define CLUSTER_IDS 32 | ||
276 | static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS); | ||
277 | static DEFINE_SPINLOCK(wusb_cluster_ids_lock); | ||
278 | |||
279 | /* | ||
280 | * Get a WUSB Cluster ID | ||
281 | * | ||
282 | * Need to release with wusb_cluster_id_put() when done w/ it. | ||
283 | */ | ||
284 | /* FIXME: coordinate with the choose_addres() from the USB stack */ | ||
285 | /* we want to leave the top of the 128 range for cluster addresses and | ||
286 | * the bottom for device addresses (as we map them one on one with | ||
287 | * ports). */ | ||
288 | u8 wusb_cluster_id_get(void) | ||
289 | { | ||
290 | u8 id; | ||
291 | spin_lock(&wusb_cluster_ids_lock); | ||
292 | id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS); | ||
293 | if (id > CLUSTER_IDS) { | ||
294 | id = 0; | ||
295 | goto out; | ||
296 | } | ||
297 | set_bit(id, wusb_cluster_id_table); | ||
298 | id = (u8) 0xff - id; | ||
299 | out: | ||
300 | spin_unlock(&wusb_cluster_ids_lock); | ||
301 | return id; | ||
302 | |||
303 | } | ||
304 | EXPORT_SYMBOL_GPL(wusb_cluster_id_get); | ||
305 | |||
306 | /* | ||
307 | * Release a WUSB Cluster ID | ||
308 | * | ||
309 | * Obtained it with wusb_cluster_id_get() | ||
310 | */ | ||
311 | void wusb_cluster_id_put(u8 id) | ||
312 | { | ||
313 | id = 0xff - id; | ||
314 | BUG_ON(id >= CLUSTER_IDS); | ||
315 | spin_lock(&wusb_cluster_ids_lock); | ||
316 | WARN_ON(!test_bit(id, wusb_cluster_id_table)); | ||
317 | clear_bit(id, wusb_cluster_id_table); | ||
318 | spin_unlock(&wusb_cluster_ids_lock); | ||
319 | } | ||
320 | EXPORT_SYMBOL_GPL(wusb_cluster_id_put); | ||
321 | |||
322 | /** | ||
323 | * wusbhc_giveback_urb - return an URB to the USB core | ||
324 | * @wusbhc: the host controller the URB is from. | ||
325 | * @urb: the URB. | ||
326 | * @status: the URB's status. | ||
327 | * | ||
328 | * Return an URB to the USB core doing some additional WUSB specific | ||
329 | * processing. | ||
330 | * | ||
331 | * - After a successful transfer, update the trust timeout timestamp | ||
332 | * for the WUSB device. | ||
333 | * | ||
334 | * - [WUSB] sections 4.13 and 7.5.1 specifies the stop retrasmittion | ||
335 | * condition for the WCONNECTACK_IE is that the host has observed | ||
336 | * the associated device responding to a control transfer. | ||
337 | */ | ||
338 | void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status) | ||
339 | { | ||
340 | struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); | ||
341 | |||
342 | if (status == 0) { | ||
343 | wusb_dev->entry_ts = jiffies; | ||
344 | |||
345 | /* wusbhc_devconnect_acked() can't be called from from | ||
346 | atomic context so defer it to a work queue. */ | ||
347 | if (!list_empty(&wusb_dev->cack_node)) | ||
348 | queue_work(wusbd, &wusb_dev->devconnect_acked_work); | ||
349 | } | ||
350 | |||
351 | usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status); | ||
352 | } | ||
353 | EXPORT_SYMBOL_GPL(wusbhc_giveback_urb); | ||
354 | |||
355 | /** | ||
356 | * wusbhc_reset_all - reset the HC hardware | ||
357 | * @wusbhc: the host controller to reset. | ||
358 | * | ||
359 | * Request a full hardware reset of the chip. This will also reset | ||
360 | * the radio controller and any other PALs. | ||
361 | */ | ||
362 | void wusbhc_reset_all(struct wusbhc *wusbhc) | ||
363 | { | ||
364 | uwb_rc_reset_all(wusbhc->uwb_rc); | ||
365 | } | ||
366 | EXPORT_SYMBOL_GPL(wusbhc_reset_all); | ||
367 | |||
368 | static struct notifier_block wusb_usb_notifier = { | ||
369 | .notifier_call = wusb_usb_ncb, | ||
370 | .priority = INT_MAX /* Need to be called first of all */ | ||
371 | }; | ||
372 | |||
373 | static int __init wusbcore_init(void) | ||
374 | { | ||
375 | int result; | ||
376 | result = wusb_crypto_init(); | ||
377 | if (result < 0) | ||
378 | goto error_crypto_init; | ||
379 | /* WQ is singlethread because we need to serialize notifications */ | ||
380 | wusbd = create_singlethread_workqueue("wusbd"); | ||
381 | if (wusbd == NULL) { | ||
382 | result = -ENOMEM; | ||
383 | printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n"); | ||
384 | goto error_wusbd_create; | ||
385 | } | ||
386 | usb_register_notify(&wusb_usb_notifier); | ||
387 | bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS); | ||
388 | set_bit(0, wusb_cluster_id_table); /* reserve Cluster ID 0xff */ | ||
389 | return 0; | ||
390 | |||
391 | error_wusbd_create: | ||
392 | wusb_crypto_exit(); | ||
393 | error_crypto_init: | ||
394 | return result; | ||
395 | |||
396 | } | ||
397 | module_init(wusbcore_init); | ||
398 | |||
399 | static void __exit wusbcore_exit(void) | ||
400 | { | ||
401 | clear_bit(0, wusb_cluster_id_table); | ||
402 | if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) { | ||
403 | char buf[256]; | ||
404 | bitmap_scnprintf(buf, sizeof(buf), wusb_cluster_id_table, | ||
405 | CLUSTER_IDS); | ||
406 | printk(KERN_ERR "BUG: WUSB Cluster IDs not released " | ||
407 | "on exit: %s\n", buf); | ||
408 | WARN_ON(1); | ||
409 | } | ||
410 | usb_unregister_notify(&wusb_usb_notifier); | ||
411 | destroy_workqueue(wusbd); | ||
412 | wusb_crypto_exit(); | ||
413 | } | ||
414 | module_exit(wusbcore_exit); | ||
415 | |||
416 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
417 | MODULE_DESCRIPTION("Wireless USB core"); | ||
418 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h new file mode 100644 index 000000000000..d0c132434f1b --- /dev/null +++ b/drivers/usb/wusbcore/wusbhc.h | |||
@@ -0,0 +1,495 @@ | |||
1 | /* | ||
2 | * Wireless USB Host Controller | ||
3 | * Common infrastructure for WHCI and HWA WUSB-HC drivers | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This driver implements parts common to all Wireless USB Host | ||
25 | * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used | ||
26 | * by: | ||
27 | * | ||
28 | * - hwahc: HWA, USB-dongle that implements a Wireless USB host | ||
29 | * controller, (Wireless USB 1.0 Host-Wire-Adapter specification). | ||
30 | * | ||
31 | * - whci: WHCI, a PCI card with a wireless host controller | ||
32 | * (Wireless Host Controller Interface 1.0 specification). | ||
33 | * | ||
34 | * Check out the Design-overview.txt file in the source documentation | ||
35 | * for other details on the implementation. | ||
36 | * | ||
37 | * Main blocks: | ||
38 | * | ||
39 | * rh Root Hub emulation (part of the HCD glue) | ||
40 | * | ||
41 | * devconnect Handle all the issues related to device connection, | ||
42 | * authentication, disconnection, timeout, reseting, | ||
43 | * keepalives, etc. | ||
44 | * | ||
45 | * mmc MMC IE broadcasting handling | ||
46 | * | ||
47 | * A host controller driver just initializes its stuff and as part of | ||
48 | * that, creates a 'struct wusbhc' instance that handles all the | ||
49 | * common WUSB mechanisms. Links in the function ops that are specific | ||
50 | * to it and then registers the host controller. Ready to run. | ||
51 | */ | ||
52 | |||
53 | #ifndef __WUSBHC_H__ | ||
54 | #define __WUSBHC_H__ | ||
55 | |||
56 | #include <linux/usb.h> | ||
57 | #include <linux/list.h> | ||
58 | #include <linux/mutex.h> | ||
59 | #include <linux/kref.h> | ||
60 | #include <linux/workqueue.h> | ||
61 | /* FIXME: Yes, I know: BAD--it's not my fault the USB HC iface is not | ||
62 | * public */ | ||
63 | #include <linux/../../drivers/usb/core/hcd.h> | ||
64 | #include <linux/uwb.h> | ||
65 | #include <linux/usb/wusb.h> | ||
66 | |||
67 | |||
68 | /** | ||
69 | * Wireless USB device | ||
70 | * | ||
71 | * Describe a WUSB device connected to the cluster. This struct | ||
72 | * belongs to the 'struct wusb_port' it is attached to and it is | ||
73 | * responsible for putting and clearing the pointer to it. | ||
74 | * | ||
75 | * Note this "complements" the 'struct usb_device' that the usb_hcd | ||
76 | * keeps for each connected USB device. However, it extends some | ||
77 | * information that is not available (there is no hcpriv ptr in it!) | ||
78 | * *and* most importantly, it's life cycle is different. It is created | ||
79 | * as soon as we get a DN_Connect (connect request notification) from | ||
80 | * the device through the WUSB host controller; the USB stack doesn't | ||
81 | * create the device until we authenticate it. FIXME: this will | ||
82 | * change. | ||
83 | * | ||
84 | * @bos: This is allocated when the BOS descriptors are read from | ||
85 | * the device and freed upon the wusb_dev struct dying. | ||
86 | * @wusb_cap_descr: points into @bos, and has been verified to be size | ||
87 | * safe. | ||
88 | */ | ||
89 | struct wusb_dev { | ||
90 | struct kref refcnt; | ||
91 | struct wusbhc *wusbhc; | ||
92 | struct list_head cack_node; /* Connect-Ack list */ | ||
93 | u8 port_idx; | ||
94 | u8 addr; | ||
95 | u8 beacon_type:4; | ||
96 | struct usb_encryption_descriptor ccm1_etd; | ||
97 | struct wusb_ckhdid cdid; | ||
98 | unsigned long entry_ts; | ||
99 | struct usb_bos_descriptor *bos; | ||
100 | struct usb_wireless_cap_descriptor *wusb_cap_descr; | ||
101 | struct uwb_mas_bm availability; | ||
102 | struct work_struct devconnect_acked_work; | ||
103 | struct urb *set_gtk_urb; | ||
104 | struct usb_ctrlrequest *set_gtk_req; | ||
105 | struct usb_device *usb_dev; | ||
106 | }; | ||
107 | |||
108 | #define WUSB_DEV_ADDR_UNAUTH 0x80 | ||
109 | |||
110 | static inline void wusb_dev_init(struct wusb_dev *wusb_dev) | ||
111 | { | ||
112 | kref_init(&wusb_dev->refcnt); | ||
113 | /* no need to init the cack_node */ | ||
114 | } | ||
115 | |||
116 | extern void wusb_dev_destroy(struct kref *_wusb_dev); | ||
117 | |||
118 | static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev) | ||
119 | { | ||
120 | kref_get(&wusb_dev->refcnt); | ||
121 | return wusb_dev; | ||
122 | } | ||
123 | |||
124 | static inline void wusb_dev_put(struct wusb_dev *wusb_dev) | ||
125 | { | ||
126 | kref_put(&wusb_dev->refcnt, wusb_dev_destroy); | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * Wireless USB Host Controlller root hub "fake" ports | ||
131 | * (state and device information) | ||
132 | * | ||
133 | * Wireless USB is wireless, so there are no ports; but we | ||
134 | * fake'em. Each RC can connect a max of devices at the same time | ||
135 | * (given in the Wireless Adapter descriptor, bNumPorts or WHCI's | ||
136 | * caps), referred to in wusbhc->ports_max. | ||
137 | * | ||
138 | * See rh.c for more information. | ||
139 | * | ||
140 | * The @status and @change use the same bits as in USB2.0[11.24.2.7], | ||
141 | * so we don't have to do much when getting the port's status. | ||
142 | * | ||
143 | * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10], | ||
144 | * include/linux/usb_ch9.h (#define USB_PORT_STAT_*) | ||
145 | */ | ||
146 | struct wusb_port { | ||
147 | u16 status; | ||
148 | u16 change; | ||
149 | struct wusb_dev *wusb_dev; /* connected device's info */ | ||
150 | unsigned reset_count; | ||
151 | u32 ptk_tkid; | ||
152 | }; | ||
153 | |||
154 | /** | ||
155 | * WUSB Host Controller specifics | ||
156 | * | ||
157 | * All fields that are common to all Wireless USB controller types | ||
158 | * (HWA and WHCI) are grouped here. Host Controller | ||
159 | * functions/operations that only deal with general Wireless USB HC | ||
160 | * issues use this data type to refer to the host. | ||
161 | * | ||
162 | * @usb_hcd Instantiation of a USB host controller | ||
163 | * (initialized by upper layer [HWA=HC or WHCI]. | ||
164 | * | ||
165 | * @dev Device that implements this; initialized by the | ||
166 | * upper layer (HWA-HC, WHCI...); this device should | ||
167 | * have a refcount. | ||
168 | * | ||
169 | * @trust_timeout After this time without hearing for device | ||
170 | * activity, we consider the device gone and we have to | ||
171 | * re-authenticate. | ||
172 | * | ||
173 | * Can be accessed w/o locking--however, read to a | ||
174 | * local variable then use. | ||
175 | * | ||
176 | * @chid WUSB Cluster Host ID: this is supposed to be a | ||
177 | * unique value that doesn't change across reboots (so | ||
178 | * that your devices do not require re-association). | ||
179 | * | ||
180 | * Read/Write protected by @mutex | ||
181 | * | ||
182 | * @dev_info This array has ports_max elements. It is used to | ||
183 | * give the HC information about the WUSB devices (see | ||
184 | * 'struct wusb_dev_info'). | ||
185 | * | ||
186 | * For HWA we need to allocate it in heap; for WHCI it | ||
187 | * needs to be permanently mapped, so we keep it for | ||
188 | * both and make it easy. Call wusbhc->dev_info_set() | ||
189 | * to update an entry. | ||
190 | * | ||
191 | * @ports_max Number of simultaneous device connections (fake | ||
192 | * ports) this HC will take. Read-only. | ||
193 | * | ||
194 | * @port Array of port status for each fake root port. Guaranteed to | ||
195 | * always be the same lenght during device existence | ||
196 | * [this allows for some unlocked but referenced reading]. | ||
197 | * | ||
198 | * @mmcies_max Max number of Information Elements this HC can send | ||
199 | * in its MMC. Read-only. | ||
200 | * | ||
201 | * @mmcie_add HC specific operation (WHCI or HWA) for adding an | ||
202 | * MMCIE. | ||
203 | * | ||
204 | * @mmcie_rm HC specific operation (WHCI or HWA) for removing an | ||
205 | * MMCIE. | ||
206 | * | ||
207 | * @enc_types Array which describes the encryptions methods | ||
208 | * supported by the host as described in WUSB1.0 -- | ||
209 | * one entry per supported method. As of WUSB1.0 there | ||
210 | * is only four methods, we make space for eight just in | ||
211 | * case they decide to add some more (and pray they do | ||
212 | * it in sequential order). if 'enc_types[enc_method] | ||
213 | * != 0', then it is supported by the host. enc_method | ||
214 | * is USB_ENC_TYPE*. | ||
215 | * | ||
216 | * @set_ptk: Set the PTK and enable encryption for a device. Or, if | ||
217 | * the supplied key is NULL, disable encryption for that | ||
218 | * device. | ||
219 | * | ||
220 | * @set_gtk: Set the GTK to be used for all future broadcast packets | ||
221 | * (i.e., MMCs). With some hardware, setting the GTK may start | ||
222 | * MMC transmission. | ||
223 | * | ||
224 | * NOTE: | ||
225 | * | ||
226 | * - If wusb_dev->usb_dev is not NULL, then usb_dev is valid | ||
227 | * (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev | ||
228 | * is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a | ||
229 | * refcount on it). | ||
230 | * | ||
231 | * Most of the times when you need to use it, it will be non-NULL, | ||
232 | * so there is no real need to check for it (wusb_dev will | ||
233 | * dissapear before usb_dev). | ||
234 | * | ||
235 | * - The following fields need to be filled out before calling | ||
236 | * wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}. | ||
237 | * | ||
238 | * - there is no wusbhc_init() method, we do everything in | ||
239 | * wusbhc_create(). | ||
240 | * | ||
241 | * - Creation is done in two phases, wusbhc_create() and | ||
242 | * wusbhc_create_b(); b are the parts that need to be called after | ||
243 | * calling usb_hcd_add(&wusbhc->usb_hcd). | ||
244 | */ | ||
245 | struct wusbhc { | ||
246 | struct usb_hcd usb_hcd; /* HAS TO BE 1st */ | ||
247 | struct device *dev; | ||
248 | struct uwb_rc *uwb_rc; | ||
249 | struct uwb_pal pal; | ||
250 | |||
251 | unsigned trust_timeout; /* in jiffies */ | ||
252 | struct wuie_host_info *wuie_host_info; /* Includes CHID */ | ||
253 | |||
254 | struct mutex mutex; /* locks everything else */ | ||
255 | u16 cluster_id; /* Wireless USB Cluster ID */ | ||
256 | struct wusb_port *port; /* Fake port status handling */ | ||
257 | struct wusb_dev_info *dev_info; /* for Set Device Info mgmt */ | ||
258 | u8 ports_max; | ||
259 | unsigned active:1; /* currently xmit'ing MMCs */ | ||
260 | struct wuie_keep_alive keep_alive_ie; /* protected by mutex */ | ||
261 | struct delayed_work keep_alive_timer; | ||
262 | struct list_head cack_list; /* Connect acknowledging */ | ||
263 | size_t cack_count; /* protected by 'mutex' */ | ||
264 | struct wuie_connect_ack cack_ie; | ||
265 | struct uwb_rsv *rsv; /* cluster bandwidth reservation */ | ||
266 | |||
267 | struct mutex mmcie_mutex; /* MMC WUIE handling */ | ||
268 | struct wuie_hdr **mmcie; /* WUIE array */ | ||
269 | u8 mmcies_max; | ||
270 | /* FIXME: make wusbhc_ops? */ | ||
271 | int (*start)(struct wusbhc *wusbhc); | ||
272 | void (*stop)(struct wusbhc *wusbhc); | ||
273 | int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, | ||
274 | u8 handle, struct wuie_hdr *wuie); | ||
275 | int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle); | ||
276 | int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev); | ||
277 | int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index, | ||
278 | const struct uwb_mas_bm *); | ||
279 | int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx, | ||
280 | u32 tkid, const void *key, size_t key_size); | ||
281 | int (*set_gtk)(struct wusbhc *wusbhc, | ||
282 | u32 tkid, const void *key, size_t key_size); | ||
283 | int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots); | ||
284 | |||
285 | struct { | ||
286 | struct usb_key_descriptor descr; | ||
287 | u8 data[16]; /* GTK key data */ | ||
288 | } __attribute__((packed)) gtk; | ||
289 | u8 gtk_index; | ||
290 | u32 gtk_tkid; | ||
291 | struct work_struct gtk_rekey_done_work; | ||
292 | int pending_set_gtks; | ||
293 | |||
294 | struct usb_encryption_descriptor *ccm1_etd; | ||
295 | }; | ||
296 | |||
297 | #define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd) | ||
298 | |||
299 | |||
300 | extern int wusbhc_create(struct wusbhc *); | ||
301 | extern int wusbhc_b_create(struct wusbhc *); | ||
302 | extern void wusbhc_b_destroy(struct wusbhc *); | ||
303 | extern void wusbhc_destroy(struct wusbhc *); | ||
304 | extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *, | ||
305 | struct wusb_dev *); | ||
306 | extern void wusb_dev_sysfs_rm(struct wusb_dev *); | ||
307 | extern int wusbhc_sec_create(struct wusbhc *); | ||
308 | extern int wusbhc_sec_start(struct wusbhc *); | ||
309 | extern void wusbhc_sec_stop(struct wusbhc *); | ||
310 | extern void wusbhc_sec_destroy(struct wusbhc *); | ||
311 | extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, | ||
312 | int status); | ||
313 | void wusbhc_reset_all(struct wusbhc *wusbhc); | ||
314 | |||
315 | int wusbhc_pal_register(struct wusbhc *wusbhc); | ||
316 | void wusbhc_pal_unregister(struct wusbhc *wusbhc); | ||
317 | |||
318 | /* | ||
319 | * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone | ||
320 | * | ||
321 | * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr) | ||
322 | * | ||
323 | * This is a safe assumption as @usb_dev->bus is referenced all the | ||
324 | * time during the @usb_dev life cycle. | ||
325 | */ | ||
326 | static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev) | ||
327 | { | ||
328 | struct usb_hcd *usb_hcd; | ||
329 | usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self); | ||
330 | return usb_get_hcd(usb_hcd); | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * Increment the reference count on a wusbhc. | ||
335 | * | ||
336 | * @wusbhc's life cycle is identical to that of the underlying usb_hcd. | ||
337 | */ | ||
338 | static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc) | ||
339 | { | ||
340 | return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL; | ||
341 | } | ||
342 | |||
343 | /* | ||
344 | * Return the wusbhc associated to a @usb_dev | ||
345 | * | ||
346 | * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr) | ||
347 | * | ||
348 | * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down. | ||
349 | * WARNING: referenced at the usb_hcd level, unlocked | ||
350 | * | ||
351 | * FIXME: move offline | ||
352 | */ | ||
353 | static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev) | ||
354 | { | ||
355 | struct wusbhc *wusbhc = NULL; | ||
356 | struct usb_hcd *usb_hcd; | ||
357 | if (usb_dev->devnum > 1 && !usb_dev->wusb) { | ||
358 | /* but root hubs */ | ||
359 | dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum, | ||
360 | usb_dev->wusb); | ||
361 | BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb); | ||
362 | } | ||
363 | usb_hcd = usb_hcd_get_by_usb_dev(usb_dev); | ||
364 | if (usb_hcd == NULL) | ||
365 | return NULL; | ||
366 | BUG_ON(usb_hcd->wireless == 0); | ||
367 | return wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
368 | } | ||
369 | |||
370 | |||
371 | static inline void wusbhc_put(struct wusbhc *wusbhc) | ||
372 | { | ||
373 | usb_put_hcd(&wusbhc->usb_hcd); | ||
374 | } | ||
375 | |||
376 | int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid); | ||
377 | void wusbhc_stop(struct wusbhc *wusbhc); | ||
378 | extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *); | ||
379 | |||
380 | /* Device connect handling */ | ||
381 | extern int wusbhc_devconnect_create(struct wusbhc *); | ||
382 | extern void wusbhc_devconnect_destroy(struct wusbhc *); | ||
383 | extern int wusbhc_devconnect_start(struct wusbhc *wusbhc, | ||
384 | const struct wusb_ckhdid *chid); | ||
385 | extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc); | ||
386 | extern int wusbhc_devconnect_auth(struct wusbhc *, u8); | ||
387 | extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr, | ||
388 | struct wusb_dn_hdr *dn_hdr, size_t size); | ||
389 | extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port); | ||
390 | extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port); | ||
391 | extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, | ||
392 | void *priv); | ||
393 | extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, | ||
394 | u8 addr); | ||
395 | |||
396 | /* Wireless USB fake Root Hub methods */ | ||
397 | extern int wusbhc_rh_create(struct wusbhc *); | ||
398 | extern void wusbhc_rh_destroy(struct wusbhc *); | ||
399 | |||
400 | extern int wusbhc_rh_status_data(struct usb_hcd *, char *); | ||
401 | extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16); | ||
402 | extern int wusbhc_rh_suspend(struct usb_hcd *); | ||
403 | extern int wusbhc_rh_resume(struct usb_hcd *); | ||
404 | extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned); | ||
405 | |||
406 | /* MMC handling */ | ||
407 | extern int wusbhc_mmcie_create(struct wusbhc *); | ||
408 | extern void wusbhc_mmcie_destroy(struct wusbhc *); | ||
409 | extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt, | ||
410 | struct wuie_hdr *); | ||
411 | extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *); | ||
412 | |||
413 | /* Bandwidth reservation */ | ||
414 | int wusbhc_rsv_establish(struct wusbhc *wusbhc); | ||
415 | void wusbhc_rsv_terminate(struct wusbhc *wusbhc); | ||
416 | |||
417 | /* | ||
418 | * I've always said | ||
419 | * I wanted a wedding in a church... | ||
420 | * | ||
421 | * but lately I've been thinking about | ||
422 | * the Botanical Gardens. | ||
423 | * | ||
424 | * We could do it by the tulips. | ||
425 | * It'll be beautiful | ||
426 | * | ||
427 | * --Security! | ||
428 | */ | ||
429 | extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *, | ||
430 | struct wusb_dev *); | ||
431 | extern void wusb_dev_sec_rm(struct wusb_dev *) ; | ||
432 | extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *, | ||
433 | struct wusb_ckhdid *ck); | ||
434 | void wusbhc_gtk_rekey(struct wusbhc *wusbhc); | ||
435 | |||
436 | |||
437 | /* WUSB Cluster ID handling */ | ||
438 | extern u8 wusb_cluster_id_get(void); | ||
439 | extern void wusb_cluster_id_put(u8); | ||
440 | |||
441 | /* | ||
442 | * wusb_port_by_idx - return the port associated to a zero-based port index | ||
443 | * | ||
444 | * NOTE: valid without locking as long as wusbhc is referenced (as the | ||
445 | * number of ports doesn't change). The data pointed to has to | ||
446 | * be verified though :) | ||
447 | */ | ||
448 | static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc, | ||
449 | u8 port_idx) | ||
450 | { | ||
451 | return &wusbhc->port[port_idx]; | ||
452 | } | ||
453 | |||
454 | /* | ||
455 | * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to | ||
456 | * a port_idx. | ||
457 | * | ||
458 | * USB stack USB ports are 1 based!! | ||
459 | * | ||
460 | * NOTE: only valid for WUSB devices!!! | ||
461 | */ | ||
462 | static inline u8 wusb_port_no_to_idx(u8 port_no) | ||
463 | { | ||
464 | return port_no - 1; | ||
465 | } | ||
466 | |||
467 | extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *, | ||
468 | struct usb_device *); | ||
469 | |||
470 | /* | ||
471 | * Return a referenced wusb_dev given a @usb_dev | ||
472 | * | ||
473 | * Returns NULL if the usb_dev is being torn down. | ||
474 | * | ||
475 | * FIXME: move offline | ||
476 | */ | ||
477 | static inline | ||
478 | struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev) | ||
479 | { | ||
480 | struct wusbhc *wusbhc; | ||
481 | struct wusb_dev *wusb_dev; | ||
482 | wusbhc = wusbhc_get_by_usb_dev(usb_dev); | ||
483 | if (wusbhc == NULL) | ||
484 | return NULL; | ||
485 | mutex_lock(&wusbhc->mutex); | ||
486 | wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev); | ||
487 | mutex_unlock(&wusbhc->mutex); | ||
488 | wusbhc_put(wusbhc); | ||
489 | return wusb_dev; | ||
490 | } | ||
491 | |||
492 | /* Misc */ | ||
493 | |||
494 | extern struct workqueue_struct *wusbd; | ||
495 | #endif /* #ifndef __WUSBHC_H__ */ | ||
diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig new file mode 100644 index 000000000000..ca783127af36 --- /dev/null +++ b/drivers/uwb/Kconfig | |||
@@ -0,0 +1,90 @@ | |||
1 | # | ||
2 | # UWB device configuration | ||
3 | # | ||
4 | |||
5 | menuconfig UWB | ||
6 | tristate "Ultra Wideband devices (EXPERIMENTAL)" | ||
7 | depends on EXPERIMENTAL | ||
8 | depends on PCI | ||
9 | default n | ||
10 | help | ||
11 | UWB is a high-bandwidth, low-power, point-to-point radio | ||
12 | technology using a wide spectrum (3.1-10.6GHz). It is | ||
13 | optimized for in-room use (480Mbps at 2 meters, 110Mbps at | ||
14 | 10m). It serves as the transport layer for other protocols, | ||
15 | such as Wireless USB (WUSB), IP (WLP) and upcoming | ||
16 | Bluetooth and 1394. | ||
17 | |||
18 | The topology is peer to peer; however, higher level | ||
19 | protocols (such as WUSB) might impose a master/slave | ||
20 | relationship. | ||
21 | |||
22 | Say Y here if your computer has UWB radio controllers (USB- or | ||
23 | PCI-based). You will need to enable the radio controllers | ||
24 | below. It is ok to select all of them, no harm done. | ||
25 | |||
26 | For more help check the UWB and WUSB related files in | ||
27 | <file:Documentation/usb/>. | ||
28 | |||
29 | To compile the UWB stack as a module, choose M here. | ||
30 | |||
31 | if UWB | ||
32 | |||
33 | config UWB_HWA | ||
34 | tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)" | ||
35 | depends on USB | ||
36 | help | ||
37 | This driver enables the radio controller for HWA USB | ||
38 | devices. HWA stands for Host Wire Adapter, and it is a UWB | ||
39 | Radio Controller connected to your system via USB. Most of | ||
40 | them come with a Wireless USB host controller also. | ||
41 | |||
42 | To compile this driver select Y (built in) or M (module). It | ||
43 | is safe to select any even if you do not have the hardware. | ||
44 | |||
45 | config UWB_WHCI | ||
46 | tristate "UWB Radio Control driver for WHCI-compliant cards" | ||
47 | depends on PCI | ||
48 | help | ||
49 | This driver enables the radio controller for WHCI cards. | ||
50 | |||
51 | WHCI is a specification developed by Intel | ||
52 | (http://www.intel.com/technology/comms/wusb/whci.htm) much | ||
53 | in the spirit of USB's EHCI, but for UWB and Wireless USB | ||
54 | radio/host controllers connected via memory mapping (e.g.: | ||
55 | PCI). Most of these cards come also with a Wireless USB host | ||
56 | controller. | ||
57 | |||
58 | To compile this driver select Y (built in) or M (module). It | ||
59 | is safe to select any even if you do not have the hardware. | ||
60 | |||
61 | config UWB_WLP | ||
62 | tristate "Support WiMedia Link Protocol (Ethernet/IP over UWB)" | ||
63 | depends on UWB && NET | ||
64 | help | ||
65 | This is a common library for drivers that implement | ||
66 | networking over UWB. | ||
67 | |||
68 | config UWB_I1480U | ||
69 | tristate "Support for Intel Wireless UWB Link 1480 HWA" | ||
70 | depends on UWB_HWA | ||
71 | select FW_LOADER | ||
72 | help | ||
73 | This driver enables support for the i1480 when connected via | ||
74 | USB. It consists of a firmware uploader that will enable it | ||
75 | to behave as an HWA device. | ||
76 | |||
77 | To compile this driver select Y (built in) or M (module). It | ||
78 | is safe to select any even if you do not have the hardware. | ||
79 | |||
80 | config UWB_I1480U_WLP | ||
81 | tristate "Support for Intel Wireless UWB Link 1480 HWA's WLP interface" | ||
82 | depends on UWB_I1480U && UWB_WLP && NET | ||
83 | help | ||
84 | This driver enables WLP support for the i1480 when connected via | ||
85 | USB. WLP is the WiMedia Link Protocol, or IP over UWB. | ||
86 | |||
87 | To compile this driver select Y (built in) or M (module). It | ||
88 | is safe to select any even if you don't have the hardware. | ||
89 | |||
90 | endif # UWB | ||
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile new file mode 100644 index 000000000000..257e6908304c --- /dev/null +++ b/drivers/uwb/Makefile | |||
@@ -0,0 +1,29 @@ | |||
1 | obj-$(CONFIG_UWB) += uwb.o | ||
2 | obj-$(CONFIG_UWB_WLP) += wlp/ | ||
3 | obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o | ||
4 | obj-$(CONFIG_UWB_HWA) += hwa-rc.o | ||
5 | obj-$(CONFIG_UWB_I1480U) += i1480/ | ||
6 | |||
7 | uwb-objs := \ | ||
8 | address.o \ | ||
9 | beacon.o \ | ||
10 | driver.o \ | ||
11 | drp.o \ | ||
12 | drp-avail.o \ | ||
13 | drp-ie.o \ | ||
14 | est.o \ | ||
15 | ie.o \ | ||
16 | lc-dev.o \ | ||
17 | lc-rc.o \ | ||
18 | neh.o \ | ||
19 | pal.o \ | ||
20 | reset.o \ | ||
21 | rsv.o \ | ||
22 | scan.o \ | ||
23 | uwb-debug.o \ | ||
24 | uwbd.o | ||
25 | |||
26 | umc-objs := \ | ||
27 | umc-bus.o \ | ||
28 | umc-dev.o \ | ||
29 | umc-drv.o | ||
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c new file mode 100644 index 000000000000..1664ae5f1706 --- /dev/null +++ b/drivers/uwb/address.c | |||
@@ -0,0 +1,374 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Address management | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | */ | ||
25 | |||
26 | #include <linux/errno.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/device.h> | ||
29 | #include <linux/random.h> | ||
30 | #include <linux/etherdevice.h> | ||
31 | #include <linux/uwb/debug.h> | ||
32 | #include "uwb-internal.h" | ||
33 | |||
34 | |||
/** Device Address Management command */
struct uwb_rc_cmd_dev_addr_mgmt {
	struct uwb_rccb rccb;	/* generic radio-control command header */
	u8 bmOperationType;	/* get/set, DevAddr/MAC; see uwb_rc_dev_addr_mgmt() */
	u8 baAddr[6];		/* address payload: 2 (DevAddr) or 6 (MAC) bytes */
} __attribute__((packed));
41 | |||
42 | |||
43 | /** | ||
44 | * Low level command for setting/getting UWB radio's addresses | ||
45 | * | ||
46 | * @hwarc: HWA Radio Control interface instance | ||
47 | * @bmOperationType: | ||
48 | * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2]) | ||
49 | * @baAddr: address buffer--assumed to have enough data to hold | ||
50 | * the address type requested. | ||
51 | * @reply: Pointer to reply buffer (can be stack allocated) | ||
52 | * @returns: 0 if ok, < 0 errno code on error. | ||
53 | * | ||
54 | * @cmd has to be allocated because USB cannot grok USB or vmalloc | ||
55 | * buffers depending on your combination of host architecture. | ||
56 | */ | ||
57 | static | ||
58 | int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc, | ||
59 | u8 bmOperationType, const u8 *baAddr, | ||
60 | struct uwb_rc_evt_dev_addr_mgmt *reply) | ||
61 | { | ||
62 | int result; | ||
63 | struct uwb_rc_cmd_dev_addr_mgmt *cmd; | ||
64 | |||
65 | result = -ENOMEM; | ||
66 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | ||
67 | if (cmd == NULL) | ||
68 | goto error_kzalloc; | ||
69 | cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; | ||
70 | cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT); | ||
71 | cmd->bmOperationType = bmOperationType; | ||
72 | if (baAddr) { | ||
73 | size_t size = 0; | ||
74 | switch (bmOperationType >> 1) { | ||
75 | case 0: size = 2; break; | ||
76 | case 1: size = 6; break; | ||
77 | default: BUG(); | ||
78 | } | ||
79 | memcpy(cmd->baAddr, baAddr, size); | ||
80 | } | ||
81 | reply->rceb.bEventType = UWB_RC_CET_GENERAL; | ||
82 | reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT; | ||
83 | result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT", | ||
84 | &cmd->rccb, sizeof(*cmd), | ||
85 | &reply->rceb, sizeof(*reply)); | ||
86 | if (result < 0) | ||
87 | goto error_cmd; | ||
88 | if (result < sizeof(*reply)) { | ||
89 | dev_err(&rc->uwb_dev.dev, | ||
90 | "DEV-ADDR-MGMT: not enough data replied: " | ||
91 | "%d vs %zu bytes needed\n", result, sizeof(*reply)); | ||
92 | result = -ENOMSG; | ||
93 | } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) { | ||
94 | dev_err(&rc->uwb_dev.dev, | ||
95 | "DEV-ADDR-MGMT: command execution failed: %s (%d)\n", | ||
96 | uwb_rc_strerror(reply->bResultCode), | ||
97 | reply->bResultCode); | ||
98 | result = -EIO; | ||
99 | } else | ||
100 | result = 0; | ||
101 | error_cmd: | ||
102 | kfree(cmd); | ||
103 | error_kzalloc: | ||
104 | return result; | ||
105 | } | ||
106 | |||
107 | |||
108 | /** | ||
109 | * Set the UWB RC MAC or device address. | ||
110 | * | ||
111 | * @rc: UWB Radio Controller | ||
112 | * @_addr: Pointer to address to write [assumed to be either a | ||
113 | * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. | ||
114 | * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC). | ||
115 | * @returns: 0 if ok, < 0 errno code on error. | ||
116 | * | ||
117 | * Some anal retentivity here: even if both 'struct | ||
118 | * uwb_{dev,mac}_addr' have the actual byte array in the same offset | ||
119 | * and I could just pass _addr to hwarc_cmd_dev_addr_mgmt(), I prefer | ||
120 | * to use some syntatic sugar in case someday we decide to change the | ||
121 | * format of the structs. The compiler will optimize it out anyway. | ||
122 | */ | ||
123 | static int uwb_rc_addr_set(struct uwb_rc *rc, | ||
124 | const void *_addr, enum uwb_addr_type type) | ||
125 | { | ||
126 | int result; | ||
127 | u8 bmOperationType = 0x1; /* Set address */ | ||
128 | const struct uwb_dev_addr *dev_addr = _addr; | ||
129 | const struct uwb_mac_addr *mac_addr = _addr; | ||
130 | struct uwb_rc_evt_dev_addr_mgmt reply; | ||
131 | const u8 *baAddr; | ||
132 | |||
133 | result = -EINVAL; | ||
134 | switch (type) { | ||
135 | case UWB_ADDR_DEV: | ||
136 | baAddr = dev_addr->data; | ||
137 | break; | ||
138 | case UWB_ADDR_MAC: | ||
139 | baAddr = mac_addr->data; | ||
140 | bmOperationType |= 0x2; | ||
141 | break; | ||
142 | default: | ||
143 | return result; | ||
144 | } | ||
145 | return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply); | ||
146 | } | ||
147 | |||
148 | |||
149 | /** | ||
150 | * Get the UWB radio's MAC or device address. | ||
151 | * | ||
152 | * @rc: UWB Radio Controller | ||
153 | * @_addr: Where to write the address data [assumed to be either a | ||
154 | * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. | ||
155 | * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC). | ||
156 | * @returns: 0 if ok (and *_addr set), < 0 errno code on error. | ||
157 | * | ||
158 | * See comment in uwb_rc_addr_set() about anal retentivity in the | ||
159 | * type handling of the address variables. | ||
160 | */ | ||
161 | static int uwb_rc_addr_get(struct uwb_rc *rc, | ||
162 | void *_addr, enum uwb_addr_type type) | ||
163 | { | ||
164 | int result; | ||
165 | u8 bmOperationType = 0x0; /* Get address */ | ||
166 | struct uwb_rc_evt_dev_addr_mgmt evt; | ||
167 | struct uwb_dev_addr *dev_addr = _addr; | ||
168 | struct uwb_mac_addr *mac_addr = _addr; | ||
169 | u8 *baAddr; | ||
170 | |||
171 | result = -EINVAL; | ||
172 | switch (type) { | ||
173 | case UWB_ADDR_DEV: | ||
174 | baAddr = dev_addr->data; | ||
175 | break; | ||
176 | case UWB_ADDR_MAC: | ||
177 | bmOperationType |= 0x2; | ||
178 | baAddr = mac_addr->data; | ||
179 | break; | ||
180 | default: | ||
181 | return result; | ||
182 | } | ||
183 | result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt); | ||
184 | if (result == 0) | ||
185 | switch (type) { | ||
186 | case UWB_ADDR_DEV: | ||
187 | memcpy(&dev_addr->data, evt.baAddr, | ||
188 | sizeof(dev_addr->data)); | ||
189 | break; | ||
190 | case UWB_ADDR_MAC: | ||
191 | memcpy(&mac_addr->data, evt.baAddr, | ||
192 | sizeof(mac_addr->data)); | ||
193 | break; | ||
194 | default: /* shut gcc up */ | ||
195 | BUG(); | ||
196 | } | ||
197 | return result; | ||
198 | } | ||
199 | |||
200 | |||
201 | /** Get @rc's MAC address to @addr */ | ||
202 | int uwb_rc_mac_addr_get(struct uwb_rc *rc, | ||
203 | struct uwb_mac_addr *addr) { | ||
204 | return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC); | ||
205 | } | ||
206 | EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get); | ||
207 | |||
208 | |||
209 | /** Get @rc's device address to @addr */ | ||
210 | int uwb_rc_dev_addr_get(struct uwb_rc *rc, | ||
211 | struct uwb_dev_addr *addr) { | ||
212 | return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV); | ||
213 | } | ||
214 | EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get); | ||
215 | |||
216 | |||
217 | /** Set @rc's address to @addr */ | ||
218 | int uwb_rc_mac_addr_set(struct uwb_rc *rc, | ||
219 | const struct uwb_mac_addr *addr) | ||
220 | { | ||
221 | int result = -EINVAL; | ||
222 | mutex_lock(&rc->uwb_dev.mutex); | ||
223 | result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC); | ||
224 | mutex_unlock(&rc->uwb_dev.mutex); | ||
225 | return result; | ||
226 | } | ||
227 | |||
228 | |||
229 | /** Set @rc's address to @addr */ | ||
230 | int uwb_rc_dev_addr_set(struct uwb_rc *rc, | ||
231 | const struct uwb_dev_addr *addr) | ||
232 | { | ||
233 | int result = -EINVAL; | ||
234 | mutex_lock(&rc->uwb_dev.mutex); | ||
235 | result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV); | ||
236 | rc->uwb_dev.dev_addr = *addr; | ||
237 | mutex_unlock(&rc->uwb_dev.mutex); | ||
238 | return result; | ||
239 | } | ||
240 | |||
241 | /* Returns !0 if given address is already assigned to device. */ | ||
242 | int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr) | ||
243 | { | ||
244 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
245 | struct uwb_mac_addr *addr = _addr; | ||
246 | |||
247 | if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr)) | ||
248 | return !0; | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | /* Returns !0 if given address is already assigned to device. */ | ||
253 | int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr) | ||
254 | { | ||
255 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
256 | struct uwb_dev_addr *addr = _addr; | ||
257 | if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr)) | ||
258 | return !0; | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | /** | ||
263 | * uwb_dev_addr_assign - assigned a generated DevAddr to a radio controller | ||
264 | * @rc: the (local) radio controller device requiring a new DevAddr | ||
265 | * | ||
266 | * A new DevAddr is required when: | ||
267 | * - first setting up a radio controller | ||
268 | * - if the hardware reports a DevAddr conflict | ||
269 | * | ||
270 | * The DevAddr is randomly generated in the generated DevAddr range | ||
271 | * [0x100, 0xfeff]. The number of devices in a beacon group is limited | ||
272 | * by mMaxBPLength (96) so this address space will never be exhausted. | ||
273 | * | ||
274 | * [ECMA-368] 17.1.1, 17.16. | ||
275 | */ | ||
276 | int uwb_rc_dev_addr_assign(struct uwb_rc *rc) | ||
277 | { | ||
278 | struct uwb_dev_addr new_addr; | ||
279 | |||
280 | do { | ||
281 | get_random_bytes(new_addr.data, sizeof(new_addr.data)); | ||
282 | } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff | ||
283 | || __uwb_dev_addr_assigned(rc, &new_addr)); | ||
284 | |||
285 | return uwb_rc_dev_addr_set(rc, &new_addr); | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event | ||
290 | * @evt: the DEV_ADDR_CONFLICT notification from the radio controller | ||
291 | * | ||
292 | * A new (non-conflicting) DevAddr is assigned to the radio controller. | ||
293 | * | ||
294 | * [ECMA-368] 17.1.1.1. | ||
295 | */ | ||
296 | int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt) | ||
297 | { | ||
298 | struct uwb_rc *rc = evt->rc; | ||
299 | |||
300 | return uwb_rc_dev_addr_assign(rc); | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * Print the 48-bit EUI MAC address of the radio controller when | ||
305 | * reading /sys/class/uwb_rc/XX/mac_address | ||
306 | */ | ||
307 | static ssize_t uwb_rc_mac_addr_show(struct device *dev, | ||
308 | struct device_attribute *attr, char *buf) | ||
309 | { | ||
310 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
311 | struct uwb_rc *rc = uwb_dev->rc; | ||
312 | struct uwb_mac_addr addr; | ||
313 | ssize_t result; | ||
314 | |||
315 | mutex_lock(&rc->uwb_dev.mutex); | ||
316 | result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC); | ||
317 | mutex_unlock(&rc->uwb_dev.mutex); | ||
318 | if (result >= 0) { | ||
319 | result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr); | ||
320 | buf[result++] = '\n'; | ||
321 | } | ||
322 | return result; | ||
323 | } | ||
324 | |||
325 | /* | ||
326 | * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address | ||
327 | * and if correct, set it. | ||
328 | */ | ||
329 | static ssize_t uwb_rc_mac_addr_store(struct device *dev, | ||
330 | struct device_attribute *attr, | ||
331 | const char *buf, size_t size) | ||
332 | { | ||
333 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
334 | struct uwb_rc *rc = uwb_dev->rc; | ||
335 | struct uwb_mac_addr addr; | ||
336 | ssize_t result; | ||
337 | |||
338 | result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n", | ||
339 | &addr.data[0], &addr.data[1], &addr.data[2], | ||
340 | &addr.data[3], &addr.data[4], &addr.data[5]); | ||
341 | if (result != 6) { | ||
342 | result = -EINVAL; | ||
343 | goto out; | ||
344 | } | ||
345 | if (is_multicast_ether_addr(addr.data)) { | ||
346 | dev_err(&rc->uwb_dev.dev, "refusing to set multicast " | ||
347 | "MAC address %s\n", buf); | ||
348 | result = -EINVAL; | ||
349 | goto out; | ||
350 | } | ||
351 | result = uwb_rc_mac_addr_set(rc, &addr); | ||
352 | if (result == 0) | ||
353 | rc->uwb_dev.mac_addr = addr; | ||
354 | out: | ||
355 | return result < 0 ? result : size; | ||
356 | } | ||
357 | DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store); | ||
358 | |||
359 | /** Print @addr to @buf, @return bytes written */ | ||
360 | size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr, | ||
361 | int type) | ||
362 | { | ||
363 | size_t result; | ||
364 | if (type) | ||
365 | result = scnprintf(buf, buf_size, | ||
366 | "%02x:%02x:%02x:%02x:%02x:%02x", | ||
367 | addr[0], addr[1], addr[2], | ||
368 | addr[3], addr[4], addr[5]); | ||
369 | else | ||
370 | result = scnprintf(buf, buf_size, "%02x:%02x", | ||
371 | addr[1], addr[0]); | ||
372 | return result; | ||
373 | } | ||
374 | EXPORT_SYMBOL_GPL(__uwb_addr_print); | ||
diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c new file mode 100644 index 000000000000..46b18eec5026 --- /dev/null +++ b/drivers/uwb/beacon.c | |||
@@ -0,0 +1,642 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Beacon management | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/device.h> | ||
30 | #include <linux/err.h> | ||
31 | #include <linux/kdev_t.h> | ||
32 | #include "uwb-internal.h" | ||
33 | |||
34 | #define D_LOCAL 0 | ||
35 | #include <linux/uwb/debug.h> | ||
36 | |||
/** Start Beaconing command structure */
struct uwb_rc_cmd_start_beacon {
	struct uwb_rccb rccb;	/* generic radio-control command header */
	__le16 wBPSTOffset;	/* Beacon Period Start Time offset */
	u8 bChannelNumber;	/* UWB channel to beacon on */
} __attribute__((packed));
44 | |||
45 | static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel) | ||
46 | { | ||
47 | int result; | ||
48 | struct uwb_rc_cmd_start_beacon *cmd; | ||
49 | struct uwb_rc_evt_confirm reply; | ||
50 | |||
51 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | ||
52 | if (cmd == NULL) | ||
53 | return -ENOMEM; | ||
54 | cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; | ||
55 | cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON); | ||
56 | cmd->wBPSTOffset = cpu_to_le16(bpst_offset); | ||
57 | cmd->bChannelNumber = channel; | ||
58 | reply.rceb.bEventType = UWB_RC_CET_GENERAL; | ||
59 | reply.rceb.wEvent = UWB_RC_CMD_START_BEACON; | ||
60 | result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd), | ||
61 | &reply.rceb, sizeof(reply)); | ||
62 | if (result < 0) | ||
63 | goto error_cmd; | ||
64 | if (reply.bResultCode != UWB_RC_RES_SUCCESS) { | ||
65 | dev_err(&rc->uwb_dev.dev, | ||
66 | "START-BEACON: command execution failed: %s (%d)\n", | ||
67 | uwb_rc_strerror(reply.bResultCode), reply.bResultCode); | ||
68 | result = -EIO; | ||
69 | } | ||
70 | error_cmd: | ||
71 | kfree(cmd); | ||
72 | return result; | ||
73 | } | ||
74 | |||
75 | static int uwb_rc_stop_beacon(struct uwb_rc *rc) | ||
76 | { | ||
77 | int result; | ||
78 | struct uwb_rccb *cmd; | ||
79 | struct uwb_rc_evt_confirm reply; | ||
80 | |||
81 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | ||
82 | if (cmd == NULL) | ||
83 | return -ENOMEM; | ||
84 | cmd->bCommandType = UWB_RC_CET_GENERAL; | ||
85 | cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON); | ||
86 | reply.rceb.bEventType = UWB_RC_CET_GENERAL; | ||
87 | reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON; | ||
88 | result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd), | ||
89 | &reply.rceb, sizeof(reply)); | ||
90 | if (result < 0) | ||
91 | goto error_cmd; | ||
92 | if (reply.bResultCode != UWB_RC_RES_SUCCESS) { | ||
93 | dev_err(&rc->uwb_dev.dev, | ||
94 | "STOP-BEACON: command execution failed: %s (%d)\n", | ||
95 | uwb_rc_strerror(reply.bResultCode), reply.bResultCode); | ||
96 | result = -EIO; | ||
97 | } | ||
98 | error_cmd: | ||
99 | kfree(cmd); | ||
100 | return result; | ||
101 | } | ||
102 | |||
/*
 * Start/stop beacons
 *
 * @rc: UWB Radio Controller to operate on
 * @channel: UWB channel on which to beacon (WUSB[table
 * 5-12]). If -1, stop beaconing.
 * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
 *
 * @returns: 0 if ok, < 0 errno code on error; on success
 * rc->beaconing reflects the new channel (-1 when stopped).
 *
 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
 * of a SET IE command after the device sent the first beacon that includes
 * the IEs specified in the SET IE command. So, after we start beaconing we
 * check if there is anything in the IE cache and call the SET IE command
 * if needed.
 */
int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;

	mutex_lock(&rc->uwb_dev.mutex);
	if (channel < 0)
		channel = -1;	/* normalize: any negative means "stop" */
	if (channel == -1)
		result = uwb_rc_stop_beacon(rc);
	else {
		/* channel >= 0...dah */
		result = uwb_rc_start_beacon(rc, bpst_offset, channel);
		if (result < 0)
			goto out_up;
		/* Re-apply any cached IEs (see the WHCI note above). */
		if (le16_to_cpu(rc->ies->wIELength) > 0) {
			result = uwb_rc_set_ie(rc, rc->ies);
			if (result < 0) {
				dev_err(dev, "Cannot set new IE on device: "
					"%d\n", result);
				/* Roll back: stop beaconing again and record
				 * the stopped state via channel = -1 below. */
				result = uwb_rc_stop_beacon(rc);
				channel = -1;
				bpst_offset = 0;
			} else
				result = 0;
		}
	}

	if (result < 0)
		goto out_up;
	rc->beaconing = channel;

	/* Tell interested parties whether we are now in a beacon group. */
	uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE);

out_up:
	mutex_unlock(&rc->uwb_dev.mutex);
	return result;
}
155 | |||
156 | /* | ||
157 | * Beacon cache | ||
158 | * | ||
159 | * The purpose of this is to speed up the lookup of beacon information | ||
160 | * when a new beacon arrives. The UWB Daemon uses it also to keep a | ||
161 | * tab of which devices are in radio distance and which not. When a | ||
162 | * device's beacon stays present for more than a certain amount of | ||
163 | * time, it is considered a new, usable device. When a beacon ceases | ||
164 | * to be received for a certain amount of time, it is considered that | ||
165 | * the device is gone. | ||
166 | * | ||
167 | * FIXME: use an allocator for the entries | ||
168 | * FIXME: use something faster for search than a list | ||
169 | */ | ||
170 | |||
/* The beacon cache: a single global list shared by all radio
 * controllers, protected by its own mutex. */
struct uwb_beca uwb_beca = {
	.list = LIST_HEAD_INIT(uwb_beca.list),
	.mutex = __MUTEX_INITIALIZER(uwb_beca.mutex)
};
175 | |||
176 | |||
177 | void uwb_bce_kfree(struct kref *_bce) | ||
178 | { | ||
179 | struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); | ||
180 | |||
181 | kfree(bce->be); | ||
182 | kfree(bce); | ||
183 | } | ||
184 | |||
185 | |||
186 | /* Find a beacon by dev addr in the cache */ | ||
187 | static | ||
188 | struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr) | ||
189 | { | ||
190 | struct uwb_beca_e *bce, *next; | ||
191 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | ||
192 | d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n", | ||
193 | dev_addr->data[0], dev_addr->data[1], | ||
194 | bce->dev_addr.data[0], bce->dev_addr.data[1]); | ||
195 | if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) | ||
196 | goto out; | ||
197 | } | ||
198 | bce = NULL; | ||
199 | out: | ||
200 | return bce; | ||
201 | } | ||
202 | |||
203 | /* Find a beacon by dev addr in the cache */ | ||
204 | static | ||
205 | struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr) | ||
206 | { | ||
207 | struct uwb_beca_e *bce, *next; | ||
208 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | ||
209 | if (!memcmp(bce->mac_addr, mac_addr->data, | ||
210 | sizeof(struct uwb_mac_addr))) | ||
211 | goto out; | ||
212 | } | ||
213 | bce = NULL; | ||
214 | out: | ||
215 | return bce; | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr | ||
220 | * @rc: the radio controller that saw the device | ||
221 | * @devaddr: DevAddr of the UWB device to find | ||
222 | * | ||
223 | * There may be more than one matching device (in the case of a | ||
224 | * DevAddr conflict), but only the first one is returned. | ||
225 | */ | ||
226 | struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, | ||
227 | const struct uwb_dev_addr *devaddr) | ||
228 | { | ||
229 | struct uwb_dev *found = NULL; | ||
230 | struct uwb_beca_e *bce; | ||
231 | |||
232 | mutex_lock(&uwb_beca.mutex); | ||
233 | bce = __uwb_beca_find_bydev(devaddr); | ||
234 | if (bce) | ||
235 | found = uwb_dev_try_get(rc, bce->uwb_dev); | ||
236 | mutex_unlock(&uwb_beca.mutex); | ||
237 | |||
238 | return found; | ||
239 | } | ||
240 | |||
/**
 * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
 * @rc: the radio controller that saw the device
 * @macaddr: EUI-48 of the UWB device to find
 *
 * Returns a referenced uwb_dev, or NULL if no cached beacon matches
 * the EUI-48.  (Fixed kernel-doc: the parameter is @macaddr, not
 * @devaddr.)
 */
struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
				       const struct uwb_mac_addr *macaddr)
{
	struct uwb_dev *found = NULL;
	struct uwb_beca_e *bce;

	mutex_lock(&uwb_beca.mutex);
	bce = __uwb_beca_find_bymac(macaddr);
	if (bce)
		found = uwb_dev_try_get(rc, bce->uwb_dev);
	mutex_unlock(&uwb_beca.mutex);

	return found;
}
260 | |||
/* Initialize a beacon cache entry: lock, refcount and link stats. */
static void uwb_beca_e_init(struct uwb_beca_e *bce)
{
	mutex_init(&bce->mutex);
	kref_init(&bce->refcnt);	/* entry starts with one reference */
	stats_init(&bce->lqe_stats);
	stats_init(&bce->rssi_stats);
}
269 | |||
/*
 * Add a beacon to the cache
 *
 * @be: Beacon event information
 * @bf: Beacon frame (part of b, really)
 * @ts_jiffies: Timestamp (in jiffies) when the beacon was received
 *
 * Returns the new entry, or NULL on allocation failure.  The caller
 * must hold uwb_beca.mutex (the only caller,
 * uwbd_evt_handle_rc_beacon(), does).
 *
 * NOTE(review): @be and @bf are currently unused here; the caller
 * fills in the entry's be/dev_addr/mac_addr fields afterwards --
 * consider dropping the parameters or populating the entry here.
 */
struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be,
				  struct uwb_beacon_frame *bf,
				  unsigned long ts_jiffies)
{
	struct uwb_beca_e *bce;

	bce = kzalloc(sizeof(*bce), GFP_KERNEL);
	if (bce == NULL)
		return NULL;
	uwb_beca_e_init(bce);
	bce->ts_jiffies = ts_jiffies;
	bce->uwb_dev = NULL;	/* created later, when the device goes "on air" */
	list_add(&bce->node, &uwb_beca.list);
	return bce;
}
292 | |||
293 | /* | ||
294 | * Wipe out beacon entries that became stale | ||
295 | * | ||
296 | * Remove associated devicest too. | ||
297 | */ | ||
298 | void uwb_beca_purge(void) | ||
299 | { | ||
300 | struct uwb_beca_e *bce, *next; | ||
301 | unsigned long expires; | ||
302 | |||
303 | mutex_lock(&uwb_beca.mutex); | ||
304 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | ||
305 | expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); | ||
306 | if (time_after(jiffies, expires)) { | ||
307 | uwbd_dev_offair(bce); | ||
308 | list_del(&bce->node); | ||
309 | uwb_bce_put(bce); | ||
310 | } | ||
311 | } | ||
312 | mutex_unlock(&uwb_beca.mutex); | ||
313 | } | ||
314 | |||
315 | /* Clean up the whole beacon cache. Called on shutdown */ | ||
316 | void uwb_beca_release(void) | ||
317 | { | ||
318 | struct uwb_beca_e *bce, *next; | ||
319 | mutex_lock(&uwb_beca.mutex); | ||
320 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | ||
321 | list_del(&bce->node); | ||
322 | uwb_bce_put(bce); | ||
323 | } | ||
324 | mutex_unlock(&uwb_beca.mutex); | ||
325 | } | ||
326 | |||
/* Log a newly seen beacon: src/dst DevAddr, channel, BPST offset,
 * beacon slot and the device's EUI-48. */
static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
			     struct uwb_beacon_frame *bf)
{
	char macbuf[UWB_ADDR_STRSIZE];
	char devbuf[UWB_ADDR_STRSIZE];
	char dstbuf[UWB_ADDR_STRSIZE];

	uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
	uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
	uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
	dev_info(&rc->uwb_dev.dev,
		 "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
		 devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
		 bf->Beacon_Slot_Number, macbuf);
}
342 | |||
/*
 * Dump the IEs of a cached beacon into a buffer.
 *
 * @uwb_dev: device the beacon belongs to
 * @bce: beacon cache entry, referenced
 * @buf: output buffer
 * @size: size of @buf
 *
 * Returns the number of bytes written, or 0 if no beacon event is
 * cached yet.  bce->mutex is held so the cached event cannot be
 * replaced/freed underneath us.
 */
ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
			  char *buf, size_t size)
{
	ssize_t result = 0;
	struct uwb_rc_evt_beacon *be;
	struct uwb_beacon_frame *bf;
	struct uwb_buf_ctx ctx = {
		.buf = buf,
		.bytes = 0,
		.size = size
	};

	mutex_lock(&bce->mutex);
	be = bce->be;
	if (be == NULL)
		goto out;
	bf = (void *) be->BeaconInfo;
	/* IE data follows the fixed beacon frame header inside BeaconInfo */
	uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx,
			bf->IEData, be->wBeaconInfoLength - sizeof(*bf));
	result = ctx.bytes;
out:
	mutex_unlock(&bce->mutex);
	return result;
}
370 | |||
371 | /* | ||
372 | * Verify that the beacon event, frame and IEs are ok | ||
373 | */ | ||
374 | static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt, | ||
375 | struct uwb_rc_evt_beacon *be) | ||
376 | { | ||
377 | int result = -EINVAL; | ||
378 | struct uwb_beacon_frame *bf; | ||
379 | struct device *dev = &rc->uwb_dev.dev; | ||
380 | |||
381 | /* Is there enough data to decode a beacon frame? */ | ||
382 | if (evt->notif.size < sizeof(*be) + sizeof(*bf)) { | ||
383 | dev_err(dev, "BEACON event: Not enough data to decode " | ||
384 | "(%zu vs %zu bytes needed)\n", evt->notif.size, | ||
385 | sizeof(*be) + sizeof(*bf)); | ||
386 | goto error; | ||
387 | } | ||
388 | /* FIXME: make sure beacon frame IEs are fine and that the whole thing | ||
389 | * is consistent */ | ||
390 | result = 0; | ||
391 | error: | ||
392 | return result; | ||
393 | } | ||
394 | |||
/*
 * Handle UWB_RC_EVT_BEACON events
 *
 * We check the beacon cache to see how the received beacon fares. If
 * is there already we refresh the timestamp. If not we create a new
 * entry.
 *
 * According to the WHCI and WUSB specs, only one beacon frame is
 * allowed per notification block, so we don't bother about scanning
 * for more.
 *
 * Returns 1 on success so the caller keeps the event buffer alive
 * (bce->be points into it), 0 when the beacon is ignored, < 0 on
 * error.
 */
int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
{
	int result = -EINVAL;
	struct uwb_rc *rc;
	struct uwb_rc_evt_beacon *be;
	struct uwb_beacon_frame *bf;
	struct uwb_beca_e *bce;
	unsigned long last_ts;	/* NOTE(review): set below but never read */

	rc = evt->rc;
	be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
	result = uwb_verify_beacon(rc, evt, be);
	if (result < 0)
		return result;

	/* FIXME: handle alien beacons. */
	if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
	    be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
		return -ENOSYS;
	}

	bf = (struct uwb_beacon_frame *) be->BeaconInfo;

	/*
	 * Drop beacons from devices with a NULL EUI-48 -- they cannot
	 * be uniquely identified.
	 *
	 * It's expected that these will all be WUSB devices and they
	 * have a WUSB specific connection method so ignoring them
	 * here shouldn't be a problem.
	 */
	if (uwb_mac_addr_bcast(&bf->Device_Identifier))
		return 0;

	mutex_lock(&uwb_beca.mutex);
	bce = __uwb_beca_find_bymac(&bf->Device_Identifier);
	if (bce == NULL) {
		/* Not in there, a new device is pinging */
		uwb_beacon_print(evt->rc, be, bf);
		bce = __uwb_beca_add(be, bf, evt->ts_jiffies);
		if (bce == NULL) {
			mutex_unlock(&uwb_beca.mutex);
			return -ENOMEM;
		}
	}
	mutex_unlock(&uwb_beca.mutex);

	/*
	 * NOTE(review): no reference is taken on bce before dropping
	 * uwb_beca.mutex above; if uwb_beca_purge() ran right here the
	 * entry could be freed under us -- confirm a ref is held
	 * elsewhere or that purge timing makes this safe.
	 */
	mutex_lock(&bce->mutex);
	/* purge old beacon data */
	kfree(bce->be);

	last_ts = bce->ts_jiffies;

	/* Update commonly used fields */
	bce->ts_jiffies = evt->ts_jiffies;
	bce->be = be;	/* cache the event payload -- hence "return 1" below */
	bce->dev_addr = bf->hdr.SrcAddr;
	bce->mac_addr = &bf->Device_Identifier;
	/* fix endianness in place so later readers use host order */
	be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
	be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
	stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
	stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);

	/*
	 * This might be a beacon from a new device.
	 */
	if (bce->uwb_dev == NULL)
		uwbd_dev_onair(evt->rc, bce);

	mutex_unlock(&bce->mutex);

	return 1; /* we keep the event data */
}
479 | |||
480 | /* | ||
481 | * Handle UWB_RC_EVT_BEACON_SIZE events | ||
482 | * | ||
483 | * XXXXX | ||
484 | */ | ||
485 | int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt) | ||
486 | { | ||
487 | int result = -EINVAL; | ||
488 | struct device *dev = &evt->rc->uwb_dev.dev; | ||
489 | struct uwb_rc_evt_beacon_size *bs; | ||
490 | |||
491 | /* Is there enough data to decode the event? */ | ||
492 | if (evt->notif.size < sizeof(*bs)) { | ||
493 | dev_err(dev, "BEACON SIZE notification: Not enough data to " | ||
494 | "decode (%zu vs %zu bytes needed)\n", | ||
495 | evt->notif.size, sizeof(*bs)); | ||
496 | goto error; | ||
497 | } | ||
498 | bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb); | ||
499 | if (0) | ||
500 | dev_info(dev, "Beacon size changed to %u bytes " | ||
501 | "(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize)); | ||
502 | else { | ||
503 | /* temporary hack until we do something with this message... */ | ||
504 | static unsigned count; | ||
505 | if (++count % 1000 == 0) | ||
506 | dev_info(dev, "Beacon size changed %u times " | ||
507 | "(FIXME: action?)\n", count); | ||
508 | } | ||
509 | result = 0; | ||
510 | error: | ||
511 | return result; | ||
512 | } | ||
513 | |||
514 | /** | ||
515 | * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event | ||
516 | * @evt: the BP_SLOT_CHANGE notification from the radio controller | ||
517 | * | ||
518 | * If the event indicates that no beacon period slots were available | ||
519 | * then radio controller has transitioned to a non-beaconing state. | ||
520 | * Otherwise, simply save the current beacon slot. | ||
521 | */ | ||
522 | int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt) | ||
523 | { | ||
524 | struct uwb_rc *rc = evt->rc; | ||
525 | struct device *dev = &rc->uwb_dev.dev; | ||
526 | struct uwb_rc_evt_bp_slot_change *bpsc; | ||
527 | |||
528 | if (evt->notif.size < sizeof(*bpsc)) { | ||
529 | dev_err(dev, "BP SLOT CHANGE event: Not enough data\n"); | ||
530 | return -EINVAL; | ||
531 | } | ||
532 | bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb); | ||
533 | |||
534 | mutex_lock(&rc->uwb_dev.mutex); | ||
535 | if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) { | ||
536 | dev_info(dev, "stopped beaconing: No free slots in BP\n"); | ||
537 | rc->beaconing = -1; | ||
538 | } else | ||
539 | rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc); | ||
540 | mutex_unlock(&rc->uwb_dev.mutex); | ||
541 | |||
542 | return 0; | ||
543 | } | ||
544 | |||
/**
 * Handle UWB_RC_EVT_BPOIE_CHANGE events
 *
 * XXXXX
 */
/* Beacon Period Occupancy IE: header, BP length byte, then
 * variable-length occupancy data (presumably per ECMA-368 -- TODO
 * confirm field semantics against the spec). */
struct uwb_ie_bpo {
	struct uwb_ie_hdr hdr;
	u8 bp_length;
	u8 data[];	/* flexible array: occupancy payload */
} __attribute__((packed));
555 | |||
/*
 * Validate a BPOIE-change notification and (for now) just count it.
 *
 * Returns 0 when the notification and its embedded IE are large
 * enough to decode, -EINVAL otherwise.  Acting on the IEs is still a
 * FIXME.
 */
int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
{
	int result = -EINVAL;
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc_evt_bpoie_change *bpoiec;
	struct uwb_ie_bpo *bpoie;
	static unsigned count;	/* FIXME: this is a temp hack */
	size_t iesize;

	/* Is there enough data to decode it? */
	if (evt->notif.size < sizeof(*bpoiec)) {
		dev_err(dev, "BPOIEC notification: Not enough data to "
			"decode (%zu vs %zu bytes needed)\n",
			evt->notif.size, sizeof(*bpoiec));
		goto error;
	}
	bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb);
	iesize = le16_to_cpu(bpoiec->wBPOIELength);
	/* the embedded IE must hold at least its fixed header */
	if (iesize < sizeof(*bpoie)) {
		dev_err(dev, "BPOIEC notification: Not enough IE data to "
			"decode (%zu vs %zu bytes needed)\n",
			iesize, sizeof(*bpoie));
		goto error;
	}
	if (++count % 1000 == 0)	/* Lame placeholder */
		dev_info(dev, "BPOIE: %u changes received\n", count);
	/*
	 * FIXME: At this point we should go over all the IEs in the
	 * bpoiec->BPOIE array and act on each.
	 */
	result = 0;
error:
	return result;
}
590 | |||
/**
 * uwb_bg_joined - is the RC in a beacon group?
 * @rc: the radio controller
 *
 * Returns true if the radio controller is in a beacon group (even if
 * it's the sole member).  rc->beaconing is set to -1 when beaconing
 * stops (see uwbd_evt_handle_rc_bp_slot_change()).
 */
int uwb_bg_joined(struct uwb_rc *rc)
{
	return rc->beaconing != -1;
}
EXPORT_SYMBOL_GPL(uwb_bg_joined);
603 | |||
604 | /* | ||
605 | * Print beaconing state. | ||
606 | */ | ||
607 | static ssize_t uwb_rc_beacon_show(struct device *dev, | ||
608 | struct device_attribute *attr, char *buf) | ||
609 | { | ||
610 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
611 | struct uwb_rc *rc = uwb_dev->rc; | ||
612 | ssize_t result; | ||
613 | |||
614 | mutex_lock(&rc->uwb_dev.mutex); | ||
615 | result = sprintf(buf, "%d\n", rc->beaconing); | ||
616 | mutex_unlock(&rc->uwb_dev.mutex); | ||
617 | return result; | ||
618 | } | ||
619 | |||
620 | /* | ||
621 | * Start beaconing on the specified channel, or stop beaconing. | ||
622 | * | ||
623 | * The BPST offset of when to start searching for a beacon group to | ||
624 | * join may be specified. | ||
625 | */ | ||
626 | static ssize_t uwb_rc_beacon_store(struct device *dev, | ||
627 | struct device_attribute *attr, | ||
628 | const char *buf, size_t size) | ||
629 | { | ||
630 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
631 | struct uwb_rc *rc = uwb_dev->rc; | ||
632 | int channel; | ||
633 | unsigned bpst_offset = 0; | ||
634 | ssize_t result = -EINVAL; | ||
635 | |||
636 | result = sscanf(buf, "%d %u\n", &channel, &bpst_offset); | ||
637 | if (result >= 1) | ||
638 | result = uwb_rc_beacon(rc, channel, bpst_offset); | ||
639 | |||
640 | return result < 0 ? result : size; | ||
641 | } | ||
642 | DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store); | ||
diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c new file mode 100644 index 000000000000..521cdeb84971 --- /dev/null +++ b/drivers/uwb/driver.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Driver initialization, etc | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | * | ||
25 | * Life cycle: FIXME: explain | ||
26 | * | ||
27 | * UWB radio controller: | ||
28 | * | ||
29 | * 1. alloc a uwb_rc, zero it | ||
30 | * 2. call uwb_rc_init() on it to set it up + ops (won't do any | ||
31 | * kind of allocation) | ||
32 | * 3. register (now it is owned by the UWB stack--deregister before | ||
33 | * freeing/destroying). | ||
 * 4. It lives on its own now (UWB stack handles)--when it
35 | * disconnects, call unregister() | ||
36 | * 5. free it. | ||
37 | * | ||
38 | * Make sure you have a reference to the uwb_rc before calling | ||
39 | * any of the UWB API functions. | ||
40 | * | ||
41 | * TODO: | ||
42 | * | ||
43 | * 1. Locking and life cycle management is crappy still. All entry | ||
44 | * points to the UWB HCD API assume you have a reference on the | ||
45 | * uwb_rc structure and that it won't go away. They mutex lock it | ||
46 | * before doing anything. | ||
47 | */ | ||
48 | |||
49 | #include <linux/kernel.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/module.h> | ||
52 | #include <linux/device.h> | ||
53 | #include <linux/err.h> | ||
54 | #include <linux/kdev_t.h> | ||
55 | #include <linux/random.h> | ||
56 | #include <linux/uwb/debug.h> | ||
57 | #include "uwb-internal.h" | ||
58 | |||
59 | |||
60 | /* UWB stack attributes (or 'global' constants) */ | ||
61 | |||
62 | |||
/**
 * If a beacon disappears for longer than this, then we consider the
 * device that was represented by that beacon to be gone.
 *
 * ECMA-368[17.2.3, last para] establishes that a device must not
 * consider another device to be its neighbour if it doesn't receive a
 * beacon for more than mMaxLostBeacons. mMaxLostBeacons is defined in
 * ECMA-368[17.16] as 3; because we can get only one beacon per
 * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time
 * for jitter and stuff and make it 500 ms.
 */
unsigned long beacon_timeout_ms = 500;
75 | |||
/* sysfs (class attr): report the beacon timeout in milliseconds. */
static
ssize_t beacon_timeout_ms_show(struct class *class, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms);
}
81 | |||
82 | static | ||
83 | ssize_t beacon_timeout_ms_store(struct class *class, | ||
84 | const char *buf, size_t size) | ||
85 | { | ||
86 | unsigned long bt; | ||
87 | ssize_t result; | ||
88 | result = sscanf(buf, "%lu", &bt); | ||
89 | if (result != 1) | ||
90 | return -EINVAL; | ||
91 | beacon_timeout_ms = bt; | ||
92 | return size; | ||
93 | } | ||
94 | |||
/* Class-wide sysfs attributes (appear under /sys/class/uwb_rc/) */
static struct class_attribute uwb_class_attrs[] = {
	__ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO,
	       beacon_timeout_ms_show, beacon_timeout_ms_store),
	__ATTR_NULL,	/* sentinel */
};

/** Device model classes */
struct class uwb_rc_class = {
	.name = "uwb_rc",
	.class_attrs = uwb_class_attrs,
};
106 | |||
107 | |||
/*
 * Bring up the UWB stack: the event size tables (EST), the uwb_rc
 * device class, the uwbd event thread and debug support.
 * Unwinds already-initialized parts in reverse order on failure.
 */
static int __init uwb_subsys_init(void)
{
	int result = 0;

	result = uwb_est_create();
	if (result < 0) {
		printk(KERN_ERR "uwb: Can't initialize EST subsystem\n");
		goto error_est_init;
	}

	result = class_register(&uwb_rc_class);
	if (result < 0)
		goto error_uwb_rc_class_register;
	uwbd_start();
	uwb_dbg_init();
	return 0;

error_uwb_rc_class_register:
	uwb_est_destroy();
error_est_init:
	return result;
}
module_init(uwb_subsys_init);
131 | |||
132 | static void __exit uwb_subsys_exit(void) | ||
133 | { | ||
134 | uwb_dbg_exit(); | ||
135 | uwbd_stop(); | ||
136 | class_unregister(&uwb_rc_class); | ||
137 | uwb_est_destroy(); | ||
138 | return; | ||
139 | } | ||
140 | module_exit(uwb_subsys_exit); | ||
141 | |||
142 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
143 | MODULE_DESCRIPTION("Ultra Wide Band core"); | ||
144 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c new file mode 100644 index 000000000000..3febd8552808 --- /dev/null +++ b/drivers/uwb/drp-avail.c | |||
@@ -0,0 +1,288 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * DRP availability management | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Reinette Chatre <reinette.chatre@intel.com> | ||
7 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | * | ||
21 | * | ||
22 | * Manage DRP Availability (the MAS available for DRP | ||
23 | * reservations). Thus: | ||
24 | * | ||
25 | * - Handle DRP Availability Change notifications | ||
26 | * | ||
27 | * - Allow the reservation manager to indicate MAS reserved/released | ||
28 | * by local (owned by/targeted at the radio controller) | ||
29 | * reservations. | ||
30 | * | ||
31 | * - Based on the two sources above, generate a DRP Availability IE to | ||
32 | * be included in the beacon. | ||
33 | * | ||
34 | * See also the documentation for struct uwb_drp_avail. | ||
35 | */ | ||
36 | |||
37 | #include <linux/errno.h> | ||
38 | #include <linux/module.h> | ||
39 | #include <linux/device.h> | ||
40 | #include <linux/bitmap.h> | ||
41 | #include "uwb-internal.h" | ||
42 | |||
/**
 * uwb_drp_avail_init - initialize an RC's MAS availability
 *
 * All MAS are available initially. The RC will inform us which
 * slots are used for the BP (it may change in size).
 */
void uwb_drp_avail_init(struct uwb_rc *rc)
{
	bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
	bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
	bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
}
55 | |||
/*
 * Determine MAS available for new local reservations.
 *
 * avail = global & local & pending
 *
 * A MAS is usable only if none of the three bitmaps marks it busy
 * (bit cleared).
 */
static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
{
	bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
	bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
}
66 | |||
/**
 * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
 * @rc: the radio controller
 * @mas: the MAS to reserve
 *
 * Returns 0 on success, or -EBUSY if the MAS requested aren't available.
 *
 * NOTE(review): no locking here; presumably the caller serializes via
 * rc->rsvs_mutex (as uwbd_evt_handle_rc_drp_avail() does) -- confirm.
 */
int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
	struct uwb_mas_bm avail;

	uwb_drp_available(rc, &avail);
	if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
		return -EBUSY;

	/* clear the bits: the MAS are now pending, not free */
	bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
	return 0;
}
85 | |||
/**
 * uwb_drp_avail_reserve - reserve MAS for an established reservation
 * @rc: the radio controller
 * @mas: the MAS to reserve
 *
 * Moves the MAS from "pending" to "local": sets them back in the
 * pending bitmap (no longer merely pending) and clears them in the
 * local one (now actually in use), then invalidates the cached DRP
 * Availability IE.
 */
void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
	bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
	rc->drp_avail.ie_valid = false;
}
97 | |||
/**
 * uwb_drp_avail_release - release MAS from a pending or established reservation
 * @rc: the radio controller
 * @mas: the MAS to release
 *
 * Marks the MAS available again in both the local and pending bitmaps
 * and invalidates the cached DRP Availability IE.
 */
void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
	bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
	rc->drp_avail.ie_valid = false;
}
109 | |||
/**
 * uwb_drp_avail_ie_update - update the DRP Availability IE
 * @rc: the radio controller
 *
 * avail = global & local
 *
 * NOTE: the pending bitmap is deliberately not folded in here --
 * presumably only established state is advertised in the beacon
 * (confirm against the reservation manager).
 */
void uwb_drp_avail_ie_update(struct uwb_rc *rc)
{
	struct uwb_mas_bm avail;

	bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);

	rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
	rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;	/* length in octets */
	uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
	rc->drp_avail.ie_valid = true;
}
127 | |||
128 | /** | ||
129 | * Create an unsigned long from a buffer containing a byte stream. | ||
130 | * | ||
131 | * @array: pointer to buffer | ||
132 | * @itr: index of buffer from where we start | ||
133 | * @len: the buffer's remaining size may not be exact multiple of | ||
134 | * sizeof(unsigned long), @len is the length of buffer that needs | ||
135 | * to be converted. This will be sizeof(unsigned long) or smaller | ||
136 | * (BUG if not). If it is smaller then we will pad the remaining | ||
137 | * space of the result with zeroes. | ||
138 | */ | ||
139 | static | ||
140 | unsigned long get_val(u8 *array, size_t itr, size_t len) | ||
141 | { | ||
142 | unsigned long val = 0; | ||
143 | size_t top = itr + len; | ||
144 | |||
145 | BUG_ON(len > sizeof(val)); | ||
146 | |||
147 | while (itr < top) { | ||
148 | val <<= 8; | ||
149 | val |= array[top - 1]; | ||
150 | top--; | ||
151 | } | ||
152 | val <<= 8 * (sizeof(val) - len); /* padding */ | ||
153 | return val; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * Initialize bitmap from data buffer. | ||
158 | * | ||
159 | * The bitmap to be converted could come from a IE, for example a | ||
160 | * DRP Availability IE. | ||
161 | * From ECMA-368 1.0 [16.8.7]: " | ||
162 | * octets: 1 1 N * (0 to 32) | ||
163 | * Element ID Length (=N) DRP Availability Bitmap | ||
164 | * | ||
165 | * The DRP Availability Bitmap field is up to 256 bits long, one | ||
166 | * bit for each MAS in the superframe, where the least-significant | ||
167 | * bit of the field corresponds to the first MAS in the superframe | ||
168 | * and successive bits correspond to successive MASs." | ||
169 | * | ||
170 | * The DRP Availability bitmap is in octets from 0 to 32, so octet | ||
171 | * 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32 | ||
172 | * octets, the bits in octets not included at the end of the bitmap are | ||
173 | * treated as zero. In this case (when the bitmap is smaller than 32 | ||
174 | * octets) the MAS represented range from MAS 1 to MAS (size of bitmap) | ||
175 | * with the last octet still containing bits for MAS 1-8, etc. | ||
176 | * | ||
177 | * For example: | ||
178 | * F00F0102 03040506 0708090A 0B0C0D0E 0F010203 | ||
179 | * ^^^^ | ||
180 | * |||| | ||
181 | * |||| | ||
182 | * |||\LSB of byte is MAS 9 | ||
183 | * ||\MSB of byte is MAS 16 | ||
184 | * |\LSB of first byte is MAS 1 | ||
185 | * \ MSB of byte is MAS 8 | ||
186 | * | ||
187 | * An example of this encoding can be found in ECMA-368 Annex-D [Table D.11] | ||
188 | * | ||
189 | * The resulting bitmap will have the following mapping: | ||
190 | * bit position 0 == MAS 1 | ||
191 | * bit position 1 == MAS 2 | ||
192 | * ... | ||
193 | * bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS | ||
194 | * | ||
195 | * @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP) | ||
196 | * @buffer: pointer to buffer containing bitmap data in big endian | ||
197 | * format (MSB first) | ||
198 | * @buffer_size:number of bytes with which bitmap should be initialized | ||
199 | */ | ||
static
void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
		   size_t buffer_size)
{
	u8 *buffer = _buffer;
	size_t itr, len;
	unsigned long val;

	itr = 0;
	while (itr < buffer_size) {
		/* the last chunk may be shorter than a full word */
		len = buffer_size - itr >= sizeof(val) ?
			sizeof(val) : buffer_size - itr;
		val = get_val(buffer, itr, len);
		bmp_itr[itr / sizeof(val)] = val;	/* one word per iteration */
		itr += sizeof(val);
	}
}
217 | |||
218 | |||
219 | /** | ||
220 | * Extract DRP Availability bitmap from the notification. | ||
221 | * | ||
222 | * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes | ||
223 | * We convert that to our internal representation. | ||
224 | */ | ||
225 | static | ||
226 | int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp) | ||
227 | { | ||
228 | struct device *dev = &evt->rc->uwb_dev.dev; | ||
229 | struct uwb_rc_evt_drp_avail *drp_evt; | ||
230 | int result = -EINVAL; | ||
231 | |||
232 | /* Is there enough data to decode the event? */ | ||
233 | if (evt->notif.size < sizeof(*drp_evt)) { | ||
234 | dev_err(dev, "DRP Availability Change: Not enough " | ||
235 | "data to decode event [%zu bytes, %zu " | ||
236 | "needed]\n", evt->notif.size, sizeof(*drp_evt)); | ||
237 | goto error; | ||
238 | } | ||
239 | drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb); | ||
240 | buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8); | ||
241 | result = 0; | ||
242 | error: | ||
243 | return result; | ||
244 | } | ||
245 | |||
246 | |||
/**
 * Process an incoming DRP Availability notification.
 *
 * @evt: Event information (packs the actual event data, which
 *       radio controller it came to, etc).
 *
 * @returns: 0 on success (so uwbd() frees the event buffer), < 0
 *           on error.
 *
 * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
 * the MAS slot is available, bits set to ZERO indicate that the slot
 * is busy.
 *
 * So we clear available slots, we set used slots :)
 *
 * The notification only marks non-availability based on the BP and
 * received DRP IEs that are not for this radio controller. A copy of
 * this bitmap is needed to generate the real availability (which
 * includes local and pending reservations).
 *
 * The DRP Availability IE that this radio controller emits will need
 * to be updated.
 */
int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
{
	int result;
	struct uwb_rc *rc = evt->rc;
	DECLARE_BITMAP(bmp, UWB_NUM_MAS);

	result = uwbd_evt_get_drp_avail(evt, bmp);
	if (result < 0)
		return result;

	/* rsvs_mutex guards the availability state against the
	 * reservation manager */
	mutex_lock(&rc->rsvs_mutex);
	bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
	rc->drp_avail.ie_valid = false;	/* IE must be regenerated */
	mutex_unlock(&rc->rsvs_mutex);

	uwb_rsv_sched_update(rc);

	return 0;
}
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c new file mode 100644 index 000000000000..882724c5f126 --- /dev/null +++ b/drivers/uwb/drp-ie.c | |||
@@ -0,0 +1,232 @@ | |||
1 | /* | ||
2 | * UWB DRP IE management. | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Intel Corporation | ||
5 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #include <linux/version.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/random.h> | ||
22 | #include <linux/uwb.h> | ||
23 | |||
24 | #include "uwb-internal.h" | ||
25 | |||
26 | /* | ||
27 | * Allocate a DRP IE. | ||
28 | * | ||
29 | * To save having to free/allocate a DRP IE when its MAS changes, | ||
 30 | * enough memory is allocated for the maximum number of DRP | ||
31 | * allocation fields. This gives an overhead per reservation of up to | ||
32 | * (UWB_NUM_ZONES - 1) * 4 = 60 octets. | ||
33 | */ | ||
34 | static struct uwb_ie_drp *uwb_drp_ie_alloc(void) | ||
35 | { | ||
36 | struct uwb_ie_drp *drp_ie; | ||
37 | unsigned tiebreaker; | ||
38 | |||
39 | drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + | ||
40 | UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), | ||
41 | GFP_KERNEL); | ||
42 | if (drp_ie) { | ||
43 | drp_ie->hdr.element_id = UWB_IE_DRP; | ||
44 | |||
45 | get_random_bytes(&tiebreaker, sizeof(unsigned)); | ||
46 | uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1); | ||
47 | } | ||
48 | return drp_ie; | ||
49 | } | ||
50 | |||
51 | |||
52 | /* | ||
53 | * Fill a DRP IE's allocation fields from a MAS bitmap. | ||
54 | */ | ||
55 | static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie, | ||
56 | struct uwb_mas_bm *mas) | ||
57 | { | ||
58 | int z, i, num_fields = 0, next = 0; | ||
59 | struct uwb_drp_alloc *zones; | ||
60 | __le16 current_bmp; | ||
61 | DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS); | ||
62 | DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE); | ||
63 | |||
64 | zones = drp_ie->allocs; | ||
65 | |||
66 | bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS); | ||
67 | |||
68 | /* Determine unique MAS bitmaps in zones from bitmap. */ | ||
69 | for (z = 0; z < UWB_NUM_ZONES; z++) { | ||
70 | bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE); | ||
71 | if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) { | ||
72 | bool found = false; | ||
73 | current_bmp = (__le16) *tmp_mas_bm; | ||
74 | for (i = 0; i < next; i++) { | ||
75 | if (current_bmp == zones[i].mas_bm) { | ||
76 | zones[i].zone_bm |= 1 << z; | ||
77 | found = true; | ||
78 | break; | ||
79 | } | ||
80 | } | ||
81 | if (!found) { | ||
82 | num_fields++; | ||
83 | zones[next].zone_bm = 1 << z; | ||
84 | zones[next].mas_bm = current_bmp; | ||
85 | next++; | ||
86 | } | ||
87 | } | ||
88 | bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS); | ||
89 | } | ||
90 | |||
91 | /* Store in format ready for transmission (le16). */ | ||
92 | for (i = 0; i < num_fields; i++) { | ||
93 | drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm); | ||
94 | drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm); | ||
95 | } | ||
96 | |||
97 | drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr) | ||
98 | + num_fields * sizeof(struct uwb_drp_alloc); | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * uwb_drp_ie_update - update a reservation's DRP IE | ||
103 | * @rsv: the reservation | ||
104 | */ | ||
105 | int uwb_drp_ie_update(struct uwb_rsv *rsv) | ||
106 | { | ||
107 | struct device *dev = &rsv->rc->uwb_dev.dev; | ||
108 | struct uwb_ie_drp *drp_ie; | ||
109 | int reason_code, status; | ||
110 | |||
111 | switch (rsv->state) { | ||
112 | case UWB_RSV_STATE_NONE: | ||
113 | kfree(rsv->drp_ie); | ||
114 | rsv->drp_ie = NULL; | ||
115 | return 0; | ||
116 | case UWB_RSV_STATE_O_INITIATED: | ||
117 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
118 | status = 0; | ||
119 | break; | ||
120 | case UWB_RSV_STATE_O_PENDING: | ||
121 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
122 | status = 0; | ||
123 | break; | ||
124 | case UWB_RSV_STATE_O_MODIFIED: | ||
125 | reason_code = UWB_DRP_REASON_MODIFIED; | ||
126 | status = 1; | ||
127 | break; | ||
128 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
129 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
130 | status = 1; | ||
131 | break; | ||
132 | case UWB_RSV_STATE_T_ACCEPTED: | ||
133 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
134 | status = 1; | ||
135 | break; | ||
136 | case UWB_RSV_STATE_T_DENIED: | ||
137 | reason_code = UWB_DRP_REASON_DENIED; | ||
138 | status = 0; | ||
139 | break; | ||
140 | default: | ||
141 | dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state); | ||
142 | return -EINVAL; | ||
143 | } | ||
144 | |||
145 | if (rsv->drp_ie == NULL) { | ||
146 | rsv->drp_ie = uwb_drp_ie_alloc(); | ||
147 | if (rsv->drp_ie == NULL) | ||
148 | return -ENOMEM; | ||
149 | } | ||
150 | drp_ie = rsv->drp_ie; | ||
151 | |||
152 | uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); | ||
153 | uwb_ie_drp_set_status(drp_ie, status); | ||
154 | uwb_ie_drp_set_reason_code(drp_ie, reason_code); | ||
155 | uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); | ||
156 | uwb_ie_drp_set_type(drp_ie, rsv->type); | ||
157 | |||
158 | if (uwb_rsv_is_owner(rsv)) { | ||
159 | switch (rsv->target.type) { | ||
160 | case UWB_RSV_TARGET_DEV: | ||
161 | drp_ie->dev_addr = rsv->target.dev->dev_addr; | ||
162 | break; | ||
163 | case UWB_RSV_TARGET_DEVADDR: | ||
164 | drp_ie->dev_addr = rsv->target.devaddr; | ||
165 | break; | ||
166 | } | ||
167 | } else | ||
168 | drp_ie->dev_addr = rsv->owner->dev_addr; | ||
169 | |||
170 | uwb_drp_ie_from_bm(drp_ie, &rsv->mas); | ||
171 | |||
172 | rsv->ie_valid = true; | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Set MAS bits from given MAS bitmap in a single zone of large bitmap. | ||
178 | * | ||
179 | * We are given a zone id and the MAS bitmap of bits that need to be set in | ||
180 | * this zone. Note that this zone may already have bits set and this only | ||
181 | * adds settings - we cannot simply assign the MAS bitmap contents to the | ||
 182 | * zone contents. We iterate over the bits (MAS) in the zone and set the | ||
183 | * bits that are set in the given MAS bitmap. | ||
184 | */ | ||
185 | static | ||
186 | void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm) | ||
187 | { | ||
188 | int mas; | ||
189 | u16 mas_mask; | ||
190 | |||
191 | for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { | ||
192 | mas_mask = 1 << mas; | ||
193 | if (mas_bm & mas_mask) | ||
194 | set_bit(zone * UWB_NUM_ZONES + mas, bm->bm); | ||
195 | } | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap | ||
200 | * @mas: MAS bitmap that will be populated to correspond to the | ||
201 | * allocation fields in the DRP IE | ||
202 | * @drp_ie: the DRP IE that contains the allocation fields. | ||
203 | * | ||
204 | * The input format is an array of MAS allocation fields (16 bit Zone | ||
205 | * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section | ||
206 | * 16.8.6. The output is a full 256 bit MAS bitmap. | ||
207 | * | ||
208 | * We go over all the allocation fields, for each allocation field we | ||
209 | * know which zones are impacted. We iterate over all the zones | ||
210 | * impacted and call a function that will set the correct MAS bits in | ||
211 | * each zone. | ||
212 | */ | ||
213 | void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) | ||
214 | { | ||
215 | int numallocs = (drp_ie->hdr.length - 4) / 4; | ||
216 | const struct uwb_drp_alloc *alloc; | ||
217 | int cnt; | ||
218 | u16 zone_bm, mas_bm; | ||
219 | u8 zone; | ||
220 | u16 zone_mask; | ||
221 | |||
222 | for (cnt = 0; cnt < numallocs; cnt++) { | ||
223 | alloc = &drp_ie->allocs[cnt]; | ||
224 | zone_bm = le16_to_cpu(alloc->zone_bm); | ||
225 | mas_bm = le16_to_cpu(alloc->mas_bm); | ||
226 | for (zone = 0; zone < UWB_NUM_ZONES; zone++) { | ||
227 | zone_mask = 1 << zone; | ||
228 | if (zone_bm & zone_mask) | ||
229 | uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm); | ||
230 | } | ||
231 | } | ||
232 | } | ||
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c new file mode 100644 index 000000000000..c0b1e5e2bd08 --- /dev/null +++ b/drivers/uwb/drp.c | |||
@@ -0,0 +1,461 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Dynamic Reservation Protocol handling | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | #include <linux/kthread.h> | ||
22 | #include <linux/freezer.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include "uwb-internal.h" | ||
25 | |||
26 | /** | ||
27 | * Construct and send the SET DRP IE | ||
28 | * | ||
29 | * @rc: UWB Host controller | ||
30 | * @returns: >= 0 number of bytes still available in the beacon | ||
31 | * < 0 errno code on error. | ||
32 | * | ||
33 | * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the | ||
34 | * device to include in its beacon at the same time. We thus have to | ||
35 | * traverse all reservations and include the DRP IEs of all PENDING | ||
36 | * and NEGOTIATED reservations in a SET DRP command for transmission. | ||
37 | * | ||
38 | * A DRP Availability IE is appended. | ||
39 | * | ||
40 | * rc->uwb_dev.mutex is held | ||
41 | * | ||
42 | * FIXME We currently ignore the returned value indicating the remaining space | ||
43 | * in beacon. This could be used to deny reservation requests earlier if | ||
44 | * determined that they would cause the beacon space to be exceeded. | ||
45 | */ | ||
46 | static | ||
47 | int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) | ||
48 | { | ||
49 | int result; | ||
50 | struct device *dev = &rc->uwb_dev.dev; | ||
51 | struct uwb_rc_cmd_set_drp_ie *cmd; | ||
52 | struct uwb_rc_evt_set_drp_ie reply; | ||
53 | struct uwb_rsv *rsv; | ||
54 | int num_bytes = 0; | ||
55 | u8 *IEDataptr; | ||
56 | |||
57 | result = -ENOMEM; | ||
58 | /* First traverse all reservations to determine memory needed. */ | ||
59 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
60 | if (rsv->drp_ie != NULL) | ||
61 | num_bytes += rsv->drp_ie->hdr.length + 2; | ||
62 | } | ||
63 | num_bytes += sizeof(rc->drp_avail.ie); | ||
64 | cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); | ||
65 | if (cmd == NULL) | ||
66 | goto error; | ||
67 | cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; | ||
68 | cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE); | ||
69 | cmd->wIELength = num_bytes; | ||
70 | IEDataptr = (u8 *)&cmd->IEData[0]; | ||
71 | |||
72 | /* Next traverse all reservations to place IEs in allocated memory. */ | ||
73 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
74 | if (rsv->drp_ie != NULL) { | ||
75 | memcpy(IEDataptr, rsv->drp_ie, | ||
76 | rsv->drp_ie->hdr.length + 2); | ||
77 | IEDataptr += rsv->drp_ie->hdr.length + 2; | ||
78 | } | ||
79 | } | ||
80 | memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); | ||
81 | |||
82 | reply.rceb.bEventType = UWB_RC_CET_GENERAL; | ||
83 | reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE; | ||
84 | result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb, | ||
85 | sizeof(*cmd) + num_bytes, &reply.rceb, | ||
86 | sizeof(reply)); | ||
87 | if (result < 0) | ||
88 | goto error_cmd; | ||
89 | result = le16_to_cpu(reply.wRemainingSpace); | ||
90 | if (reply.bResultCode != UWB_RC_RES_SUCCESS) { | ||
91 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution " | ||
92 | "failed: %s (%d). RemainingSpace in beacon " | ||
93 | "= %d\n", uwb_rc_strerror(reply.bResultCode), | ||
94 | reply.bResultCode, result); | ||
95 | result = -EIO; | ||
96 | } else { | ||
97 | dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon " | ||
98 | "= %d.\n", result); | ||
99 | result = 0; | ||
100 | } | ||
101 | error_cmd: | ||
102 | kfree(cmd); | ||
103 | error: | ||
104 | return result; | ||
105 | |||
106 | } | ||
107 | /** | ||
108 | * Send all DRP IEs associated with this host | ||
109 | * | ||
110 | * @returns: >= 0 number of bytes still available in the beacon | ||
111 | * < 0 errno code on error. | ||
112 | * | ||
113 | * As per the protocol we obtain the host controller device lock to access | ||
114 | * bandwidth structures. | ||
115 | */ | ||
116 | int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) | ||
117 | { | ||
118 | int result; | ||
119 | |||
120 | mutex_lock(&rc->uwb_dev.mutex); | ||
121 | result = uwb_rc_gen_send_drp_ie(rc); | ||
122 | mutex_unlock(&rc->uwb_dev.mutex); | ||
123 | return result; | ||
124 | } | ||
125 | |||
126 | void uwb_drp_handle_timeout(struct uwb_rsv *rsv) | ||
127 | { | ||
128 | struct device *dev = &rsv->rc->uwb_dev.dev; | ||
129 | |||
130 | dev_dbg(dev, "reservation timeout in state %s (%d)\n", | ||
131 | uwb_rsv_state_str(rsv->state), rsv->state); | ||
132 | |||
133 | switch (rsv->state) { | ||
134 | case UWB_RSV_STATE_O_INITIATED: | ||
135 | if (rsv->is_multicast) { | ||
136 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
137 | return; | ||
138 | } | ||
139 | break; | ||
140 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
141 | if (rsv->is_multicast) | ||
142 | return; | ||
143 | break; | ||
144 | default: | ||
145 | break; | ||
146 | } | ||
147 | uwb_rsv_remove(rsv); | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Based on the DRP IE, transition a target reservation to a new | ||
152 | * state. | ||
153 | */ | ||
154 | static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, | ||
155 | struct uwb_ie_drp *drp_ie) | ||
156 | { | ||
157 | struct device *dev = &rc->uwb_dev.dev; | ||
158 | int status; | ||
159 | enum uwb_drp_reason reason_code; | ||
160 | |||
161 | status = uwb_ie_drp_status(drp_ie); | ||
162 | reason_code = uwb_ie_drp_reason_code(drp_ie); | ||
163 | |||
164 | if (status) { | ||
165 | switch (reason_code) { | ||
166 | case UWB_DRP_REASON_ACCEPTED: | ||
167 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
168 | break; | ||
169 | case UWB_DRP_REASON_MODIFIED: | ||
170 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | ||
171 | reason_code, status); | ||
172 | break; | ||
173 | default: | ||
174 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
175 | reason_code, status); | ||
176 | } | ||
177 | } else { | ||
178 | switch (reason_code) { | ||
179 | case UWB_DRP_REASON_ACCEPTED: | ||
180 | /* New reservations are handled in uwb_rsv_find(). */ | ||
181 | break; | ||
182 | case UWB_DRP_REASON_DENIED: | ||
183 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | ||
184 | break; | ||
185 | case UWB_DRP_REASON_CONFLICT: | ||
186 | case UWB_DRP_REASON_MODIFIED: | ||
187 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | ||
188 | reason_code, status); | ||
189 | break; | ||
190 | default: | ||
191 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
192 | reason_code, status); | ||
193 | } | ||
194 | } | ||
195 | } | ||
196 | |||
197 | /* | ||
198 | * Based on the DRP IE, transition an owner reservation to a new | ||
199 | * state. | ||
200 | */ | ||
201 | static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | ||
202 | struct uwb_ie_drp *drp_ie) | ||
203 | { | ||
204 | struct device *dev = &rc->uwb_dev.dev; | ||
205 | int status; | ||
206 | enum uwb_drp_reason reason_code; | ||
207 | |||
208 | status = uwb_ie_drp_status(drp_ie); | ||
209 | reason_code = uwb_ie_drp_reason_code(drp_ie); | ||
210 | |||
211 | if (status) { | ||
212 | switch (reason_code) { | ||
213 | case UWB_DRP_REASON_ACCEPTED: | ||
214 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
215 | break; | ||
216 | case UWB_DRP_REASON_MODIFIED: | ||
217 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | ||
218 | reason_code, status); | ||
219 | break; | ||
220 | default: | ||
221 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
222 | reason_code, status); | ||
223 | } | ||
224 | } else { | ||
225 | switch (reason_code) { | ||
226 | case UWB_DRP_REASON_PENDING: | ||
227 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING); | ||
228 | break; | ||
229 | case UWB_DRP_REASON_DENIED: | ||
230 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | ||
231 | break; | ||
232 | case UWB_DRP_REASON_CONFLICT: | ||
233 | case UWB_DRP_REASON_MODIFIED: | ||
234 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | ||
235 | reason_code, status); | ||
236 | break; | ||
237 | default: | ||
238 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
239 | reason_code, status); | ||
240 | } | ||
241 | } | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * Process a received DRP IE, it's either for a reservation owned by | ||
246 | * the RC or targeted at it (or it's for a WUSB cluster reservation). | ||
247 | */ | ||
248 | static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | ||
249 | struct uwb_ie_drp *drp_ie) | ||
250 | { | ||
251 | struct uwb_rsv *rsv; | ||
252 | |||
253 | rsv = uwb_rsv_find(rc, src, drp_ie); | ||
254 | if (!rsv) { | ||
255 | /* | ||
256 | * No reservation? It's either for a recently | ||
257 | * terminated reservation; or the DRP IE couldn't be | ||
258 | * processed (e.g., an invalid IE or out of memory). | ||
259 | */ | ||
260 | return; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * Do nothing with DRP IEs for reservations that have been | ||
265 | * terminated. | ||
266 | */ | ||
267 | if (rsv->state == UWB_RSV_STATE_NONE) { | ||
268 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | ||
269 | return; | ||
270 | } | ||
271 | |||
272 | if (uwb_ie_drp_owner(drp_ie)) | ||
273 | uwb_drp_process_target(rc, rsv, drp_ie); | ||
274 | else | ||
275 | uwb_drp_process_owner(rc, rsv, drp_ie); | ||
276 | } | ||
277 | |||
278 | |||
279 | /* | ||
280 | * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) | ||
281 | * from a device. | ||
282 | */ | ||
283 | static | ||
284 | void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | ||
285 | size_t ielen, struct uwb_dev *src_dev) | ||
286 | { | ||
287 | struct device *dev = &rc->uwb_dev.dev; | ||
288 | struct uwb_ie_hdr *ie_hdr; | ||
289 | void *ptr; | ||
290 | |||
291 | ptr = drp_evt->ie_data; | ||
292 | for (;;) { | ||
293 | ie_hdr = uwb_ie_next(&ptr, &ielen); | ||
294 | if (!ie_hdr) | ||
295 | break; | ||
296 | |||
297 | switch (ie_hdr->element_id) { | ||
298 | case UWB_IE_DRP_AVAILABILITY: | ||
299 | /* FIXME: does something need to be done with this? */ | ||
300 | break; | ||
301 | case UWB_IE_DRP: | ||
302 | uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr); | ||
303 | break; | ||
304 | default: | ||
305 | dev_warn(dev, "unexpected IE in DRP notification\n"); | ||
306 | break; | ||
307 | } | ||
308 | } | ||
309 | |||
310 | if (ielen > 0) | ||
311 | dev_warn(dev, "%d octets remaining in DRP notification\n", | ||
312 | (int)ielen); | ||
313 | } | ||
314 | |||
315 | |||
316 | /* | ||
317 | * Go through all the DRP IEs and find the ones that conflict with our | ||
318 | * reservations. | ||
319 | * | ||
 320 | * FIXME: must resolve the conflict according to the rules in | ||
321 | * [ECMA-368]. | ||
322 | */ | ||
323 | static | ||
324 | void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | ||
325 | size_t ielen, struct uwb_dev *src_dev) | ||
326 | { | ||
327 | struct device *dev = &rc->uwb_dev.dev; | ||
328 | struct uwb_ie_hdr *ie_hdr; | ||
329 | struct uwb_ie_drp *drp_ie; | ||
330 | void *ptr; | ||
331 | |||
332 | ptr = drp_evt->ie_data; | ||
333 | for (;;) { | ||
334 | ie_hdr = uwb_ie_next(&ptr, &ielen); | ||
335 | if (!ie_hdr) | ||
336 | break; | ||
337 | |||
338 | drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr); | ||
339 | |||
340 | /* FIXME: check if this DRP IE conflicts. */ | ||
341 | } | ||
342 | |||
343 | if (ielen > 0) | ||
344 | dev_warn(dev, "%d octets remaining in DRP notification\n", | ||
345 | (int)ielen); | ||
346 | } | ||
347 | |||
348 | |||
349 | /* | ||
350 | * Terminate all reservations owned by, or targeted at, 'uwb_dev'. | ||
351 | */ | ||
352 | static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev) | ||
353 | { | ||
354 | struct uwb_rsv *rsv; | ||
355 | |||
356 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
357 | if (rsv->owner == uwb_dev | ||
358 | || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev)) | ||
359 | uwb_rsv_remove(rsv); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | |||
364 | /** | ||
365 | * uwbd_evt_handle_rc_drp - handle a DRP_IE event | ||
366 | * @evt: the DRP_IE event from the radio controller | ||
367 | * | ||
368 | * This processes DRP notifications from the radio controller, either | ||
369 | * initiating a new reservation or transitioning an existing | ||
370 | * reservation into a different state. | ||
371 | * | ||
372 | * DRP notifications can occur for three different reasons: | ||
373 | * | ||
374 | * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as | ||
 375 | * the target or source have been received. | ||
376 | * | ||
377 | * These DRP IEs could be new or for an existing reservation. | ||
378 | * | ||
379 | * If the DRP IE for an existing reservation ceases to be to | ||
 380 | * received for at least mMaxLostBeacons, the reservation should be | ||
381 | * considered to be terminated. Note that the TERMINATE reason (see | ||
382 | * below) may not always be signalled (e.g., the remote device has | ||
383 | * two or more reservations established with the RC). | ||
384 | * | ||
385 | * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon | ||
386 | * group conflict with the RC's reservations. | ||
387 | * | ||
388 | * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received | ||
389 | * from a device (i.e., it's terminated all reservations). | ||
390 | * | ||
391 | * Only the software state of the reservations is changed; the setting | ||
392 | * of the radio controller's DRP IEs is done after all the events in | ||
393 | * an event buffer are processed. This saves waiting multiple times | ||
394 | * for the SET_DRP_IE command to complete. | ||
395 | */ | ||
396 | int uwbd_evt_handle_rc_drp(struct uwb_event *evt) | ||
397 | { | ||
398 | struct device *dev = &evt->rc->uwb_dev.dev; | ||
399 | struct uwb_rc *rc = evt->rc; | ||
400 | struct uwb_rc_evt_drp *drp_evt; | ||
401 | size_t ielength, bytes_left; | ||
402 | struct uwb_dev_addr src_addr; | ||
403 | struct uwb_dev *src_dev; | ||
404 | int reason; | ||
405 | |||
406 | /* Is there enough data to decode the event (and any IEs in | ||
407 | its payload)? */ | ||
408 | if (evt->notif.size < sizeof(*drp_evt)) { | ||
409 | dev_err(dev, "DRP event: Not enough data to decode event " | ||
410 | "[%zu bytes left, %zu needed]\n", | ||
411 | evt->notif.size, sizeof(*drp_evt)); | ||
412 | return 0; | ||
413 | } | ||
414 | bytes_left = evt->notif.size - sizeof(*drp_evt); | ||
415 | drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb); | ||
416 | ielength = le16_to_cpu(drp_evt->ie_length); | ||
417 | if (bytes_left != ielength) { | ||
418 | dev_err(dev, "DRP event: Not enough data in payload [%zu" | ||
419 | "bytes left, %zu declared in the event]\n", | ||
420 | bytes_left, ielength); | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr)); | ||
425 | src_dev = uwb_dev_get_by_devaddr(rc, &src_addr); | ||
426 | if (!src_dev) { | ||
427 | /* | ||
428 | * A DRP notification from an unrecognized device. | ||
429 | * | ||
430 | * This is probably from a WUSB device that doesn't | ||
431 | * have an EUI-48 and therefore doesn't show up in the | ||
432 | * UWB device database. It's safe to simply ignore | ||
433 | * these. | ||
434 | */ | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | mutex_lock(&rc->rsvs_mutex); | ||
439 | |||
440 | reason = uwb_rc_evt_drp_reason(drp_evt); | ||
441 | |||
442 | switch (reason) { | ||
443 | case UWB_DRP_NOTIF_DRP_IE_RCVD: | ||
444 | uwb_drp_process_all(rc, drp_evt, ielength, src_dev); | ||
445 | break; | ||
446 | case UWB_DRP_NOTIF_CONFLICT: | ||
447 | uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev); | ||
448 | break; | ||
449 | case UWB_DRP_NOTIF_TERMINATE: | ||
450 | uwb_drp_terminate_all(rc, src_dev); | ||
451 | break; | ||
452 | default: | ||
453 | dev_warn(dev, "ignored DRP event with reason code: %d\n", reason); | ||
454 | break; | ||
455 | } | ||
456 | |||
457 | mutex_unlock(&rc->rsvs_mutex); | ||
458 | |||
459 | uwb_dev_put(src_dev); | ||
460 | return 0; | ||
461 | } | ||
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c new file mode 100644 index 000000000000..5fe566b7c845 --- /dev/null +++ b/drivers/uwb/est.c | |||
@@ -0,0 +1,477 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band Radio Control | ||
3 | * Event Size Tables management | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | * | ||
25 | * Infrastructure, code and data tables for guessing the size of | ||
26 | * events received on the notification endpoints of UWB radio | ||
27 | * controllers. | ||
28 | * | ||
29 | * You define a table of events and for each, its size and how to get | ||
30 | * the extra size. | ||
31 | * | ||
32 | * ENTRY POINTS: | ||
33 | * | ||
34 | * uwb_est_{init/destroy}(): To initialize/release the EST subsystem. | ||
35 | * | ||
36 | * uwb_est_[u]register(): To un/register event size tables | ||
37 | * uwb_est_grow() | ||
38 | * | ||
39 | * uwb_est_find_size(): Get the size of an event | ||
40 | * uwb_est_get_size() | ||
41 | */ | ||
42 | #include <linux/spinlock.h> | ||
43 | #define D_LOCAL 0 | ||
44 | #include <linux/uwb/debug.h> | ||
45 | #include "uwb-internal.h" | ||
46 | |||
47 | |||
48 | struct uwb_est { | ||
49 | u16 type_event_high; | ||
50 | u16 vendor, product; | ||
51 | u8 entries; | ||
52 | const struct uwb_est_entry *entry; | ||
53 | }; | ||
54 | |||
55 | |||
56 | static struct uwb_est *uwb_est; | ||
57 | static u8 uwb_est_size; | ||
58 | static u8 uwb_est_used; | ||
59 | static DEFINE_RWLOCK(uwb_est_lock); | ||
60 | |||
61 | /** | ||
62 | * WUSB Standard Event Size Table, HWA-RC interface | ||
63 | * | ||
64 | * Sizes for events and notifications type 0 (general), high nibble 0. | ||
65 | */ | ||
66 | static | ||
67 | struct uwb_est_entry uwb_est_00_00xx[] = { | ||
68 | [UWB_RC_EVT_IE_RCV] = { | ||
69 | .size = sizeof(struct uwb_rc_evt_ie_rcv), | ||
70 | .offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength), | ||
71 | }, | ||
72 | [UWB_RC_EVT_BEACON] = { | ||
73 | .size = sizeof(struct uwb_rc_evt_beacon), | ||
74 | .offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength), | ||
75 | }, | ||
76 | [UWB_RC_EVT_BEACON_SIZE] = { | ||
77 | .size = sizeof(struct uwb_rc_evt_beacon_size), | ||
78 | }, | ||
79 | [UWB_RC_EVT_BPOIE_CHANGE] = { | ||
80 | .size = sizeof(struct uwb_rc_evt_bpoie_change), | ||
81 | .offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change, | ||
82 | wBPOIELength), | ||
83 | }, | ||
84 | [UWB_RC_EVT_BP_SLOT_CHANGE] = { | ||
85 | .size = sizeof(struct uwb_rc_evt_bp_slot_change), | ||
86 | }, | ||
87 | [UWB_RC_EVT_BP_SWITCH_IE_RCV] = { | ||
88 | .size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv), | ||
89 | .offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength), | ||
90 | }, | ||
91 | [UWB_RC_EVT_DEV_ADDR_CONFLICT] = { | ||
92 | .size = sizeof(struct uwb_rc_evt_dev_addr_conflict), | ||
93 | }, | ||
94 | [UWB_RC_EVT_DRP_AVAIL] = { | ||
95 | .size = sizeof(struct uwb_rc_evt_drp_avail) | ||
96 | }, | ||
97 | [UWB_RC_EVT_DRP] = { | ||
98 | .size = sizeof(struct uwb_rc_evt_drp), | ||
99 | .offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length), | ||
100 | }, | ||
101 | [UWB_RC_EVT_BP_SWITCH_STATUS] = { | ||
102 | .size = sizeof(struct uwb_rc_evt_bp_switch_status), | ||
103 | }, | ||
104 | [UWB_RC_EVT_CMD_FRAME_RCV] = { | ||
105 | .size = sizeof(struct uwb_rc_evt_cmd_frame_rcv), | ||
106 | .offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength), | ||
107 | }, | ||
108 | [UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = { | ||
109 | .size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv), | ||
110 | .offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength), | ||
111 | }, | ||
112 | [UWB_RC_CMD_CHANNEL_CHANGE] = { | ||
113 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
114 | }, | ||
115 | [UWB_RC_CMD_DEV_ADDR_MGMT] = { | ||
116 | .size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) }, | ||
117 | [UWB_RC_CMD_GET_IE] = { | ||
118 | .size = sizeof(struct uwb_rc_evt_get_ie), | ||
119 | .offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength), | ||
120 | }, | ||
121 | [UWB_RC_CMD_RESET] = { | ||
122 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
123 | }, | ||
124 | [UWB_RC_CMD_SCAN] = { | ||
125 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
126 | }, | ||
127 | [UWB_RC_CMD_SET_BEACON_FILTER] = { | ||
128 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
129 | }, | ||
130 | [UWB_RC_CMD_SET_DRP_IE] = { | ||
131 | .size = sizeof(struct uwb_rc_evt_set_drp_ie), | ||
132 | }, | ||
133 | [UWB_RC_CMD_SET_IE] = { | ||
134 | .size = sizeof(struct uwb_rc_evt_set_ie), | ||
135 | }, | ||
136 | [UWB_RC_CMD_SET_NOTIFICATION_FILTER] = { | ||
137 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
138 | }, | ||
139 | [UWB_RC_CMD_SET_TX_POWER] = { | ||
140 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
141 | }, | ||
142 | [UWB_RC_CMD_SLEEP] = { | ||
143 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
144 | }, | ||
145 | [UWB_RC_CMD_START_BEACON] = { | ||
146 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
147 | }, | ||
148 | [UWB_RC_CMD_STOP_BEACON] = { | ||
149 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
150 | }, | ||
151 | [UWB_RC_CMD_BP_MERGE] = { | ||
152 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
153 | }, | ||
154 | [UWB_RC_CMD_SEND_COMMAND_FRAME] = { | ||
155 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
156 | }, | ||
157 | [UWB_RC_CMD_SET_ASIE_NOTIF] = { | ||
158 | .size = sizeof(struct uwb_rc_evt_confirm), | ||
159 | }, | ||
160 | }; | ||
161 | |||
162 | static | ||
163 | struct uwb_est_entry uwb_est_01_00xx[] = { | ||
164 | [UWB_RC_DAA_ENERGY_DETECTED] = { | ||
165 | .size = sizeof(struct uwb_rc_evt_daa_energy_detected), | ||
166 | }, | ||
167 | [UWB_RC_SET_DAA_ENERGY_MASK] = { | ||
168 | .size = sizeof(struct uwb_rc_evt_set_daa_energy_mask), | ||
169 | }, | ||
170 | [UWB_RC_SET_NOTIFICATION_FILTER_EX] = { | ||
171 | .size = sizeof(struct uwb_rc_evt_set_notification_filter_ex), | ||
172 | }, | ||
173 | }; | ||
174 | |||
175 | /** | ||
176 | * Initialize the EST subsystem | ||
177 | * | ||
178 | * Register the standard tables also. | ||
179 | * | ||
180 | * FIXME: tag init | ||
181 | */ | ||
182 | int uwb_est_create(void) | ||
183 | { | ||
184 | int result; | ||
185 | |||
186 | uwb_est_size = 2; | ||
187 | uwb_est_used = 0; | ||
188 | uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL); | ||
189 | if (uwb_est == NULL) | ||
190 | return -ENOMEM; | ||
191 | |||
192 | result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff, | ||
193 | uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx)); | ||
194 | if (result < 0) | ||
195 | goto out; | ||
196 | result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff, | ||
197 | uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx)); | ||
198 | out: | ||
199 | return result; | ||
200 | } | ||
201 | |||
202 | |||
203 | /** Clean it up */ | ||
204 | void uwb_est_destroy(void) | ||
205 | { | ||
206 | kfree(uwb_est); | ||
207 | uwb_est = NULL; | ||
208 | uwb_est_size = uwb_est_used = 0; | ||
209 | } | ||
210 | |||
211 | |||
212 | /** | ||
213 | * Double the capacity of the EST table | ||
214 | * | ||
215 | * @returns 0 if ok, < 0 errno no error. | ||
216 | */ | ||
217 | static | ||
218 | int uwb_est_grow(void) | ||
219 | { | ||
220 | size_t actual_size = uwb_est_size * sizeof(uwb_est[0]); | ||
221 | void *new = kmalloc(2 * actual_size, GFP_ATOMIC); | ||
222 | if (new == NULL) | ||
223 | return -ENOMEM; | ||
224 | memcpy(new, uwb_est, actual_size); | ||
225 | memset(new + actual_size, 0, actual_size); | ||
226 | kfree(uwb_est); | ||
227 | uwb_est = new; | ||
228 | uwb_est_size *= 2; | ||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | |||
233 | /** | ||
234 | * Register an event size table | ||
235 | * | ||
236 | * Makes room for it if the table is full, and then inserts it in the | ||
237 | * right position (entries are sorted by type, event_high, vendor and | ||
238 | * then product). | ||
239 | * | ||
240 | * @vendor: vendor code for matching against the device (0x0000 and | ||
241 | * 0xffff mean any); use 0x0000 to force all to match without | ||
242 | * checking possible vendor specific ones, 0xfffff to match | ||
243 | * after checking vendor specific ones. | ||
244 | * | ||
245 | * @product: product code from that vendor; same matching rules, use | ||
246 | * 0x0000 for not allowing vendor specific matches, 0xffff | ||
247 | * for allowing. | ||
248 | * | ||
249 | * This arragement just makes the tables sort differenty. Because the | ||
250 | * table is sorted by growing type-event_high-vendor-product, a zero | ||
251 | * vendor will match before than a 0x456a vendor, that will match | ||
252 | * before a 0xfffff vendor. | ||
253 | * | ||
254 | * @returns 0 if ok, < 0 errno on error (-ENOENT if not found). | ||
255 | */ | ||
256 | /* FIXME: add bus type to vendor/product code */ | ||
257 | int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product, | ||
258 | const struct uwb_est_entry *entry, size_t entries) | ||
259 | { | ||
260 | unsigned long flags; | ||
261 | unsigned itr; | ||
262 | u16 type_event_high; | ||
263 | int result = 0; | ||
264 | |||
265 | write_lock_irqsave(&uwb_est_lock, flags); | ||
266 | if (uwb_est_used == uwb_est_size) { | ||
267 | result = uwb_est_grow(); | ||
268 | if (result < 0) | ||
269 | goto out; | ||
270 | } | ||
271 | /* Find the right spot to insert it in */ | ||
272 | type_event_high = type << 8 | event_high; | ||
273 | for (itr = 0; itr < uwb_est_used; itr++) | ||
274 | if (uwb_est[itr].type_event_high < type | ||
275 | && uwb_est[itr].vendor < vendor | ||
276 | && uwb_est[itr].product < product) | ||
277 | break; | ||
278 | |||
279 | /* Shift others to make room for the new one? */ | ||
280 | if (itr < uwb_est_used) | ||
281 | memmove(&uwb_est[itr+1], &uwb_est[itr], uwb_est_used - itr); | ||
282 | uwb_est[itr].type_event_high = type << 8 | event_high; | ||
283 | uwb_est[itr].vendor = vendor; | ||
284 | uwb_est[itr].product = product; | ||
285 | uwb_est[itr].entry = entry; | ||
286 | uwb_est[itr].entries = entries; | ||
287 | uwb_est_used++; | ||
288 | out: | ||
289 | write_unlock_irqrestore(&uwb_est_lock, flags); | ||
290 | return result; | ||
291 | } | ||
292 | EXPORT_SYMBOL_GPL(uwb_est_register); | ||
293 | |||
294 | |||
295 | /** | ||
296 | * Unregister an event size table | ||
297 | * | ||
298 | * This just removes the specified entry and moves the ones after it | ||
299 | * to fill in the gap. This is needed to keep the list sorted; no | ||
300 | * reallocation is done to reduce the size of the table. | ||
301 | * | ||
302 | * We unregister by all the data we used to register instead of by | ||
303 | * pointer to the @entry array because we might have used the same | ||
304 | * table for a bunch of IDs (for example). | ||
305 | * | ||
306 | * @returns 0 if ok, < 0 errno on error (-ENOENT if not found). | ||
307 | */ | ||
308 | int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product, | ||
309 | const struct uwb_est_entry *entry, size_t entries) | ||
310 | { | ||
311 | unsigned long flags; | ||
312 | unsigned itr; | ||
313 | struct uwb_est est_cmp = { | ||
314 | .type_event_high = type << 8 | event_high, | ||
315 | .vendor = vendor, | ||
316 | .product = product, | ||
317 | .entry = entry, | ||
318 | .entries = entries | ||
319 | }; | ||
320 | write_lock_irqsave(&uwb_est_lock, flags); | ||
321 | for (itr = 0; itr < uwb_est_used; itr++) | ||
322 | if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp))) | ||
323 | goto found; | ||
324 | write_unlock_irqrestore(&uwb_est_lock, flags); | ||
325 | return -ENOENT; | ||
326 | |||
327 | found: | ||
328 | if (itr < uwb_est_used - 1) /* Not last one? move ones above */ | ||
329 | memmove(&uwb_est[itr], &uwb_est[itr+1], uwb_est_used - itr - 1); | ||
330 | uwb_est_used--; | ||
331 | write_unlock_irqrestore(&uwb_est_lock, flags); | ||
332 | return 0; | ||
333 | } | ||
334 | EXPORT_SYMBOL_GPL(uwb_est_unregister); | ||
335 | |||
336 | |||
337 | /** | ||
338 | * Get the size of an event from a table | ||
339 | * | ||
340 | * @rceb: pointer to the buffer with the event | ||
341 | * @rceb_size: size of the area pointed to by @rceb in bytes. | ||
342 | * @returns: > 0 Size of the event | ||
343 | * -ENOSPC An area big enough was not provided to look | ||
344 | * ahead into the event's guts and guess the size. | ||
345 | * -EINVAL Unknown event code (wEvent). | ||
346 | * | ||
347 | * This will look at the received RCEB and guess what is the total | ||
348 | * size. For variable sized events, it will look further ahead into | ||
349 | * their length field to see how much data should be read. | ||
350 | * | ||
351 | * Note this size is *not* final--the neh (Notification/Event Handle) | ||
352 | * might specificy an extra size to add. | ||
353 | */ | ||
354 | static | ||
355 | ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est, | ||
356 | u8 event_low, const struct uwb_rceb *rceb, | ||
357 | size_t rceb_size) | ||
358 | { | ||
359 | unsigned offset; | ||
360 | ssize_t size; | ||
361 | struct device *dev = &uwb_rc->uwb_dev.dev; | ||
362 | const struct uwb_est_entry *entry; | ||
363 | |||
364 | size = -ENOENT; | ||
365 | if (event_low >= est->entries) { /* in range? */ | ||
366 | dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n", | ||
367 | est, est->type_event_high, est->vendor, est->product, | ||
368 | est->entries, event_low); | ||
369 | goto out; | ||
370 | } | ||
371 | size = -ENOENT; | ||
372 | entry = &est->entry[event_low]; | ||
373 | if (entry->size == 0 && entry->offset == 0) { /* unknown? */ | ||
374 | dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n", | ||
375 | est, est->type_event_high, est->vendor, est->product, | ||
376 | est->entries, event_low); | ||
377 | goto out; | ||
378 | } | ||
379 | offset = entry->offset; /* extra fries with that? */ | ||
380 | if (offset == 0) | ||
381 | size = entry->size; | ||
382 | else { | ||
383 | /* Ops, got an extra size field at 'offset'--read it */ | ||
384 | const void *ptr = rceb; | ||
385 | size_t type_size = 0; | ||
386 | offset--; | ||
387 | size = -ENOSPC; /* enough data for more? */ | ||
388 | switch (entry->type) { | ||
389 | case UWB_EST_16: type_size = sizeof(__le16); break; | ||
390 | case UWB_EST_8: type_size = sizeof(u8); break; | ||
391 | default: BUG(); | ||
392 | } | ||
393 | if (offset + type_size > rceb_size) { | ||
394 | dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: " | ||
395 | "not enough data to read extra size\n", | ||
396 | est, est->type_event_high, est->vendor, | ||
397 | est->product, est->entries); | ||
398 | goto out; | ||
399 | } | ||
400 | size = entry->size; | ||
401 | ptr += offset; | ||
402 | switch (entry->type) { | ||
403 | case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break; | ||
404 | case UWB_EST_8: size += *(u8 *)ptr; break; | ||
405 | default: BUG(); | ||
406 | } | ||
407 | } | ||
408 | out: | ||
409 | return size; | ||
410 | } | ||
411 | |||
412 | |||
/**
 * Guesses the size of a WA event
 *
 * @rc:        radio controller the event came from; used for error
 *             reporting and passed through to uwb_est_get_size().
 * @rceb: pointer to the buffer with the event
 * @rceb_size: size of the area pointed to by @rceb in bytes.
 * @returns: > 0 Size of the event
 *          -ENOSPC An area big enough was not provided to look
 *                  ahead into the event's guts and guess the size.
 *          -ENOENT No registered table knows this event code
 *                  (note: the code returns -ENOENT for this case,
 *                  not -EINVAL as previously documented).
 *
 * This will look at the received RCEB and guess what is the total
 * size by checking all the tables registered with
 * uwb_est_register(). For variable sized events, it will look further
 * ahead into their length field to see how much data should be read.
 *
 * Note this size is *not* final--the neh (Notification/Event Handle)
 * might specify an extra size to add or replace.
 */
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
			  size_t rceb_size)
{
	/* FIXME: add vendor/product data */
	ssize_t size;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long flags;
	unsigned itr;
	u16 type_event_high, event;
	u8 *ptr = (u8 *) rceb;

	read_lock_irqsave(&uwb_est_lock, flags);
	d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x,"
		 " buffer size %ld\n",
		 (unsigned) rceb->bEventType,
		 (unsigned) le16_to_cpu(rceb->wEvent),
		 (unsigned) rceb->bEventContext,
		 (long) rceb_size);
	size = -ENOSPC;
	/* Need at least a full RCEB header to classify the event */
	if (rceb_size < sizeof(*rceb))
		goto out;
	event = le16_to_cpu(rceb->wEvent);
	/* Lookup key is <bEventType:8><wEvent high byte:8>, same layout
	 * uwb_est_register() stores in type_event_high */
	type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
	for (itr = 0; itr < uwb_est_used; itr++) {
		d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n",
			 uwb_est[itr].type_event_high, uwb_est[itr].vendor,
			 uwb_est[itr].product);
		if (uwb_est[itr].type_event_high != type_event_high)
			continue;
		size = uwb_est_get_size(rc, &uwb_est[itr],
					event & 0x00ff, rceb, rceb_size);
		/* try more tables that might handle the same type */
		if (size != -ENOENT)
			goto out;
	}
	/* Fell off the loop: nobody claimed the event */
	dev_dbg(dev, "event 0x%02x/%04x/%02x: no handlers available; "
		"RCEB %02x %02x %02x %02x\n",
		(unsigned) rceb->bEventType,
		(unsigned) le16_to_cpu(rceb->wEvent),
		(unsigned) rceb->bEventContext,
		ptr[0], ptr[1], ptr[2], ptr[3]);
	size = -ENOENT;
out:
	read_unlock_irqrestore(&uwb_est_lock, flags);
	return size;
}
EXPORT_SYMBOL_GPL(uwb_est_find_size);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c new file mode 100644 index 000000000000..3d26fa0f8ae1 --- /dev/null +++ b/drivers/uwb/hwa-rc.c | |||
@@ -0,0 +1,926 @@ | |||
1 | /* | ||
2 | * WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6]) | ||
3 | * Radio Control command/event transport | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * Initialize the Radio Control interface Driver. | ||
24 | * | ||
 * For each device probed, creates a 'struct hwarc' which contains
26 | * just the representation of the UWB Radio Controller, and the logic | ||
27 | * for reading notifications and passing them to the UWB Core. | ||
28 | * | ||
29 | * So we initialize all of those, register the UWB Radio Controller | ||
30 | * and setup the notification/event handle to pipe the notifications | ||
31 | * to the UWB management Daemon. | ||
32 | * | ||
33 | * Command and event filtering. | ||
34 | * | ||
35 | * This is the driver for the Radio Control Interface described in WUSB | ||
36 | * 1.0. The core UWB module assumes that all drivers are compliant to the | ||
37 | * WHCI 0.95 specification. We thus create a filter that parses all | ||
38 | * incoming messages from the (WUSB 1.0) device and manipulate them to | ||
39 | * conform to the WHCI 0.95 specification. Similarly, outgoing messages | ||
40 | * are parsed and manipulated to conform to the WUSB 1.0 compliant messages | ||
41 | * that the device expects. Only a few messages are affected: | ||
42 | * Affected events: | ||
43 | * UWB_RC_EVT_BEACON | ||
44 | * UWB_RC_EVT_BP_SLOT_CHANGE | ||
45 | * UWB_RC_EVT_DRP_AVAIL | ||
46 | * UWB_RC_EVT_DRP | ||
47 | * Affected commands: | ||
48 | * UWB_RC_CMD_SCAN | ||
49 | * UWB_RC_CMD_SET_DRP_IE | ||
50 | * | ||
51 | * | ||
52 | * | ||
53 | */ | ||
54 | #include <linux/version.h> | ||
55 | #include <linux/init.h> | ||
56 | #include <linux/module.h> | ||
57 | #include <linux/usb.h> | ||
58 | #include <linux/usb/wusb.h> | ||
59 | #include <linux/usb/wusb-wa.h> | ||
60 | #include <linux/uwb.h> | ||
61 | #include "uwb-internal.h" | ||
62 | #define D_LOCAL 1 | ||
63 | #include <linux/uwb/debug.h> | ||
64 | |||
65 | /* The device uses commands and events from the WHCI specification, although | ||
66 | * reporting itself as WUSB compliant. */ | ||
67 | #define WUSB_QUIRK_WHCI_CMD_EVT 0x01 | ||
68 | |||
/**
 * Descriptor for an instance of the UWB Radio Control Driver that
 * attaches to the RCI interface of the Host Wired Adapter.
 *
 * Unless there is a lock specific to the 'data members', all access
 * is protected by uwb_rc->mutex.
 *
 * The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to
 * @rd_buffer. Note there is no locking because it is perfectly (heh!)
 * serialized--probe() submits an URB, callback is called, processes
 * the data (synchronously), submits another URB, and so on. There is
 * no concurrent access to the buffer.
 */
struct hwarc {
	struct usb_device *usb_dev;	/* the HWA's USB device */
	struct usb_interface *usb_iface; /* RCI interface we bound to */
	struct uwb_rc *uwb_rc;		/* UWB host controller */
	struct urb *neep_urb;		/* Notification endpoint handling */
	struct edc neep_edc;		/* NEEP error accounting (edc = error
					 * density counter) */
	void *rd_buffer;		/* NEEP read buffer */
};
90 | |||
91 | |||
/* Beacon received notification (WUSB 1.0 [8.6.3.2]).
 * Wire layout as sent by a WUSB 1.0 device; unlike the WHCI 0.95
 * struct uwb_rc_evt_beacon it carries no "Beacon Type" field, which
 * is why the filter below has to fill in a default. */
struct uwb_rc_evt_beacon_WUSB_0100 {
	struct uwb_rceb rceb;		/* common event header */
	u8 bChannelNumber;		/* channel the beacon was heard on */
	__le16 wBPSTOffset;
	u8 bLQI;			/* link quality indicator */
	u8 bRSSI;			/* received signal strength */
	__le16 wBeaconInfoLength;	/* bytes of BeaconInfo[] that follow */
	u8 BeaconInfo[];
} __attribute__((packed));
102 | |||
103 | /** | ||
104 | * Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95 | ||
105 | * | ||
106 | * @header: the incoming event | ||
107 | * @buf_size: size of buffer containing incoming event | ||
108 | * @new_size: size of event after filtering completed | ||
109 | * | ||
110 | * The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at | ||
111 | * the time we receive the beacon from WUSB so we just set it to | ||
112 | * UWB_RC_BEACON_TYPE_NEIGHBOR as a default. | ||
113 | * The solution below allocates memory upon receipt of every beacon from a | ||
114 | * WUSB device. This will deteriorate performance. What is the right way to | ||
115 | * do this? | ||
116 | */ | ||
117 | static | ||
118 | int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc, | ||
119 | struct uwb_rceb **header, | ||
120 | const size_t buf_size, | ||
121 | size_t *new_size) | ||
122 | { | ||
123 | struct uwb_rc_evt_beacon_WUSB_0100 *be; | ||
124 | struct uwb_rc_evt_beacon *newbe; | ||
125 | size_t bytes_left, ielength; | ||
126 | struct device *dev = &rc->uwb_dev.dev; | ||
127 | |||
128 | be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb); | ||
129 | bytes_left = buf_size; | ||
130 | if (bytes_left < sizeof(*be)) { | ||
131 | dev_err(dev, "Beacon Received Notification: Not enough data " | ||
132 | "to decode for filtering (%zu vs %zu bytes needed)\n", | ||
133 | bytes_left, sizeof(*be)); | ||
134 | return -EINVAL; | ||
135 | } | ||
136 | bytes_left -= sizeof(*be); | ||
137 | ielength = le16_to_cpu(be->wBeaconInfoLength); | ||
138 | if (bytes_left < ielength) { | ||
139 | dev_err(dev, "Beacon Received Notification: Not enough data " | ||
140 | "to decode IEs (%zu vs %zu bytes needed)\n", | ||
141 | bytes_left, ielength); | ||
142 | return -EINVAL; | ||
143 | } | ||
144 | newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC); | ||
145 | if (newbe == NULL) | ||
146 | return -ENOMEM; | ||
147 | newbe->rceb = be->rceb; | ||
148 | newbe->bChannelNumber = be->bChannelNumber; | ||
149 | newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR; | ||
150 | newbe->wBPSTOffset = be->wBPSTOffset; | ||
151 | newbe->bLQI = be->bLQI; | ||
152 | newbe->bRSSI = be->bRSSI; | ||
153 | newbe->wBeaconInfoLength = be->wBeaconInfoLength; | ||
154 | memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength); | ||
155 | *header = &newbe->rceb; | ||
156 | *new_size = sizeof(*newbe) + ielength; | ||
157 | return 1; /* calling function will free memory */ | ||
158 | } | ||
159 | |||
160 | |||
/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]).
 * The availability bitmap travels inside a variable-length IE blob;
 * wIELength gives the blob size in bytes. */
struct uwb_rc_evt_drp_avail_WUSB_0100 {
	struct uwb_rceb rceb;	/* common event header */
	__le16 wIELength;	/* bytes of IEData[] that follow */
	u8 IEData[];
} __attribute__((packed));
167 | |||
/**
 * Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95
 *
 * @header: the incoming event
 * @buf_size: size of buffer containing incoming event
 * @new_size: size of event after filtering completed
 *
 * Validates the WUSB event (fixed part present, declared IE length
 * fits the buffer, at least one IE header available), then copies the
 * availability bitmap out of the first IE into a freshly allocated
 * WHCI-layout event.
 *
 * Returns 1 on success (caller owns and frees the new event pointed
 * to by @header), -EINVAL on malformed input, -ENOMEM on allocation
 * failure.
 */
static
int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc,
					 struct uwb_rceb **header,
					 const size_t buf_size,
					 size_t *new_size)
{
	struct uwb_rc_evt_drp_avail_WUSB_0100 *da;
	struct uwb_rc_evt_drp_avail *newda;
	struct uwb_ie_hdr *ie_hdr;
	size_t bytes_left, ielength;
	struct device *dev = &rc->uwb_dev.dev;


	da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb);
	bytes_left = buf_size;
	if (bytes_left < sizeof(*da)) {	/* fixed part present? */
		dev_err(dev, "Not enough data to decode DRP Avail "
			"Notification for filtering. Expected %zu, "
			"received %zu.\n", (size_t)sizeof(*da), bytes_left);
		return -EINVAL;
	}
	bytes_left -= sizeof(*da);
	ielength = le16_to_cpu(da->wIELength);
	if (bytes_left < ielength) {	/* declared IE blob present? */
		dev_err(dev, "DRP Avail Notification filter: IE length "
			"[%zu bytes] does not match actual length "
			"[%zu bytes].\n", ielength, bytes_left);
		return -EINVAL;
	}
	if (ielength < sizeof(*ie_hdr)) {	/* at least one IE header? */
		dev_err(dev, "DRP Avail Notification filter: Not enough "
			"data to decode IE [%zu bytes, %zu needed]\n",
			ielength, sizeof(*ie_hdr));
		return -EINVAL;
	}
	ie_hdr = (void *) da->IEData;
	/* NOTE(review): 32 presumably equals sizeof(newda->bmp) (the
	 * WHCI availability bitmap) -- confirm against struct
	 * uwb_rc_evt_drp_avail before changing either. */
	if (ie_hdr->length > 32) {
		dev_err(dev, "DRP Availability Change event has unexpected "
			"length for filtering. Expected < 32 bytes, "
			"got %zu bytes.\n", (size_t)ie_hdr->length);
		return -EINVAL;
	}
	newda = kzalloc(sizeof(*newda), GFP_ATOMIC);
	if (newda == NULL)
		return -ENOMEM;
	newda->rceb = da->rceb;
	/* Copy only the IE payload (the bitmap), skipping its header */
	memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length);
	*header = &newda->rceb;
	*new_size = sizeof(*newda);
	return 1; /* calling function will free memory */
}
226 | |||
227 | |||
/* DRP notification (WUSB 1.0 [8.6.3.9]).
 * Has no Reason code or beacon slot number; the filter below fills
 * in defaults for those WHCI fields. */
struct uwb_rc_evt_drp_WUSB_0100 {
	struct uwb_rceb rceb;		/* common event header */
	struct uwb_dev_addr wSrcAddr;	/* device the DRP IE came from */
	u8 bExplicit;
	__le16 wIELength;		/* bytes of IEData[] that follow */
	u8 IEData[];
} __attribute__((packed));
236 | |||
237 | /** | ||
238 | * Filter WUSB 1.0 DRP Notification to be WHCI 0.95 | ||
239 | * | ||
240 | * @header: the incoming event | ||
241 | * @buf_size: size of buffer containing incoming event | ||
242 | * @new_size: size of event after filtering completed | ||
243 | * | ||
244 | * It is hard to manage DRP reservations without having a Reason code. | ||
245 | * Unfortunately there is none in the WUSB spec. We just set the default to | ||
246 | * DRP IE RECEIVED. | ||
247 | * We do not currently use the bBeaconSlotNumber value, so we set this to | ||
248 | * zero for now. | ||
249 | */ | ||
250 | static | ||
251 | int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc, | ||
252 | struct uwb_rceb **header, | ||
253 | const size_t buf_size, | ||
254 | size_t *new_size) | ||
255 | { | ||
256 | struct uwb_rc_evt_drp_WUSB_0100 *drpev; | ||
257 | struct uwb_rc_evt_drp *newdrpev; | ||
258 | size_t bytes_left, ielength; | ||
259 | struct device *dev = &rc->uwb_dev.dev; | ||
260 | |||
261 | drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb); | ||
262 | bytes_left = buf_size; | ||
263 | if (bytes_left < sizeof(*drpev)) { | ||
264 | dev_err(dev, "Not enough data to decode DRP Notification " | ||
265 | "for filtering. Expected %zu, received %zu.\n", | ||
266 | (size_t)sizeof(*drpev), bytes_left); | ||
267 | return -EINVAL; | ||
268 | } | ||
269 | ielength = le16_to_cpu(drpev->wIELength); | ||
270 | bytes_left -= sizeof(*drpev); | ||
271 | if (bytes_left < ielength) { | ||
272 | dev_err(dev, "DRP Notification filter: header length [%zu " | ||
273 | "bytes] does not match actual length [%zu " | ||
274 | "bytes].\n", ielength, bytes_left); | ||
275 | return -EINVAL; | ||
276 | } | ||
277 | newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC); | ||
278 | if (newdrpev == NULL) | ||
279 | return -ENOMEM; | ||
280 | newdrpev->rceb = drpev->rceb; | ||
281 | newdrpev->src_addr = drpev->wSrcAddr; | ||
282 | newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD; | ||
283 | newdrpev->beacon_slot_number = 0; | ||
284 | newdrpev->ie_length = drpev->wIELength; | ||
285 | memcpy(newdrpev->ie_data, drpev->IEData, ielength); | ||
286 | *header = &newdrpev->rceb; | ||
287 | *new_size = sizeof(*newdrpev) + ielength; | ||
288 | return 1; /* calling function will free memory */ | ||
289 | } | ||
290 | |||
291 | |||
/* Scan Command (WUSB 1.0 [8.6.2.5]).
 * Kept for reference: it is the WHCI scan command minus the trailing
 * wStartTime field. The filter below does not use this struct; it
 * simply truncates the WHCI command by two bytes. */
struct uwb_rc_cmd_scan_WUSB_0100 {
	struct uwb_rccb rccb;	/* common command header */
	u8 bChannelNumber;
	u8 bScanState;
} __attribute__((packed));
298 | |||
299 | /** | ||
300 | * Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command | ||
301 | * | ||
302 | * @header: command sent to device (compliant to WHCI 0.95) | ||
303 | * @size: size of command sent to device | ||
304 | * | ||
305 | * We only reduce the size by two bytes because the WUSB 1.0 scan command | ||
306 | * does not have the last field (wStarttime). Also, make sure we don't send | ||
307 | * the device an unexpected scan type. | ||
308 | */ | ||
309 | static | ||
310 | int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc, | ||
311 | struct uwb_rccb **header, | ||
312 | size_t *size) | ||
313 | { | ||
314 | struct uwb_rc_cmd_scan *sc; | ||
315 | |||
316 | sc = container_of(*header, struct uwb_rc_cmd_scan, rccb); | ||
317 | |||
318 | if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME) | ||
319 | sc->bScanState = UWB_SCAN_ONLY; | ||
320 | /* Don't send the last two bytes. */ | ||
321 | *size -= 2; | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | |||
/* SET DRP IE command (WUSB 1.0 [8.6.2.7]).
 * Same as the WHCI command plus the extra bExplicit field, which is
 * why the filter below must reallocate into this larger layout. */
struct uwb_rc_cmd_set_drp_ie_WUSB_0100 {
	struct uwb_rccb rccb;		/* common command header */
	u8 bExplicit;			/* WUSB-only field */
	__le16 wIELength;		/* bytes of IEData[] that follow */
	struct uwb_ie_drp IEData[];
} __attribute__((packed));
333 | |||
/**
 * Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command
 *
 * @header: command sent to device (compliant to WHCI 0.95)
 * @size: size of command sent to device
 *
 * WUSB has an extra bExplicit field - we assume always explicit
 * negotiation so this field is set. The command expected by the device is
 * thus larger than the one prepared by the driver so we need to
 * reallocate memory to accommodate this.
 * We trust the driver to send us the correct data so no checking is done
 * on incoming data - even though it is variable length.
 *
 * NOTE(review): the paragraph above says bExplicit "is set", but the
 * code below writes bExplicit = 0 -- confirm which is intended.
 *
 * Returns 1 on success, signalling the caller that @header now points
 * to newly allocated memory it must free; -ENOMEM on allocation
 * failure.
 */
static
int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc,
					  struct uwb_rccb **header,
					  size_t *size)
{
	struct uwb_rc_cmd_set_drp_ie *orgcmd;
	struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd;
	size_t ielength;

	orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb);
	ielength = le16_to_cpu(orgcmd->wIELength);
	cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;
	cmd->rccb = orgcmd->rccb;
	cmd->bExplicit = 0;
	cmd->wIELength = orgcmd->wIELength;	/* already little-endian */
	memcpy(cmd->IEData, orgcmd->IEData, ielength);
	*header = &cmd->rccb;
	*size = sizeof(*cmd) + ielength;
	return 1; /* calling function will free memory */
}
369 | |||
370 | |||
371 | /** | ||
372 | * Filter data from WHCI driver to WUSB device | ||
373 | * | ||
374 | * @header: WHCI 0.95 compliant command from driver | ||
375 | * @size: length of command | ||
376 | * | ||
377 | * The routine managing commands to the device (uwb_rc_cmd()) will call the | ||
378 | * filtering function pointer (if it exists) before it passes any data to | ||
379 | * the device. At this time the command has been formatted according to | ||
380 | * WHCI 0.95 and is ready to be sent to the device. | ||
381 | * | ||
382 | * The filter function will be provided with the current command and its | ||
383 | * length. The function will manipulate the command if necessary and | ||
384 | * potentially reallocate memory for a command that needed more memory that | ||
385 | * the given command. If new memory was created the function will return 1 | ||
386 | * to indicate to the calling function that the memory need to be freed | ||
387 | * when not needed any more. The size will contain the new length of the | ||
388 | * command. | ||
389 | * If memory has not been allocated we rely on the original mechanisms to | ||
390 | * free the memory of the command - even when we reduce the value of size. | ||
391 | */ | ||
392 | static | ||
393 | int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header, | ||
394 | size_t *size) | ||
395 | { | ||
396 | int result; | ||
397 | struct uwb_rccb *rccb = *header; | ||
398 | int cmd = le16_to_cpu(rccb->wCommand); | ||
399 | switch (cmd) { | ||
400 | case UWB_RC_CMD_SCAN: | ||
401 | result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size); | ||
402 | break; | ||
403 | case UWB_RC_CMD_SET_DRP_IE: | ||
404 | result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size); | ||
405 | break; | ||
406 | default: | ||
407 | result = -ENOANO; | ||
408 | break; | ||
409 | } | ||
410 | return result; | ||
411 | } | ||
412 | |||
413 | |||
414 | /** | ||
415 | * Filter data from WHCI driver to WUSB device | ||
416 | * | ||
417 | * @header: WHCI 0.95 compliant command from driver | ||
418 | * @size: length of command | ||
419 | * | ||
420 | * Filter commands based on which protocol the device supports. The WUSB | ||
421 | * errata should be the same as WHCI 0.95 so we do not filter that here - | ||
422 | * only WUSB 1.0. | ||
423 | */ | ||
424 | static | ||
425 | int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header, | ||
426 | size_t *size) | ||
427 | { | ||
428 | int result = -ENOANO; | ||
429 | if (rc->version == 0x0100) | ||
430 | result = hwarc_filter_cmd_WUSB_0100(rc, header, size); | ||
431 | return result; | ||
432 | } | ||
433 | |||
434 | |||
435 | /** | ||
436 | * Compute return value as sum of incoming value and value at given offset | ||
437 | * | ||
438 | * @rceb: event for which we compute the size, it contains a variable | ||
439 | * length field. | ||
440 | * @core_size: size of the "non variable" part of the event | ||
441 | * @offset: place in event where the length of the variable part is stored | ||
442 | * @buf_size: total length of buffer in which event arrived - we need to make | ||
443 | * sure we read the offset in memory that is still part of the event | ||
444 | */ | ||
445 | static | ||
446 | ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, | ||
447 | size_t core_size, size_t offset, | ||
448 | const size_t buf_size) | ||
449 | { | ||
450 | ssize_t size = -ENOSPC; | ||
451 | const void *ptr = rceb; | ||
452 | size_t type_size = sizeof(__le16); | ||
453 | struct device *dev = &rc->uwb_dev.dev; | ||
454 | |||
455 | if (offset + type_size >= buf_size) { | ||
456 | dev_err(dev, "Not enough data to read extra size of event " | ||
457 | "0x%02x/%04x/%02x, only got %zu bytes.\n", | ||
458 | rceb->bEventType, le16_to_cpu(rceb->wEvent), | ||
459 | rceb->bEventContext, buf_size); | ||
460 | goto out; | ||
461 | } | ||
462 | ptr += offset; | ||
463 | size = core_size + le16_to_cpu(*(__le16 *)ptr); | ||
464 | out: | ||
465 | return size; | ||
466 | } | ||
467 | |||
468 | |||
/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]).
 * Same size as the WHCI event; only the interpretation of the data
 * byte differs (see the filter-event comments further down). */
struct uwb_rc_evt_bp_slot_change_WUSB_0100 {
	struct uwb_rceb rceb;	/* common event header */
	u8 bSlotNumber;		/* new beacon slot (WUSB meaning) */
} __attribute__((packed));
474 | |||
475 | |||
476 | /** | ||
477 | * Filter data from WUSB device to WHCI driver | ||
478 | * | ||
479 | * @header: incoming event | ||
480 | * @buf_size: size of buffer in which event arrived | ||
481 | * @_real_size: actual size of event in the buffer | ||
482 | * @new_size: size of event after filtered | ||
483 | * | ||
484 | * We don't know how the buffer is constructed - there may be more than one | ||
485 | * event in it so buffer length does not determine event length. We first | ||
486 | * determine the expected size of the incoming event. This value is passed | ||
487 | * back only if the actual filtering succeeded (so we know the computed | ||
488 | * expected size is correct). This value will be zero if | ||
489 | * the event did not need any filtering. | ||
490 | * | ||
491 | * WHCI interprets the BP Slot Change event's data differently than | ||
492 | * WUSB. The event sizes are exactly the same. The data field | ||
493 | * indicates the new beacon slot in which a RC is transmitting its | ||
494 | * beacon. The maximum value of this is 96 (wMacBPLength ECMA-368 | ||
495 | * 17.16 (Table 117)). We thus know that the WUSB value will not set | ||
496 | * the bit bNoSlot, so we don't really do anything (placeholder). | ||
497 | */ | ||
498 | static | ||
499 | int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header, | ||
500 | const size_t buf_size, size_t *_real_size, | ||
501 | size_t *_new_size) | ||
502 | { | ||
503 | int result = -ENOANO; | ||
504 | struct uwb_rceb *rceb = *header; | ||
505 | int event = le16_to_cpu(rceb->wEvent); | ||
506 | size_t event_size; | ||
507 | size_t core_size, offset; | ||
508 | |||
509 | if (rceb->bEventType != UWB_RC_CET_GENERAL) | ||
510 | goto out; | ||
511 | switch (event) { | ||
512 | case UWB_RC_EVT_BEACON: | ||
513 | core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100); | ||
514 | offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100, | ||
515 | wBeaconInfoLength); | ||
516 | event_size = hwarc_get_event_size(rc, rceb, core_size, | ||
517 | offset, buf_size); | ||
518 | if (event_size < 0) | ||
519 | goto out; | ||
520 | *_real_size = event_size; | ||
521 | result = hwarc_filter_evt_beacon_WUSB_0100(rc, header, | ||
522 | buf_size, _new_size); | ||
523 | break; | ||
524 | case UWB_RC_EVT_BP_SLOT_CHANGE: | ||
525 | *_new_size = *_real_size = | ||
526 | sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100); | ||
527 | result = 0; | ||
528 | break; | ||
529 | |||
530 | case UWB_RC_EVT_DRP_AVAIL: | ||
531 | core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100); | ||
532 | offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100, | ||
533 | wIELength); | ||
534 | event_size = hwarc_get_event_size(rc, rceb, core_size, | ||
535 | offset, buf_size); | ||
536 | if (event_size < 0) | ||
537 | goto out; | ||
538 | *_real_size = event_size; | ||
539 | result = hwarc_filter_evt_drp_avail_WUSB_0100( | ||
540 | rc, header, buf_size, _new_size); | ||
541 | break; | ||
542 | |||
543 | case UWB_RC_EVT_DRP: | ||
544 | core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100); | ||
545 | offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength); | ||
546 | event_size = hwarc_get_event_size(rc, rceb, core_size, | ||
547 | offset, buf_size); | ||
548 | if (event_size < 0) | ||
549 | goto out; | ||
550 | *_real_size = event_size; | ||
551 | result = hwarc_filter_evt_drp_WUSB_0100(rc, header, | ||
552 | buf_size, _new_size); | ||
553 | break; | ||
554 | |||
555 | default: | ||
556 | break; | ||
557 | } | ||
558 | out: | ||
559 | return result; | ||
560 | } | ||
561 | |||
562 | /** | ||
563 | * Filter data from WUSB device to WHCI driver | ||
564 | * | ||
565 | * @header: incoming event | ||
566 | * @buf_size: size of buffer in which event arrived | ||
567 | * @_real_size: actual size of event in the buffer | ||
568 | * @_new_size: size of event after filtered | ||
569 | * | ||
570 | * Filter events based on which protocol the device supports. The WUSB | ||
571 | * errata should be the same as WHCI 0.95 so we do not filter that here - | ||
572 | * only WUSB 1.0. | ||
573 | * | ||
574 | * If we don't handle it, we return -ENOANO (why the weird error code? | ||
575 | * well, so if I get it, I can pinpoint in the code that raised | ||
576 | * it...after all, not too many places use the higher error codes). | ||
577 | */ | ||
578 | static | ||
579 | int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header, | ||
580 | const size_t buf_size, size_t *_real_size, | ||
581 | size_t *_new_size) | ||
582 | { | ||
583 | int result = -ENOANO; | ||
584 | if (rc->version == 0x0100) | ||
585 | result = hwarc_filter_event_WUSB_0100( | ||
586 | rc, header, buf_size, _real_size, _new_size); | ||
587 | return result; | ||
588 | } | ||
589 | |||
590 | |||
591 | /** | ||
592 | * Execute an UWB RC command on HWA | ||
593 | * | ||
594 | * @rc: Instance of a Radio Controller that is a HWA | ||
595 | * @cmd: Buffer containing the RCCB and payload to execute | ||
596 | * @cmd_size: Size of the command buffer. | ||
597 | * | ||
598 | * NOTE: rc's mutex has to be locked | ||
599 | */ | ||
600 | static | ||
601 | int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size) | ||
602 | { | ||
603 | struct hwarc *hwarc = uwb_rc->priv; | ||
604 | return usb_control_msg( | ||
605 | hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0), | ||
606 | WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
607 | 0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber, | ||
608 | (void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */); | ||
609 | } | ||
610 | |||
611 | static | ||
612 | int hwarc_reset(struct uwb_rc *uwb_rc) | ||
613 | { | ||
614 | struct hwarc *hwarc = uwb_rc->priv; | ||
615 | return usb_reset_device(hwarc->usb_dev); | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * Callback for the notification and event endpoint | ||
620 | * | ||
621 | * Checks that everything is fine and then passes the read data to | ||
622 | * the notification/event handling mechanism (neh). | ||
623 | */ | ||
624 | static | ||
625 | void hwarc_neep_cb(struct urb *urb) | ||
626 | { | ||
627 | struct hwarc *hwarc = urb->context; | ||
628 | struct usb_interface *usb_iface = hwarc->usb_iface; | ||
629 | struct device *dev = &usb_iface->dev; | ||
630 | int result; | ||
631 | |||
632 | switch (result = urb->status) { | ||
633 | case 0: | ||
634 | d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n", | ||
635 | urb->status, (size_t)urb->actual_length); | ||
636 | uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, | ||
637 | urb->actual_length); | ||
638 | break; | ||
639 | case -ECONNRESET: /* Not an error, but a controlled situation; */ | ||
640 | case -ENOENT: /* (we killed the URB)...so, no broadcast */ | ||
641 | d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status); | ||
642 | goto out; | ||
643 | case -ESHUTDOWN: /* going away! */ | ||
644 | d_printf(2, dev, "NEEP: URB down %d\n", urb->status); | ||
645 | goto out; | ||
646 | default: /* On general errors, retry unless it gets ugly */ | ||
647 | if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, | ||
648 | EDC_ERROR_TIMEFRAME)) | ||
649 | goto error_exceeded; | ||
650 | dev_err(dev, "NEEP: URB error %d\n", urb->status); | ||
651 | } | ||
652 | result = usb_submit_urb(urb, GFP_ATOMIC); | ||
653 | d_printf(3, dev, "NEEP: submit %d\n", result); | ||
654 | if (result < 0) { | ||
655 | dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", | ||
656 | result); | ||
657 | goto error; | ||
658 | } | ||
659 | out: | ||
660 | return; | ||
661 | |||
662 | error_exceeded: | ||
663 | dev_err(dev, "NEEP: URB max acceptable errors " | ||
664 | "exceeded, resetting device\n"); | ||
665 | error: | ||
666 | uwb_rc_neh_error(hwarc->uwb_rc, result); | ||
667 | uwb_rc_reset_all(hwarc->uwb_rc); | ||
668 | return; | ||
669 | } | ||
670 | |||
671 | static void hwarc_init(struct hwarc *hwarc) | ||
672 | { | ||
673 | edc_init(&hwarc->neep_edc); | ||
674 | } | ||
675 | |||
676 | /** | ||
677 | * Initialize the notification/event endpoint stuff | ||
678 | * | ||
679 | * Note this is effectively a parallel thread; it knows that | ||
680 | * hwarc->uwb_rc always exists because the existence of a 'hwarc' | ||
681 | * means that there is a reference on the hwarc->uwb_rc (see | ||
682 | * _probe()), and thus _neep_cb() can execute safely. | ||
683 | */ | ||
684 | static int hwarc_neep_init(struct uwb_rc *rc) | ||
685 | { | ||
686 | struct hwarc *hwarc = rc->priv; | ||
687 | struct usb_interface *iface = hwarc->usb_iface; | ||
688 | struct usb_device *usb_dev = interface_to_usbdev(iface); | ||
689 | struct device *dev = &iface->dev; | ||
690 | int result; | ||
691 | struct usb_endpoint_descriptor *epd; | ||
692 | |||
693 | epd = &iface->cur_altsetting->endpoint[0].desc; | ||
694 | hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL); | ||
695 | if (hwarc->rd_buffer == NULL) { | ||
696 | dev_err(dev, "Unable to allocate notification's read buffer\n"); | ||
697 | goto error_rd_buffer; | ||
698 | } | ||
699 | hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
700 | if (hwarc->neep_urb == NULL) { | ||
701 | dev_err(dev, "Unable to allocate notification URB\n"); | ||
702 | goto error_urb_alloc; | ||
703 | } | ||
704 | usb_fill_int_urb(hwarc->neep_urb, usb_dev, | ||
705 | usb_rcvintpipe(usb_dev, epd->bEndpointAddress), | ||
706 | hwarc->rd_buffer, PAGE_SIZE, | ||
707 | hwarc_neep_cb, hwarc, epd->bInterval); | ||
708 | result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC); | ||
709 | if (result < 0) { | ||
710 | dev_err(dev, "Cannot submit notification URB: %d\n", result); | ||
711 | goto error_neep_submit; | ||
712 | } | ||
713 | return 0; | ||
714 | |||
715 | error_neep_submit: | ||
716 | usb_free_urb(hwarc->neep_urb); | ||
717 | error_urb_alloc: | ||
718 | free_page((unsigned long)hwarc->rd_buffer); | ||
719 | error_rd_buffer: | ||
720 | return -ENOMEM; | ||
721 | } | ||
722 | |||
723 | |||
724 | /** Clean up all the notification endpoint resources */ | ||
725 | static void hwarc_neep_release(struct uwb_rc *rc) | ||
726 | { | ||
727 | struct hwarc *hwarc = rc->priv; | ||
728 | |||
729 | usb_kill_urb(hwarc->neep_urb); | ||
730 | usb_free_urb(hwarc->neep_urb); | ||
731 | free_page((unsigned long)hwarc->rd_buffer); | ||
732 | } | ||
733 | |||
734 | /** | ||
735 | * Get the version from class-specific descriptor | ||
736 | * | ||
737 | * NOTE: this descriptor comes with the big bundled configuration | ||
738 | * descriptor that includes the interfaces' and endpoints', so | ||
739 | * we just look for it in the cached copy kept by the USB stack. | ||
740 | * | ||
741 | * NOTE2: We convert LE fields to CPU order. | ||
742 | */ | ||
743 | static int hwarc_get_version(struct uwb_rc *rc) | ||
744 | { | ||
745 | int result; | ||
746 | |||
747 | struct hwarc *hwarc = rc->priv; | ||
748 | struct uwb_rc_control_intf_class_desc *descr; | ||
749 | struct device *dev = &rc->uwb_dev.dev; | ||
750 | struct usb_device *usb_dev = hwarc->usb_dev; | ||
751 | char *itr; | ||
752 | struct usb_descriptor_header *hdr; | ||
753 | size_t itr_size, actconfig_idx; | ||
754 | u16 version; | ||
755 | |||
756 | actconfig_idx = (usb_dev->actconfig - usb_dev->config) / | ||
757 | sizeof(usb_dev->config[0]); | ||
758 | itr = usb_dev->rawdescriptors[actconfig_idx]; | ||
759 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); | ||
760 | while (itr_size >= sizeof(*hdr)) { | ||
761 | hdr = (struct usb_descriptor_header *) itr; | ||
762 | d_printf(3, dev, "Extra device descriptor: " | ||
763 | "type %02x/%u bytes @ %zu (%zu left)\n", | ||
764 | hdr->bDescriptorType, hdr->bLength, | ||
765 | (itr - usb_dev->rawdescriptors[actconfig_idx]), | ||
766 | itr_size); | ||
767 | if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) | ||
768 | goto found; | ||
769 | itr += hdr->bLength; | ||
770 | itr_size -= hdr->bLength; | ||
771 | } | ||
772 | dev_err(dev, "cannot find Radio Control Interface Class descriptor\n"); | ||
773 | return -ENODEV; | ||
774 | |||
775 | found: | ||
776 | result = -EINVAL; | ||
777 | if (hdr->bLength > itr_size) { /* is it available? */ | ||
778 | dev_err(dev, "incomplete Radio Control Interface Class " | ||
779 | "descriptor (%zu bytes left, %u needed)\n", | ||
780 | itr_size, hdr->bLength); | ||
781 | goto error; | ||
782 | } | ||
783 | if (hdr->bLength < sizeof(*descr)) { | ||
784 | dev_err(dev, "short Radio Control Interface Class " | ||
785 | "descriptor\n"); | ||
786 | goto error; | ||
787 | } | ||
788 | descr = (struct uwb_rc_control_intf_class_desc *) hdr; | ||
789 | /* Make LE fields CPU order */ | ||
790 | version = __le16_to_cpu(descr->bcdRCIVersion); | ||
791 | if (version != 0x0100) { | ||
792 | dev_err(dev, "Device reports protocol version 0x%04x. We " | ||
793 | "do not support that. \n", version); | ||
794 | result = -EINVAL; | ||
795 | goto error; | ||
796 | } | ||
797 | rc->version = version; | ||
798 | d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n", | ||
799 | rc->version); | ||
800 | result = 0; | ||
801 | error: | ||
802 | return result; | ||
803 | } | ||
804 | |||
805 | /* | ||
806 | * By creating a 'uwb_rc', we have a reference on it -- that reference | ||
807 | * is the one we drop when we disconnect. | ||
808 | * | ||
809 | * No need to switch altsettings; according to WUSB1.0[8.6.1.1], there | ||
810 | * is only one altsetting allowed. | ||
811 | */ | ||
812 | static int hwarc_probe(struct usb_interface *iface, | ||
813 | const struct usb_device_id *id) | ||
814 | { | ||
815 | int result; | ||
816 | struct uwb_rc *uwb_rc; | ||
817 | struct hwarc *hwarc; | ||
818 | struct device *dev = &iface->dev; | ||
819 | |||
820 | result = -ENOMEM; | ||
821 | uwb_rc = uwb_rc_alloc(); | ||
822 | if (uwb_rc == NULL) { | ||
823 | dev_err(dev, "unable to allocate RC instance\n"); | ||
824 | goto error_rc_alloc; | ||
825 | } | ||
826 | hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL); | ||
827 | if (hwarc == NULL) { | ||
828 | dev_err(dev, "unable to allocate HWA RC instance\n"); | ||
829 | goto error_alloc; | ||
830 | } | ||
831 | hwarc_init(hwarc); | ||
832 | hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface)); | ||
833 | hwarc->usb_iface = usb_get_intf(iface); | ||
834 | hwarc->uwb_rc = uwb_rc; | ||
835 | |||
836 | uwb_rc->owner = THIS_MODULE; | ||
837 | uwb_rc->start = hwarc_neep_init; | ||
838 | uwb_rc->stop = hwarc_neep_release; | ||
839 | uwb_rc->cmd = hwarc_cmd; | ||
840 | uwb_rc->reset = hwarc_reset; | ||
841 | if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) { | ||
842 | uwb_rc->filter_cmd = NULL; | ||
843 | uwb_rc->filter_event = NULL; | ||
844 | } else { | ||
845 | uwb_rc->filter_cmd = hwarc_filter_cmd; | ||
846 | uwb_rc->filter_event = hwarc_filter_event; | ||
847 | } | ||
848 | |||
849 | result = uwb_rc_add(uwb_rc, dev, hwarc); | ||
850 | if (result < 0) | ||
851 | goto error_rc_add; | ||
852 | result = hwarc_get_version(uwb_rc); | ||
853 | if (result < 0) { | ||
854 | dev_err(dev, "cannot retrieve version of RC \n"); | ||
855 | goto error_get_version; | ||
856 | } | ||
857 | usb_set_intfdata(iface, hwarc); | ||
858 | return 0; | ||
859 | |||
860 | error_get_version: | ||
861 | uwb_rc_rm(uwb_rc); | ||
862 | error_rc_add: | ||
863 | usb_put_intf(iface); | ||
864 | usb_put_dev(hwarc->usb_dev); | ||
865 | error_alloc: | ||
866 | uwb_rc_put(uwb_rc); | ||
867 | error_rc_alloc: | ||
868 | return result; | ||
869 | } | ||
870 | |||
871 | static void hwarc_disconnect(struct usb_interface *iface) | ||
872 | { | ||
873 | struct hwarc *hwarc = usb_get_intfdata(iface); | ||
874 | struct uwb_rc *uwb_rc = hwarc->uwb_rc; | ||
875 | |||
876 | usb_set_intfdata(hwarc->usb_iface, NULL); | ||
877 | uwb_rc_rm(uwb_rc); | ||
878 | usb_put_intf(hwarc->usb_iface); | ||
879 | usb_put_dev(hwarc->usb_dev); | ||
880 | d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc); | ||
881 | kfree(hwarc); | ||
882 | uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ | ||
883 | } | ||
884 | |||
885 | /** USB device ID's that we handle */ | ||
886 | static struct usb_device_id hwarc_id_table[] = { | ||
887 | /* D-Link DUB-1210 */ | ||
888 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02), | ||
889 | .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, | ||
890 | /* Intel i1480 (using firmware 1.3PA2-20070828) */ | ||
891 | { USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02), | ||
892 | .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, | ||
893 | /* Generic match for the Radio Control interface */ | ||
894 | { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), }, | ||
895 | { }, | ||
896 | }; | ||
897 | MODULE_DEVICE_TABLE(usb, hwarc_id_table); | ||
898 | |||
899 | static struct usb_driver hwarc_driver = { | ||
900 | .name = "hwa-rc", | ||
901 | .probe = hwarc_probe, | ||
902 | .disconnect = hwarc_disconnect, | ||
903 | .id_table = hwarc_id_table, | ||
904 | }; | ||
905 | |||
906 | static int __init hwarc_driver_init(void) | ||
907 | { | ||
908 | int result; | ||
909 | result = usb_register(&hwarc_driver); | ||
910 | if (result < 0) | ||
911 | printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n", | ||
912 | result); | ||
913 | return result; | ||
914 | |||
915 | } | ||
916 | module_init(hwarc_driver_init); | ||
917 | |||
918 | static void __exit hwarc_driver_exit(void) | ||
919 | { | ||
920 | usb_deregister(&hwarc_driver); | ||
921 | } | ||
922 | module_exit(hwarc_driver_exit); | ||
923 | |||
924 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
925 | MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver"); | ||
926 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/uwb/i1480/Makefile b/drivers/uwb/i1480/Makefile new file mode 100644 index 000000000000..212bbc7d4c32 --- /dev/null +++ b/drivers/uwb/i1480/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o | ||
2 | obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp/ | ||
diff --git a/drivers/uwb/i1480/dfu/Makefile b/drivers/uwb/i1480/dfu/Makefile new file mode 100644 index 000000000000..bd1b9f25424c --- /dev/null +++ b/drivers/uwb/i1480/dfu/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o | ||
2 | |||
3 | i1480-dfu-usb-objs := \ | ||
4 | dfu.o \ | ||
5 | mac.o \ | ||
6 | phy.o \ | ||
7 | usb.o | ||
8 | |||
9 | |||
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c new file mode 100644 index 000000000000..9097b3b30385 --- /dev/null +++ b/drivers/uwb/i1480/dfu/dfu.c | |||
@@ -0,0 +1,217 @@ | |||
1 | /* | ||
2 | * Intel Wireless UWB Link 1480 | ||
3 | * Main driver | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * Common code for firmware upload used by the USB and PCI version; | ||
24 | * i1480_fw_upload() takes a device descriptor and uses the function | ||
25 | * pointers it provides to upload firmware and prepare the PHY. | ||
26 | * | ||
27 | * As well, provides common functions used by the rest of the code. | ||
28 | */ | ||
29 | #include "i1480-dfu.h" | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/pci.h> | ||
33 | #include <linux/device.h> | ||
34 | #include <linux/uwb.h> | ||
35 | #include <linux/random.h> | ||
36 | |||
37 | #define D_LOCAL 0 | ||
38 | #include <linux/uwb/debug.h> | ||
39 | |||
40 | /** | ||
41 | * i1480_rceb_check - Check RCEB for expected field values | ||
42 | * @i1480: pointer to device for which RCEB is being checked | ||
43 | * @rceb: RCEB being checked | ||
44 | * @cmd: which command the RCEB is related to | ||
45 | * @context: expected context | ||
46 | * @expected_type: expected event type | ||
47 | * @expected_event: expected event | ||
48 | * | ||
49 | * If @cmd is NULL, do not print error messages, but still return an error | ||
50 | * code. | ||
51 | * | ||
52 | * Return 0 if @rceb matches the expected values, -EINVAL otherwise. | ||
53 | */ | ||
54 | int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, | ||
55 | const char *cmd, u8 context, u8 expected_type, | ||
56 | unsigned expected_event) | ||
57 | { | ||
58 | int result = 0; | ||
59 | struct device *dev = i1480->dev; | ||
60 | if (rceb->bEventContext != context) { | ||
61 | if (cmd) | ||
62 | dev_err(dev, "%s: unexpected context id 0x%02x " | ||
63 | "(expected 0x%02x)\n", cmd, | ||
64 | rceb->bEventContext, context); | ||
65 | result = -EINVAL; | ||
66 | } | ||
67 | if (rceb->bEventType != expected_type) { | ||
68 | if (cmd) | ||
69 | dev_err(dev, "%s: unexpected event type 0x%02x " | ||
70 | "(expected 0x%02x)\n", cmd, | ||
71 | rceb->bEventType, expected_type); | ||
72 | result = -EINVAL; | ||
73 | } | ||
74 | if (le16_to_cpu(rceb->wEvent) != expected_event) { | ||
75 | if (cmd) | ||
76 | dev_err(dev, "%s: unexpected event 0x%04x " | ||
77 | "(expected 0x%04x)\n", cmd, | ||
78 | le16_to_cpu(rceb->wEvent), expected_event); | ||
79 | result = -EINVAL; | ||
80 | } | ||
81 | return result; | ||
82 | } | ||
83 | EXPORT_SYMBOL_GPL(i1480_rceb_check); | ||
84 | |||
85 | |||
86 | /** | ||
87 | * Execute a Radio Control Command | ||
88 | * | ||
89 | * Command data has to be in i1480->cmd_buf. | ||
90 | * | ||
91 | * @returns size of the reply data filled in i1480->evt_buf or < 0 errno | ||
92 | * code on error. | ||
93 | */ | ||
94 | ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, | ||
95 | size_t reply_size) | ||
96 | { | ||
97 | ssize_t result; | ||
98 | struct uwb_rceb *reply = i1480->evt_buf; | ||
99 | struct uwb_rccb *cmd = i1480->cmd_buf; | ||
100 | u16 expected_event = reply->wEvent; | ||
101 | u8 expected_type = reply->bEventType; | ||
102 | u8 context; | ||
103 | |||
104 | d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); | ||
105 | init_completion(&i1480->evt_complete); | ||
106 | i1480->evt_result = -EINPROGRESS; | ||
107 | do { | ||
108 | get_random_bytes(&context, 1); | ||
109 | } while (context == 0x00 || context == 0xff); | ||
110 | cmd->bCommandContext = context; | ||
111 | result = i1480->cmd(i1480, cmd_name, cmd_size); | ||
112 | if (result < 0) | ||
113 | goto error; | ||
114 | /* wait for the callback to report an event was received */ | ||
115 | result = wait_for_completion_interruptible_timeout( | ||
116 | &i1480->evt_complete, HZ); | ||
117 | if (result == 0) { | ||
118 | result = -ETIMEDOUT; | ||
119 | goto error; | ||
120 | } | ||
121 | if (result < 0) | ||
122 | goto error; | ||
123 | result = i1480->evt_result; | ||
124 | if (result < 0) { | ||
125 | dev_err(i1480->dev, "%s: command reply reception failed: %zd\n", | ||
126 | cmd_name, result); | ||
127 | goto error; | ||
128 | } | ||
129 | /* | ||
130 | * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a | ||
131 | * spurious notification after firmware is downloaded. So check whether | ||
132 | * the received RCEB is such a notification before assuming that the | ||
133 | * command has failed. | ||
134 | */ | ||
135 | if (i1480_rceb_check(i1480, i1480->evt_buf, NULL, | ||
136 | 0, 0xfd, 0x0022) == 0) { | ||
137 | /* Now wait for the actual RCEB for this command. */ | ||
138 | result = i1480->wait_init_done(i1480); | ||
139 | if (result < 0) | ||
140 | goto error; | ||
141 | result = i1480->evt_result; | ||
142 | } | ||
143 | if (result != reply_size) { | ||
144 | dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n", | ||
145 | cmd_name, result, reply_size); | ||
146 | result = -EINVAL; | ||
147 | goto error; | ||
148 | } | ||
149 | /* Verify we got the right event in response */ | ||
150 | result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, | ||
151 | expected_type, expected_event); | ||
152 | error: | ||
153 | d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n", | ||
154 | i1480, cmd_name, cmd_size, result); | ||
155 | return result; | ||
156 | } | ||
157 | EXPORT_SYMBOL_GPL(i1480_cmd); | ||
158 | |||
159 | |||
160 | static | ||
161 | int i1480_print_state(struct i1480 *i1480) | ||
162 | { | ||
163 | int result; | ||
164 | u32 *buf = (u32 *) i1480->cmd_buf; | ||
165 | |||
166 | result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf)); | ||
167 | if (result < 0) { | ||
168 | dev_err(i1480->dev, "cannot read U & L states: %d\n", result); | ||
169 | goto error; | ||
170 | } | ||
171 | dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]); | ||
172 | error: | ||
173 | return result; | ||
174 | } | ||
175 | |||
176 | |||
177 | /* | ||
178 | * PCI probe, firmware uploader | ||
179 | * | ||
180 | * _mac_fw_upload() will call rc_setup(), which needs an rc_release(). | ||
181 | */ | ||
182 | int i1480_fw_upload(struct i1480 *i1480) | ||
183 | { | ||
184 | int result; | ||
185 | |||
186 | result = i1480_pre_fw_upload(i1480); /* PHY pre fw */ | ||
187 | if (result < 0 && result != -ENOENT) { | ||
188 | i1480_print_state(i1480); | ||
189 | goto error; | ||
190 | } | ||
191 | result = i1480_mac_fw_upload(i1480); /* MAC fw */ | ||
192 | if (result < 0) { | ||
193 | if (result == -ENOENT) | ||
194 | dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n", | ||
195 | i1480->mac_fw_name); | ||
196 | else | ||
197 | i1480_print_state(i1480); | ||
198 | goto error; | ||
199 | } | ||
200 | result = i1480_phy_fw_upload(i1480); /* PHY fw */ | ||
201 | if (result < 0 && result != -ENOENT) { | ||
202 | i1480_print_state(i1480); | ||
203 | goto error_rc_release; | ||
204 | } | ||
205 | /* | ||
206 | * FIXME: find some reliable way to check whether firmware is running | ||
207 | * properly. Maybe use some standard request that has no side effects? | ||
208 | */ | ||
209 | dev_info(i1480->dev, "firmware uploaded successfully\n"); | ||
210 | error_rc_release: | ||
211 | if (i1480->rc_release) | ||
212 | i1480->rc_release(i1480); | ||
213 | result = 0; | ||
214 | error: | ||
215 | return result; | ||
216 | } | ||
217 | EXPORT_SYMBOL_GPL(i1480_fw_upload); | ||
diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/uwb/i1480/dfu/i1480-dfu.h new file mode 100644 index 000000000000..46f45e800f36 --- /dev/null +++ b/drivers/uwb/i1480/dfu/i1480-dfu.h | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | * i1480 Device Firmware Upload | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Intel Corporation | ||
5 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * This driver is the firmware uploader for the Intel Wireless UWB | ||
23 | * Link 1480 device (both in the USB and PCI incarnations). | ||
24 | * | ||
25 | * The process is quite simple: we stop the device, write the firmware | ||
26 | * to its memory and then restart it. Wait for the device to let us | ||
27 | * know it is done booting firmware. Ready. | ||
28 | * | ||
29 | * We might have to upload before or after a phy firmware (which might | ||
30 | * be done in two methods, using a normal firmware image or through | ||
31 | * the MPI port). | ||
32 | * | ||
33 | * Because USB and PCI use common methods, we just make ops out of the | ||
34 | * common operations (read, write, wait_init_done and cmd) and | ||
35 | * implement them in usb.c and pci.c. | ||
36 | * | ||
37 | * The flow is (some parts omitted): | ||
38 | * | ||
39 | * i1480_{usb,pci}_probe() On enumerate/discovery | ||
40 | * i1480_fw_upload() | ||
41 | * i1480_pre_fw_upload() | ||
42 | * __mac_fw_upload() | ||
43 | * fw_hdrs_load() | ||
44 | * mac_fw_hdrs_push() | ||
45 | * i1480->write() [i1480_{usb,pci}_write()] | ||
46 | * i1480_fw_cmp() | ||
47 | * i1480->read() [i1480_{usb,pci}_read()] | ||
48 | * i1480_mac_fw_upload() | ||
49 | * __mac_fw_upload() | ||
50 | * i1480->rc_setup() | ||
51 | * i1480->wait_init_done() | ||
52 | * i1480_cmd_reset() | ||
53 | * i1480->cmd() [i1480_{usb,pci}_cmd()] | ||
54 | * ... | ||
55 | * i1480_phy_fw_upload() | ||
56 | * request_firmware() | ||
57 | * i1480_mpi_write() | ||
58 | * i1480->cmd() [i1480_{usb,pci}_cmd()] | ||
59 | * | ||
60 | * Once the probe function enumerates the device and uploads the | ||
61 | * firmware, we just exit with -ENODEV, as we don't really want to | ||
62 | * attach to the device. | ||
63 | */ | ||
64 | #ifndef __i1480_DFU_H__ | ||
65 | #define __i1480_DFU_H__ | ||
66 | |||
67 | #include <linux/uwb/spec.h> | ||
68 | #include <linux/types.h> | ||
69 | #include <linux/completion.h> | ||
70 | |||
71 | #define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018)) | ||
72 | |||
73 | #if i1480_FW > 0x00000302 | ||
74 | #define i1480_RCEB_EXTENDED | ||
75 | #endif | ||
76 | |||
77 | struct uwb_rccb; | ||
78 | struct uwb_rceb; | ||
79 | |||
80 | /* | ||
81 | * Common firmware upload handlers | ||
82 | * | ||
83 | * Normally you embed this struct in another one specific to your hw. | ||
84 | * | ||
85 | * @write Write to device's memory from buffer. | ||
86 | * @read Read from device's memory to i1480->evt_buf. | ||
87 | * @rc_setup Setup device after basic firmware is uploaded | ||
88 | * @wait_init_done | ||
89 | * Wait for the device to send a notification saying init | ||
90 | * is done. | ||
91 | * @cmd FOP for issuing the command to the hardware. The | ||
92 | * command data is contained in i1480->cmd_buf and the size | ||
93 | * is supplied as an argument. The command reply is put | ||
94 | * in i1480->evt_buf and the size in i1480->evt_result (or if | ||
95 | * an error, a < 0 errno code). | ||
96 | * | ||
97 | * @cmd_buf Memory buffer used to send commands to the device. | ||
98 | * Allocated by the upper layers i1480_fw_upload(). | ||
99 | * Size has to be @buf_size. | ||
100 | * @evt_buf Memory buffer used to place the async notifications | ||
101 | * received by the hw. Allocated by the upper layers | ||
102 | * i1480_fw_upload(). | ||
103 | * Size has to be @buf_size. | ||
104 | * @cmd_complete | ||
105 | * Low level driver uses this to notify code waiting for | ||
106 | * an event that the event has arrived and data is in | ||
107 | * i1480->evt_buf (and size/result in i1480->evt_result). | ||
108 | * @hw_rev | ||
109 | * Use this value to activate dfu code to support new revisions | ||
110 | * of hardware. i1480_init() sets this to a default value. | ||
111 | * It should be updated by the USB and PCI code. | ||
112 | */ | ||
113 | struct i1480 { | ||
114 | struct device *dev; | ||
115 | |||
116 | int (*write)(struct i1480 *, u32 addr, const void *, size_t); | ||
117 | int (*read)(struct i1480 *, u32 addr, size_t); | ||
118 | int (*rc_setup)(struct i1480 *); | ||
119 | void (*rc_release)(struct i1480 *); | ||
120 | int (*wait_init_done)(struct i1480 *); | ||
121 | int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size); | ||
122 | const char *pre_fw_name; | ||
123 | const char *mac_fw_name; | ||
124 | const char *mac_fw_name_deprecate; /* FIXME: Will go away */ | ||
125 | const char *phy_fw_name; | ||
126 | u8 hw_rev; | ||
127 | |||
128 | size_t buf_size; /* size of both evt_buf and cmd_buf */ | ||
129 | void *evt_buf, *cmd_buf; | ||
130 | ssize_t evt_result; | ||
131 | struct completion evt_complete; | ||
132 | }; | ||
133 | |||
134 | static inline | ||
135 | void i1480_init(struct i1480 *i1480) | ||
136 | { | ||
137 | i1480->hw_rev = 1; | ||
138 | init_completion(&i1480->evt_complete); | ||
139 | } | ||
140 | |||
141 | extern int i1480_fw_upload(struct i1480 *); | ||
142 | extern int i1480_pre_fw_upload(struct i1480 *); | ||
143 | extern int i1480_mac_fw_upload(struct i1480 *); | ||
144 | extern int i1480_phy_fw_upload(struct i1480 *); | ||
145 | extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t); | ||
146 | extern int i1480_rceb_check(const struct i1480 *, | ||
147 | const struct uwb_rceb *, const char *, u8, | ||
148 | u8, unsigned); | ||
149 | |||
150 | enum { | ||
151 | /* Vendor specific command type */ | ||
152 | i1480_CET_VS1 = 0xfd, | ||
153 | /* i1480 commands */ | ||
154 | i1480_CMD_SET_IP_MAS = 0x000e, | ||
155 | i1480_CMD_GET_MAC_PHY_INFO = 0x0003, | ||
156 | i1480_CMD_MPI_WRITE = 0x000f, | ||
157 | i1480_CMD_MPI_READ = 0x0010, | ||
158 | /* i1480 events */ | ||
159 | #if i1480_FW > 0x00000302 | ||
160 | i1480_EVT_CONFIRM = 0x0002, | ||
161 | i1480_EVT_RM_INIT_DONE = 0x0101, | ||
162 | i1480_EVT_DEV_ADD = 0x0103, | ||
163 | i1480_EVT_DEV_RM = 0x0104, | ||
164 | i1480_EVT_DEV_ID_CHANGE = 0x0105, | ||
165 | i1480_EVT_GET_MAC_PHY_INFO = i1480_CMD_GET_MAC_PHY_INFO, | ||
166 | #else | ||
167 | i1480_EVT_CONFIRM = 0x0002, | ||
168 | i1480_EVT_RM_INIT_DONE = 0x0101, | ||
169 | i1480_EVT_DEV_ADD = 0x0103, | ||
170 | i1480_EVT_DEV_RM = 0x0104, | ||
171 | i1480_EVT_DEV_ID_CHANGE = 0x0105, | ||
172 | i1480_EVT_GET_MAC_PHY_INFO = i1480_EVT_CONFIRM, | ||
173 | #endif | ||
174 | }; | ||
175 | |||
176 | |||
177 | struct i1480_evt_confirm { | ||
178 | struct uwb_rceb rceb; | ||
179 | #ifdef i1480_RCEB_EXTENDED | ||
180 | __le16 wParamLength; | ||
181 | #endif | ||
182 | u8 bResultCode; | ||
183 | } __attribute__((packed)); | ||
184 | |||
185 | |||
186 | struct i1480_rceb { | ||
187 | struct uwb_rceb rceb; | ||
188 | #ifdef i1480_RCEB_EXTENDED | ||
189 | __le16 wParamLength; | ||
190 | #endif | ||
191 | } __attribute__((packed)); | ||
192 | |||
193 | |||
194 | /** | ||
195 | * Get MAC & PHY Information confirm event structure | ||
196 | * | ||
197 | * Confirm event returned by the command. | ||
198 | */ | ||
199 | struct i1480_evt_confirm_GMPI { | ||
200 | #if i1480_FW > 0x00000302 | ||
201 | struct uwb_rceb rceb; | ||
202 | __le16 wParamLength; | ||
203 | __le16 status; | ||
204 | u8 mac_addr[6]; /* EUI-64 bit IEEE address [still 8 bytes?] */ | ||
205 | u8 dev_addr[2]; | ||
206 | __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */ | ||
207 | u8 hw_rev; | ||
208 | u8 phy_vendor; | ||
209 | u8 phy_rev; /* major v = >> 8; minor = v & 0xff */ | ||
210 | __le16 mac_caps; | ||
211 | u8 phy_caps[3]; | ||
212 | u8 key_stores; | ||
213 | __le16 mcast_addr_stores; | ||
214 | u8 sec_mode_supported; | ||
215 | #else | ||
216 | struct uwb_rceb rceb; | ||
217 | u8 status; | ||
218 | u8 mac_addr[8]; /* EUI-64 bit IEEE address [still 8 bytes?] */ | ||
219 | u8 dev_addr[2]; | ||
220 | __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */ | ||
221 | __le16 phy_fw_rev; /* major v = >> 8; minor = v & 0xff */ | ||
222 | __le16 mac_caps; | ||
223 | u8 phy_caps; | ||
224 | u8 key_stores; | ||
225 | __le16 mcast_addr_stores; | ||
226 | u8 sec_mode_supported; | ||
227 | #endif | ||
228 | } __attribute__((packed)); | ||
229 | |||
230 | |||
231 | struct i1480_cmd_mpi_write { | ||
232 | struct uwb_rccb rccb; | ||
233 | __le16 size; | ||
234 | u8 data[]; | ||
235 | }; | ||
236 | |||
237 | |||
238 | struct i1480_cmd_mpi_read { | ||
239 | struct uwb_rccb rccb; | ||
240 | __le16 size; | ||
241 | struct { | ||
242 | u8 page, offset; | ||
243 | } __attribute__((packed)) data[]; | ||
244 | } __attribute__((packed)); | ||
245 | |||
246 | |||
247 | struct i1480_evt_mpi_read { | ||
248 | struct uwb_rceb rceb; | ||
249 | #ifdef i1480_RCEB_EXTENDED | ||
250 | __le16 wParamLength; | ||
251 | #endif | ||
252 | u8 bResultCode; | ||
253 | __le16 size; | ||
254 | struct { | ||
255 | u8 page, offset, value; | ||
256 | } __attribute__((packed)) data[]; | ||
257 | } __attribute__((packed)); | ||
258 | |||
259 | |||
260 | #endif /* #ifndef __i1480_DFU_H__ */ | ||
diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c new file mode 100644 index 000000000000..2e4d8f07c165 --- /dev/null +++ b/drivers/uwb/i1480/dfu/mac.c | |||
@@ -0,0 +1,527 @@ | |||
1 | /* | ||
2 | * Intel Wireless UWB Link 1480 | ||
3 | * MAC Firmware upload implementation | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * Implementation of the code for parsing the firmware file (extract | ||
24 | * the headers and binary code chunks) in the fw_*() functions. The | ||
25 | * code to upload pre and mac firmwares is the same, so it uses a | ||
26 | * common entry point in __mac_fw_upload(), which uses the i1480 | ||
27 | * function pointers to push the firmware to the device. | ||
28 | */ | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/firmware.h> | ||
31 | #include <linux/uwb.h> | ||
32 | #include "i1480-dfu.h" | ||
33 | |||
34 | #define D_LOCAL 0 | ||
35 | #include <linux/uwb/debug.h> | ||
36 | |||
37 | /* | ||
38 | * Descriptor for a continuous segment of MAC fw data | ||
39 | */ | ||
40 | struct fw_hdr { | ||
41 | unsigned long address; | ||
42 | size_t length; | ||
43 | const u32 *bin; | ||
44 | struct fw_hdr *next; | ||
45 | }; | ||
46 | |||
47 | |||
48 | /* Free a chain of firmware headers */ | ||
49 | static | ||
50 | void fw_hdrs_free(struct fw_hdr *hdr) | ||
51 | { | ||
52 | struct fw_hdr *next; | ||
53 | |||
54 | while (hdr) { | ||
55 | next = hdr->next; | ||
56 | kfree(hdr); | ||
57 | hdr = next; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | |||
62 | /* Fill a firmware header descriptor from a memory buffer */ | ||
63 | static | ||
64 | int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt, | ||
65 | const char *_data, const u32 *data_itr, const u32 *data_top) | ||
66 | { | ||
67 | size_t hdr_offset = (const char *) data_itr - _data; | ||
68 | size_t remaining_size = (void *) data_top - (void *) data_itr; | ||
69 | if (data_itr + 2 > data_top) { | ||
70 | dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at " | ||
71 | "offset %zu, limit %zu\n", | ||
72 | hdr_cnt, hdr_offset, | ||
73 | (const char *) data_itr + 2 - _data, | ||
74 | (const char *) data_top - _data); | ||
75 | return -EINVAL; | ||
76 | } | ||
77 | hdr->next = NULL; | ||
78 | hdr->address = le32_to_cpu(*data_itr++); | ||
79 | hdr->length = le32_to_cpu(*data_itr++); | ||
80 | hdr->bin = data_itr; | ||
81 | if (hdr->length > remaining_size) { | ||
82 | dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; " | ||
83 | "chunk too long (%zu bytes), only %zu left\n", | ||
84 | hdr_cnt, hdr_offset, hdr->length, remaining_size); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | |||
91 | /** | ||
92 | * Get a buffer where the firmware is supposed to be and create a | ||
93 | * chain of headers linking them together. | ||
94 | * | ||
95 | * @phdr: where to place the pointer to the first header (headers link | ||
96 | * to the next via the @hdr->next ptr); need to free the whole | ||
97 | * chain when done. | ||
98 | * | ||
99 | * @_data: Pointer to the data buffer. | ||
100 | * | ||
101 | * @_data_size: Size of the data buffer (bytes); data size has to be a | ||
102 | * multiple of 4. Function will fail if not. | ||
103 | * | ||
104 | * Goes over the whole binary blob; reads the first chunk and creates | ||
105 | * a fw hdr from it (which points to where the data is in @_data and | ||
106 | * the length of the chunk); then goes on to the next chunk until | ||
107 | * done. Each header is linked to the next. | ||
108 | */ | ||
109 | static | ||
110 | int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr, | ||
111 | const char *_data, size_t data_size) | ||
112 | { | ||
113 | int result; | ||
114 | unsigned hdr_cnt = 0; | ||
115 | u32 *data = (u32 *) _data, *data_itr, *data_top; | ||
116 | struct fw_hdr *hdr, **prev_hdr = phdr; | ||
117 | |||
118 | result = -EINVAL; | ||
119 | /* Check size is ok and pointer is aligned */ | ||
120 | if (data_size % sizeof(u32) != 0) | ||
121 | goto error; | ||
122 | if ((unsigned long) _data % sizeof(u16) != 0) | ||
123 | goto error; | ||
124 | *phdr = NULL; | ||
125 | data_itr = data; | ||
126 | data_top = (u32 *) (_data + data_size); | ||
127 | while (data_itr < data_top) { | ||
128 | result = -ENOMEM; | ||
129 | hdr = kmalloc(sizeof(*hdr), GFP_KERNEL); | ||
130 | if (hdr == NULL) { | ||
131 | dev_err(i1480->dev, "Cannot allocate fw header " | ||
132 | "for chunk #%u\n", hdr_cnt); | ||
133 | goto error_alloc; | ||
134 | } | ||
135 | result = fw_hdr_load(i1480, hdr, hdr_cnt, | ||
136 | _data, data_itr, data_top); | ||
137 | if (result < 0) | ||
138 | goto error_load; | ||
139 | data_itr += 2 + hdr->length; | ||
140 | *prev_hdr = hdr; | ||
141 | prev_hdr = &hdr->next; | ||
142 | hdr_cnt++; | ||
143 | }; | ||
144 | *prev_hdr = NULL; | ||
145 | return 0; | ||
146 | |||
147 | error_load: | ||
148 | kfree(hdr); | ||
149 | error_alloc: | ||
150 | fw_hdrs_free(*phdr); | ||
151 | error: | ||
152 | return result; | ||
153 | } | ||
154 | |||
155 | |||
156 | /** | ||
157 | * Compares a chunk of fw with one in the device's memory | ||
158 | * | ||
159 | * @i1480: Device instance | ||
160 | * @hdr: Pointer to the firmware chunk | ||
161 | * @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset | ||
162 | * where the difference was found (plus one). | ||
163 | * | ||
164 | * Kind of dirty and simplistic, but does the trick in both the PCI | ||
165 | * and USB version. We do a quick[er] memcmp(), and if it fails, we do | ||
166 | * a byte-by-byte to find the offset. | ||
167 | */ | ||
168 | static | ||
169 | ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr) | ||
170 | { | ||
171 | ssize_t result = 0; | ||
172 | u32 src_itr = 0, cnt; | ||
173 | size_t size = hdr->length*sizeof(hdr->bin[0]); | ||
174 | size_t chunk_size; | ||
175 | u8 *bin = (u8 *) hdr->bin; | ||
176 | |||
177 | while (size > 0) { | ||
178 | chunk_size = size < i1480->buf_size ? size : i1480->buf_size; | ||
179 | result = i1480->read(i1480, hdr->address + src_itr, chunk_size); | ||
180 | if (result < 0) { | ||
181 | dev_err(i1480->dev, "error reading for verification: " | ||
182 | "%zd\n", result); | ||
183 | goto error; | ||
184 | } | ||
185 | if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { | ||
186 | u8 *buf = i1480->cmd_buf; | ||
187 | d_printf(2, i1480->dev, | ||
188 | "original data @ %p + %u, %zu bytes\n", | ||
189 | bin, src_itr, result); | ||
190 | d_dump(4, i1480->dev, bin + src_itr, result); | ||
191 | for (cnt = 0; cnt < result; cnt++) | ||
192 | if (bin[src_itr + cnt] != buf[cnt]) { | ||
193 | dev_err(i1480->dev, "byte failed at " | ||
194 | "src_itr %u cnt %u [0x%02x " | ||
195 | "vs 0x%02x]\n", src_itr, cnt, | ||
196 | bin[src_itr + cnt], buf[cnt]); | ||
197 | result = src_itr + cnt + 1; | ||
198 | goto cmp_failed; | ||
199 | } | ||
200 | } | ||
201 | src_itr += result; | ||
202 | size -= result; | ||
203 | } | ||
204 | result = 0; | ||
205 | error: | ||
206 | cmp_failed: | ||
207 | return result; | ||
208 | } | ||
209 | |||
210 | |||
211 | /** | ||
212 | * Writes firmware headers to the device. | ||
213 | * | ||
214 | * @i1480: i1480 device instance | ||
215 | * @hdr: Processed firmware | ||
216 | * @returns: 0 if ok, < 0 errno on error. | ||
217 | */ | ||
218 | static | ||
219 | int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr, | ||
220 | const char *fw_name, const char *fw_tag) | ||
221 | { | ||
222 | struct device *dev = i1480->dev; | ||
223 | ssize_t result = 0; | ||
224 | struct fw_hdr *hdr_itr; | ||
225 | int verif_retry_count; | ||
226 | |||
227 | d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr); | ||
228 | /* Now, header by header, push them to the hw */ | ||
229 | for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { | ||
230 | verif_retry_count = 0; | ||
231 | retry: | ||
232 | dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n", | ||
233 | hdr_itr->length * sizeof(hdr_itr->bin[0]), | ||
234 | hdr_itr->address); | ||
235 | result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin, | ||
236 | hdr_itr->length*sizeof(hdr_itr->bin[0])); | ||
237 | if (result < 0) { | ||
238 | dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):" | ||
239 | " %zd\n", fw_tag, fw_name, | ||
240 | hdr_itr->length * sizeof(hdr_itr->bin[0]), | ||
241 | hdr_itr->address, result); | ||
242 | break; | ||
243 | } | ||
244 | result = i1480_fw_cmp(i1480, hdr_itr); | ||
245 | if (result < 0) { | ||
246 | dev_err(dev, "%s fw '%s': verification read " | ||
247 | "failed (%zuB @ 0x%lx): %zd\n", | ||
248 | fw_tag, fw_name, | ||
249 | hdr_itr->length * sizeof(hdr_itr->bin[0]), | ||
250 | hdr_itr->address, result); | ||
251 | break; | ||
252 | } | ||
253 | if (result > 0) { /* Offset where it failed + 1 */ | ||
254 | result--; | ||
255 | dev_err(dev, "%s fw '%s': WARNING: verification " | ||
256 | "failed at 0x%lx: retrying\n", | ||
257 | fw_tag, fw_name, hdr_itr->address + result); | ||
258 | if (++verif_retry_count < 3) | ||
259 | goto retry; /* write this block again! */ | ||
260 | dev_err(dev, "%s fw '%s': verification failed at 0x%lx: " | ||
261 | "tried %d times\n", fw_tag, fw_name, | ||
262 | hdr_itr->address + result, verif_retry_count); | ||
263 | result = -EINVAL; | ||
264 | break; | ||
265 | } | ||
266 | } | ||
267 | d_fnend(3, dev, "(%zd)\n", result); | ||
268 | return result; | ||
269 | } | ||
270 | |||
271 | |||
272 | /** Puts the device in firmware upload mode.*/ | ||
273 | static | ||
274 | int mac_fw_upload_enable(struct i1480 *i1480) | ||
275 | { | ||
276 | int result; | ||
277 | u32 reg = 0x800000c0; | ||
278 | u32 *buffer = (u32 *)i1480->cmd_buf; | ||
279 | |||
280 | if (i1480->hw_rev > 1) | ||
281 | reg = 0x8000d0d4; | ||
282 | result = i1480->read(i1480, reg, sizeof(u32)); | ||
283 | if (result < 0) | ||
284 | goto error_cmd; | ||
285 | *buffer &= ~i1480_FW_UPLOAD_MODE_MASK; | ||
286 | result = i1480->write(i1480, reg, buffer, sizeof(u32)); | ||
287 | if (result < 0) | ||
288 | goto error_cmd; | ||
289 | return 0; | ||
290 | error_cmd: | ||
291 | dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result); | ||
292 | return result; | ||
293 | } | ||
294 | |||
295 | |||
296 | /** Gets the device out of firmware upload mode. */ | ||
297 | static | ||
298 | int mac_fw_upload_disable(struct i1480 *i1480) | ||
299 | { | ||
300 | int result; | ||
301 | u32 reg = 0x800000c0; | ||
302 | u32 *buffer = (u32 *)i1480->cmd_buf; | ||
303 | |||
304 | if (i1480->hw_rev > 1) | ||
305 | reg = 0x8000d0d4; | ||
306 | result = i1480->read(i1480, reg, sizeof(u32)); | ||
307 | if (result < 0) | ||
308 | goto error_cmd; | ||
309 | *buffer |= i1480_FW_UPLOAD_MODE_MASK; | ||
310 | result = i1480->write(i1480, reg, buffer, sizeof(u32)); | ||
311 | if (result < 0) | ||
312 | goto error_cmd; | ||
313 | return 0; | ||
314 | error_cmd: | ||
315 | dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result); | ||
316 | return result; | ||
317 | } | ||
318 | |||
319 | |||
320 | |||
321 | /** | ||
322 | * Generic function for uploading a MAC firmware. | ||
323 | * | ||
324 | * @i1480: Device instance | ||
325 | * @fw_name: Name of firmware file to upload. | ||
326 | * @fw_tag: Name of the firmware type (for messages) | ||
327 | * [eg: MAC, PRE] | ||
328 | * @do_wait: Wait for device to emit initialization done message (0 | ||
329 | * for PRE fws, 1 for MAC fws). | ||
330 | * @returns: 0 if ok, < 0 errno on error. | ||
331 | */ | ||
332 | static | ||
333 | int __mac_fw_upload(struct i1480 *i1480, const char *fw_name, | ||
334 | const char *fw_tag) | ||
335 | { | ||
336 | int result; | ||
337 | const struct firmware *fw; | ||
338 | struct fw_hdr *fw_hdrs; | ||
339 | |||
340 | d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag); | ||
341 | result = request_firmware(&fw, fw_name, i1480->dev); | ||
342 | if (result < 0) /* Up to caller to complain on -ENOENT */ | ||
343 | goto out; | ||
344 | d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name); | ||
345 | result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); | ||
346 | if (result < 0) { | ||
347 | dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " | ||
348 | "file: %d\n", fw_tag, fw_name, result); | ||
349 | goto out_release; | ||
350 | } | ||
351 | result = mac_fw_upload_enable(i1480); | ||
352 | if (result < 0) | ||
353 | goto out_hdrs_release; | ||
354 | result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag); | ||
355 | mac_fw_upload_disable(i1480); | ||
356 | out_hdrs_release: | ||
357 | if (result >= 0) | ||
358 | dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name); | ||
359 | else | ||
360 | dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), " | ||
361 | "power cycle device\n", fw_tag, fw_name, result); | ||
362 | fw_hdrs_free(fw_hdrs); | ||
363 | out_release: | ||
364 | release_firmware(fw); | ||
365 | out: | ||
366 | d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag, | ||
367 | result); | ||
368 | return result; | ||
369 | } | ||
370 | |||
371 | |||
372 | /** | ||
373 | * Upload a pre-PHY firmware | ||
374 | * | ||
375 | */ | ||
376 | int i1480_pre_fw_upload(struct i1480 *i1480) | ||
377 | { | ||
378 | int result; | ||
379 | result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE"); | ||
380 | if (result == 0) | ||
381 | msleep(400); | ||
382 | return result; | ||
383 | } | ||
384 | |||
385 | |||
386 | /** | ||
387 | * Reset the MAC and PHY | ||
388 | * | ||
389 | * @i1480: Device's instance | ||
390 | * @returns: 0 if ok, < 0 errno code on error | ||
391 | * | ||
392 | * We put the command on kmalloc'ed memory as some arches cannot do | ||
393 | * USB from the stack. The reply event is copied from a stage buffer, | ||
394 | * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. | ||
395 | * | ||
396 | * We issue the reset to make sure the UWB controller reinits the PHY; | ||
397 | * this way we can know if the PHY init went ok. | ||
398 | */ | ||
399 | static | ||
400 | int i1480_cmd_reset(struct i1480 *i1480) | ||
401 | { | ||
402 | int result; | ||
403 | struct uwb_rccb *cmd = (void *) i1480->cmd_buf; | ||
404 | struct i1480_evt_reset { | ||
405 | struct uwb_rceb rceb; | ||
406 | u8 bResultCode; | ||
407 | } __attribute__((packed)) *reply = (void *) i1480->evt_buf; | ||
408 | |||
409 | result = -ENOMEM; | ||
410 | cmd->bCommandType = UWB_RC_CET_GENERAL; | ||
411 | cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET); | ||
412 | reply->rceb.bEventType = UWB_RC_CET_GENERAL; | ||
413 | reply->rceb.wEvent = UWB_RC_CMD_RESET; | ||
414 | result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply)); | ||
415 | if (result < 0) | ||
416 | goto out; | ||
417 | if (reply->bResultCode != UWB_RC_RES_SUCCESS) { | ||
418 | dev_err(i1480->dev, "RESET: command execution failed: %u\n", | ||
419 | reply->bResultCode); | ||
420 | result = -EIO; | ||
421 | } | ||
422 | out: | ||
423 | return result; | ||
424 | |||
425 | } | ||
426 | |||
427 | |||
428 | /* Wait for the MAC FW to start running */ | ||
429 | static | ||
430 | int i1480_fw_is_running_q(struct i1480 *i1480) | ||
431 | { | ||
432 | int cnt = 0; | ||
433 | int result; | ||
434 | u32 *val = (u32 *) i1480->cmd_buf; | ||
435 | |||
436 | d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480); | ||
437 | for (cnt = 0; cnt < 10; cnt++) { | ||
438 | msleep(100); | ||
439 | result = i1480->read(i1480, 0x80080000, 4); | ||
440 | if (result < 0) { | ||
441 | dev_err(i1480->dev, "Can't read 0x8008000: %d\n", result); | ||
442 | goto out; | ||
443 | } | ||
444 | if (*val == 0x55555555UL) /* fw running? cool */ | ||
445 | goto out; | ||
446 | } | ||
447 | dev_err(i1480->dev, "Timed out waiting for fw to start\n"); | ||
448 | result = -ETIMEDOUT; | ||
449 | out: | ||
450 | d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); | ||
451 | return result; | ||
452 | |||
453 | } | ||
454 | |||
455 | |||
456 | /** | ||
457 | * Upload MAC firmware, wait for it to start | ||
458 | * | ||
459 | * @i1480: Device instance | ||
460 | * @fw_name: Name of the file that contains the firmware | ||
461 | * | ||
462 | * This has to be called after the pre fw has been uploaded (if | ||
463 | * there is any). | ||
464 | */ | ||
465 | int i1480_mac_fw_upload(struct i1480 *i1480) | ||
466 | { | ||
467 | int result = 0, deprecated_name = 0; | ||
468 | struct i1480_rceb *rcebe = (void *) i1480->evt_buf; | ||
469 | |||
470 | d_fnstart(3, i1480->dev, "(%p)\n", i1480); | ||
471 | result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); | ||
472 | if (result == -ENOENT) { | ||
473 | result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, | ||
474 | "MAC"); | ||
475 | deprecated_name = 1; | ||
476 | } | ||
477 | if (result < 0) | ||
478 | return result; | ||
479 | if (deprecated_name == 1) | ||
480 | dev_warn(i1480->dev, | ||
481 | "WARNING: firmware file name %s is deprecated, " | ||
482 | "please rename to %s\n", | ||
483 | i1480->mac_fw_name_deprecate, i1480->mac_fw_name); | ||
484 | result = i1480_fw_is_running_q(i1480); | ||
485 | if (result < 0) | ||
486 | goto error_fw_not_running; | ||
487 | result = i1480->rc_setup ? i1480->rc_setup(i1480) : 0; | ||
488 | if (result < 0) { | ||
489 | dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n", | ||
490 | result); | ||
491 | goto error_setup; | ||
492 | } | ||
493 | result = i1480->wait_init_done(i1480); /* wait init'on */ | ||
494 | if (result < 0) { | ||
495 | dev_err(i1480->dev, "MAC fw '%s': Initialization timed out " | ||
496 | "(%d)\n", i1480->mac_fw_name, result); | ||
497 | goto error_init_timeout; | ||
498 | } | ||
499 | /* verify we got the right initialization done event */ | ||
500 | if (i1480->evt_result != sizeof(*rcebe)) { | ||
501 | dev_err(i1480->dev, "MAC fw '%s': initialization event returns " | ||
502 | "wrong size (%zu bytes vs %zu needed)\n", | ||
503 | i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); | ||
504 | dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32)); | ||
505 | goto error_size; | ||
506 | } | ||
507 | result = -EIO; | ||
508 | if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1, | ||
509 | i1480_EVT_RM_INIT_DONE) < 0) { | ||
510 | dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x " | ||
511 | "received; expected 0x%02x/%04x/00\n", | ||
512 | rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent), | ||
513 | rcebe->rceb.bEventContext, i1480_CET_VS1, | ||
514 | i1480_EVT_RM_INIT_DONE); | ||
515 | goto error_init_timeout; | ||
516 | } | ||
517 | result = i1480_cmd_reset(i1480); | ||
518 | if (result < 0) | ||
519 | dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n", | ||
520 | i1480->mac_fw_name, result); | ||
521 | error_fw_not_running: | ||
522 | error_init_timeout: | ||
523 | error_size: | ||
524 | error_setup: | ||
525 | d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); | ||
526 | return result; | ||
527 | } | ||
diff --git a/drivers/uwb/i1480/dfu/phy.c b/drivers/uwb/i1480/dfu/phy.c new file mode 100644 index 000000000000..3b1a87de8e63 --- /dev/null +++ b/drivers/uwb/i1480/dfu/phy.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * Intel Wireless UWB Link 1480 | ||
3 | * PHY parameters upload | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * Code for uploading the PHY parameters to the PHY through the UWB | ||
24 | * Radio Control interface. | ||
25 | * | ||
26 | * We just send the data through the MPI interface using HWA-like | ||
27 | * commands and then reset the PHY to make sure it is ok. | ||
28 | */ | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/device.h> | ||
31 | #include <linux/firmware.h> | ||
32 | #include <linux/usb/wusb.h> | ||
33 | #include "i1480-dfu.h" | ||
34 | |||
35 | |||
36 | /** | ||
37 | * Write a value array to an address of the MPI interface | ||
38 | * | ||
39 | * @i1480: Device descriptor | ||
40 | * @data: Data array to write | ||
41 | * @size: Size of the data array | ||
42 | * @returns: 0 if ok, < 0 errno code on error. | ||
43 | * | ||
44 | * The data array is organized into pairs: | ||
45 | * | ||
46 | * ADDRESS VALUE | ||
47 | * | ||
48 | * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has | ||
49 | * to be a multiple of three. | ||
50 | */ | ||
51 | static | ||
52 | int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size) | ||
53 | { | ||
54 | int result; | ||
55 | struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf; | ||
56 | struct i1480_evt_confirm *reply = i1480->evt_buf; | ||
57 | |||
58 | BUG_ON(size > 480); | ||
59 | result = -ENOMEM; | ||
60 | cmd->rccb.bCommandType = i1480_CET_VS1; | ||
61 | cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE); | ||
62 | cmd->size = cpu_to_le16(size); | ||
63 | memcpy(cmd->data, data, size); | ||
64 | reply->rceb.bEventType = i1480_CET_VS1; | ||
65 | reply->rceb.wEvent = i1480_CMD_MPI_WRITE; | ||
66 | result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply)); | ||
67 | if (result < 0) | ||
68 | goto out; | ||
69 | if (reply->bResultCode != UWB_RC_RES_SUCCESS) { | ||
70 | dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n", | ||
71 | reply->bResultCode); | ||
72 | result = -EIO; | ||
73 | } | ||
74 | out: | ||
75 | return result; | ||
76 | } | ||
77 | |||
78 | |||
79 | /** | ||
80 | * Read a value array to from an address of the MPI interface | ||
81 | * | ||
82 | * @i1480: Device descriptor | ||
83 | * @data: where to place the read array | ||
84 | * @srcaddr: Where to read from | ||
85 | * @size: Size of the data read array | ||
86 | * @returns: 0 if ok, < 0 errno code on error. | ||
87 | * | ||
88 | * The command data array is organized into pairs ADDR0 ADDR1..., and | ||
89 | * the returned data in ADDR0 VALUE0 ADDR1 VALUE1... | ||
90 | * | ||
91 | * We generate the command array to be a sequential read and then | ||
92 | * rearrange the result. | ||
93 | * | ||
94 | * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply. | ||
95 | * | ||
96 | * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max amount | ||
97 | * of values we can read is (512 - sizeof(*reply)) / 3 | ||
98 | */ | ||
99 | static | ||
100 | int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size) | ||
101 | { | ||
102 | int result; | ||
103 | struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf; | ||
104 | struct i1480_evt_mpi_read *reply = i1480->evt_buf; | ||
105 | unsigned cnt; | ||
106 | |||
107 | memset(i1480->cmd_buf, 0x69, 512); | ||
108 | memset(i1480->evt_buf, 0x69, 512); | ||
109 | |||
110 | BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3); | ||
111 | result = -ENOMEM; | ||
112 | cmd->rccb.bCommandType = i1480_CET_VS1; | ||
113 | cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ); | ||
114 | cmd->size = cpu_to_le16(3*size); | ||
115 | for (cnt = 0; cnt < size; cnt++) { | ||
116 | cmd->data[cnt].page = (srcaddr + cnt) >> 8; | ||
117 | cmd->data[cnt].offset = (srcaddr + cnt) & 0xff; | ||
118 | } | ||
119 | reply->rceb.bEventType = i1480_CET_VS1; | ||
120 | reply->rceb.wEvent = i1480_CMD_MPI_READ; | ||
121 | result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size, | ||
122 | sizeof(*reply) + 3*size); | ||
123 | if (result < 0) | ||
124 | goto out; | ||
125 | if (reply->bResultCode != UWB_RC_RES_SUCCESS) { | ||
126 | dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n", | ||
127 | reply->bResultCode); | ||
128 | result = -EIO; | ||
129 | } | ||
130 | for (cnt = 0; cnt < size; cnt++) { | ||
131 | if (reply->data[cnt].page != (srcaddr + cnt) >> 8) | ||
132 | dev_err(i1480->dev, "MPI-READ: page inconsistency at " | ||
133 | "index %u: expected 0x%02x, got 0x%02x\n", cnt, | ||
134 | (srcaddr + cnt) >> 8, reply->data[cnt].page); | ||
135 | if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff)) | ||
136 | dev_err(i1480->dev, "MPI-READ: offset inconsistency at " | ||
137 | "index %u: expected 0x%02x, got 0x%02x\n", cnt, | ||
138 | (srcaddr + cnt) & 0x00ff, | ||
139 | reply->data[cnt].offset); | ||
140 | data[cnt] = reply->data[cnt].value; | ||
141 | } | ||
142 | result = 0; | ||
143 | out: | ||
144 | return result; | ||
145 | } | ||
146 | |||
147 | |||
148 | /** | ||
149 | * Upload a PHY firmware, wait for it to start | ||
150 | * | ||
151 | * @i1480: Device instance | ||
152 | * @fw_name: Name of the file that contains the firmware | ||
153 | * | ||
154 | * We assume the MAC fw is up and running. This means we can use the | ||
155 | * MPI interface to write the PHY firmware. Once done, we issue an | ||
156 | * MBOA Reset, which will force the MAC to reset and reinitialize the | ||
157 | * PHY. If that works, we are ready to go. | ||
158 | * | ||
159 | * Max packet size for the MPI write is 512, so the max buffer is 480 | ||
160 | * (which gives us 160 byte triads of MSB, LSB and VAL for the data). | ||
161 | */ | ||
162 | int i1480_phy_fw_upload(struct i1480 *i1480) | ||
163 | { | ||
164 | int result; | ||
165 | const struct firmware *fw; | ||
166 | const char *data_itr, *data_top; | ||
167 | const size_t MAX_BLK_SIZE = 480; /* 160 triads */ | ||
168 | size_t data_size; | ||
169 | u8 phy_stat; | ||
170 | |||
171 | result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev); | ||
172 | if (result < 0) | ||
173 | goto out; | ||
174 | /* Loop writing data in chunks as big as possible until done. */ | ||
175 | for (data_itr = fw->data, data_top = data_itr + fw->size; | ||
176 | data_itr < data_top; data_itr += MAX_BLK_SIZE) { | ||
177 | data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr)); | ||
178 | result = i1480_mpi_write(i1480, data_itr, data_size); | ||
179 | if (result < 0) | ||
180 | goto error_mpi_write; | ||
181 | } | ||
182 | /* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */ | ||
183 | result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1); | ||
184 | if (result < 0) { | ||
185 | dev_err(i1480->dev, "PHY: can't get status: %d\n", result); | ||
186 | goto error_mpi_status; | ||
187 | } | ||
188 | if (phy_stat != 0) { | ||
189 | result = -ENODEV; | ||
190 | dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat); | ||
191 | goto error_phy_status; | ||
192 | } | ||
193 | dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name); | ||
194 | error_phy_status: | ||
195 | error_mpi_status: | ||
196 | error_mpi_write: | ||
197 | release_firmware(fw); | ||
198 | if (result < 0) | ||
199 | dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), " | ||
200 | "power cycle device\n", i1480->phy_fw_name, result); | ||
201 | out: | ||
202 | return result; | ||
203 | } | ||
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c new file mode 100644 index 000000000000..98eeeff051aa --- /dev/null +++ b/drivers/uwb/i1480/dfu/usb.c | |||
@@ -0,0 +1,500 @@ | |||
1 | /* | ||
2 | * Intel Wireless UWB Link 1480 | ||
3 | * USB SKU firmware upload implementation | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * This driver will prepare the i1480 device to behave as a real | ||
24 | * Wireless USB HWA adaptor by uploading the firmware. | ||
25 | * | ||
26 | * When the device is connected or driver is loaded, i1480_usb_probe() | ||
27 | * is called--this will allocate and initialize the device structure, | ||
28 | * fill in the pointers to the common functions (read, write, | ||
29 | * wait_init_done and cmd for HWA command execution) and once that is | ||
30 | * done, call the common firmware uploading routine. Then clean up and | ||
31 | * return -ENODEV, as we don't attach to the device. | ||
32 | * | ||
33 | * The rest are the basic ops we implement that the fw upload code | ||
34 | * uses to do its job. All the ops in the common code are i1480->NAME, | ||
35 | * the functions are i1480_usb_NAME(). | ||
36 | */ | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/version.h> | ||
39 | #include <linux/usb.h> | ||
40 | #include <linux/interrupt.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/uwb.h> | ||
43 | #include <linux/usb/wusb.h> | ||
44 | #include <linux/usb/wusb-wa.h> | ||
45 | #include "i1480-dfu.h" | ||
46 | |||
47 | #define D_LOCAL 0 | ||
48 | #include <linux/uwb/debug.h> | ||
49 | |||
50 | |||
51 | struct i1480_usb { | ||
52 | struct i1480 i1480; | ||
53 | struct usb_device *usb_dev; | ||
54 | struct usb_interface *usb_iface; | ||
55 | struct urb *neep_urb; /* URB for reading from EP1 */ | ||
56 | }; | ||
57 | |||
58 | |||
59 | static | ||
60 | void i1480_usb_init(struct i1480_usb *i1480_usb) | ||
61 | { | ||
62 | i1480_init(&i1480_usb->i1480); | ||
63 | } | ||
64 | |||
65 | |||
66 | static | ||
67 | int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface) | ||
68 | { | ||
69 | struct usb_device *usb_dev = interface_to_usbdev(iface); | ||
70 | int result = -ENOMEM; | ||
71 | |||
72 | i1480_usb->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */ | ||
73 | i1480_usb->usb_iface = usb_get_intf(iface); | ||
74 | usb_set_intfdata(iface, i1480_usb); /* Bind the driver to iface0 */ | ||
75 | i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
76 | if (i1480_usb->neep_urb == NULL) | ||
77 | goto error; | ||
78 | return 0; | ||
79 | |||
80 | error: | ||
81 | usb_set_intfdata(iface, NULL); | ||
82 | usb_put_intf(iface); | ||
83 | usb_put_dev(usb_dev); | ||
84 | return result; | ||
85 | } | ||
86 | |||
87 | |||
88 | static | ||
89 | void i1480_usb_destroy(struct i1480_usb *i1480_usb) | ||
90 | { | ||
91 | usb_kill_urb(i1480_usb->neep_urb); | ||
92 | usb_free_urb(i1480_usb->neep_urb); | ||
93 | usb_set_intfdata(i1480_usb->usb_iface, NULL); | ||
94 | usb_put_intf(i1480_usb->usb_iface); | ||
95 | usb_put_dev(i1480_usb->usb_dev); | ||
96 | } | ||
97 | |||
98 | |||
99 | /** | ||
100 | * Write a buffer to a memory address in the i1480 device | ||
101 | * | ||
102 | * @i1480: i1480 instance | ||
103 | * @memory_address: | ||
104 | * Address where to write the data buffer to. | ||
105 | * @buffer: Buffer to the data | ||
106 | * @size: Size of the buffer [has to be < 512]. | ||
107 | * @returns: 0 if ok, < 0 errno code on error. | ||
108 | * | ||
109 | * Data buffers to USB cannot be on the stack or in vmalloc'ed areas, | ||
110 | * so we copy it to the local i1480 buffer before proceeding. In any | ||
111 | * case, we have a max size we can send, soooo. | ||
112 | */ | ||
113 | static | ||
114 | int i1480_usb_write(struct i1480 *i1480, u32 memory_address, | ||
115 | const void *buffer, size_t size) | ||
116 | { | ||
117 | int result = 0; | ||
118 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | ||
119 | size_t buffer_size, itr = 0; | ||
120 | |||
121 | d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n", | ||
122 | i1480, memory_address, buffer, size); | ||
123 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ | ||
124 | while (size > 0) { | ||
125 | buffer_size = size < i1480->buf_size ? size : i1480->buf_size; | ||
126 | memcpy(i1480->cmd_buf, buffer + itr, buffer_size); | ||
127 | result = usb_control_msg( | ||
128 | i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0), | ||
129 | 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | ||
130 | cpu_to_le16(memory_address & 0xffff), | ||
131 | cpu_to_le16((memory_address >> 16) & 0xffff), | ||
132 | i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); | ||
133 | if (result < 0) | ||
134 | break; | ||
135 | d_printf(3, i1480->dev, | ||
136 | "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n", | ||
137 | memory_address, result, buffer_size); | ||
138 | d_dump(4, i1480->dev, i1480->cmd_buf, result); | ||
139 | itr += result; | ||
140 | memory_address += result; | ||
141 | size -= result; | ||
142 | } | ||
143 | d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n", | ||
144 | i1480, memory_address, buffer, size, result); | ||
145 | return result; | ||
146 | } | ||
147 | |||
148 | |||
149 | /** | ||
150 | * Read a block [max size 512] of the device's memory to @i1480's buffer. | ||
151 | * | ||
152 | * @i1480: i1480 instance | ||
153 | * @memory_address: | ||
154 | * Address where to read from. | ||
155 | * @size: Size to read. Smaller than or equal to 512. | ||
156 | * @returns: >= 0 number of bytes written if ok, < 0 errno code on error. | ||
157 | * | ||
158 | * NOTE: if the memory address or block is incorrect, you might get a | ||
159 | * stall or a different memory read. Caller has to verify the | ||
160 | * memory address and size passed back in the @neh structure. | ||
161 | */ | ||
162 | static | ||
163 | int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size) | ||
164 | { | ||
165 | ssize_t result = 0, bytes = 0; | ||
166 | size_t itr, read_size = i1480->buf_size; | ||
167 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | ||
168 | |||
169 | d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n", | ||
170 | i1480, addr, size); | ||
171 | BUG_ON(size > i1480->buf_size); | ||
172 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ | ||
173 | BUG_ON(read_size > 512); | ||
174 | |||
175 | if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */ | ||
176 | read_size = 4; | ||
177 | |||
178 | for (itr = 0; itr < size; itr += read_size) { | ||
179 | size_t itr_addr = addr + itr; | ||
180 | size_t itr_size = min(read_size, size - itr); | ||
181 | result = usb_control_msg( | ||
182 | i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0), | ||
183 | 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | ||
184 | cpu_to_le16(itr_addr & 0xffff), | ||
185 | cpu_to_le16((itr_addr >> 16) & 0xffff), | ||
186 | i1480->cmd_buf + itr, itr_size, | ||
187 | 100 /* FIXME: arbitrary */); | ||
188 | if (result < 0) { | ||
189 | dev_err(i1480->dev, "%s: USB read error: %zd\n", | ||
190 | __func__, result); | ||
191 | goto out; | ||
192 | } | ||
193 | if (result != itr_size) { | ||
194 | result = -EIO; | ||
195 | dev_err(i1480->dev, | ||
196 | "%s: partial read got only %zu bytes vs %zu expected\n", | ||
197 | __func__, result, itr_size); | ||
198 | goto out; | ||
199 | } | ||
200 | bytes += result; | ||
201 | } | ||
202 | result = bytes; | ||
203 | out: | ||
204 | d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n", | ||
205 | i1480, addr, size, result); | ||
206 | if (result > 0) | ||
207 | d_dump(4, i1480->dev, i1480->cmd_buf, result); | ||
208 | return result; | ||
209 | } | ||
210 | |||
211 | |||
212 | /** | ||
213 | * Callback for reads on the notification/event endpoint | ||
214 | * | ||
215 | * Just enables the completion read handler. | ||
216 | */ | ||
217 | static | ||
218 | void i1480_usb_neep_cb(struct urb *urb) | ||
219 | { | ||
220 | struct i1480 *i1480 = urb->context; | ||
221 | struct device *dev = i1480->dev; | ||
222 | |||
223 | switch (urb->status) { | ||
224 | case 0: | ||
225 | break; | ||
226 | case -ECONNRESET: /* Not an error, but a controlled situation; */ | ||
227 | case -ENOENT: /* (we killed the URB)...so, no broadcast */ | ||
228 | dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status); | ||
229 | break; | ||
230 | case -ESHUTDOWN: /* going away! */ | ||
231 | dev_dbg(dev, "NEEP: down %d\n", urb->status); | ||
232 | break; | ||
233 | default: | ||
234 | dev_err(dev, "NEEP: unknown status %d\n", urb->status); | ||
235 | break; | ||
236 | } | ||
237 | i1480->evt_result = urb->actual_length; | ||
238 | complete(&i1480->evt_complete); | ||
239 | return; | ||
240 | } | ||
241 | |||
242 | |||
243 | /** | ||
244 | * Wait for the MAC FW to initialize | ||
245 | * | ||
246 | * MAC FW sends a 0xfd/0101/00 notification to EP1 when done | ||
247 | * initializing. Get that notification into i1480->evt_buf; upper layer | ||
248 | * will verify it. | ||
249 | * | ||
250 | * Set i1480->evt_result with the result of getting the event or its | ||
251 | * size (if succesful). | ||
252 | * | ||
253 | * Delivers the data directly to i1480->evt_buf | ||
254 | */ | ||
255 | static | ||
256 | int i1480_usb_wait_init_done(struct i1480 *i1480) | ||
257 | { | ||
258 | int result; | ||
259 | struct device *dev = i1480->dev; | ||
260 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | ||
261 | struct usb_endpoint_descriptor *epd; | ||
262 | |||
263 | d_fnstart(3, dev, "(%p)\n", i1480); | ||
264 | init_completion(&i1480->evt_complete); | ||
265 | i1480->evt_result = -EINPROGRESS; | ||
266 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; | ||
267 | usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev, | ||
268 | usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress), | ||
269 | i1480->evt_buf, i1480->buf_size, | ||
270 | i1480_usb_neep_cb, i1480, epd->bInterval); | ||
271 | result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL); | ||
272 | if (result < 0) { | ||
273 | dev_err(dev, "init done: cannot submit NEEP read: %d\n", | ||
274 | result); | ||
275 | goto error_submit; | ||
276 | } | ||
277 | /* Wait for the USB callback to get the data */ | ||
278 | result = wait_for_completion_interruptible_timeout( | ||
279 | &i1480->evt_complete, HZ); | ||
280 | if (result <= 0) { | ||
281 | result = result == 0 ? -ETIMEDOUT : result; | ||
282 | goto error_wait; | ||
283 | } | ||
284 | usb_kill_urb(i1480_usb->neep_urb); | ||
285 | d_fnend(3, dev, "(%p) = 0\n", i1480); | ||
286 | return 0; | ||
287 | |||
288 | error_wait: | ||
289 | usb_kill_urb(i1480_usb->neep_urb); | ||
290 | error_submit: | ||
291 | i1480->evt_result = result; | ||
292 | d_fnend(3, dev, "(%p) = %d\n", i1480, result); | ||
293 | return result; | ||
294 | } | ||
295 | |||
296 | |||
297 | /** | ||
298 | * Generic function for issuing commands to the i1480 | ||
299 | * | ||
300 | * @i1480: i1480 instance | ||
301 | * @cmd_name: Name of the command (for error messages) | ||
302 | * @cmd: Pointer to command buffer | ||
303 | * @cmd_size: Size of the command buffer | ||
304 | * @reply: Buffer for the reply event | ||
305 | * @reply_size: Expected size back (including RCEB); the reply buffer | ||
306 | * is assumed to be as big as this. | ||
307 | * @returns: >= 0 size of the returned event data if ok, | ||
308 | * < 0 errno code on error. | ||
309 | * | ||
310 | * Arms the NE handle, issues the command to the device and checks the | ||
311 | * basics of the reply event. | ||
312 | */ | ||
313 | static | ||
314 | int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size) | ||
315 | { | ||
316 | int result; | ||
317 | struct device *dev = i1480->dev; | ||
318 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | ||
319 | struct usb_endpoint_descriptor *epd; | ||
320 | struct uwb_rccb *cmd = i1480->cmd_buf; | ||
321 | u8 iface_no; | ||
322 | |||
323 | d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); | ||
324 | /* Post a read on the notification & event endpoint */ | ||
325 | iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; | ||
326 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; | ||
327 | usb_fill_int_urb( | ||
328 | i1480_usb->neep_urb, i1480_usb->usb_dev, | ||
329 | usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress), | ||
330 | i1480->evt_buf, i1480->buf_size, | ||
331 | i1480_usb_neep_cb, i1480, epd->bInterval); | ||
332 | result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL); | ||
333 | if (result < 0) { | ||
334 | dev_err(dev, "%s: cannot submit NEEP read: %d\n", | ||
335 | cmd_name, result); | ||
336 | goto error_submit_ep1; | ||
337 | } | ||
338 | /* Now post the command on EP0 */ | ||
339 | result = usb_control_msg( | ||
340 | i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0), | ||
341 | WA_EXEC_RC_CMD, | ||
342 | USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, | ||
343 | 0, iface_no, | ||
344 | cmd, cmd_size, | ||
345 | 100 /* FIXME: this is totally arbitrary */); | ||
346 | if (result < 0) { | ||
347 | dev_err(dev, "%s: control request failed: %d\n", | ||
348 | cmd_name, result); | ||
349 | goto error_submit_ep0; | ||
350 | } | ||
351 | d_fnend(3, dev, "(%p, %s, %zu) = %d\n", | ||
352 | i1480, cmd_name, cmd_size, result); | ||
353 | return result; | ||
354 | |||
355 | error_submit_ep0: | ||
356 | usb_kill_urb(i1480_usb->neep_urb); | ||
357 | error_submit_ep1: | ||
358 | d_fnend(3, dev, "(%p, %s, %zu) = %d\n", | ||
359 | i1480, cmd_name, cmd_size, result); | ||
360 | return result; | ||
361 | } | ||
362 | |||
363 | |||
364 | /* | ||
365 | * Probe a i1480 device for uploading firmware. | ||
366 | * | ||
367 | * We attach only to interface #0, which is the radio control interface. | ||
368 | */ | ||
369 | static | ||
370 | int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) | ||
371 | { | ||
372 | struct i1480_usb *i1480_usb; | ||
373 | struct i1480 *i1480; | ||
374 | struct device *dev = &iface->dev; | ||
375 | int result; | ||
376 | |||
377 | result = -ENODEV; | ||
378 | if (iface->cur_altsetting->desc.bInterfaceNumber != 0) { | ||
379 | dev_dbg(dev, "not attaching to iface %d\n", | ||
380 | iface->cur_altsetting->desc.bInterfaceNumber); | ||
381 | goto error; | ||
382 | } | ||
383 | if (iface->num_altsetting > 1 | ||
384 | && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) { | ||
385 | /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ | ||
386 | result = usb_set_interface(interface_to_usbdev(iface), 0, 1); | ||
387 | if (result < 0) | ||
388 | dev_warn(dev, | ||
389 | "can't set altsetting 1 on iface 0: %d\n", | ||
390 | result); | ||
391 | } | ||
392 | |||
393 | result = -ENOMEM; | ||
394 | i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); | ||
395 | if (i1480_usb == NULL) { | ||
396 | dev_err(dev, "Unable to allocate instance\n"); | ||
397 | goto error; | ||
398 | } | ||
399 | i1480_usb_init(i1480_usb); | ||
400 | |||
401 | i1480 = &i1480_usb->i1480; | ||
402 | i1480->buf_size = 512; | ||
403 | i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL); | ||
404 | if (i1480->cmd_buf == NULL) { | ||
405 | dev_err(dev, "Cannot allocate transfer buffers\n"); | ||
406 | result = -ENOMEM; | ||
407 | goto error_buf_alloc; | ||
408 | } | ||
409 | i1480->evt_buf = i1480->cmd_buf + i1480->buf_size; | ||
410 | |||
411 | result = i1480_usb_create(i1480_usb, iface); | ||
412 | if (result < 0) { | ||
413 | dev_err(dev, "Cannot create instance: %d\n", result); | ||
414 | goto error_create; | ||
415 | } | ||
416 | |||
417 | /* setup the fops and upload the firmare */ | ||
418 | i1480->pre_fw_name = "i1480-pre-phy-0.0.bin"; | ||
419 | i1480->mac_fw_name = "i1480-usb-0.0.bin"; | ||
420 | i1480->mac_fw_name_deprecate = "ptc-0.0.bin"; | ||
421 | i1480->phy_fw_name = "i1480-phy-0.0.bin"; | ||
422 | i1480->dev = &iface->dev; | ||
423 | i1480->write = i1480_usb_write; | ||
424 | i1480->read = i1480_usb_read; | ||
425 | i1480->rc_setup = NULL; | ||
426 | i1480->wait_init_done = i1480_usb_wait_init_done; | ||
427 | i1480->cmd = i1480_usb_cmd; | ||
428 | |||
429 | result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */ | ||
430 | if (result >= 0) { | ||
431 | usb_reset_device(i1480_usb->usb_dev); | ||
432 | result = -ENODEV; /* we don't want to bind to the iface */ | ||
433 | } | ||
434 | i1480_usb_destroy(i1480_usb); | ||
435 | error_create: | ||
436 | kfree(i1480->cmd_buf); | ||
437 | error_buf_alloc: | ||
438 | kfree(i1480_usb); | ||
439 | error: | ||
440 | return result; | ||
441 | } | ||
442 | |||
443 | #define i1480_USB_DEV(v, p) \ | ||
444 | { \ | ||
445 | .match_flags = USB_DEVICE_ID_MATCH_DEVICE \ | ||
446 | | USB_DEVICE_ID_MATCH_DEV_INFO \ | ||
447 | | USB_DEVICE_ID_MATCH_INT_INFO, \ | ||
448 | .idVendor = (v), \ | ||
449 | .idProduct = (p), \ | ||
450 | .bDeviceClass = 0xff, \ | ||
451 | .bDeviceSubClass = 0xff, \ | ||
452 | .bDeviceProtocol = 0xff, \ | ||
453 | .bInterfaceClass = 0xff, \ | ||
454 | .bInterfaceSubClass = 0xff, \ | ||
455 | .bInterfaceProtocol = 0xff, \ | ||
456 | } | ||
457 | |||
458 | |||
459 | /** USB device ID's that we handle */ | ||
460 | static struct usb_device_id i1480_usb_id_table[] = { | ||
461 | i1480_USB_DEV(0x8086, 0xdf3b), | ||
462 | i1480_USB_DEV(0x15a9, 0x0005), | ||
463 | i1480_USB_DEV(0x07d1, 0x3802), | ||
464 | i1480_USB_DEV(0x050d, 0x305a), | ||
465 | i1480_USB_DEV(0x3495, 0x3007), | ||
466 | {}, | ||
467 | }; | ||
468 | MODULE_DEVICE_TABLE(usb, i1480_usb_id_table); | ||
469 | |||
470 | |||
471 | static struct usb_driver i1480_dfu_driver = { | ||
472 | .name = "i1480-dfu-usb", | ||
473 | .id_table = i1480_usb_id_table, | ||
474 | .probe = i1480_usb_probe, | ||
475 | .disconnect = NULL, | ||
476 | }; | ||
477 | |||
478 | |||
479 | /* | ||
480 | * Initialize the i1480 DFU driver. | ||
481 | * | ||
482 | * We also need to register our function for guessing event sizes. | ||
483 | */ | ||
484 | static int __init i1480_dfu_driver_init(void) | ||
485 | { | ||
486 | return usb_register(&i1480_dfu_driver); | ||
487 | } | ||
488 | module_init(i1480_dfu_driver_init); | ||
489 | |||
490 | |||
491 | static void __exit i1480_dfu_driver_exit(void) | ||
492 | { | ||
493 | usb_deregister(&i1480_dfu_driver); | ||
494 | } | ||
495 | module_exit(i1480_dfu_driver_exit); | ||
496 | |||
497 | |||
498 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
499 | MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB"); | ||
500 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c new file mode 100644 index 000000000000..7bf8c6febae7 --- /dev/null +++ b/drivers/uwb/i1480/i1480-est.c | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Intel Wireless UWB Link 1480 | ||
3 | * Event Size tables for Wired Adaptors | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | */ | ||
25 | |||
26 | #include <linux/init.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/usb.h> | ||
29 | #include <linux/uwb.h> | ||
30 | #include "dfu/i1480-dfu.h" | ||
31 | |||
32 | |||
33 | /** Event size table for wEvents 0x00XX */ | ||
34 | static struct uwb_est_entry i1480_est_fd00[] = { | ||
35 | /* Anybody expecting this response has to use | ||
36 | * neh->extra_size to specify the real size that will | ||
37 | * come back. */ | ||
38 | [i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) }, | ||
39 | [i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) }, | ||
40 | #ifdef i1480_RCEB_EXTENDED | ||
41 | [0x09] = { | ||
42 | .size = sizeof(struct i1480_rceb), | ||
43 | .offset = 1 + offsetof(struct i1480_rceb, wParamLength), | ||
44 | }, | ||
45 | #endif | ||
46 | }; | ||
47 | |||
48 | /** Event size table for wEvents 0x01XX */ | ||
49 | static struct uwb_est_entry i1480_est_fd01[] = { | ||
50 | [0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) }, | ||
51 | [0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 }, | ||
52 | [0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 }, | ||
53 | [0xff & i1480_EVT_DEV_ID_CHANGE] = { | ||
54 | .size = sizeof(struct i1480_rceb) + 2 }, | ||
55 | }; | ||
56 | |||
57 | static int i1480_est_init(void) | ||
58 | { | ||
59 | int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, | ||
60 | i1480_est_fd00, | ||
61 | ARRAY_SIZE(i1480_est_fd00)); | ||
62 | if (result < 0) { | ||
63 | printk(KERN_ERR "Can't register EST table fd00: %d\n", result); | ||
64 | return result; | ||
65 | } | ||
66 | result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, | ||
67 | i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); | ||
68 | if (result < 0) { | ||
69 | printk(KERN_ERR "Can't register EST table fd01: %d\n", result); | ||
70 | return result; | ||
71 | } | ||
72 | return 0; | ||
73 | } | ||
74 | module_init(i1480_est_init); | ||
75 | |||
76 | static void i1480_est_exit(void) | ||
77 | { | ||
78 | uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, | ||
79 | i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00)); | ||
80 | uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, | ||
81 | i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); | ||
82 | } | ||
83 | module_exit(i1480_est_exit); | ||
84 | |||
85 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
86 | MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables"); | ||
87 | MODULE_LICENSE("GPL"); | ||
88 | |||
89 | /** | ||
90 | * USB device ID's that we handle | ||
91 | * | ||
92 | * [so we are loaded when this kind device is connected] | ||
93 | */ | ||
94 | static struct usb_device_id i1480_est_id_table[] = { | ||
95 | { USB_DEVICE(0x8086, 0xdf3b), }, | ||
96 | { USB_DEVICE(0x8086, 0x0c3b), }, | ||
97 | { }, | ||
98 | }; | ||
99 | MODULE_DEVICE_TABLE(usb, i1480_est_id_table); | ||
diff --git a/drivers/uwb/i1480/i1480-wlp.h b/drivers/uwb/i1480/i1480-wlp.h new file mode 100644 index 000000000000..18a8b0e4567b --- /dev/null +++ b/drivers/uwb/i1480/i1480-wlp.h | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * Intel 1480 Wireless UWB Link | ||
3 | * WLP specific definitions | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * FIXME: docs | ||
25 | */ | ||
26 | |||
27 | #ifndef __i1480_wlp_h__ | ||
28 | #define __i1480_wlp_h__ | ||
29 | |||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/list.h> | ||
32 | #include <linux/uwb.h> | ||
33 | #include <linux/if_ether.h> | ||
34 | #include <asm/byteorder.h> | ||
35 | |||
36 | /* New simplified header format? */ | ||
37 | #undef WLP_HDR_FMT_2 /* FIXME: rename */ | ||
38 | |||
/**
 * Values of the Delivery ID & Type field when PCA or DRP
 *
 * The Delivery ID & Type field in the WLP TX header indicates whether
 * the frame is PCA or DRP. This is done based on the high bit of
 * this (4-bit) field.
 * We use this constant to test if the traffic is PCA or DRP as follows:
 *  if (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)
 *    this is DRP traffic
 *  else
 *    this is PCA traffic
 */
enum deliver_id_type_bit {
	WLP_DRP = 8,	/* bit 3 of the 4-bit Delivery ID & Type field */
};
54 | |||
/**
 * WLP TX header
 *
 * Indicates UWB/WLP-specific transmission parameters for a network
 * packet. Wire format, little endian, hence the packed attribute.
 *
 * Bit layout (see the accessors below):
 *  @mac_params: bits [3:0] Delivery ID & Type, bits [6:4] ACK policy,
 *               bit 7 RTS/CTS.
 *  @phy_params: bits [3:0] PHY rate, bits [7:4] TX power.
 */
struct wlp_tx_hdr {
	/* dword 0 */
	struct uwb_dev_addr dstaddr;	/* destination UWB device address */
	u8                  key_index;
	u8                  mac_params;	/* packed MAC fields, see above */
	/* dword 1 */
	u8                  phy_params;	/* packed PHY fields, see above */
#ifndef WLP_HDR_FMT_2
	u8                  reserved;
	__le16              oui01;	/* FIXME: not so sure if __le16 or u8[2] */
	/* dword 2 */
	u8                  oui2;	/* if all LE, it could be merged */
	__le16              prid;
#endif
} __attribute__((packed));
76 | |||
77 | static inline int wlp_tx_hdr_delivery_id_type(const struct wlp_tx_hdr *hdr) | ||
78 | { | ||
79 | return hdr->mac_params & 0x0f; | ||
80 | } | ||
81 | |||
82 | static inline int wlp_tx_hdr_ack_policy(const struct wlp_tx_hdr *hdr) | ||
83 | { | ||
84 | return (hdr->mac_params >> 4) & 0x07; | ||
85 | } | ||
86 | |||
87 | static inline int wlp_tx_hdr_rts_cts(const struct wlp_tx_hdr *hdr) | ||
88 | { | ||
89 | return (hdr->mac_params >> 7) & 0x01; | ||
90 | } | ||
91 | |||
92 | static inline void wlp_tx_hdr_set_delivery_id_type(struct wlp_tx_hdr *hdr, int id) | ||
93 | { | ||
94 | hdr->mac_params = (hdr->mac_params & ~0x0f) | id; | ||
95 | } | ||
96 | |||
97 | static inline void wlp_tx_hdr_set_ack_policy(struct wlp_tx_hdr *hdr, | ||
98 | enum uwb_ack_pol policy) | ||
99 | { | ||
100 | hdr->mac_params = (hdr->mac_params & ~0x70) | (policy << 4); | ||
101 | } | ||
102 | |||
103 | static inline void wlp_tx_hdr_set_rts_cts(struct wlp_tx_hdr *hdr, int rts_cts) | ||
104 | { | ||
105 | hdr->mac_params = (hdr->mac_params & ~0x80) | (rts_cts << 7); | ||
106 | } | ||
107 | |||
108 | static inline enum uwb_phy_rate wlp_tx_hdr_phy_rate(const struct wlp_tx_hdr *hdr) | ||
109 | { | ||
110 | return hdr->phy_params & 0x0f; | ||
111 | } | ||
112 | |||
113 | static inline int wlp_tx_hdr_tx_power(const struct wlp_tx_hdr *hdr) | ||
114 | { | ||
115 | return (hdr->phy_params >> 4) & 0x0f; | ||
116 | } | ||
117 | |||
118 | static inline void wlp_tx_hdr_set_phy_rate(struct wlp_tx_hdr *hdr, enum uwb_phy_rate rate) | ||
119 | { | ||
120 | hdr->phy_params = (hdr->phy_params & ~0x0f) | rate; | ||
121 | } | ||
122 | |||
123 | static inline void wlp_tx_hdr_set_tx_power(struct wlp_tx_hdr *hdr, int pwr) | ||
124 | { | ||
125 | hdr->phy_params = (hdr->phy_params & ~0xf0) | (pwr << 4); | ||
126 | } | ||
127 | |||
128 | |||
/**
 * WLP RX header
 *
 * Provides UWB/WLP-specific transmission data for a received
 * network packet. Wire format, little endian, hence the packed
 * attribute.
 */
struct wlp_rx_hdr {
	/* dword 0 */
	struct uwb_dev_addr dstaddr;	/* destination UWB device address */
	struct uwb_dev_addr srcaddr;	/* source UWB device address */
	/* dword 1 */
	u8 		    LQI;	/* presumably link quality indicator -- TODO confirm */
	s8		    RSSI;	/* presumably received signal strength, signed -- TODO confirm */
	u8		    reserved3;
#ifndef WLP_HDR_FMT_2
	u8		    oui0;
	/* dword 2 */
	__le16		    oui12;
	__le16		    prid;
#endif
} __attribute__((packed));
150 | |||
151 | |||
/** User configurable options for WLP (tweaked via sysfs) */
struct wlp_options {
	struct mutex mutex; /* access to user configurable options*/
	struct wlp_tx_hdr def_tx_hdr;	/* default tx hdr applied to outgoing frames */
	u8 pca_base_priority;		/* base priority for PCA traffic */
	u8 bw_alloc; /*index into bw_allocs[] for PCA/DRP reservations*/
};
159 | |||
160 | |||
/*
 * Initialize user-configurable WLP options to their defaults:
 * immediate ACK, RTS/CTS enabled, 480 Mbps PHY rate and PRID 0.
 */
static inline
void wlp_options_init(struct wlp_options *options)
{
	mutex_init(&options->mutex);
	wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, UWB_ACK_INM);
	wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, 1);
	/* FIXME: default to phy caps */
	wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, UWB_PHY_RATE_480);
#ifndef WLP_HDR_FMT_2
	options->def_tx_hdr.prid = cpu_to_le16(0x0000);
#endif
}
173 | |||
174 | |||
175 | /* sysfs helpers */ | ||
176 | |||
177 | extern ssize_t uwb_pca_base_priority_store(struct wlp_options *, | ||
178 | const char *, size_t); | ||
179 | extern ssize_t uwb_pca_base_priority_show(const struct wlp_options *, char *); | ||
180 | extern ssize_t uwb_bw_alloc_store(struct wlp_options *, const char *, size_t); | ||
181 | extern ssize_t uwb_bw_alloc_show(const struct wlp_options *, char *); | ||
182 | extern ssize_t uwb_ack_policy_store(struct wlp_options *, | ||
183 | const char *, size_t); | ||
184 | extern ssize_t uwb_ack_policy_show(const struct wlp_options *, char *); | ||
185 | extern ssize_t uwb_rts_cts_store(struct wlp_options *, const char *, size_t); | ||
186 | extern ssize_t uwb_rts_cts_show(const struct wlp_options *, char *); | ||
187 | extern ssize_t uwb_phy_rate_store(struct wlp_options *, const char *, size_t); | ||
188 | extern ssize_t uwb_phy_rate_show(const struct wlp_options *, char *); | ||
189 | |||
190 | |||
/** Simple bandwidth allocation (temporary and too simple) */
struct wlp_bw_allocs {
	const char *name;	/* human-readable name of the allocation */
	struct {
		u8 mask, stream;	/* MAS mask and stream index */
	} tx, rx;		/* separate settings per direction */
};
198 | |||
199 | |||
200 | #endif /* #ifndef __i1480_wlp_h__ */ | ||
diff --git a/drivers/uwb/i1480/i1480u-wlp/Makefile b/drivers/uwb/i1480/i1480u-wlp/Makefile new file mode 100644 index 000000000000..fe6709b8e68b --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp.o | ||
2 | |||
3 | i1480u-wlp-objs := \ | ||
4 | lc.o \ | ||
5 | netdev.o \ | ||
6 | rx.o \ | ||
7 | sysfs.o \ | ||
8 | tx.o | ||
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h new file mode 100644 index 000000000000..5f1b2951bb83 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * Intel 1480 Wireless UWB Link USB | ||
3 | * Header formats, constants, general internal interfaces | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This is not an standard interface. | ||
25 | * | ||
26 | * FIXME: docs | ||
27 | * | ||
28 | * i1480u-wlp is pretty simple: two endpoints, one for tx, one for | ||
29 | * rx. rx is polled. Network packets (ethernet, whatever) are wrapped | ||
30 | * in i1480 TX or RX headers (for sending over the air), and these | ||
31 | * packets are wrapped in UNTD headers (for sending to the WLP UWB | ||
32 | * controller). | ||
33 | * | ||
34 | * UNTD packets (UNTD hdr + i1480 hdr + network packet) packets | ||
35 | * cannot be bigger than i1480u_MAX_FRG_SIZE. When this happens, the | ||
36 | * i1480 packet is broken in chunks/packets: | ||
37 | * | ||
38 | * UNTD-1st.hdr + i1480.hdr + payload | ||
39 | * UNTD-next.hdr + payload | ||
40 | * ... | ||
41 | * UNTD-last.hdr + payload | ||
42 | * | ||
43 | * so that each packet is smaller or equal than i1480u_MAX_FRG_SIZE. | ||
44 | * | ||
45 | * All HW structures and bitmaps are little endian, so we need to play | ||
46 | * ugly tricks when defining bitfields. Hoping for the day GCC | ||
47 | * implements __attribute__((endian(1234))). | ||
48 | * | ||
49 | * FIXME: ROADMAP to the whole implementation | ||
50 | */ | ||
51 | |||
52 | #ifndef __i1480u_wlp_h__ | ||
53 | #define __i1480u_wlp_h__ | ||
54 | |||
55 | #include <linux/usb.h> | ||
56 | #include <linux/netdevice.h> | ||
57 | #include <linux/uwb.h> /* struct uwb_rc, struct uwb_notifs_handler */ | ||
58 | #include <linux/wlp.h> | ||
59 | #include "../i1480-wlp.h" | ||
60 | |||
61 | #undef i1480u_FLOW_CONTROL /* Enable flow control code */ | ||
62 | |||
/**
 * Basic flow control limits
 *
 * Queue is stopped when this many TX URBs are outstanding and
 * restarted when the count drops to the threshold (see
 * struct i1480u_tx_inflight).
 */
enum {
	i1480u_TX_INFLIGHT_MAX = 1000,
	i1480u_TX_INFLIGHT_THRESHOLD = 100,
};
70 | |||
/** Maximum size of a transaction that we can tx/rx */
enum {
	/* Maximum packet size computed as follows: max UNTD header (8) +
	 * i1480 RX header (8) + max Ethernet header and payload (4096) +
	 * Padding added by skb_reserve (2) to make post Ethernet payload
	 * start on 16 byte boundary*/
	i1480u_MAX_RX_PKT_SIZE = 4114,
	i1480u_MAX_FRG_SIZE = 512,	/* max UNTD fragment; bigger payloads are split */
	i1480u_RX_BUFS = 9,		/* number of pre-allocated RX URB buffers */
};
81 | |||
82 | |||
/**
 * UNTD packet type
 *
 * We need to fragment any payload whose UNTD packet is going to be
 * bigger than i1480u_MAX_FRG_SIZE. These values are carried in the
 * low two bits of untd_hdr.type (see untd_hdr_type()).
 */
enum i1480u_pkt_type {
	i1480u_PKT_FRAG_1ST = 0x1,	/* first fragment of a split packet */
	i1480u_PKT_FRAG_NXT = 0x0,	/* intermediate fragment */
	i1480u_PKT_FRAG_LST = 0x2,	/* last fragment */
	i1480u_PKT_FRAG_CMP = 0x3	/* complete, unfragmented packet */
};
/* Out-of-band value: not a valid 2-bit wire type */
enum {
	i1480u_PKT_NONE = 0x4,
};
98 | |||
/** USB Network Transfer Descriptor - common (wire format, packed) */
struct untd_hdr {
	u8     type;	/* bits [1:0] i1480u_pkt_type, bit 2 rx/tx flag */
	__le16 len;	/* meaning depends on packet type, see subtypes below */
} __attribute__((packed));
104 | |||
105 | static inline enum i1480u_pkt_type untd_hdr_type(const struct untd_hdr *hdr) | ||
106 | { | ||
107 | return hdr->type & 0x03; | ||
108 | } | ||
109 | |||
110 | static inline int untd_hdr_rx_tx(const struct untd_hdr *hdr) | ||
111 | { | ||
112 | return (hdr->type >> 2) & 0x01; | ||
113 | } | ||
114 | |||
115 | static inline void untd_hdr_set_type(struct untd_hdr *hdr, enum i1480u_pkt_type type) | ||
116 | { | ||
117 | hdr->type = (hdr->type & ~0x03) | type; | ||
118 | } | ||
119 | |||
120 | static inline void untd_hdr_set_rx_tx(struct untd_hdr *hdr, int rx_tx) | ||
121 | { | ||
122 | hdr->type = (hdr->type & ~0x04) | (rx_tx << 2); | ||
123 | } | ||
124 | |||
125 | |||
/**
 * USB Network Transfer Descriptor - Complete Packet
 *
 * This is for a packet that is smaller (header + payload) than
 * i1480u_MAX_FRG_SIZE.
 *
 * @hdr.total_len is the size of the payload; the payload doesn't
 * count this header nor the padding, but includes the size of i1480
 * header.
 */
struct untd_hdr_cmp {
	struct untd_hdr	hdr;
	u8		padding;	/* wire-format alignment byte */
} __attribute__((packed));
140 | |||
141 | |||
/**
 * USB Network Transfer Descriptor - First fragment
 *
 * @hdr.len is the size of the *whole packet* (excluding UNTD
 * headers); @fragment_len is the size of the payload (excluding UNTD
 * headers, but including i1480 headers).
 */
struct untd_hdr_1st {
	struct untd_hdr	hdr;
	__le16		fragment_len;	/* this fragment's payload size */
	u8		padding[3];	/* wire-format alignment bytes */
} __attribute__((packed));
154 | |||
155 | |||
/**
 * USB Network Transfer Descriptor - Next / Last [Rest]
 *
 * @hdr.len is the size of the payload, not including headers.
 */
struct untd_hdr_rst {
	struct untd_hdr	hdr;
	u8		padding;	/* wire-format alignment byte */
} __attribute__((packed));
165 | |||
166 | |||
/**
 * Transmission context
 *
 * Wraps all the stuff needed to track a pending/active tx
 * operation. Linked into i1480u.tx_list (protected by
 * i1480u.tx_list_lock).
 */
struct i1480u_tx {
	struct list_head list_node;	/* entry in i1480u.tx_list */
	struct i1480u *i1480u;		/* owning device instance */
	struct urb *urb;		/* USB transfer in flight */

	struct sk_buff *skb;		/* network packet being sent */
	struct wlp_tx_hdr *wlp_tx_hdr;	/* WLP header within the tx data */

	void *buf;	/* if NULL, no new buf was used */
	size_t buf_size;
};
184 | |||
/**
 * Basic flow control
 *
 * We maintain a basic flow control counter. "count" how many TX URBs are
 * outstanding. Only allow "max"
 * TX URBs to be outstanding. If this value is reached the queue will be
 * stopped. The queue will be restarted when there are
 * "threshold" URBs outstanding.
 * Maintain a counter of how many time the TX queue needed to be restarted
 * due to the "max" being exceeded and the "threshold" reached again. The
 * timestamp "restart_ts" is to keep track from when the counter was last
 * queried (see sysfs handling of file wlp_tx_inflight).
 */
struct i1480u_tx_inflight {
	atomic_t count;			/* TX URBs currently outstanding */
	unsigned long max;		/* stop queue at this many URBs */
	unsigned long threshold;	/* restart queue at this many URBs */
	unsigned long restart_ts;	/* jiffies of last sysfs query */
	atomic_t restart_count;		/* restarts since last query */
};
205 | |||
/**
 * Instance of a i1480u WLP interface
 *
 * Keeps references to the USB device that wraps it, as well as its
 * interface and associated UWB host controller. As well, it also
 * keeps a link to the netdevice for integration into the networking
 * stack.
 * We maintain separate error history for the tx and rx endpoints because
 * the implementation does not rely on locking - having one shared
 * structure between endpoints may cause problems. Adding locking to the
 * implementation will have higher cost than adding a separate structure.
 */
struct i1480u {
	struct usb_device *usb_dev;
	struct usb_interface *usb_iface;
	struct net_device *net_dev;

	spinlock_t lock;
	struct net_device_stats stats;

	/* RX context handling */
	struct sk_buff *rx_skb;		/* skb being reassembled from fragments */
	struct uwb_dev_addr rx_srcaddr;	/* sender of the packet being reassembled */
	size_t rx_untd_pkt_size;
	struct i1480u_rx_buf {
		struct i1480u *i1480u;	/* back pointer */
		struct urb *urb;
		struct sk_buff *data;	/* i1480u_MAX_RX_PKT_SIZE each */
	} rx_buf[i1480u_RX_BUFS];	/* N bufs */

	spinlock_t tx_list_lock;	/* TX context */
	struct list_head tx_list;	/* pending struct i1480u_tx entries */
	u8 tx_stream;

	struct stats lqe_stats, rssi_stats;	/* radio statistics */

	/* Options we can set from sysfs */
	struct wlp_options options;
	struct uwb_notifs_handler uwb_notifs_handler;
	struct edc tx_errors;		/* per-endpoint error density counters */
	struct edc rx_errors;
	struct wlp wlp;			/* WLP substack instance */
#ifdef i1480u_FLOW_CONTROL
	struct urb *notif_urb;
	struct edc notif_edc;		/* error density counter */
	u8 notif_buffer[1];		/* xon/xoff byte from the device */
#endif
	struct i1480u_tx_inflight tx_inflight;
};
255 | |||
256 | /* Internal interfaces */ | ||
257 | extern void i1480u_rx_cb(struct urb *urb); | ||
258 | extern int i1480u_rx_setup(struct i1480u *); | ||
259 | extern void i1480u_rx_release(struct i1480u *); | ||
260 | extern void i1480u_tx_release(struct i1480u *); | ||
261 | extern int i1480u_xmit_frame(struct wlp *, struct sk_buff *, | ||
262 | struct uwb_dev_addr *); | ||
263 | extern void i1480u_stop_queue(struct wlp *); | ||
264 | extern void i1480u_start_queue(struct wlp *); | ||
265 | extern int i1480u_sysfs_setup(struct i1480u *); | ||
266 | extern void i1480u_sysfs_release(struct i1480u *); | ||
267 | |||
268 | /* netdev interface */ | ||
269 | extern int i1480u_open(struct net_device *); | ||
270 | extern int i1480u_stop(struct net_device *); | ||
271 | extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *); | ||
272 | extern void i1480u_tx_timeout(struct net_device *); | ||
273 | extern int i1480u_set_config(struct net_device *, struct ifmap *); | ||
274 | extern struct net_device_stats *i1480u_get_stats(struct net_device *); | ||
275 | extern int i1480u_change_mtu(struct net_device *, int); | ||
276 | extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs); | ||
277 | |||
278 | /* bandwidth allocation callback */ | ||
279 | extern void i1480u_bw_alloc_cb(struct uwb_rsv *); | ||
280 | |||
281 | /* Sys FS */ | ||
282 | extern struct attribute_group i1480u_wlp_attr_group; | ||
283 | |||
284 | #endif /* #ifndef __i1480u_wlp_h__ */ | ||
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c new file mode 100644 index 000000000000..737d60cd5b73 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/lc.c | |||
@@ -0,0 +1,421 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: WLP interface | ||
3 | * Driver for the Linux Network stack. | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | * | ||
25 | * This implements a very simple network driver for the WLP USB | ||
26 | * device that is associated to a UWB (Ultra Wide Band) host. | ||
27 | * | ||
28 | * This is seen as an interface of a composite device. Once the UWB | ||
29 | * host has an association to another WLP capable device, the | ||
30 | * networking interface (aka WLP) can start to send packets back and | ||
31 | * forth. | ||
32 | * | ||
33 | * Limitations: | ||
34 | * | ||
35 | * - Hand cranked; can't ifup the interface until there is an association | ||
36 | * | ||
37 | * - BW allocation very simplistic [see i1480u_mas_set() and callees]. | ||
38 | * | ||
39 | * | ||
40 | * ROADMAP: | ||
41 | * | ||
42 | * ENTRY POINTS (driver model): | ||
43 | * | ||
44 | * i1480u_driver_{exit,init}(): initialization of the driver. | ||
45 | * | ||
46 | * i1480u_probe(): called by the driver code when a device | ||
47 | * matching 'i1480u_id_table' is connected. | ||
48 | * | ||
49 | * This allocs a netdev instance, inits with | ||
50 | * i1480u_add(), then registers_netdev(). | ||
51 | * i1480u_init() | ||
52 | * i1480u_add() | ||
53 | * | ||
54 | * i1480u_disconnect(): device has been disconnected/module | ||
55 | * is being removed. | ||
56 | * i1480u_rm() | ||
57 | */ | ||
58 | #include <linux/version.h> | ||
59 | #include <linux/if_arp.h> | ||
60 | #include <linux/etherdevice.h> | ||
61 | #include <linux/uwb/debug.h> | ||
62 | #include "i1480u-wlp.h" | ||
63 | |||
64 | |||
65 | |||
/*
 * Initialize the in-memory state of a freshly zeroed struct i1480u:
 * locks, the TX list, default sysfs options, error-density counters,
 * radio statistics and the embedded WLP instance. No I/O is done.
 */
static inline
void i1480u_init(struct i1480u *i1480u)
{
	/* nothing so far... doesn't it suck? */
	spin_lock_init(&i1480u->lock);
	INIT_LIST_HEAD(&i1480u->tx_list);
	spin_lock_init(&i1480u->tx_list_lock);
	wlp_options_init(&i1480u->options);
	edc_init(&i1480u->tx_errors);
	edc_init(&i1480u->rx_errors);
#ifdef i1480u_FLOW_CONTROL
	edc_init(&i1480u->notif_edc);
#endif
	stats_init(&i1480u->lqe_stats);
	stats_init(&i1480u->rssi_stats);
	wlp_init(&i1480u->wlp);
}
83 | |||
/**
 * Fill WLP device information structure
 *
 * The structure will contain a few character arrays, each ending with a
 * null terminated string. Each string has to fit (excluding terminating
 * character) into a specified range obtained from the WLP substack.
 *
 * It is still not clear exactly how this device information should be
 * obtained. Until we find out we use the USB device descriptor as backup, some
 * information elements have intuitive mappings, other not.
 */
static
void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info)
{
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct usb_device *usb_dev = i1480u->usb_dev;
	/* Treat device name and model name the same */
	if (usb_dev->descriptor.iProduct) {
		usb_string(usb_dev, usb_dev->descriptor.iProduct,
			   dev_info->name, sizeof(dev_info->name));
		usb_string(usb_dev, usb_dev->descriptor.iProduct,
			   dev_info->model_name, sizeof(dev_info->model_name));
	}
	if (usb_dev->descriptor.iManufacturer)
		usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
			   dev_info->manufacturer,
			   sizeof(dev_info->manufacturer));
	/* bcdDevice (BCD-coded device release) doubles as the model number */
	scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x",
		  __le16_to_cpu(usb_dev->descriptor.bcdDevice));
	if (usb_dev->descriptor.iSerialNumber)
		usb_string(usb_dev, usb_dev->descriptor.iSerialNumber,
			   dev_info->serial, sizeof(dev_info->serial));
	/* FIXME: where should we obtain category? */
	dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
	/* FIXME: Complete OUI and OUIsubdiv attributes */
}
120 | |||
121 | #ifdef i1480u_FLOW_CONTROL | ||
122 | /** | ||
123 | * Callback for the notification endpoint | ||
124 | * | ||
125 | * This mostly controls the xon/xoff protocol. In case of hard error, | ||
126 | * we stop the queue. If not, we always retry. | ||
127 | */ | ||
128 | static | ||
129 | void i1480u_notif_cb(struct urb *urb, struct pt_regs *regs) | ||
130 | { | ||
131 | struct i1480u *i1480u = urb->context; | ||
132 | struct usb_interface *usb_iface = i1480u->usb_iface; | ||
133 | struct device *dev = &usb_iface->dev; | ||
134 | int result; | ||
135 | |||
136 | switch (urb->status) { | ||
137 | case 0: /* Got valid data, do xon/xoff */ | ||
138 | switch (i1480u->notif_buffer[0]) { | ||
139 | case 'N': | ||
140 | dev_err(dev, "XOFF STOPPING queue at %lu\n", jiffies); | ||
141 | netif_stop_queue(i1480u->net_dev); | ||
142 | break; | ||
143 | case 'A': | ||
144 | dev_err(dev, "XON STARTING queue at %lu\n", jiffies); | ||
145 | netif_start_queue(i1480u->net_dev); | ||
146 | break; | ||
147 | default: | ||
148 | dev_err(dev, "NEP: unknown data 0x%02hhx\n", | ||
149 | i1480u->notif_buffer[0]); | ||
150 | } | ||
151 | break; | ||
152 | case -ECONNRESET: /* Controlled situation ... */ | ||
153 | case -ENOENT: /* we killed the URB... */ | ||
154 | dev_err(dev, "NEP: URB reset/noent %d\n", urb->status); | ||
155 | goto error; | ||
156 | case -ESHUTDOWN: /* going away! */ | ||
157 | dev_err(dev, "NEP: URB down %d\n", urb->status); | ||
158 | goto error; | ||
159 | default: /* Retry unless it gets ugly */ | ||
160 | if (edc_inc(&i1480u->notif_edc, EDC_MAX_ERRORS, | ||
161 | EDC_ERROR_TIMEFRAME)) { | ||
162 | dev_err(dev, "NEP: URB max acceptable errors " | ||
163 | "exceeded; resetting device\n"); | ||
164 | goto error_reset; | ||
165 | } | ||
166 | dev_err(dev, "NEP: URB error %d\n", urb->status); | ||
167 | break; | ||
168 | } | ||
169 | result = usb_submit_urb(urb, GFP_ATOMIC); | ||
170 | if (result < 0) { | ||
171 | dev_err(dev, "NEP: Can't resubmit URB: %d; resetting device\n", | ||
172 | result); | ||
173 | goto error_reset; | ||
174 | } | ||
175 | return; | ||
176 | |||
177 | error_reset: | ||
178 | wlp_reset_all(&i1480-wlp); | ||
179 | error: | ||
180 | netif_stop_queue(i1480u->net_dev); | ||
181 | return; | ||
182 | } | ||
183 | #endif | ||
184 | |||
185 | static | ||
186 | int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface) | ||
187 | { | ||
188 | int result = -ENODEV; | ||
189 | struct wlp *wlp = &i1480u->wlp; | ||
190 | struct usb_device *usb_dev = interface_to_usbdev(iface); | ||
191 | struct net_device *net_dev = i1480u->net_dev; | ||
192 | struct uwb_rc *rc; | ||
193 | struct uwb_dev *uwb_dev; | ||
194 | #ifdef i1480u_FLOW_CONTROL | ||
195 | struct usb_endpoint_descriptor *epd; | ||
196 | #endif | ||
197 | |||
198 | i1480u->usb_dev = usb_get_dev(usb_dev); | ||
199 | i1480u->usb_iface = iface; | ||
200 | rc = uwb_rc_get_by_grandpa(&i1480u->usb_dev->dev); | ||
201 | if (rc == NULL) { | ||
202 | dev_err(&iface->dev, "Cannot get associated UWB Radio " | ||
203 | "Controller\n"); | ||
204 | goto out; | ||
205 | } | ||
206 | wlp->xmit_frame = i1480u_xmit_frame; | ||
207 | wlp->fill_device_info = i1480u_fill_device_info; | ||
208 | wlp->stop_queue = i1480u_stop_queue; | ||
209 | wlp->start_queue = i1480u_start_queue; | ||
210 | result = wlp_setup(wlp, rc); | ||
211 | if (result < 0) { | ||
212 | dev_err(&iface->dev, "Cannot setup WLP\n"); | ||
213 | goto error_wlp_setup; | ||
214 | } | ||
215 | result = 0; | ||
216 | ether_setup(net_dev); /* make it an etherdevice */ | ||
217 | uwb_dev = &rc->uwb_dev; | ||
218 | /* FIXME: hookup address change notifications? */ | ||
219 | |||
220 | memcpy(net_dev->dev_addr, uwb_dev->mac_addr.data, | ||
221 | sizeof(net_dev->dev_addr)); | ||
222 | |||
223 | net_dev->hard_header_len = sizeof(struct untd_hdr_cmp) | ||
224 | + sizeof(struct wlp_tx_hdr) | ||
225 | + WLP_DATA_HLEN | ||
226 | + ETH_HLEN; | ||
227 | net_dev->mtu = 3500; | ||
228 | net_dev->tx_queue_len = 20; /* FIXME: maybe use 1000? */ | ||
229 | |||
230 | /* net_dev->flags &= ~IFF_BROADCAST; FIXME: BUG in firmware */ | ||
231 | /* FIXME: multicast disabled */ | ||
232 | net_dev->flags &= ~IFF_MULTICAST; | ||
233 | net_dev->features &= ~NETIF_F_SG; | ||
234 | net_dev->features &= ~NETIF_F_FRAGLIST; | ||
235 | /* All NETIF_F_*_CSUM disabled */ | ||
236 | net_dev->features |= NETIF_F_HIGHDMA; | ||
237 | net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default? */ | ||
238 | |||
239 | net_dev->open = i1480u_open; | ||
240 | net_dev->stop = i1480u_stop; | ||
241 | net_dev->hard_start_xmit = i1480u_hard_start_xmit; | ||
242 | net_dev->tx_timeout = i1480u_tx_timeout; | ||
243 | net_dev->get_stats = i1480u_get_stats; | ||
244 | net_dev->set_config = i1480u_set_config; | ||
245 | net_dev->change_mtu = i1480u_change_mtu; | ||
246 | |||
247 | #ifdef i1480u_FLOW_CONTROL | ||
248 | /* Notification endpoint setup (submitted when we open the device) */ | ||
249 | i1480u->notif_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
250 | if (i1480u->notif_urb == NULL) { | ||
251 | dev_err(&iface->dev, "Unable to allocate notification URB\n"); | ||
252 | result = -ENOMEM; | ||
253 | goto error_urb_alloc; | ||
254 | } | ||
255 | epd = &iface->cur_altsetting->endpoint[0].desc; | ||
256 | usb_fill_int_urb(i1480u->notif_urb, usb_dev, | ||
257 | usb_rcvintpipe(usb_dev, epd->bEndpointAddress), | ||
258 | i1480u->notif_buffer, sizeof(i1480u->notif_buffer), | ||
259 | i1480u_notif_cb, i1480u, epd->bInterval); | ||
260 | |||
261 | #endif | ||
262 | |||
263 | i1480u->tx_inflight.max = i1480u_TX_INFLIGHT_MAX; | ||
264 | i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD; | ||
265 | i1480u->tx_inflight.restart_ts = jiffies; | ||
266 | usb_set_intfdata(iface, i1480u); | ||
267 | return result; | ||
268 | |||
269 | #ifdef i1480u_FLOW_CONTROL | ||
270 | error_urb_alloc: | ||
271 | #endif | ||
272 | wlp_remove(wlp); | ||
273 | error_wlp_setup: | ||
274 | uwb_rc_put(rc); | ||
275 | out: | ||
276 | usb_put_dev(i1480u->usb_dev); | ||
277 | return result; | ||
278 | } | ||
279 | |||
/*
 * Undo i1480u_add(): clear the interface's driver data, kill and free
 * the notification URB (if flow control is compiled in), tear down the
 * WLP substack and drop the radio-controller and USB device references.
 */
static void i1480u_rm(struct i1480u *i1480u)
{
	struct uwb_rc *rc = i1480u->wlp.rc;
	usb_set_intfdata(i1480u->usb_iface, NULL);
#ifdef i1480u_FLOW_CONTROL
	usb_kill_urb(i1480u->notif_urb);
	usb_free_urb(i1480u->notif_urb);
#endif
	wlp_remove(&i1480u->wlp);
	uwb_rc_put(rc);
	usb_put_dev(i1480u->usb_dev);
}
292 | |||
/** Just setup @net_dev's i1480u private data
 *
 * Passed to alloc_netdev() as the setup callback; runs before
 * i1480u_add() gets a chance to touch the instance.
 */
static void i1480u_netdev_setup(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);
	/* Initialize @i1480u */
	memset(i1480u, 0, sizeof(*i1480u));
	i1480u_init(i1480u);
}
301 | |||
302 | /** | ||
303 | * Probe a i1480u interface and register it | ||
304 | * | ||
305 | * @iface: USB interface to link to | ||
306 | * @id: USB class/subclass/protocol id | ||
307 | * @returns: 0 if ok, < 0 errno code on error. | ||
308 | * | ||
309 | * Does basic housekeeping stuff and then allocs a netdev with space | ||
310 | * for the i1480u data. Initializes, registers in i1480u, registers in | ||
311 | * netdev, ready to go. | ||
312 | */ | ||
313 | static int i1480u_probe(struct usb_interface *iface, | ||
314 | const struct usb_device_id *id) | ||
315 | { | ||
316 | int result; | ||
317 | struct net_device *net_dev; | ||
318 | struct device *dev = &iface->dev; | ||
319 | struct i1480u *i1480u; | ||
320 | |||
321 | /* Allocate instance [calls i1480u_netdev_setup() on it] */ | ||
322 | result = -ENOMEM; | ||
323 | net_dev = alloc_netdev(sizeof(*i1480u), "wlp%d", i1480u_netdev_setup); | ||
324 | if (net_dev == NULL) { | ||
325 | dev_err(dev, "no memory for network device instance\n"); | ||
326 | goto error_alloc_netdev; | ||
327 | } | ||
328 | SET_NETDEV_DEV(net_dev, dev); | ||
329 | i1480u = netdev_priv(net_dev); | ||
330 | i1480u->net_dev = net_dev; | ||
331 | result = i1480u_add(i1480u, iface); /* Now setup all the wlp stuff */ | ||
332 | if (result < 0) { | ||
333 | dev_err(dev, "cannot add i1480u device: %d\n", result); | ||
334 | goto error_i1480u_add; | ||
335 | } | ||
336 | result = register_netdev(net_dev); /* Okey dokey, bring it up */ | ||
337 | if (result < 0) { | ||
338 | dev_err(dev, "cannot register network device: %d\n", result); | ||
339 | goto error_register_netdev; | ||
340 | } | ||
341 | i1480u_sysfs_setup(i1480u); | ||
342 | if (result < 0) | ||
343 | goto error_sysfs_init; | ||
344 | return 0; | ||
345 | |||
346 | error_sysfs_init: | ||
347 | unregister_netdev(net_dev); | ||
348 | error_register_netdev: | ||
349 | i1480u_rm(i1480u); | ||
350 | error_i1480u_add: | ||
351 | free_netdev(net_dev); | ||
352 | error_alloc_netdev: | ||
353 | return result; | ||
354 | } | ||
355 | |||
356 | |||
/**
 * Disconnect a i1480u from the system.
 *
 * i1480u_stop() has been called before, so all the rx and tx contexts
 * have been taken down already. Make sure the queue is stopped,
 * unregister netdev and i1480u, free and kill.
 */
static void i1480u_disconnect(struct usb_interface *iface)
{
	struct i1480u *i1480u;
	struct net_device *net_dev;

	i1480u = usb_get_intfdata(iface);
	net_dev = i1480u->net_dev;
	netif_stop_queue(net_dev);
#ifdef i1480u_FLOW_CONTROL
	usb_kill_urb(i1480u->notif_urb);
#endif
	i1480u_sysfs_release(i1480u);
	unregister_netdev(net_dev);
	i1480u_rm(i1480u);
	free_netdev(net_dev);
}
380 | |||
/*
 * USB IDs we bind to: the i1480 (8086:0c3b) exposed as a composite
 * (miscellaneous-class) device, matched down to the vendor-specific
 * (ff/ff/ff) WLP interface.
 */
static struct usb_device_id i1480u_id_table[] = {
	{
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE \
			| USB_DEVICE_ID_MATCH_DEV_INFO \
			| USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor = 0x8086,
		.idProduct = 0x0c3b,
		.bDeviceClass = 0xef,
		.bDeviceSubClass = 0x02,
		.bDeviceProtocol = 0x02,
		.bInterfaceClass = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
	},
	{},	/* terminator */
};
MODULE_DEVICE_TABLE(usb, i1480u_id_table);
398 | |||
/* USB driver glue: connect/disconnect entry points and the ID table */
static struct usb_driver i1480u_driver = {
	.name =		KBUILD_MODNAME,
	.probe =	i1480u_probe,
	.disconnect =	i1480u_disconnect,
	.id_table =	i1480u_id_table,
};
405 | |||
/* Module load: register the USB driver with the USB core. */
static int __init i1480u_driver_init(void)
{
	return usb_register(&i1480u_driver);
}
module_init(i1480u_driver_init);
411 | |||
412 | |||
/* Module unload: deregister the USB driver. */
static void __exit i1480u_driver_exit(void)
{
	usb_deregister(&i1480u_driver);
}
module_exit(i1480u_driver_exit);
418 | |||
419 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
420 | MODULE_DESCRIPTION("i1480 Wireless UWB Link WLP networking for USB"); | ||
421 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c new file mode 100644 index 000000000000..8802ac43d872 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c | |||
@@ -0,0 +1,368 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: WLP interface | ||
3 | * Driver for the Linux Network stack. | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | * | ||
25 | * Implementation of the netdevice linkage (except tx and rx related stuff). | ||
26 | * | ||
27 | * ROADMAP: | ||
28 | * | ||
29 | * ENTRY POINTS (Net device): | ||
30 | * | ||
31 | * i1480u_open(): Called when we ifconfig up the interface; | ||
32 | * associates to a UWB host controller, reserves | ||
33 | * bandwidth (MAS), sets up RX USB URB and starts | ||
34 | * the queue. | ||
35 | * | ||
36 | * i1480u_stop(): Called when we ifconfig down an interface; | ||
37 | * reverses _open(). | ||
38 | * | ||
39 | * i1480u_set_config(): | ||
40 | */ | ||
41 | |||
42 | #include <linux/if_arp.h> | ||
43 | #include <linux/etherdevice.h> | ||
44 | #include <linux/uwb/debug.h> | ||
45 | #include "i1480u-wlp.h" | ||
46 | |||
/*
 * Wire format of the vendor-specific SET-IP-MAS command sent to the
 * i1480 radio controller (command type 0xfd, command code 0x000e; see
 * i1480u_set_ip_mas() below).
 */
struct i1480u_cmd_set_ip_mas {
	struct uwb_rccb rccb;		/* generic radio-control command header */
	struct uwb_dev_addr addr;	/* peer device the MAS set applies to */
	u8 stream;			/* DRP stream index */
	u8 owner;			/* non-zero if we own the reservation */
	u8 type;			/* enum uwb_drp_type */
	u8 baMAS[32];			/* MAS bitmap (32 bytes = 256 slots) */
} __attribute__((packed));
55 | |||
56 | |||
57 | static | ||
58 | int i1480u_set_ip_mas( | ||
59 | struct uwb_rc *rc, | ||
60 | const struct uwb_dev_addr *dstaddr, | ||
61 | u8 stream, u8 owner, u8 type, unsigned long *mas) | ||
62 | { | ||
63 | |||
64 | int result; | ||
65 | struct i1480u_cmd_set_ip_mas *cmd; | ||
66 | struct uwb_rc_evt_confirm reply; | ||
67 | |||
68 | result = -ENOMEM; | ||
69 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | ||
70 | if (cmd == NULL) | ||
71 | goto error_kzalloc; | ||
72 | cmd->rccb.bCommandType = 0xfd; | ||
73 | cmd->rccb.wCommand = cpu_to_le16(0x000e); | ||
74 | cmd->addr = *dstaddr; | ||
75 | cmd->stream = stream; | ||
76 | cmd->owner = owner; | ||
77 | cmd->type = type; | ||
78 | if (mas == NULL) | ||
79 | memset(cmd->baMAS, 0x00, sizeof(cmd->baMAS)); | ||
80 | else | ||
81 | memcpy(cmd->baMAS, mas, sizeof(cmd->baMAS)); | ||
82 | reply.rceb.bEventType = 0xfd; | ||
83 | reply.rceb.wEvent = cpu_to_le16(0x000e); | ||
84 | result = uwb_rc_cmd(rc, "SET-IP-MAS", &cmd->rccb, sizeof(*cmd), | ||
85 | &reply.rceb, sizeof(reply)); | ||
86 | if (result < 0) | ||
87 | goto error_cmd; | ||
88 | if (reply.bResultCode != UWB_RC_RES_FAIL) { | ||
89 | dev_err(&rc->uwb_dev.dev, | ||
90 | "SET-IP-MAS: command execution failed: %d\n", | ||
91 | reply.bResultCode); | ||
92 | result = -EIO; | ||
93 | } | ||
94 | error_cmd: | ||
95 | kfree(cmd); | ||
96 | error_kzalloc: | ||
97 | return result; | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * Inform a WLP interface of a MAS reservation | ||
102 | * | ||
103 | * @rc is assumed refcnted. | ||
104 | */ | ||
105 | /* FIXME: detect if remote device is WLP capable? */ | ||
106 | static int i1480u_mas_set_dev(struct uwb_dev *uwb_dev, struct uwb_rc *rc, | ||
107 | u8 stream, u8 owner, u8 type, unsigned long *mas) | ||
108 | { | ||
109 | int result = 0; | ||
110 | struct device *dev = &rc->uwb_dev.dev; | ||
111 | |||
112 | result = i1480u_set_ip_mas(rc, &uwb_dev->dev_addr, stream, owner, | ||
113 | type, mas); | ||
114 | if (result < 0) { | ||
115 | char rcaddrbuf[UWB_ADDR_STRSIZE], devaddrbuf[UWB_ADDR_STRSIZE]; | ||
116 | uwb_dev_addr_print(rcaddrbuf, sizeof(rcaddrbuf), | ||
117 | &rc->uwb_dev.dev_addr); | ||
118 | uwb_dev_addr_print(devaddrbuf, sizeof(devaddrbuf), | ||
119 | &uwb_dev->dev_addr); | ||
120 | dev_err(dev, "Set IP MAS (%s to %s) failed: %d\n", | ||
121 | rcaddrbuf, devaddrbuf, result); | ||
122 | } | ||
123 | return result; | ||
124 | } | ||
125 | |||
126 | /** | ||
127 | * Called by bandwidth allocator when change occurs in reservation. | ||
128 | * | ||
129 | * @rsv: The reservation that is being established, modified, or | ||
130 | * terminated. | ||
131 | * | ||
132 | * When a reservation is established, modified, or terminated the upper layer | ||
133 | * (WLP here) needs set/update the currently available Media Access Slots | ||
134 | * that can be use for IP traffic. | ||
135 | * | ||
136 | * Our action taken during failure depends on how the reservation is being | ||
137 | * changed: | ||
138 | * - if reservation is being established we do nothing if we cannot set the | ||
139 | * new MAS to be used | ||
140 | * - if reservation is being terminated we revert back to PCA whether the | ||
141 | * SET IP MAS command succeeds or not. | ||
142 | */ | ||
143 | void i1480u_bw_alloc_cb(struct uwb_rsv *rsv) | ||
144 | { | ||
145 | int result = 0; | ||
146 | struct i1480u *i1480u = rsv->pal_priv; | ||
147 | struct device *dev = &i1480u->usb_iface->dev; | ||
148 | struct uwb_dev *target_dev = rsv->target.dev; | ||
149 | struct uwb_rc *rc = i1480u->wlp.rc; | ||
150 | u8 stream = rsv->stream; | ||
151 | int type = rsv->type; | ||
152 | int is_owner = rsv->owner == &rc->uwb_dev; | ||
153 | unsigned long *bmp = rsv->mas.bm; | ||
154 | |||
155 | dev_err(dev, "WLP callback called - sending set ip mas\n"); | ||
156 | /*user cannot change options while setting configuration*/ | ||
157 | mutex_lock(&i1480u->options.mutex); | ||
158 | switch (rsv->state) { | ||
159 | case UWB_RSV_STATE_T_ACCEPTED: | ||
160 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
161 | result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner, | ||
162 | type, bmp); | ||
163 | if (result < 0) { | ||
164 | dev_err(dev, "MAS reservation failed: %d\n", result); | ||
165 | goto out; | ||
166 | } | ||
167 | if (is_owner) { | ||
168 | wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr, | ||
169 | WLP_DRP | stream); | ||
170 | wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 0); | ||
171 | } | ||
172 | break; | ||
173 | case UWB_RSV_STATE_NONE: | ||
174 | /* revert back to PCA */ | ||
175 | result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner, | ||
176 | type, bmp); | ||
177 | if (result < 0) | ||
178 | dev_err(dev, "MAS reservation failed: %d\n", result); | ||
179 | /* Revert to PCA even though SET IP MAS failed. */ | ||
180 | wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr, | ||
181 | i1480u->options.pca_base_priority); | ||
182 | wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 1); | ||
183 | break; | ||
184 | default: | ||
185 | dev_err(dev, "unexpected WLP reservation state: %s (%d).\n", | ||
186 | uwb_rsv_state_str(rsv->state), rsv->state); | ||
187 | break; | ||
188 | } | ||
189 | out: | ||
190 | mutex_unlock(&i1480u->options.mutex); | ||
191 | return; | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * | ||
196 | * Called on 'ifconfig up' | ||
197 | */ | ||
198 | int i1480u_open(struct net_device *net_dev) | ||
199 | { | ||
200 | int result; | ||
201 | struct i1480u *i1480u = netdev_priv(net_dev); | ||
202 | struct wlp *wlp = &i1480u->wlp; | ||
203 | struct uwb_rc *rc; | ||
204 | struct device *dev = &i1480u->usb_iface->dev; | ||
205 | |||
206 | rc = wlp->rc; | ||
207 | result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ | ||
208 | if (result < 0) | ||
209 | goto error_rx_setup; | ||
210 | netif_wake_queue(net_dev); | ||
211 | #ifdef i1480u_FLOW_CONTROL | ||
212 | result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);; | ||
213 | if (result < 0) { | ||
214 | dev_err(dev, "Can't submit notification URB: %d\n", result); | ||
215 | goto error_notif_urb_submit; | ||
216 | } | ||
217 | #endif | ||
218 | i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb; | ||
219 | i1480u->uwb_notifs_handler.data = i1480u; | ||
220 | if (uwb_bg_joined(rc)) | ||
221 | netif_carrier_on(net_dev); | ||
222 | else | ||
223 | netif_carrier_off(net_dev); | ||
224 | uwb_notifs_register(rc, &i1480u->uwb_notifs_handler); | ||
225 | /* Interface is up with an address, now we can create WSS */ | ||
226 | result = wlp_wss_setup(net_dev, &wlp->wss); | ||
227 | if (result < 0) { | ||
228 | dev_err(dev, "Can't create WSS: %d. \n", result); | ||
229 | goto error_notif_deregister; | ||
230 | } | ||
231 | return 0; | ||
232 | error_notif_deregister: | ||
233 | uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); | ||
234 | #ifdef i1480u_FLOW_CONTROL | ||
235 | error_notif_urb_submit: | ||
236 | #endif | ||
237 | netif_stop_queue(net_dev); | ||
238 | i1480u_rx_release(i1480u); | ||
239 | error_rx_setup: | ||
240 | return result; | ||
241 | } | ||
242 | |||
243 | |||
244 | /** | ||
245 | * Called on 'ifconfig down' | ||
246 | */ | ||
247 | int i1480u_stop(struct net_device *net_dev) | ||
248 | { | ||
249 | struct i1480u *i1480u = netdev_priv(net_dev); | ||
250 | struct wlp *wlp = &i1480u->wlp; | ||
251 | struct uwb_rc *rc = wlp->rc; | ||
252 | |||
253 | BUG_ON(wlp->rc == NULL); | ||
254 | wlp_wss_remove(&wlp->wss); | ||
255 | uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); | ||
256 | netif_carrier_off(net_dev); | ||
257 | #ifdef i1480u_FLOW_CONTROL | ||
258 | usb_kill_urb(i1480u->notif_urb); | ||
259 | #endif | ||
260 | netif_stop_queue(net_dev); | ||
261 | i1480u_rx_release(i1480u); | ||
262 | i1480u_tx_release(i1480u); | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | |||
267 | /** Report statistics */ | ||
268 | struct net_device_stats *i1480u_get_stats(struct net_device *net_dev) | ||
269 | { | ||
270 | struct i1480u *i1480u = netdev_priv(net_dev); | ||
271 | return &i1480u->stats; | ||
272 | } | ||
273 | |||
274 | |||
275 | /** | ||
276 | * | ||
277 | * Change the interface config--we probably don't have to do anything. | ||
278 | */ | ||
279 | int i1480u_set_config(struct net_device *net_dev, struct ifmap *map) | ||
280 | { | ||
281 | int result; | ||
282 | struct i1480u *i1480u = netdev_priv(net_dev); | ||
283 | BUG_ON(i1480u->wlp.rc == NULL); | ||
284 | result = 0; | ||
285 | return result; | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * Change the MTU of the interface | ||
290 | */ | ||
291 | int i1480u_change_mtu(struct net_device *net_dev, int mtu) | ||
292 | { | ||
293 | static union { | ||
294 | struct wlp_tx_hdr tx; | ||
295 | struct wlp_rx_hdr rx; | ||
296 | } i1480u_all_hdrs; | ||
297 | |||
298 | if (mtu < ETH_HLEN) /* We encap eth frames */ | ||
299 | return -ERANGE; | ||
300 | if (mtu > 4000 - sizeof(i1480u_all_hdrs)) | ||
301 | return -ERANGE; | ||
302 | net_dev->mtu = mtu; | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | |||
307 | /** | ||
308 | * Callback function to handle events from UWB | ||
309 | * When we see other devices we know the carrier is ok, | ||
310 | * if we are the only device in the beacon group we set the carrier | ||
311 | * state to off. | ||
312 | * */ | ||
313 | void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev, | ||
314 | enum uwb_notifs event) | ||
315 | { | ||
316 | struct i1480u *i1480u = data; | ||
317 | struct net_device *net_dev = i1480u->net_dev; | ||
318 | struct device *dev = &i1480u->usb_iface->dev; | ||
319 | switch (event) { | ||
320 | case UWB_NOTIF_BG_JOIN: | ||
321 | netif_carrier_on(net_dev); | ||
322 | dev_info(dev, "Link is up\n"); | ||
323 | break; | ||
324 | case UWB_NOTIF_BG_LEAVE: | ||
325 | netif_carrier_off(net_dev); | ||
326 | dev_info(dev, "Link is down\n"); | ||
327 | break; | ||
328 | default: | ||
329 | dev_err(dev, "don't know how to handle event %d from uwb\n", | ||
330 | event); | ||
331 | } | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * Stop the network queue | ||
336 | * | ||
337 | * Enable WLP substack to stop network queue. We also set the flow control | ||
338 | * threshold at this time to prevent the flow control from restarting the | ||
339 | * queue. | ||
340 | * | ||
341 | * we are loosing the current threshold value here ... FIXME? | ||
342 | */ | ||
343 | void i1480u_stop_queue(struct wlp *wlp) | ||
344 | { | ||
345 | struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp); | ||
346 | struct net_device *net_dev = i1480u->net_dev; | ||
347 | i1480u->tx_inflight.threshold = 0; | ||
348 | netif_stop_queue(net_dev); | ||
349 | } | ||
350 | |||
351 | /** | ||
352 | * Start the network queue | ||
353 | * | ||
354 | * Enable WLP substack to start network queue. Also re-enable the flow | ||
355 | * control to manage the queue again. | ||
356 | * | ||
357 | * We re-enable the flow control by storing the default threshold in the | ||
358 | * flow control threshold. This means that if the user modified the | ||
359 | * threshold before the queue was stopped and restarted that information | ||
360 | * will be lost. FIXME? | ||
361 | */ | ||
362 | void i1480u_start_queue(struct wlp *wlp) | ||
363 | { | ||
364 | struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp); | ||
365 | struct net_device *net_dev = i1480u->net_dev; | ||
366 | i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD; | ||
367 | netif_start_queue(net_dev); | ||
368 | } | ||
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c new file mode 100644 index 000000000000..9fc035354a76 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/rx.c | |||
@@ -0,0 +1,486 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: WLP interface | ||
3 | * Driver for the Linux Network stack. | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * i1480u's RX handling is simple. i1480u will send the received | ||
24 | * network packets broken up in fragments; 1 to N fragments make a | ||
25 | * packet, we assemble them together and deliver the packet with netif_rx(). | ||
26 | * | ||
27 | * Because each USB transfer is a *single* fragment (except when the | ||
28 | * transfer contains a first fragment), each URB called thus | ||
29 | * back contains one or two fragments. So we queue N URBs, each with its own | ||
30 | * fragment buffer. When a URB is done, we process it (adding to the | ||
31 | * current skb from the fragment buffer until complete). Once | ||
32 | * processed, we requeue the URB. There is always a bunch of URBs | ||
33 | * ready to take data, so the intergap should be minimal. | ||
34 | * | ||
35 | * An URB's transfer buffer is the data field of a socket buffer. This | ||
36 | * reduces copying as data can be passed directly to network layer. If a | ||
37 | * complete packet or 1st fragment is received the URB's transfer buffer is | ||
38 | * taken away from it and used to send data to the network layer. In this | ||
39 | * case a new transfer buffer is allocated to the URB before being requeued. | ||
40 | * If a "NEXT" or "LAST" fragment is received, the fragment contents is | ||
41 | * appended to the RX packet under construction and the transfer buffer | ||
42 | * is reused. To be able to use this buffer to assemble complete packets | ||
43 | * we set each buffer's size to that of the MAX ethernet packet that can | ||
44 | * be received. There is thus room for improvement in memory usage. | ||
45 | * | ||
46 | * When the max tx fragment size increases, we should be able to read | ||
47 | * data into the skbs directly with very simple code. | ||
48 | * | ||
49 | * ROADMAP: | ||
50 | * | ||
51 | * ENTRY POINTS: | ||
52 | * | ||
53 | * i1480u_rx_setup(): setup RX context [from i1480u_open()] | ||
54 | * | ||
55 | * i1480u_rx_release(): release RX context [from i1480u_stop()] | ||
56 | * | ||
57 | * i1480u_rx_cb(): called when the RX USB URB receives a | ||
58 | * packet. It removes the header and pushes it up | ||
59 | * the Linux netdev stack with netif_rx(). | ||
60 | * | ||
61 | * i1480u_rx_buffer() | ||
62 | * i1480u_drop() and i1480u_fix() | ||
63 | * i1480u_skb_deliver | ||
64 | * | ||
65 | */ | ||
66 | |||
67 | #include <linux/netdevice.h> | ||
68 | #include <linux/etherdevice.h> | ||
69 | #include "i1480u-wlp.h" | ||
70 | |||
71 | #define D_LOCAL 0 | ||
72 | #include <linux/uwb/debug.h> | ||
73 | |||
74 | |||
/*
 * Setup the RX context: allocate i1480u_RX_BUFS bulk URBs, each using
 * a freshly allocated skb's data area as its transfer buffer, and
 * submit them all.  Called from i1480u_open().
 *
 * On failure everything allocated so far is freed via
 * i1480u_rx_release().
 *
 * Returns 0 if ok, -ENOMEM (or the URB submit error) on failure.
 */
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	/* NOTE(review): assumes endpoint [1] of the current altsetting
	 * is the bulk-in data endpoint -- confirm against probe code */
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
		/* 2 bytes headroom; transfer size below is reduced by 2 */
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		/* URB reads straight into the skb's data area */
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			i1480u_rx_cb, rx_buf);
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}
130 | |||
131 | |||
132 | /** Release resources associated to the rx context */ | ||
133 | void i1480u_rx_release(struct i1480u *i1480u) | ||
134 | { | ||
135 | int cnt; | ||
136 | for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) { | ||
137 | if (i1480u->rx_buf[cnt].data) | ||
138 | dev_kfree_skb(i1480u->rx_buf[cnt].data); | ||
139 | if (i1480u->rx_buf[cnt].urb) { | ||
140 | usb_kill_urb(i1480u->rx_buf[cnt].urb); | ||
141 | usb_free_urb(i1480u->rx_buf[cnt].urb); | ||
142 | } | ||
143 | } | ||
144 | if (i1480u->rx_skb != NULL) | ||
145 | dev_kfree_skb(i1480u->rx_skb); | ||
146 | } | ||
147 | |||
148 | static | ||
149 | void i1480u_rx_unlink_urbs(struct i1480u *i1480u) | ||
150 | { | ||
151 | int cnt; | ||
152 | for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) { | ||
153 | if (i1480u->rx_buf[cnt].urb) | ||
154 | usb_unlink_urb(i1480u->rx_buf[cnt].urb); | ||
155 | } | ||
156 | } | ||
157 | |||
/*
 * Fix an out-of-sequence packet: log it (rate-limited), drop the
 * half-assembled skb and reset the in-progress RX state so the next
 * fragment starts a clean packet.
 */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)


/*
 * Drop an out-of-sequence packet: log it (rate-limited) and account
 * the drop in the interface statistics.  Unlike i1480u_fix(), this
 * leaves the in-progress RX state untouched.
 */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->stats.rx_dropped++;			\
} while (0)
176 | |||
177 | |||
178 | |||
179 | |||
/*
 * Finalize the assembled skb and deliver it to the network stack.
 *
 * The frame is first passed to the WLP substack for verification; it
 * may be a WLP association frame, in which case WLP takes over
 * processing.  If WLP does not take it over it still verifies it; an
 * invalid frame's skb is freed by WLP and we do not continue parsing.
 * In every case the per-packet RX state is reset on exit.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n",
		 i1480u->rx_skb, i1480u->rx_skb->len);
	d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len);
	/* 0: WLP consumed (or freed) the skb; !0: keep going */
	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	d_printf(5, dev, "RX delivered skb(%p), %u bytes\n",
		 i1480u->rx_skb, i1480u->rx_skb->len);
	d_dump(7, dev, i1480u->rx_skb->data,
	       i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len);
	i1480u->stats.rx_packets++;
	i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
	net_dev->last_rx = jiffies;
	/* FIXME: flow control: check netif_rx() retval */

	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}
217 | |||
218 | |||
/*
 * Process a buffer of data received from the USB RX endpoint.
 *
 * First fragment arrives with next or last fragment (in the same USB
 * transfer); all other fragments arrive alone.  Each fragment starts
 * with an UNTD header giving type and length; a 1st or complete
 * fragment additionally carries the WLP RX header whose fields
 * (srcaddr, LQI, RSSI) we consume here.
 *
 * Called with i1480u->lock held (see i1480u_rx_cb()).
 *
 * /me hates long functions.
 */
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;

	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;

	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

#if 0
	dev_fnstart(dev,
		"(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
	dev_err(dev, "RX packet, %zu bytes\n", size_left);
	dump_bytes(dev, ptr, size_left);
#endif
	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);

	/* Walk every fragment contained in this transfer */
	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed"
				    "packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) {	/* Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			/* 1st fragment starts a packet: take over this URB's
			 * skb as the packet under assembly */
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					   "sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					    "Dropping\n");
				goto out;
			}
			/* here untd_hdr->len is the whole packet's length */
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
				- i1480u_hdr_size;
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			/* NOTE(review): -7 / +18 look like fixed LQI/RSSI
			 * calibration offsets -- confirm against datasheet */
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL;	/* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			/* middle fragment: append payload to the packet */
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			/* last fragment: append payload, packet complete */
			dev_dbg(dev, "Lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			/* complete packet in one fragment: as for 1ST, hand
			 * this URB's skb over to the network stack */
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
					   " fragment!\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL;	/* for hand off skb to network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				    "Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* recreate needed RX buffers*/
	if (rx_buf->data == NULL) {
		/* buffer is being used to receive packet, create new */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
					"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
	return;
}
405 | |||
406 | |||
407 | /** | ||
408 | * Called when an RX URB has finished receiving or has found some kind | ||
409 | * of error condition. | ||
410 | * | ||
411 | * LIMITATIONS: | ||
412 | * | ||
413 | * - We read USB-transfers, each transfer contains a SINGLE fragment | ||
414 | * (can contain a complete packet, or a 1st, next, or last fragment | ||
415 | * of a packet). | ||
416 | * Looks like a transfer can contain more than one fragment (07/18/06) | ||
417 | * | ||
418 | * - Each transfer buffer is the size of the maximum packet size (minus | ||
419 | * headroom), i1480u_MAX_PKT_SIZE - 2 | ||
420 | * | ||
421 | * - We always read the full USB-transfer, no partials. | ||
422 | * | ||
423 | * - Each transfer is read directly into a skb. This skb will be used to | ||
424 | * send data to the upper layers if it is the first fragment or a complete | ||
425 | * packet. In the other cases the data will be copied from the skb to | ||
426 | * another skb that is being prepared for the upper layers from a prev | ||
427 | * first fragment. | ||
428 | * | ||
429 | * It is simply too much of a pain. Gosh, there should be a unified | ||
430 | * SG infrastructure for *everything* [so that I could declare a SG | ||
431 | * buffer, pass it to USB for receiving, append some space to it if | ||
432 | * I wish, receive more until I have the whole chunk, adapt | ||
433 | * pointers on each fragment to remove hardware headers and then | ||
434 | * attach that to an skbuff and netif_rx()]. | ||
435 | */ | ||
436 | void i1480u_rx_cb(struct urb *urb) | ||
437 | { | ||
438 | int result; | ||
439 | int do_parse_buffer = 1; | ||
440 | struct i1480u_rx_buf *rx_buf = urb->context; | ||
441 | struct i1480u *i1480u = rx_buf->i1480u; | ||
442 | struct device *dev = &i1480u->usb_iface->dev; | ||
443 | unsigned long flags; | ||
444 | u8 rx_buf_idx = rx_buf - i1480u->rx_buf; | ||
445 | |||
446 | switch (urb->status) { | ||
447 | case 0: | ||
448 | break; | ||
449 | case -ECONNRESET: /* Not an error, but a controlled situation; */ | ||
450 | case -ENOENT: /* (we killed the URB)...so, no broadcast */ | ||
451 | case -ESHUTDOWN: /* going away! */ | ||
452 | dev_err(dev, "RX URB[%u]: goind down %d\n", | ||
453 | rx_buf_idx, urb->status); | ||
454 | goto error; | ||
455 | default: | ||
456 | dev_err(dev, "RX URB[%u]: unknown status %d\n", | ||
457 | rx_buf_idx, urb->status); | ||
458 | if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS, | ||
459 | EDC_ERROR_TIMEFRAME)) { | ||
460 | dev_err(dev, "RX: max acceptable errors exceeded," | ||
461 | " resetting device.\n"); | ||
462 | i1480u_rx_unlink_urbs(i1480u); | ||
463 | wlp_reset_all(&i1480u->wlp); | ||
464 | goto error; | ||
465 | } | ||
466 | do_parse_buffer = 0; | ||
467 | break; | ||
468 | } | ||
469 | spin_lock_irqsave(&i1480u->lock, flags); | ||
470 | /* chew the data fragments, extract network packets */ | ||
471 | if (do_parse_buffer) { | ||
472 | i1480u_rx_buffer(rx_buf); | ||
473 | if (rx_buf->data) { | ||
474 | rx_buf->urb->transfer_buffer = rx_buf->data->data; | ||
475 | result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC); | ||
476 | if (result < 0) { | ||
477 | dev_err(dev, "RX URB[%u]: cannot submit %d\n", | ||
478 | rx_buf_idx, result); | ||
479 | } | ||
480 | } | ||
481 | } | ||
482 | spin_unlock_irqrestore(&i1480u->lock, flags); | ||
483 | error: | ||
484 | return; | ||
485 | } | ||
486 | |||
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c new file mode 100644 index 000000000000..a1d8ca6ac935 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c | |||
@@ -0,0 +1,408 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: WLP interface | ||
3 | * Sysfs interfaces | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | */ | ||
25 | |||
26 | #include <linux/netdevice.h> | ||
27 | #include <linux/etherdevice.h> | ||
28 | #include <linux/uwb/debug.h> | ||
29 | #include <linux/device.h> | ||
30 | #include "i1480u-wlp.h" | ||
31 | |||
32 | |||
33 | /** | ||
34 | * | ||
35 | * @dev: Class device from the net_device; assumed refcnted. | ||
36 | * | ||
37 | * Yes, I don't lock--we assume it is refcounted and I am getting a | ||
38 | * single byte value that is kind of atomic to read. | ||
39 | */ | ||
40 | ssize_t uwb_phy_rate_show(const struct wlp_options *options, char *buf) | ||
41 | { | ||
42 | return sprintf(buf, "%u\n", | ||
43 | wlp_tx_hdr_phy_rate(&options->def_tx_hdr)); | ||
44 | } | ||
45 | EXPORT_SYMBOL_GPL(uwb_phy_rate_show); | ||
46 | |||
47 | |||
48 | ssize_t uwb_phy_rate_store(struct wlp_options *options, | ||
49 | const char *buf, size_t size) | ||
50 | { | ||
51 | ssize_t result; | ||
52 | unsigned rate; | ||
53 | |||
54 | result = sscanf(buf, "%u\n", &rate); | ||
55 | if (result != 1) { | ||
56 | result = -EINVAL; | ||
57 | goto out; | ||
58 | } | ||
59 | result = -EINVAL; | ||
60 | if (rate >= UWB_PHY_RATE_INVALID) | ||
61 | goto out; | ||
62 | wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, rate); | ||
63 | result = 0; | ||
64 | out: | ||
65 | return result < 0 ? result : size; | ||
66 | } | ||
67 | EXPORT_SYMBOL_GPL(uwb_phy_rate_store); | ||
68 | |||
69 | |||
70 | ssize_t uwb_rts_cts_show(const struct wlp_options *options, char *buf) | ||
71 | { | ||
72 | return sprintf(buf, "%u\n", | ||
73 | wlp_tx_hdr_rts_cts(&options->def_tx_hdr)); | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(uwb_rts_cts_show); | ||
76 | |||
77 | |||
78 | ssize_t uwb_rts_cts_store(struct wlp_options *options, | ||
79 | const char *buf, size_t size) | ||
80 | { | ||
81 | ssize_t result; | ||
82 | unsigned value; | ||
83 | |||
84 | result = sscanf(buf, "%u\n", &value); | ||
85 | if (result != 1) { | ||
86 | result = -EINVAL; | ||
87 | goto out; | ||
88 | } | ||
89 | result = -EINVAL; | ||
90 | wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, !!value); | ||
91 | result = 0; | ||
92 | out: | ||
93 | return result < 0 ? result : size; | ||
94 | } | ||
95 | EXPORT_SYMBOL_GPL(uwb_rts_cts_store); | ||
96 | |||
97 | |||
98 | ssize_t uwb_ack_policy_show(const struct wlp_options *options, char *buf) | ||
99 | { | ||
100 | return sprintf(buf, "%u\n", | ||
101 | wlp_tx_hdr_ack_policy(&options->def_tx_hdr)); | ||
102 | } | ||
103 | EXPORT_SYMBOL_GPL(uwb_ack_policy_show); | ||
104 | |||
105 | |||
106 | ssize_t uwb_ack_policy_store(struct wlp_options *options, | ||
107 | const char *buf, size_t size) | ||
108 | { | ||
109 | ssize_t result; | ||
110 | unsigned value; | ||
111 | |||
112 | result = sscanf(buf, "%u\n", &value); | ||
113 | if (result != 1 || value > UWB_ACK_B_REQ) { | ||
114 | result = -EINVAL; | ||
115 | goto out; | ||
116 | } | ||
117 | wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, value); | ||
118 | result = 0; | ||
119 | out: | ||
120 | return result < 0 ? result : size; | ||
121 | } | ||
122 | EXPORT_SYMBOL_GPL(uwb_ack_policy_store); | ||
123 | |||
124 | |||
125 | /** | ||
126 | * Show the PCA base priority. | ||
127 | * | ||
128 | * We can access without locking, as the value is (for now) orthogonal | ||
129 | * to other values. | ||
130 | */ | ||
131 | ssize_t uwb_pca_base_priority_show(const struct wlp_options *options, | ||
132 | char *buf) | ||
133 | { | ||
134 | return sprintf(buf, "%u\n", | ||
135 | options->pca_base_priority); | ||
136 | } | ||
137 | EXPORT_SYMBOL_GPL(uwb_pca_base_priority_show); | ||
138 | |||
139 | |||
140 | /** | ||
141 | * Set the PCA base priority. | ||
142 | * | ||
143 | * We can access without locking, as the value is (for now) orthogonal | ||
144 | * to other values. | ||
145 | */ | ||
146 | ssize_t uwb_pca_base_priority_store(struct wlp_options *options, | ||
147 | const char *buf, size_t size) | ||
148 | { | ||
149 | ssize_t result = -EINVAL; | ||
150 | u8 pca_base_priority; | ||
151 | |||
152 | result = sscanf(buf, "%hhu\n", &pca_base_priority); | ||
153 | if (result != 1) { | ||
154 | result = -EINVAL; | ||
155 | goto out; | ||
156 | } | ||
157 | result = -EINVAL; | ||
158 | if (pca_base_priority >= 8) | ||
159 | goto out; | ||
160 | options->pca_base_priority = pca_base_priority; | ||
161 | /* Update TX header if we are currently using PCA. */ | ||
162 | if (result >= 0 && (wlp_tx_hdr_delivery_id_type(&options->def_tx_hdr) & WLP_DRP) == 0) | ||
163 | wlp_tx_hdr_set_delivery_id_type(&options->def_tx_hdr, options->pca_base_priority); | ||
164 | result = 0; | ||
165 | out: | ||
166 | return result < 0 ? result : size; | ||
167 | } | ||
168 | EXPORT_SYMBOL_GPL(uwb_pca_base_priority_store); | ||
169 | |||
/**
 * Show current inflight values
 *
 * Prints, on one line: threshold, max, current in-flight URB count,
 * number of TX-queue restarts since the previous read, seconds elapsed
 * since that read, and restarts/second; two "#" header lines document
 * the read and write formats.
 *
 * NOTE: reading this attribute has a side effect -- it resets the
 * restart counter and timestamp, so the restart statistics are always
 * relative to the last read.
 */
static ssize_t wlp_tx_inflight_show(struct i1480u_tx_inflight *inflight,
				    char *buf)
{
	ssize_t result;
	/* elapsed wall time since the counters were last reset */
	unsigned long sec_elapsed = (jiffies - inflight->restart_ts)/HZ;
	unsigned long restart_count = atomic_read(&inflight->restart_count);

	result = scnprintf(buf, PAGE_SIZE, "%lu %lu %d %lu %lu %lu\n"
			   "#read: threshold max inflight_count restarts "
			   "seconds restarts/sec\n"
			   "#write: threshold max\n",
			   inflight->threshold, inflight->max,
			   atomic_read(&inflight->count),
			   restart_count, sec_elapsed,
			   sec_elapsed == 0 ? 0 : restart_count/sec_elapsed);
	/* Reset the window so the next read reports a fresh interval. */
	inflight->restart_ts = jiffies;
	atomic_set(&inflight->restart_count, 0);
	return result;
}
196 | |||
197 | static | ||
198 | ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight, | ||
199 | const char *buf, size_t size) | ||
200 | { | ||
201 | unsigned long in_threshold, in_max; | ||
202 | ssize_t result; | ||
203 | result = sscanf(buf, "%lu %lu", &in_threshold, &in_max); | ||
204 | if (result != 2) | ||
205 | return -EINVAL; | ||
206 | if (in_max <= in_threshold) | ||
207 | return -EINVAL; | ||
208 | inflight->max = in_max; | ||
209 | inflight->threshold = in_threshold; | ||
210 | return size; | ||
211 | } | ||
212 | /* | ||
213 | * Glue (or function adaptors) for accesing info on sysfs | ||
214 | * | ||
215 | * [we need this indirection because the PCI driver does almost the | ||
216 | * same] | ||
217 | * | ||
218 | * Linux 2.6.21 changed how 'struct netdevice' does attributes (from | ||
219 | * having a 'struct class_dev' to having a 'struct device'). That is | ||
220 | * quite of a pain. | ||
221 | * | ||
222 | * So we try to abstract that here. i1480u_SHOW() and i1480u_STORE() | ||
223 | * create adaptors for extracting the 'struct i1480u' from a 'struct | ||
224 | * dev' and calling a function for doing a sysfs operation (as we have | ||
225 | * them factorized already). i1480u_ATTR creates the attribute file | ||
226 | * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a | ||
227 | * class_device_attr_NAME or device_attr_NAME (for group registration). | ||
228 | */ | ||
229 | #include <linux/version.h> | ||
230 | |||
/*
 * i1480u_SHOW(name, fn, param): generate a device_attribute show
 * adaptor that recovers the i1480u instance from the net_device's
 * struct device and forwards to @fn(&i1480u->param, buf).
 */
#define i1480u_SHOW(name, fn, param)				\
static ssize_t i1480u_show_##name(struct device *dev,		\
				  struct device_attribute *attr,\
				  char *buf)			\
{								\
	struct i1480u *i1480u = netdev_priv(to_net_dev(dev));	\
	return fn(&i1480u->param, buf);				\
}

/*
 * i1480u_STORE(name, fn, param): the store-side twin of i1480u_SHOW;
 * forwards to @fn(&i1480u->param, buf, size).
 */
#define i1480u_STORE(name, fn, param)				\
static ssize_t i1480u_store_##name(struct device *dev,		\
				   struct device_attribute *attr,\
				   const char *buf, size_t size)\
{								\
	struct i1480u *i1480u = netdev_priv(to_net_dev(dev));	\
	return fn(&i1480u->param, buf, size);			\
}

/* Declare a read/write attribute backed by the generated adaptors. */
#define i1480u_ATTR(name, perm) static DEVICE_ATTR(name, perm,	\
					i1480u_show_##name,\
					i1480u_store_##name)

/* Declare a read-only attribute (no store adaptor). */
#define i1480u_ATTR_SHOW(name) static DEVICE_ATTR(name,		\
					S_IRUGO,		\
					i1480u_show_##name, NULL)

/* Name of the generated struct device_attribute, for group tables. */
#define i1480u_ATTR_NAME(a) (dev_attr_##a)
258 | |||
259 | |||
260 | /* | ||
261 | * Sysfs adaptors | ||
262 | */ | ||
263 | i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options); | ||
264 | i1480u_STORE(uwb_phy_rate, uwb_phy_rate_store, options); | ||
265 | i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR); | ||
266 | |||
267 | i1480u_SHOW(uwb_rts_cts, uwb_rts_cts_show, options); | ||
268 | i1480u_STORE(uwb_rts_cts, uwb_rts_cts_store, options); | ||
269 | i1480u_ATTR(uwb_rts_cts, S_IRUGO | S_IWUSR); | ||
270 | |||
271 | i1480u_SHOW(uwb_ack_policy, uwb_ack_policy_show, options); | ||
272 | i1480u_STORE(uwb_ack_policy, uwb_ack_policy_store, options); | ||
273 | i1480u_ATTR(uwb_ack_policy, S_IRUGO | S_IWUSR); | ||
274 | |||
275 | i1480u_SHOW(uwb_pca_base_priority, uwb_pca_base_priority_show, options); | ||
276 | i1480u_STORE(uwb_pca_base_priority, uwb_pca_base_priority_store, options); | ||
277 | i1480u_ATTR(uwb_pca_base_priority, S_IRUGO | S_IWUSR); | ||
278 | |||
279 | i1480u_SHOW(wlp_eda, wlp_eda_show, wlp); | ||
280 | i1480u_STORE(wlp_eda, wlp_eda_store, wlp); | ||
281 | i1480u_ATTR(wlp_eda, S_IRUGO | S_IWUSR); | ||
282 | |||
283 | i1480u_SHOW(wlp_uuid, wlp_uuid_show, wlp); | ||
284 | i1480u_STORE(wlp_uuid, wlp_uuid_store, wlp); | ||
285 | i1480u_ATTR(wlp_uuid, S_IRUGO | S_IWUSR); | ||
286 | |||
287 | i1480u_SHOW(wlp_dev_name, wlp_dev_name_show, wlp); | ||
288 | i1480u_STORE(wlp_dev_name, wlp_dev_name_store, wlp); | ||
289 | i1480u_ATTR(wlp_dev_name, S_IRUGO | S_IWUSR); | ||
290 | |||
291 | i1480u_SHOW(wlp_dev_manufacturer, wlp_dev_manufacturer_show, wlp); | ||
292 | i1480u_STORE(wlp_dev_manufacturer, wlp_dev_manufacturer_store, wlp); | ||
293 | i1480u_ATTR(wlp_dev_manufacturer, S_IRUGO | S_IWUSR); | ||
294 | |||
295 | i1480u_SHOW(wlp_dev_model_name, wlp_dev_model_name_show, wlp); | ||
296 | i1480u_STORE(wlp_dev_model_name, wlp_dev_model_name_store, wlp); | ||
297 | i1480u_ATTR(wlp_dev_model_name, S_IRUGO | S_IWUSR); | ||
298 | |||
299 | i1480u_SHOW(wlp_dev_model_nr, wlp_dev_model_nr_show, wlp); | ||
300 | i1480u_STORE(wlp_dev_model_nr, wlp_dev_model_nr_store, wlp); | ||
301 | i1480u_ATTR(wlp_dev_model_nr, S_IRUGO | S_IWUSR); | ||
302 | |||
303 | i1480u_SHOW(wlp_dev_serial, wlp_dev_serial_show, wlp); | ||
304 | i1480u_STORE(wlp_dev_serial, wlp_dev_serial_store, wlp); | ||
305 | i1480u_ATTR(wlp_dev_serial, S_IRUGO | S_IWUSR); | ||
306 | |||
307 | i1480u_SHOW(wlp_dev_prim_category, wlp_dev_prim_category_show, wlp); | ||
308 | i1480u_STORE(wlp_dev_prim_category, wlp_dev_prim_category_store, wlp); | ||
309 | i1480u_ATTR(wlp_dev_prim_category, S_IRUGO | S_IWUSR); | ||
310 | |||
311 | i1480u_SHOW(wlp_dev_prim_OUI, wlp_dev_prim_OUI_show, wlp); | ||
312 | i1480u_STORE(wlp_dev_prim_OUI, wlp_dev_prim_OUI_store, wlp); | ||
313 | i1480u_ATTR(wlp_dev_prim_OUI, S_IRUGO | S_IWUSR); | ||
314 | |||
315 | i1480u_SHOW(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_show, wlp); | ||
316 | i1480u_STORE(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_store, wlp); | ||
317 | i1480u_ATTR(wlp_dev_prim_OUI_sub, S_IRUGO | S_IWUSR); | ||
318 | |||
319 | i1480u_SHOW(wlp_dev_prim_subcat, wlp_dev_prim_subcat_show, wlp); | ||
320 | i1480u_STORE(wlp_dev_prim_subcat, wlp_dev_prim_subcat_store, wlp); | ||
321 | i1480u_ATTR(wlp_dev_prim_subcat, S_IRUGO | S_IWUSR); | ||
322 | |||
323 | i1480u_SHOW(wlp_neighborhood, wlp_neighborhood_show, wlp); | ||
324 | i1480u_ATTR_SHOW(wlp_neighborhood); | ||
325 | |||
326 | i1480u_SHOW(wss_activate, wlp_wss_activate_show, wlp.wss); | ||
327 | i1480u_STORE(wss_activate, wlp_wss_activate_store, wlp.wss); | ||
328 | i1480u_ATTR(wss_activate, S_IRUGO | S_IWUSR); | ||
329 | |||
330 | /* | ||
331 | * Show the (min, max, avg) Line Quality Estimate (LQE, in dB) as over | ||
332 | * the last 256 received WLP frames (ECMA-368 13.3). | ||
333 | * | ||
334 | * [the -7dB that have to be substracted from the LQI to make the LQE | ||
335 | * are already taken into account]. | ||
336 | */ | ||
337 | i1480u_SHOW(wlp_lqe, stats_show, lqe_stats); | ||
338 | i1480u_STORE(wlp_lqe, stats_store, lqe_stats); | ||
339 | i1480u_ATTR(wlp_lqe, S_IRUGO | S_IWUSR); | ||
340 | |||
341 | /* | ||
342 | * Show the Receive Signal Strength Indicator averaged over all the | ||
343 | * received WLP frames (ECMA-368 13.3). Still is not clear what | ||
344 | * this value is, but is kind of a percentage of the signal strength | ||
345 | * at the antenna. | ||
346 | */ | ||
347 | i1480u_SHOW(wlp_rssi, stats_show, rssi_stats); | ||
348 | i1480u_STORE(wlp_rssi, stats_store, rssi_stats); | ||
349 | i1480u_ATTR(wlp_rssi, S_IRUGO | S_IWUSR); | ||
350 | |||
351 | /** | ||
352 | * We maintain a basic flow control counter. "count" how many TX URBs are | ||
353 | * outstanding. Only allow "max" | ||
354 | * TX URBs to be outstanding. If this value is reached the queue will be | ||
355 | * stopped. The queue will be restarted when there are | ||
356 | * "threshold" URBs outstanding. | ||
357 | */ | ||
358 | i1480u_SHOW(wlp_tx_inflight, wlp_tx_inflight_show, tx_inflight); | ||
359 | i1480u_STORE(wlp_tx_inflight, wlp_tx_inflight_store, tx_inflight); | ||
360 | i1480u_ATTR(wlp_tx_inflight, S_IRUGO | S_IWUSR); | ||
361 | |||
/*
 * All WLP/UWB sysfs attributes exposed on the net_device; registered
 * as one anonymous group by i1480u_sysfs_setup().
 */
static struct attribute *i1480u_attrs[] = {
	&i1480u_ATTR_NAME(uwb_phy_rate).attr,
	&i1480u_ATTR_NAME(uwb_rts_cts).attr,
	&i1480u_ATTR_NAME(uwb_ack_policy).attr,
	&i1480u_ATTR_NAME(uwb_pca_base_priority).attr,
	&i1480u_ATTR_NAME(wlp_lqe).attr,
	&i1480u_ATTR_NAME(wlp_rssi).attr,
	&i1480u_ATTR_NAME(wlp_eda).attr,
	&i1480u_ATTR_NAME(wlp_uuid).attr,
	&i1480u_ATTR_NAME(wlp_dev_name).attr,
	&i1480u_ATTR_NAME(wlp_dev_manufacturer).attr,
	&i1480u_ATTR_NAME(wlp_dev_model_name).attr,
	&i1480u_ATTR_NAME(wlp_dev_model_nr).attr,
	&i1480u_ATTR_NAME(wlp_dev_serial).attr,
	&i1480u_ATTR_NAME(wlp_dev_prim_category).attr,
	&i1480u_ATTR_NAME(wlp_dev_prim_OUI).attr,
	&i1480u_ATTR_NAME(wlp_dev_prim_OUI_sub).attr,
	&i1480u_ATTR_NAME(wlp_dev_prim_subcat).attr,
	&i1480u_ATTR_NAME(wlp_neighborhood).attr,
	&i1480u_ATTR_NAME(wss_activate).attr,
	&i1480u_ATTR_NAME(wlp_tx_inflight).attr,
	NULL,
};

static struct attribute_group i1480u_attr_group = {
	.name = NULL,	/* we want them in the same directory */
	.attrs = i1480u_attrs,
};
390 | |||
391 | int i1480u_sysfs_setup(struct i1480u *i1480u) | ||
392 | { | ||
393 | int result; | ||
394 | struct device *dev = &i1480u->usb_iface->dev; | ||
395 | result = sysfs_create_group(&i1480u->net_dev->dev.kobj, | ||
396 | &i1480u_attr_group); | ||
397 | if (result < 0) | ||
398 | dev_err(dev, "cannot initialize sysfs attributes: %d\n", | ||
399 | result); | ||
400 | return result; | ||
401 | } | ||
402 | |||
403 | |||
404 | void i1480u_sysfs_release(struct i1480u *i1480u) | ||
405 | { | ||
406 | sysfs_remove_group(&i1480u->net_dev->dev.kobj, | ||
407 | &i1480u_attr_group); | ||
408 | } | ||
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c new file mode 100644 index 000000000000..3426bfb68240 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/tx.c | |||
@@ -0,0 +1,632 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: WLP interface | ||
3 | * Deal with TX (massaging data to transmit, handling it) | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * Transmission engine. Get an skb, create from that a WLP transmit | ||
24 | * context, add a WLP TX header (which we keep prefilled in the | ||
25 | * device's instance), fill out the target-specific fields and | ||
26 | * fire it. | ||
27 | * | ||
28 | * ROADMAP: | ||
29 | * | ||
30 | * Entry points: | ||
31 | * | ||
32 | * i1480u_tx_release(): called by i1480u_disconnect() to release | ||
33 | * pending tx contexts. | ||
34 | * | ||
35 | * i1480u_tx_cb(): callback for TX contexts (USB URBs) | ||
36 | * i1480u_tx_destroy(): | ||
37 | * | ||
38 | * i1480u_tx_timeout(): called for timeout handling from the | ||
39 | * network stack. | ||
40 | * | ||
41 | * i1480u_hard_start_xmit(): called for transmitting an skb from | ||
42 | * the network stack. Will interact with WLP | ||
43 | * substack to verify and prepare frame. | ||
44 | * i1480u_xmit_frame(): actual transmission on hardware | ||
45 | * | ||
46 | * i1480u_tx_create() Creates TX context | ||
47 | * i1480u_tx_create_1() For packets in 1 fragment | ||
48 | * i1480u_tx_create_n() For packets in >1 fragments | ||
49 | * | ||
50 | * TODO: | ||
51 | * | ||
52 | * - FIXME: rewrite using usb_sg_*(), add asynch support to | ||
53 | * usb_sg_*(). It might not make too much sense as most of | ||
54 | * the times the MTU will be smaller than one page... | ||
55 | */ | ||
56 | |||
57 | #include "i1480u-wlp.h" | ||
58 | #define D_LOCAL 5 | ||
59 | #include <linux/uwb/debug.h> | ||
60 | |||
enum {
	/*
	 * Maximum payload that fits in a Next/Last TX fragment: the
	 * full fragment size minus the small "rest" header (first
	 * fragments carry bigger headers and are sized separately).
	 */
	/* This is only for Next and Last TX packets */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
66 | |||
67 | /** Free resources allocated to a i1480u tx context. */ | ||
68 | static | ||
69 | void i1480u_tx_free(struct i1480u_tx *wtx) | ||
70 | { | ||
71 | kfree(wtx->buf); | ||
72 | if (wtx->skb) | ||
73 | dev_kfree_skb_irq(wtx->skb); | ||
74 | usb_free_urb(wtx->urb); | ||
75 | kfree(wtx); | ||
76 | } | ||
77 | |||
/*
 * Unlink a TX context from the device's pending list and free it.
 *
 * Both the list_del() and the free happen under tx_list_lock so the
 * context cannot be reached (e.g. by i1480u_tx_unlink_urbs()) while it
 * is being torn down.
 */
static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}
87 | |||
88 | static | ||
89 | void i1480u_tx_unlink_urbs(struct i1480u *i1480u) | ||
90 | { | ||
91 | unsigned long flags; | ||
92 | struct i1480u_tx *wtx, *next; | ||
93 | |||
94 | spin_lock_irqsave(&i1480u->tx_list_lock, flags); | ||
95 | list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) { | ||
96 | usb_unlink_urb(wtx->urb); | ||
97 | } | ||
98 | spin_unlock_irqrestore(&i1480u->tx_list_lock, flags); | ||
99 | } | ||
100 | |||
101 | |||
/**
 * Callback for a completed tx USB URB.
 *
 * On success, account tx_packets/tx_bytes under i1480u->lock. On
 * controlled teardown (-ECONNRESET/-ENOENT/-ESHUTDOWN) just stop the
 * network queue. On any other status, bump the error-density counter
 * and, if too many errors accumulated within the timeframe, unlink all
 * pending TX URBs and reset the whole WLP stack.
 *
 * In every case the TX context is destroyed here; afterwards the
 * in-flight count is decremented and, if it dropped to the flow-control
 * threshold while the queue was stopped, the queue is restarted and the
 * restart statistic bumped.
 *
 * TODO:
 *
 * - FIXME: recover errors more gracefully
 * - FIXME: handle NAKs (I dont think they come here) for flow ctl
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&i1480u->lock, flags);
		i1480u->stats.tx_packets++;
		i1480u->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "notif endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
					EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded."
					"Reset device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	/* Basic flow control: restart the queue once we drop back to the
	 * threshold (threshold == 0 means flow control is disabled). */
	if (atomic_dec_return(&i1480u->tx_inflight.count)
	    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		if (d_test(2) && printk_ratelimit())
			d_printf(2, dev, "Restart queue. \n");
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
	return;
}
159 | |||
160 | |||
/**
 * Given a buffer that doesn't fit in a single fragment, create an
 * scatter/gather structure for delivery to the USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx: tx descriptor
 * @skb: skb to send
 * @gfp_mask: gfp allocation mask
 * @returns: 0 if ok, <0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment 3
 *   last fragment data   /
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl;
	size_t pl_size;

	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;
	pl = skb->data;
	pl_itr = pl;
	pl_size = skb->len;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the headers */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	/* Caller guarantees the skb needs fragmenting, so the first
	 * fragment is always full. */
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have an smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc..
	 */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;

	buf_itr = wtx->buf;	/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	/* hdr.len covers the whole payload + i1480 TX header; the
	 * per-fragment length only covers this first chunk. */
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info; filled in later by the caller via
	 * wtx->wlp_tx_hdr. */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;

	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n",
			 pl_size_left, buf_itr - wtx->buf);
		/* Defensive overflow check against the size computed
		 * above; should never trigger. */
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n",
			 pl_size_left, buf_itr - wtx->buf);
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		/* Middle fragments are full-size NXT; the final one is
		 * whatever remains, tagged LST. */
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		d_printf(5, NULL,
			 "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
		d_printf(5, NULL,
			 "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
	}
	/* Success: the data now lives in wtx->buf, the skb is no
	 * longer needed. */
	dev_kfree_skb_irq(skb);
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);

	/* NOTE(review): wtx->buf is freed but not NULLed; the caller's
	 * error path must not free it again -- TODO confirm. */
	kfree(wtx->buf);
error_buf_alloc:
	return result;
}
309 | |||
310 | |||
/**
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx: tx descriptor to fill (wtx->buf is not used for the
 *       single-fragment case; the skb itself is the transfer buffer)
 * @skb: skb holding the payload; two headers are pushed in front of
 *       its data (WLP TX header first, then the UNTD "complete"
 *       header), so callers must have reserved enough headroom
 * @gfp_mask: unused here (no allocation needed); kept for symmetry
 *            with i1480u_tx_create_n()
 * @returns: always 0 (cannot fail)
 *
 * This function does not consume the @skb.
 */
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	wtx->buf = NULL;
	wtx->skb = skb;
	/* Push the i1480 TX header in front of the payload... */
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	/* ...then the UNTD "complete packet" header in front of that. */
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	/* len counts everything after the UNTD header itself. */
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}
347 | |||
348 | |||
349 | /** | ||
350 | * Given a skb to transmit, massage it to become palatable for the TX pipe | ||
351 | * | ||
352 | * This will break the buffer in chunks smaller than | ||
353 | * i1480u_MAX_FRG_SIZE and add proper headers to each. | ||
354 | * | ||
355 | * 1st header \ | ||
356 | * i1480 tx header | fragment 1 | ||
357 | * fragment data / | ||
358 | * nxt header \ fragment 2 | ||
359 | * fragment data / | ||
360 | * .. | ||
361 | * .. | ||
362 | * last header \ fragment 3 | ||
363 | * last fragment data / | ||
364 | * | ||
365 | * Each fragment will be always smaller or equal to i1480u_MAX_FRG_SIZE. | ||
366 | * | ||
367 | * If the first fragment is smaller than i1480u_MAX_FRG_SIZE, then the | ||
368 | * following is composed: | ||
369 | * | ||
370 | * complete header \ | ||
371 | * i1480 tx header | single fragment | ||
372 | * packet data / | ||
373 | * | ||
374 | * We were going to use s/g support, but because the interface is | ||
375 | * synch and at the end there is plenty of overhead to do it, it | ||
376 | * didn't seem that worth for data that is going to be smaller than | ||
377 | * one page. | ||
378 | */ | ||
379 | static | ||
380 | struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u, | ||
381 | struct sk_buff *skb, gfp_t gfp_mask) | ||
382 | { | ||
383 | int result; | ||
384 | struct usb_endpoint_descriptor *epd; | ||
385 | int usb_pipe; | ||
386 | unsigned long flags; | ||
387 | |||
388 | struct i1480u_tx *wtx; | ||
389 | const size_t pl_max_size = | ||
390 | i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp) | ||
391 | - sizeof(struct wlp_tx_hdr); | ||
392 | |||
393 | wtx = kmalloc(sizeof(*wtx), gfp_mask); | ||
394 | if (wtx == NULL) | ||
395 | goto error_wtx_alloc; | ||
396 | wtx->urb = usb_alloc_urb(0, gfp_mask); | ||
397 | if (wtx->urb == NULL) | ||
398 | goto error_urb_alloc; | ||
399 | epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc; | ||
400 | usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress); | ||
401 | /* Fits in a single complete packet or need to split? */ | ||
402 | if (skb->len > pl_max_size) { | ||
403 | result = i1480u_tx_create_n(wtx, skb, gfp_mask); | ||
404 | if (result < 0) | ||
405 | goto error_create; | ||
406 | usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe, | ||
407 | wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx); | ||
408 | } else { | ||
409 | result = i1480u_tx_create_1(wtx, skb, gfp_mask); | ||
410 | if (result < 0) | ||
411 | goto error_create; | ||
412 | usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe, | ||
413 | skb->data, skb->len, i1480u_tx_cb, wtx); | ||
414 | } | ||
415 | spin_lock_irqsave(&i1480u->tx_list_lock, flags); | ||
416 | list_add(&wtx->list_node, &i1480u->tx_list); | ||
417 | spin_unlock_irqrestore(&i1480u->tx_list_lock, flags); | ||
418 | return wtx; | ||
419 | |||
420 | error_create: | ||
421 | kfree(wtx->urb); | ||
422 | error_urb_alloc: | ||
423 | kfree(wtx); | ||
424 | error_wtx_alloc: | ||
425 | return NULL; | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * Actual fragmentation and transmission of frame | ||
430 | * | ||
431 | * @wlp: WLP substack data structure | ||
432 | * @skb: To be transmitted | ||
433 | * @dst: Device address of destination | ||
434 | * @returns: 0 on success, <0 on failure | ||
435 | * | ||
436 | * This function can also be called directly (not just from | ||
437 | * hard_start_xmit), so we also check here if the interface is up before | ||
438 | * taking sending anything. | ||
439 | */ | ||
440 | int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | ||
441 | struct uwb_dev_addr *dst) | ||
442 | { | ||
443 | int result = -ENXIO; | ||
444 | struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp); | ||
445 | struct device *dev = &i1480u->usb_iface->dev; | ||
446 | struct net_device *net_dev = i1480u->net_dev; | ||
447 | struct i1480u_tx *wtx; | ||
448 | struct wlp_tx_hdr *wlp_tx_hdr; | ||
449 | static unsigned char dev_bcast[2] = { 0xff, 0xff }; | ||
450 | #if 0 | ||
451 | int lockup = 50; | ||
452 | #endif | ||
453 | |||
454 | d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, | ||
455 | net_dev); | ||
456 | BUG_ON(i1480u->wlp.rc == NULL); | ||
457 | if ((net_dev->flags & IFF_UP) == 0) | ||
458 | goto out; | ||
459 | result = -EBUSY; | ||
460 | if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { | ||
461 | if (d_test(2) && printk_ratelimit()) | ||
462 | d_printf(2, dev, "Max frames in flight " | ||
463 | "stopping queue.\n"); | ||
464 | netif_stop_queue(net_dev); | ||
465 | goto error_max_inflight; | ||
466 | } | ||
467 | result = -ENOMEM; | ||
468 | wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC); | ||
469 | if (unlikely(wtx == NULL)) { | ||
470 | if (printk_ratelimit()) | ||
471 | dev_err(dev, "TX: no memory for WLP TX URB," | ||
472 | "dropping packet (in flight %d)\n", | ||
473 | atomic_read(&i1480u->tx_inflight.count)); | ||
474 | netif_stop_queue(net_dev); | ||
475 | goto error_wtx_alloc; | ||
476 | } | ||
477 | wtx->i1480u = i1480u; | ||
478 | /* Fill out the i1480 header; @i1480u->def_tx_hdr read without | ||
479 | * locking. We do so because they are kind of orthogonal to | ||
480 | * each other (and thus not changed in an atomic batch). | ||
481 | * The ETH header is right after the WLP TX header. */ | ||
482 | wlp_tx_hdr = wtx->wlp_tx_hdr; | ||
483 | *wlp_tx_hdr = i1480u->options.def_tx_hdr; | ||
484 | wlp_tx_hdr->dstaddr = *dst; | ||
485 | if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast)) | ||
486 | && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) { | ||
487 | /*Broadcast message directed to DRP host. Send as best effort | ||
488 | * on PCA. */ | ||
489 | wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); | ||
490 | } | ||
491 | |||
492 | #if 0 | ||
493 | dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len); | ||
494 | dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
495 | #endif | ||
496 | #if 0 | ||
497 | /* simulates a device lockup after every lockup# packets */ | ||
498 | if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) { | ||
499 | /* Simulate a dropped transmit interrupt */ | ||
500 | net_dev->trans_start = jiffies; | ||
501 | netif_stop_queue(net_dev); | ||
502 | dev_err(dev, "Simulate lockup at %ld\n", jiffies); | ||
503 | return result; | ||
504 | } | ||
505 | #endif | ||
506 | |||
507 | result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ | ||
508 | if (result < 0) { | ||
509 | dev_err(dev, "TX: cannot submit URB: %d\n", result); | ||
510 | /* We leave the freeing of skb to calling function */ | ||
511 | wtx->skb = NULL; | ||
512 | goto error_tx_urb_submit; | ||
513 | } | ||
514 | atomic_inc(&i1480u->tx_inflight.count); | ||
515 | net_dev->trans_start = jiffies; | ||
516 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
517 | net_dev, result); | ||
518 | return result; | ||
519 | |||
520 | error_tx_urb_submit: | ||
521 | i1480u_tx_destroy(i1480u, wtx); | ||
522 | error_wtx_alloc: | ||
523 | error_max_inflight: | ||
524 | out: | ||
525 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
526 | net_dev, result); | ||
527 | return result; | ||
528 | } | ||
529 | |||
530 | |||
531 | /** | ||
532 | * Transmit an skb Called when an skbuf has to be transmitted | ||
533 | * | ||
534 | * The skb is first passed to WLP substack to ensure this is a valid | ||
535 | * frame. If valid the device address of destination will be filled and | ||
536 | * the WLP header prepended to the skb. If this step fails we fake sending | ||
537 | * the frame, if we return an error the network stack will just keep trying. | ||
538 | * | ||
539 | * Broadcast frames inside a WSS needs to be treated special as multicast is | ||
540 | * not supported. A broadcast frame is sent as unicast to each member of the | ||
541 | * WSS - this is done by the WLP substack when it finds a broadcast frame. | ||
542 | * So, we test if the WLP substack took over the skb and only transmit it | ||
543 | * if it has not (been taken over). | ||
544 | * | ||
545 | * @net_dev->xmit_lock is held | ||
546 | */ | ||
547 | int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | ||
548 | { | ||
549 | int result; | ||
550 | struct i1480u *i1480u = netdev_priv(net_dev); | ||
551 | struct device *dev = &i1480u->usb_iface->dev; | ||
552 | struct uwb_dev_addr dst; | ||
553 | |||
554 | d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, | ||
555 | net_dev); | ||
556 | BUG_ON(i1480u->wlp.rc == NULL); | ||
557 | if ((net_dev->flags & IFF_UP) == 0) | ||
558 | goto error; | ||
559 | result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); | ||
560 | if (result < 0) { | ||
561 | dev_err(dev, "WLP verification of TX frame failed (%d). " | ||
562 | "Dropping packet.\n", result); | ||
563 | goto error; | ||
564 | } else if (result == 1) { | ||
565 | d_printf(6, dev, "WLP will transmit frame. \n"); | ||
566 | /* trans_start time will be set when WLP actually transmits | ||
567 | * the frame */ | ||
568 | goto out; | ||
569 | } | ||
570 | d_printf(6, dev, "Transmitting frame. \n"); | ||
571 | result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); | ||
572 | if (result < 0) { | ||
573 | dev_err(dev, "Frame TX failed (%d).\n", result); | ||
574 | goto error; | ||
575 | } | ||
576 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
577 | net_dev, result); | ||
578 | return NETDEV_TX_OK; | ||
579 | error: | ||
580 | dev_kfree_skb_any(skb); | ||
581 | i1480u->stats.tx_dropped++; | ||
582 | out: | ||
583 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
584 | net_dev, result); | ||
585 | return NETDEV_TX_OK; | ||
586 | } | ||
587 | |||
588 | |||
589 | /** | ||
590 | * Called when a pkt transmission doesn't complete in a reasonable period | ||
591 | * Device reset may sleep - do it outside of interrupt context (delayed) | ||
592 | */ | ||
593 | void i1480u_tx_timeout(struct net_device *net_dev) | ||
594 | { | ||
595 | struct i1480u *i1480u = netdev_priv(net_dev); | ||
596 | |||
597 | wlp_reset_all(&i1480u->wlp); | ||
598 | } | ||
599 | |||
600 | |||
/*
 * Unlink all pending TX URBs and wait until their completion
 * callbacks have removed them from @i1480u->tx_list.
 */
void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count*10; /* wait 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list
	 * so can wait for it to be empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		/* If the budget runs out the completions are stuck; give up */
		BUG_ON(count == 0);
		msleep(20);
	}
}
diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c new file mode 100644 index 000000000000..cf6f3d152b9d --- /dev/null +++ b/drivers/uwb/ie.c | |||
@@ -0,0 +1,541 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Information Element Handling | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * Reinette Chatre <reinette.chatre@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * FIXME: docs | ||
25 | */ | ||
26 | |||
27 | #include "uwb-internal.h" | ||
28 | #define D_LOCAL 0 | ||
29 | #include <linux/uwb/debug.h> | ||
30 | |||
31 | /** | ||
32 | * uwb_ie_next - get the next IE in a buffer | ||
33 | * @ptr: start of the buffer containing the IE data | ||
34 | * @len: length of the buffer | ||
35 | * | ||
36 | * Both @ptr and @len are updated so subsequent calls to uwb_ie_next() | ||
37 | * will get the next IE. | ||
38 | * | ||
39 | * NULL is returned (and @ptr and @len will not be updated) if there | ||
40 | * are no more IEs in the buffer or the buffer is too short. | ||
41 | */ | ||
42 | struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len) | ||
43 | { | ||
44 | struct uwb_ie_hdr *hdr; | ||
45 | size_t ie_len; | ||
46 | |||
47 | if (*len < sizeof(struct uwb_ie_hdr)) | ||
48 | return NULL; | ||
49 | |||
50 | hdr = *ptr; | ||
51 | ie_len = sizeof(struct uwb_ie_hdr) + hdr->length; | ||
52 | |||
53 | if (*len < ie_len) | ||
54 | return NULL; | ||
55 | |||
56 | *ptr += ie_len; | ||
57 | *len -= ie_len; | ||
58 | |||
59 | return hdr; | ||
60 | } | ||
61 | EXPORT_SYMBOL_GPL(uwb_ie_next); | ||
62 | |||
63 | /** | ||
64 | * Get the IEs that a radio controller is sending in its beacon | ||
65 | * | ||
66 | * @uwb_rc: UWB Radio Controller | ||
67 | * @returns: Size read from the system | ||
68 | * | ||
69 | * We don't need to lock the uwb_rc's mutex because we don't modify | ||
70 | * anything. Once done with the iedata buffer, call | ||
71 | * uwb_rc_ie_release(iedata). Don't call kfree on it. | ||
72 | */ | ||
73 | ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) | ||
74 | { | ||
75 | ssize_t result; | ||
76 | struct device *dev = &uwb_rc->uwb_dev.dev; | ||
77 | struct uwb_rccb *cmd = NULL; | ||
78 | struct uwb_rceb *reply = NULL; | ||
79 | struct uwb_rc_evt_get_ie *get_ie; | ||
80 | |||
81 | d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie); | ||
82 | result = -ENOMEM; | ||
83 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | ||
84 | if (cmd == NULL) | ||
85 | goto error_kzalloc; | ||
86 | cmd->bCommandType = UWB_RC_CET_GENERAL; | ||
87 | cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); | ||
88 | result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), | ||
89 | UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, | ||
90 | &reply); | ||
91 | if (result < 0) | ||
92 | goto error_cmd; | ||
93 | get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); | ||
94 | if (result < sizeof(*get_ie)) { | ||
95 | dev_err(dev, "not enough data returned for decoding GET IE " | ||
96 | "(%zu bytes received vs %zu needed)\n", | ||
97 | result, sizeof(*get_ie)); | ||
98 | result = -EINVAL; | ||
99 | } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { | ||
100 | dev_err(dev, "not enough data returned for decoding GET IE " | ||
101 | "payload (%zu bytes received vs %zu needed)\n", result, | ||
102 | sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); | ||
103 | result = -EINVAL; | ||
104 | } else | ||
105 | *pget_ie = get_ie; | ||
106 | error_cmd: | ||
107 | kfree(cmd); | ||
108 | error_kzalloc: | ||
109 | d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result); | ||
110 | return result; | ||
111 | } | ||
112 | EXPORT_SYMBOL_GPL(uwb_rc_get_ie); | ||
113 | |||
114 | |||
115 | /* | ||
116 | * Given a pointer to an IE, print it in ASCII/hex followed by a new line | ||
117 | * | ||
118 | * @ie_hdr: pointer to the IE header. Length is in there, and it is | ||
119 | * guaranteed that the ie_hdr->length bytes following it are | ||
120 | * safely accesible. | ||
121 | * | ||
122 | * @_data: context data passed from uwb_ie_for_each(), an struct output_ctx | ||
123 | */ | ||
124 | int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, | ||
125 | size_t offset, void *_ctx) | ||
126 | { | ||
127 | struct uwb_buf_ctx *ctx = _ctx; | ||
128 | const u8 *pl = (void *)(ie_hdr + 1); | ||
129 | u8 pl_itr; | ||
130 | |||
131 | ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes, | ||
132 | "%02x %02x ", (unsigned) ie_hdr->element_id, | ||
133 | (unsigned) ie_hdr->length); | ||
134 | pl_itr = 0; | ||
135 | while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size) | ||
136 | ctx->bytes += scnprintf(ctx->buf + ctx->bytes, | ||
137 | ctx->size - ctx->bytes, | ||
138 | "%02x ", (unsigned) pl[pl_itr++]); | ||
139 | if (ctx->bytes < ctx->size) | ||
140 | ctx->buf[ctx->bytes++] = '\n'; | ||
141 | return 0; | ||
142 | } | ||
143 | EXPORT_SYMBOL_GPL(uwb_ie_dump_hex); | ||
144 | |||
145 | |||
146 | /** | ||
147 | * Verify that a pointer in a buffer points to valid IE | ||
148 | * | ||
149 | * @start: pointer to start of buffer in which IE appears | ||
150 | * @itr: pointer to IE inside buffer that will be verified | ||
151 | * @top: pointer to end of buffer | ||
152 | * | ||
153 | * @returns: 0 if IE is valid, <0 otherwise | ||
154 | * | ||
155 | * Verification involves checking that the buffer can contain a | ||
156 | * header and the amount of data reported in the IE header can be found in | ||
157 | * the buffer. | ||
158 | */ | ||
159 | static | ||
160 | int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start, | ||
161 | const void *itr, const void *top) | ||
162 | { | ||
163 | struct device *dev = &uwb_dev->dev; | ||
164 | const struct uwb_ie_hdr *ie_hdr; | ||
165 | |||
166 | if (top - itr < sizeof(*ie_hdr)) { | ||
167 | dev_err(dev, "Bad IE: no data to decode header " | ||
168 | "(%zu bytes left vs %zu needed) at offset %zu\n", | ||
169 | top - itr, sizeof(*ie_hdr), itr - start); | ||
170 | return -EINVAL; | ||
171 | } | ||
172 | ie_hdr = itr; | ||
173 | itr += sizeof(*ie_hdr); | ||
174 | if (top - itr < ie_hdr->length) { | ||
175 | dev_err(dev, "Bad IE: not enough data for payload " | ||
176 | "(%zu bytes left vs %zu needed) at offset %zu\n", | ||
177 | top - itr, (size_t)ie_hdr->length, | ||
178 | (void *)ie_hdr - start); | ||
179 | return -EINVAL; | ||
180 | } | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | |||
185 | /** | ||
186 | * Walk a buffer filled with consecutive IE's a buffer | ||
187 | * | ||
188 | * @uwb_dev: UWB device this IEs belong to (for err messages mainly) | ||
189 | * | ||
190 | * @fn: function to call with each IE; if it returns 0, we keep | ||
191 | * traversing the buffer. If it returns !0, we'll stop and return | ||
192 | * that value. | ||
193 | * | ||
194 | * @data: pointer passed to @fn | ||
195 | * | ||
196 | * @buf: buffer where the consecutive IEs are located | ||
197 | * | ||
198 | * @size: size of @buf | ||
199 | * | ||
200 | * Each IE is checked for basic correctness (there is space left for | ||
201 | * the header and the payload). If that test is failed, we stop | ||
202 | * processing. For every good IE, @fn is called. | ||
203 | */ | ||
204 | ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, | ||
205 | const void *buf, size_t size) | ||
206 | { | ||
207 | ssize_t result = 0; | ||
208 | const struct uwb_ie_hdr *ie_hdr; | ||
209 | const void *itr = buf, *top = itr + size; | ||
210 | |||
211 | while (itr < top) { | ||
212 | if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0) | ||
213 | break; | ||
214 | ie_hdr = itr; | ||
215 | itr += sizeof(*ie_hdr) + ie_hdr->length; | ||
216 | result = fn(uwb_dev, ie_hdr, itr - buf, data); | ||
217 | if (result != 0) | ||
218 | break; | ||
219 | } | ||
220 | return result; | ||
221 | } | ||
222 | EXPORT_SYMBOL_GPL(uwb_ie_for_each); | ||
223 | |||
224 | |||
225 | /** | ||
226 | * Replace all IEs currently being transmitted by a device | ||
227 | * | ||
228 | * @cmd: pointer to the SET-IE command with the IEs to set | ||
229 | * @size: size of @buf | ||
230 | */ | ||
231 | int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd) | ||
232 | { | ||
233 | int result; | ||
234 | struct device *dev = &rc->uwb_dev.dev; | ||
235 | struct uwb_rc_evt_set_ie reply; | ||
236 | |||
237 | reply.rceb.bEventType = UWB_RC_CET_GENERAL; | ||
238 | reply.rceb.wEvent = UWB_RC_CMD_SET_IE; | ||
239 | result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb, | ||
240 | sizeof(*cmd) + le16_to_cpu(cmd->wIELength), | ||
241 | &reply.rceb, sizeof(reply)); | ||
242 | if (result < 0) | ||
243 | goto error_cmd; | ||
244 | else if (result != sizeof(reply)) { | ||
245 | dev_err(dev, "SET-IE: not enough data to decode reply " | ||
246 | "(%d bytes received vs %zu needed)\n", | ||
247 | result, sizeof(reply)); | ||
248 | result = -EIO; | ||
249 | } else if (reply.bResultCode != UWB_RC_RES_SUCCESS) { | ||
250 | dev_err(dev, "SET-IE: command execution failed: %s (%d)\n", | ||
251 | uwb_rc_strerror(reply.bResultCode), reply.bResultCode); | ||
252 | result = -EIO; | ||
253 | } else | ||
254 | result = 0; | ||
255 | error_cmd: | ||
256 | return result; | ||
257 | } | ||
258 | |||
259 | /** | ||
260 | * Determine by IE id if IE is host settable | ||
261 | * WUSB 1.0 [8.6.2.8 Table 8.85] | ||
262 | * | ||
263 | * EXCEPTION: | ||
264 | * All but UWB_IE_WLP appears in Table 8.85 from WUSB 1.0. Setting this IE | ||
265 | * is required for the WLP substack to perform association with its WSS so | ||
266 | * we hope that the WUSB spec will be changed to reflect this. | ||
267 | */ | ||
268 | static | ||
269 | int uwb_rc_ie_is_host_settable(enum uwb_ie element_id) | ||
270 | { | ||
271 | if (element_id == UWB_PCA_AVAILABILITY || | ||
272 | element_id == UWB_BP_SWITCH_IE || | ||
273 | element_id == UWB_MAC_CAPABILITIES_IE || | ||
274 | element_id == UWB_PHY_CAPABILITIES_IE || | ||
275 | element_id == UWB_APP_SPEC_PROBE_IE || | ||
276 | element_id == UWB_IDENTIFICATION_IE || | ||
277 | element_id == UWB_MASTER_KEY_ID_IE || | ||
278 | element_id == UWB_IE_WLP || | ||
279 | element_id == UWB_APP_SPEC_IE) | ||
280 | return 1; | ||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | |||
285 | /** | ||
286 | * Extract Host Settable IEs from IE | ||
287 | * | ||
288 | * @ie_data: pointer to buffer containing all IEs | ||
289 | * @size: size of buffer | ||
290 | * | ||
291 | * @returns: length of buffer that only includes host settable IEs | ||
292 | * | ||
293 | * Given a buffer of IEs we move all Host Settable IEs to front of buffer | ||
294 | * by overwriting the IEs that are not Host Settable. | ||
295 | * Buffer length is adjusted accordingly. | ||
296 | */ | ||
297 | static | ||
298 | ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev, | ||
299 | void *ie_data, size_t size) | ||
300 | { | ||
301 | size_t new_len = size; | ||
302 | struct uwb_ie_hdr *ie_hdr; | ||
303 | size_t ie_length; | ||
304 | void *itr = ie_data, *top = itr + size; | ||
305 | |||
306 | while (itr < top) { | ||
307 | if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0) | ||
308 | break; | ||
309 | ie_hdr = itr; | ||
310 | ie_length = sizeof(*ie_hdr) + ie_hdr->length; | ||
311 | if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) { | ||
312 | itr += ie_length; | ||
313 | } else { | ||
314 | memmove(itr, itr + ie_length, top - (itr + ie_length)); | ||
315 | new_len -= ie_length; | ||
316 | top -= ie_length; | ||
317 | } | ||
318 | } | ||
319 | return new_len; | ||
320 | } | ||
321 | |||
322 | |||
/* Initialize the IE management subsystem (just the cache mutex for now) */
void uwb_rc_ie_init(struct uwb_rc *uwb_rc)
{
	mutex_init(&uwb_rc->ies_mutex);
}
328 | |||
329 | |||
330 | /** | ||
331 | * Set up cache for host settable IEs currently being transmitted | ||
332 | * | ||
333 | * First we just call GET-IE to get the current IEs being transmitted | ||
334 | * (or we workaround and pretend we did) and (because the format is | ||
335 | * the same) reuse that as the IE cache (with the command prefix, as | ||
336 | * explained in 'struct uwb_rc'). | ||
337 | * | ||
338 | * @returns: size of cache created | ||
339 | */ | ||
340 | ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc) | ||
341 | { | ||
342 | struct device *dev = &uwb_rc->uwb_dev.dev; | ||
343 | ssize_t result; | ||
344 | size_t capacity; | ||
345 | struct uwb_rc_evt_get_ie *ie_info; | ||
346 | |||
347 | d_fnstart(3, dev, "(%p)\n", uwb_rc); | ||
348 | mutex_lock(&uwb_rc->ies_mutex); | ||
349 | result = uwb_rc_get_ie(uwb_rc, &ie_info); | ||
350 | if (result < 0) | ||
351 | goto error_get_ie; | ||
352 | capacity = result; | ||
353 | d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result, | ||
354 | (size_t)le16_to_cpu(ie_info->wIELength), ie_info); | ||
355 | |||
356 | /* Remove IEs that host should not set. */ | ||
357 | result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev, | ||
358 | ie_info->IEData, le16_to_cpu(ie_info->wIELength)); | ||
359 | if (result < 0) | ||
360 | goto error_parse; | ||
361 | d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result); | ||
362 | uwb_rc->ies = (void *) ie_info; | ||
363 | uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; | ||
364 | uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); | ||
365 | uwb_rc->ies_capacity = capacity; | ||
366 | d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n", | ||
367 | ie_info, result, capacity); | ||
368 | result = 0; | ||
369 | error_parse: | ||
370 | error_get_ie: | ||
371 | mutex_unlock(&uwb_rc->ies_mutex); | ||
372 | d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result); | ||
373 | return result; | ||
374 | } | ||
375 | |||
376 | |||
/* Cleanup the whole IE management subsystem (frees the IE cache) */
void uwb_rc_ie_release(struct uwb_rc *uwb_rc)
{
	kfree(uwb_rc->ies);
	uwb_rc->ies = NULL;
	uwb_rc->ies_capacity = 0;
}
384 | |||
385 | |||
386 | static | ||
387 | int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, | ||
388 | size_t offset, void *_ctx) | ||
389 | { | ||
390 | size_t *acc_size = _ctx; | ||
391 | *acc_size += sizeof(*ie_hdr) + ie_hdr->length; | ||
392 | d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size); | ||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | |||
397 | /** | ||
398 | * Add a new IE to IEs currently being transmitted by device | ||
399 | * | ||
400 | * @ies: the buffer containing the new IE or IEs to be added to | ||
401 | * the device's beacon. The buffer will be verified for | ||
402 | * consistence (meaning the headers should be right) and | ||
403 | * consistent with the buffer size. | ||
404 | * @size: size of @ies (in bytes, total buffer size) | ||
405 | * @returns: 0 if ok, <0 errno code on error | ||
406 | * | ||
407 | * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB | ||
408 | * after the device sent the first beacon that includes the IEs specified | ||
409 | * in the SET IE command. We thus cannot send this command if the device is | ||
410 | * not beaconing. Instead, a SET IE command will be sent later right after | ||
411 | * we start beaconing. | ||
412 | * | ||
413 | * Setting an IE on the device will overwrite all current IEs in device. So | ||
414 | * we take the current IEs being transmitted by the device, append the | ||
415 | * new one, and call SET IE with all the IEs needed. | ||
416 | * | ||
417 | * The local IE cache will only be updated with the new IE if SET IE | ||
418 | * completed successfully. | ||
419 | */ | ||
420 | int uwb_rc_ie_add(struct uwb_rc *uwb_rc, | ||
421 | const struct uwb_ie_hdr *ies, size_t size) | ||
422 | { | ||
423 | int result = 0; | ||
424 | struct device *dev = &uwb_rc->uwb_dev.dev; | ||
425 | struct uwb_rc_cmd_set_ie *new_ies; | ||
426 | size_t ies_size, total_size, acc_size = 0; | ||
427 | |||
428 | if (uwb_rc->ies == NULL) | ||
429 | return -ESHUTDOWN; | ||
430 | uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size); | ||
431 | if (acc_size != size) { | ||
432 | dev_err(dev, "BUG: bad IEs, misconstructed headers " | ||
433 | "[%zu bytes reported vs %zu calculated]\n", | ||
434 | size, acc_size); | ||
435 | WARN_ON(1); | ||
436 | return -EINVAL; | ||
437 | } | ||
438 | mutex_lock(&uwb_rc->ies_mutex); | ||
439 | ies_size = le16_to_cpu(uwb_rc->ies->wIELength); | ||
440 | total_size = sizeof(*uwb_rc->ies) + ies_size; | ||
441 | if (total_size + size > uwb_rc->ies_capacity) { | ||
442 | d_printf(4, dev, "Reallocating IE cache from %p capacity %zu " | ||
443 | "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity, | ||
444 | total_size + size); | ||
445 | new_ies = kzalloc(total_size + size, GFP_KERNEL); | ||
446 | if (new_ies == NULL) { | ||
447 | dev_err(dev, "No memory for adding new IE\n"); | ||
448 | result = -ENOMEM; | ||
449 | goto error_alloc; | ||
450 | } | ||
451 | memcpy(new_ies, uwb_rc->ies, total_size); | ||
452 | uwb_rc->ies_capacity = total_size + size; | ||
453 | kfree(uwb_rc->ies); | ||
454 | uwb_rc->ies = new_ies; | ||
455 | d_printf(4, dev, "New IE cache at %p capacity %zu\n", | ||
456 | uwb_rc->ies, uwb_rc->ies_capacity); | ||
457 | } | ||
458 | memcpy((void *)uwb_rc->ies + total_size, ies, size); | ||
459 | uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size); | ||
460 | if (uwb_rc->beaconing != -1) { | ||
461 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); | ||
462 | if (result < 0) { | ||
463 | dev_err(dev, "Cannot set new IE on device: %d\n", | ||
464 | result); | ||
465 | uwb_rc->ies->wIELength = cpu_to_le16(ies_size); | ||
466 | } else | ||
467 | result = 0; | ||
468 | } | ||
469 | d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n", | ||
470 | le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity, | ||
471 | uwb_rc->ies); | ||
472 | error_alloc: | ||
473 | mutex_unlock(&uwb_rc->ies_mutex); | ||
474 | return result; | ||
475 | } | ||
476 | EXPORT_SYMBOL_GPL(uwb_rc_ie_add); | ||
477 | |||
478 | |||
479 | /* | ||
480 | * Remove an IE from internal cache | ||
481 | * | ||
482 | * We are dealing with our internal IE cache so no need to verify that the | ||
483 | * IEs are valid (it has been done already). | ||
484 | * | ||
485 | * Should be called with ies_mutex held | ||
486 | * | ||
487 | * We do not break out once an IE is found in the cache. It is currently | ||
488 | * possible to have more than one IE with the same ID included in the | ||
489 | * beacon. We don't reallocate, we just mark the size smaller. | ||
490 | */ | ||
491 | static | ||
492 | int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) | ||
493 | { | ||
494 | struct uwb_ie_hdr *ie_hdr; | ||
495 | size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength); | ||
496 | void *itr = uwb_rc->ies->IEData; | ||
497 | void *top = itr + new_len; | ||
498 | |||
499 | while (itr < top) { | ||
500 | ie_hdr = itr; | ||
501 | if (ie_hdr->element_id != to_remove) { | ||
502 | itr += sizeof(*ie_hdr) + ie_hdr->length; | ||
503 | } else { | ||
504 | int ie_length; | ||
505 | ie_length = sizeof(*ie_hdr) + ie_hdr->length; | ||
506 | if (top - itr != ie_length) | ||
507 | memmove(itr, itr + ie_length, top - itr + ie_length); | ||
508 | top -= ie_length; | ||
509 | new_len -= ie_length; | ||
510 | } | ||
511 | } | ||
512 | uwb_rc->ies->wIELength = cpu_to_le16(new_len); | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | |||
517 | /** | ||
518 | * Remove an IE currently being transmitted by device | ||
519 | * | ||
520 | * @element_id: id of IE to be removed from device's beacon | ||
521 | */ | ||
522 | int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) | ||
523 | { | ||
524 | struct device *dev = &uwb_rc->uwb_dev.dev; | ||
525 | int result; | ||
526 | |||
527 | if (uwb_rc->ies == NULL) | ||
528 | return -ESHUTDOWN; | ||
529 | mutex_lock(&uwb_rc->ies_mutex); | ||
530 | result = uwb_rc_ie_cache_rm(uwb_rc, element_id); | ||
531 | if (result < 0) | ||
532 | dev_err(dev, "Cannot remove IE from cache.\n"); | ||
533 | if (uwb_rc->beaconing != -1) { | ||
534 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); | ||
535 | if (result < 0) | ||
536 | dev_err(dev, "Cannot set new IE on device.\n"); | ||
537 | } | ||
538 | mutex_unlock(&uwb_rc->ies_mutex); | ||
539 | return result; | ||
540 | } | ||
541 | EXPORT_SYMBOL_GPL(uwb_rc_ie_rm); | ||
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c new file mode 100644 index 000000000000..15f856c9689a --- /dev/null +++ b/drivers/uwb/lc-dev.c | |||
@@ -0,0 +1,492 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Life cycle of devices | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/device.h> | ||
28 | #include <linux/err.h> | ||
29 | #include <linux/kdev_t.h> | ||
30 | #include <linux/random.h> | ||
31 | #include "uwb-internal.h" | ||
32 | |||
33 | #define D_LOCAL 1 | ||
34 | #include <linux/uwb/debug.h> | ||
35 | |||
36 | |||
37 | /* We initialize addresses to 0xff (invalid, as it is bcast) */ | ||
38 | static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) | ||
39 | { | ||
40 | memset(&addr->data, 0xff, sizeof(addr->data)); | ||
41 | } | ||
42 | |||
43 | static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr) | ||
44 | { | ||
45 | memset(&addr->data, 0xff, sizeof(addr->data)); | ||
46 | } | ||
47 | |||
48 | /* @returns !0 if a device @addr is a broadcast address */ | ||
49 | static inline int uwb_dev_addr_bcast(const struct uwb_dev_addr *addr) | ||
50 | { | ||
51 | static const struct uwb_dev_addr bcast = { .data = { 0xff, 0xff } }; | ||
52 | return !uwb_dev_addr_cmp(addr, &bcast); | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Add callback @new to be called when an event occurs in @rc. | ||
57 | */ | ||
58 | int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new) | ||
59 | { | ||
60 | if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) | ||
61 | return -ERESTARTSYS; | ||
62 | list_add(&new->list_node, &rc->notifs_chain.list); | ||
63 | mutex_unlock(&rc->notifs_chain.mutex); | ||
64 | return 0; | ||
65 | } | ||
66 | EXPORT_SYMBOL_GPL(uwb_notifs_register); | ||
67 | |||
68 | /* | ||
69 | * Remove event handler (callback) | ||
70 | */ | ||
71 | int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry) | ||
72 | { | ||
73 | if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) | ||
74 | return -ERESTARTSYS; | ||
75 | list_del(&entry->list_node); | ||
76 | mutex_unlock(&rc->notifs_chain.mutex); | ||
77 | return 0; | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(uwb_notifs_deregister); | ||
80 | |||
81 | /* | ||
82 | * Notify all event handlers of a given event on @rc | ||
83 | * | ||
84 | * We are called with a valid reference to the device, or NULL if the | ||
85 | * event is not for a particular event (e.g., a BG join event). | ||
86 | */ | ||
87 | void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event) | ||
88 | { | ||
89 | struct uwb_notifs_handler *handler; | ||
90 | if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) | ||
91 | return; | ||
92 | if (!list_empty(&rc->notifs_chain.list)) { | ||
93 | list_for_each_entry(handler, &rc->notifs_chain.list, list_node) { | ||
94 | handler->cb(handler->data, uwb_dev, event); | ||
95 | } | ||
96 | } | ||
97 | mutex_unlock(&rc->notifs_chain.mutex); | ||
98 | } | ||
99 | |||
/*
 * Release the backing device of a uwb_dev that has been dynamically allocated.
 *
 * Installed as the struct device release callback in uwb_dev_init();
 * runs when the last reference to the embedded device is dropped.
 */
static void uwb_dev_sys_release(struct device *dev)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);

	d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev);
	/* Drop the beacon-cache entry reference taken in uwbd_dev_onair() */
	uwb_bce_put(uwb_dev->bce);
	d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev);
	/* Poison the memory so use-after-free accesses stand out */
	memset(uwb_dev, 0x69, sizeof(*uwb_dev));
	kfree(uwb_dev);
	d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev);
}
114 | |||
/*
 * Initialize a UWB device instance
 *
 * Alloc, zero and call this function.
 *
 * Sets up the mutex, the embedded struct device (refcount becomes 1,
 * release handler installed) and marks both addresses invalid
 * (all-ones) until they are learnt from a beacon or the hardware.
 */
void uwb_dev_init(struct uwb_dev *uwb_dev)
{
	mutex_init(&uwb_dev->mutex);
	device_initialize(&uwb_dev->dev);
	uwb_dev->dev.release = uwb_dev_sys_release;
	uwb_dev_addr_init(&uwb_dev->dev_addr);
	uwb_mac_addr_init(&uwb_dev->mac_addr);
	/* NOTE(review): all stream bits start set -- presumably meaning
	 * "available"; confirm against the stream allocator. */
	bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS);
}
129 | |||
130 | static ssize_t uwb_dev_EUI_48_show(struct device *dev, | ||
131 | struct device_attribute *attr, char *buf) | ||
132 | { | ||
133 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
134 | char addr[UWB_ADDR_STRSIZE]; | ||
135 | |||
136 | uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr); | ||
137 | return sprintf(buf, "%s\n", addr); | ||
138 | } | ||
139 | static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL); | ||
140 | |||
141 | static ssize_t uwb_dev_DevAddr_show(struct device *dev, | ||
142 | struct device_attribute *attr, char *buf) | ||
143 | { | ||
144 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
145 | char addr[UWB_ADDR_STRSIZE]; | ||
146 | |||
147 | uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr); | ||
148 | return sprintf(buf, "%s\n", addr); | ||
149 | } | ||
150 | static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL); | ||
151 | |||
/*
 * Show the BPST of this device.
 *
 * Calculated from the receive time of the device's beacon and its
 * slot number.
 */
static ssize_t uwb_dev_BPST_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	struct uwb_beca_e *bce;
	struct uwb_beacon_frame *bf;
	u16 bpst;

	bce = uwb_dev->bce;
	/* bce->be is refreshed by the beacon cache; lock while reading */
	mutex_lock(&bce->mutex);
	bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
	/* Back the reported offset off by this device's slot position.
	 * NOTE(review): the subtraction can wrap in u16 arithmetic if
	 * the slot product exceeds wBPSTOffset -- confirm intended. */
	bpst = bce->be->wBPSTOffset
		- (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US);
	mutex_unlock(&bce->mutex);

	return sprintf(buf, "%d\n", bpst);
}
static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL);
176 | |||
177 | /* | ||
178 | * Show the IEs a device is beaconing | ||
179 | * | ||
180 | * We need to access the beacon cache, so we just lock it really | ||
181 | * quick, print the IEs and unlock. | ||
182 | * | ||
183 | * We have a reference on the cache entry, so that should be | ||
184 | * quite safe. | ||
185 | */ | ||
186 | static ssize_t uwb_dev_IEs_show(struct device *dev, | ||
187 | struct device_attribute *attr, char *buf) | ||
188 | { | ||
189 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
190 | |||
191 | return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE); | ||
192 | } | ||
193 | static DEVICE_ATTR(IEs, S_IRUGO | S_IWUSR, uwb_dev_IEs_show, NULL); | ||
194 | |||
195 | static ssize_t uwb_dev_LQE_show(struct device *dev, | ||
196 | struct device_attribute *attr, char *buf) | ||
197 | { | ||
198 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
199 | struct uwb_beca_e *bce = uwb_dev->bce; | ||
200 | size_t result; | ||
201 | |||
202 | mutex_lock(&bce->mutex); | ||
203 | result = stats_show(&uwb_dev->bce->lqe_stats, buf); | ||
204 | mutex_unlock(&bce->mutex); | ||
205 | return result; | ||
206 | } | ||
207 | |||
208 | static ssize_t uwb_dev_LQE_store(struct device *dev, | ||
209 | struct device_attribute *attr, | ||
210 | const char *buf, size_t size) | ||
211 | { | ||
212 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
213 | struct uwb_beca_e *bce = uwb_dev->bce; | ||
214 | ssize_t result; | ||
215 | |||
216 | mutex_lock(&bce->mutex); | ||
217 | result = stats_store(&uwb_dev->bce->lqe_stats, buf, size); | ||
218 | mutex_unlock(&bce->mutex); | ||
219 | return result; | ||
220 | } | ||
221 | static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store); | ||
222 | |||
223 | static ssize_t uwb_dev_RSSI_show(struct device *dev, | ||
224 | struct device_attribute *attr, char *buf) | ||
225 | { | ||
226 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
227 | struct uwb_beca_e *bce = uwb_dev->bce; | ||
228 | size_t result; | ||
229 | |||
230 | mutex_lock(&bce->mutex); | ||
231 | result = stats_show(&uwb_dev->bce->rssi_stats, buf); | ||
232 | mutex_unlock(&bce->mutex); | ||
233 | return result; | ||
234 | } | ||
235 | |||
236 | static ssize_t uwb_dev_RSSI_store(struct device *dev, | ||
237 | struct device_attribute *attr, | ||
238 | const char *buf, size_t size) | ||
239 | { | ||
240 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
241 | struct uwb_beca_e *bce = uwb_dev->bce; | ||
242 | ssize_t result; | ||
243 | |||
244 | mutex_lock(&bce->mutex); | ||
245 | result = stats_store(&uwb_dev->bce->rssi_stats, buf, size); | ||
246 | mutex_unlock(&bce->mutex); | ||
247 | return result; | ||
248 | } | ||
249 | static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store); | ||
250 | |||
251 | |||
/* sysfs attributes exported for each discovered (neighbor) UWB device */
static struct attribute *dev_attrs[] = {
	&dev_attr_EUI_48.attr,
	&dev_attr_DevAddr.attr,
	&dev_attr_BPST.attr,
	&dev_attr_IEs.attr,
	&dev_attr_LQE.attr,
	&dev_attr_RSSI.attr,
	NULL,
};

static struct attribute_group dev_attr_group = {
	.attrs = dev_attrs,
};

/* Attached to dev->groups in __uwb_dev_sys_add() for neighbor devices */
static struct attribute_group *groups[] = {
	&dev_attr_group,
	NULL,
};
270 | |||
/**
 * Device SYSFS registration
 *
 * Registers @uwb_dev under @parent_dev in the device model.  The
 * attribute group is attached only for neighbor devices; the local
 * radio controller's own uwb_dev gets no device files.
 *
 * @returns 0 on success or the negative error from device_add().
 */
static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
{
	int result;
	struct device *dev;

	d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev);
	BUG_ON(parent_dev == NULL);

	dev = &uwb_dev->dev;
	/* Device sysfs files are only useful for neighbor devices not
	   local radio controllers. */
	if (&uwb_dev->rc->uwb_dev != uwb_dev)
		dev->groups = groups;
	dev->parent = parent_dev;
	dev_set_drvdata(dev, uwb_dev);

	result = device_add(dev);
	d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result);
	return result;
}
296 | |||
297 | |||
/* Undo __uwb_dev_sys_add(): clear drvdata and unregister from sysfs */
static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev)
{
	d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev);
	dev_set_drvdata(&uwb_dev->dev, NULL);
	device_del(&uwb_dev->dev);
	d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev);
}
305 | |||
306 | |||
/**
 * Register and initialize a new UWB device
 *
 * Did you call uwb_dev_init() on it?
 *
 * @parent_rc: is the parent radio controller who has the link to the
 *             device. When registering the UWB device that is a UWB
 *             Radio Controller, we point back to it.
 *
 * If registering the device that is part of a radio, caller has set
 * rc->uwb_dev->dev.  Otherwise it is to be left NULL--a new one will
 * be allocated.
 *
 * @returns 0 on success or the negative error from sysfs registration.
 */
int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
		struct uwb_rc *parent_rc)
{
	int result;
	struct device *dev;

	BUG_ON(uwb_dev == NULL);
	BUG_ON(parent_dev == NULL);
	BUG_ON(parent_rc == NULL);

	/* Serialize against concurrent add/rm on the same device */
	mutex_lock(&uwb_dev->mutex);
	dev = &uwb_dev->dev;
	uwb_dev->rc = parent_rc;
	result = __uwb_dev_sys_add(uwb_dev, parent_dev);
	if (result < 0)
		printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n",
		       dev_name(dev), result);
	mutex_unlock(&uwb_dev->mutex);
	return result;
}
340 | |||
341 | |||
/* Unregister a UWB device from sysfs (serialized by the device mutex) */
void uwb_dev_rm(struct uwb_dev *uwb_dev)
{
	mutex_lock(&uwb_dev->mutex);
	__uwb_dev_sys_rm(uwb_dev);
	mutex_unlock(&uwb_dev->mutex);
}
348 | |||
349 | |||
/*
 * uwb_dev_for_each() helper: take a reference iff @dev is the device
 * we are looking for.  Returning 1 stops the iteration.
 */
static
int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev)
{
	struct uwb_dev *target_uwb_dev = __target_uwb_dev;

	if (to_uwb_dev(dev) != target_uwb_dev)
		return 0;
	uwb_dev_get(target_uwb_dev);
	return 1;
}
361 | |||
362 | |||
363 | /** | ||
364 | * Given a UWB device descriptor, validate and refcount it | ||
365 | * | ||
366 | * @returns NULL if the device does not exist or is quiescing; the ptr to | ||
367 | * it otherwise. | ||
368 | */ | ||
369 | struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev) | ||
370 | { | ||
371 | if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev)) | ||
372 | return uwb_dev; | ||
373 | else | ||
374 | return NULL; | ||
375 | } | ||
376 | EXPORT_SYMBOL_GPL(uwb_dev_try_get); | ||
377 | |||
378 | |||
/**
 * Remove a device from the system [grunt for other functions]
 *
 * Logs the disconnect, unregisters the device from sysfs and drops
 * the reference taken when the device was created in _onair().
 * Always returns 0 (return type kept for iterator-callback use).
 */
int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc)
{
	struct device *dev = &uwb_dev->dev;
	char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];

	d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc);
	uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr);
	uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr);
	/* @rc may be NULL when the controller is already gone */
	dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n",
		 macbuf, devbuf,
		 rc ? rc->uwb_dev.dev.parent->bus->name : "n/a",
		 rc ? dev_name(rc->uwb_dev.dev.parent) : "");
	uwb_dev_rm(uwb_dev);
	uwb_dev_put(uwb_dev);	/* for the creation in _onair() */
	d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc);
	return 0;
}
399 | |||
400 | |||
401 | /** | ||
402 | * A device went off the air, clean up after it! | ||
403 | * | ||
404 | * This is called by the UWB Daemon (through the beacon purge function | ||
405 | * uwb_bcn_cache_purge) when it is detected that a device has been in | ||
406 | * radio silence for a while. | ||
407 | * | ||
408 | * If this device is actually a local radio controller we don't need | ||
409 | * to go through the offair process, as it is not registered as that. | ||
410 | * | ||
411 | * NOTE: uwb_bcn_cache.mutex is held! | ||
412 | */ | ||
413 | void uwbd_dev_offair(struct uwb_beca_e *bce) | ||
414 | { | ||
415 | struct uwb_dev *uwb_dev; | ||
416 | |||
417 | uwb_dev = bce->uwb_dev; | ||
418 | if (uwb_dev) { | ||
419 | uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR); | ||
420 | __uwb_dev_offair(uwb_dev, uwb_dev->rc); | ||
421 | } | ||
422 | } | ||
423 | |||
424 | |||
425 | /** | ||
426 | * A device went on the air, start it up! | ||
427 | * | ||
428 | * This is called by the UWB Daemon when it is detected that a device | ||
429 | * has popped up in the radio range of the radio controller. | ||
430 | * | ||
431 | * It will just create the freaking device, register the beacon and | ||
432 | * stuff and yatla, done. | ||
433 | * | ||
434 | * | ||
435 | * NOTE: uwb_beca.mutex is held, bce->mutex is held | ||
436 | */ | ||
437 | void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) | ||
438 | { | ||
439 | int result; | ||
440 | struct device *dev = &rc->uwb_dev.dev; | ||
441 | struct uwb_dev *uwb_dev; | ||
442 | char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; | ||
443 | |||
444 | uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr); | ||
445 | uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr); | ||
446 | uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL); | ||
447 | if (uwb_dev == NULL) { | ||
448 | dev_err(dev, "new device %s: Cannot allocate memory\n", | ||
449 | macbuf); | ||
450 | return; | ||
451 | } | ||
452 | uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */ | ||
453 | uwb_dev->mac_addr = *bce->mac_addr; | ||
454 | uwb_dev->dev_addr = bce->dev_addr; | ||
455 | dev_set_name(&uwb_dev->dev, macbuf); | ||
456 | result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); | ||
457 | if (result < 0) { | ||
458 | dev_err(dev, "new device %s: cannot instantiate device\n", | ||
459 | macbuf); | ||
460 | goto error_dev_add; | ||
461 | } | ||
462 | /* plug the beacon cache */ | ||
463 | bce->uwb_dev = uwb_dev; | ||
464 | uwb_dev->bce = bce; | ||
465 | uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ | ||
466 | dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", | ||
467 | macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name, | ||
468 | dev_name(rc->uwb_dev.dev.parent)); | ||
469 | uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR); | ||
470 | return; | ||
471 | |||
472 | error_dev_add: | ||
473 | kfree(uwb_dev); | ||
474 | return; | ||
475 | } | ||
476 | |||
/**
 * Iterate over the list of UWB devices, calling a @function on each
 *
 * See docs for bus_for_each()....
 *
 * @rc:       radio controller for the devices.
 * @function: function to call.
 * @priv:     data to pass to @function.
 * @returns:  0 if no invocation of function() returned a value
 *            different to zero. That value otherwise.
 *
 * Devices are children of the controller's embedded uwb_dev device,
 * so this is a thin wrapper over device_for_each_child().
 */
int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv)
{
	return device_for_each_child(&rc->uwb_dev.dev, priv, function);
}
EXPORT_SYMBOL_GPL(uwb_dev_for_each);
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c new file mode 100644 index 000000000000..ee5772f00d42 --- /dev/null +++ b/drivers/uwb/lc-rc.c | |||
@@ -0,0 +1,495 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Life cycle of radio controllers | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | * | ||
25 | * A UWB radio controller is also a UWB device, so it embeds one... | ||
26 | * | ||
27 | * List of RCs comes from the 'struct class uwb_rc_class'. | ||
28 | */ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/device.h> | ||
33 | #include <linux/err.h> | ||
34 | #include <linux/random.h> | ||
35 | #include <linux/kdev_t.h> | ||
36 | #include <linux/etherdevice.h> | ||
37 | #include <linux/usb.h> | ||
38 | |||
39 | #define D_LOCAL 1 | ||
40 | #include <linux/uwb/debug.h> | ||
41 | #include "uwb-internal.h" | ||
42 | |||
43 | static int uwb_rc_index_match(struct device *dev, void *data) | ||
44 | { | ||
45 | int *index = data; | ||
46 | struct uwb_rc *rc = dev_get_drvdata(dev); | ||
47 | |||
48 | if (rc->index == *index) | ||
49 | return 1; | ||
50 | return 0; | ||
51 | } | ||
52 | |||
/*
 * Look up a radio controller by its uwbN index.
 *
 * NOTE(review): class_find_device() takes a reference on the matched
 * struct device which is never dropped here -- verify whether a
 * put_device() is needed; the only caller (uwb_rc_new_index()) uses
 * the result purely as an existence check.
 */
static struct uwb_rc *uwb_rc_find_by_index(int index)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
	if (dev)
		rc = dev_get_drvdata(dev);
	return rc;
}
63 | |||
/* Pick the first unused uwbN index, wrapping to 0 on int overflow */
static int uwb_rc_new_index(void)
{
	int index = 0;

	while (uwb_rc_find_by_index(index)) {
		if (++index < 0)
			index = 0;
	}
	return index;
}
75 | |||
/**
 * Release the backing device of a uwb_rc that has been dynamically allocated.
 *
 * Installed on the embedded uwb_dev's device in uwb_rc_init(); runs
 * when the last reference is dropped.  Tears down the notification/
 * event handling and IE state before freeing the controller.
 */
static void uwb_rc_sys_release(struct device *dev)
{
	struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
	struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev);

	uwb_rc_neh_destroy(rc);
	uwb_rc_ie_release(rc);
	d_printf(1, dev, "freed uwb_rc %p\n", rc);
	kfree(rc);
}
89 | |||
90 | |||
/*
 * Initialize a zeroed radio controller structure.
 *
 * Sets up the embedded uwb_dev (class, release handler), marks the
 * controller as not beaconing (-1) and not scanning, and initializes
 * the notification chain, DRP availability, IE, reservation and PAL
 * subsystems.
 */
void uwb_rc_init(struct uwb_rc *rc)
{
	struct uwb_dev *uwb_dev = &rc->uwb_dev;

	uwb_dev_init(uwb_dev);
	rc->uwb_dev.dev.class = &uwb_rc_class;
	rc->uwb_dev.dev.release = uwb_rc_sys_release;
	uwb_rc_neh_create(rc);
	rc->beaconing = -1;	/* -1 == not beaconing on any channel */
	rc->scan_type = UWB_SCAN_DISABLED;
	INIT_LIST_HEAD(&rc->notifs_chain.list);
	mutex_init(&rc->notifs_chain.mutex);
	uwb_drp_avail_init(rc);
	uwb_rc_ie_init(rc);
	uwb_rsv_init(rc);
	uwb_rc_pal_init(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_init);
109 | |||
110 | |||
111 | struct uwb_rc *uwb_rc_alloc(void) | ||
112 | { | ||
113 | struct uwb_rc *rc; | ||
114 | rc = kzalloc(sizeof(*rc), GFP_KERNEL); | ||
115 | if (rc == NULL) | ||
116 | return NULL; | ||
117 | uwb_rc_init(rc); | ||
118 | return rc; | ||
119 | } | ||
120 | EXPORT_SYMBOL_GPL(uwb_rc_alloc); | ||
121 | |||
/* sysfs attributes for a radio controller; the dev_attr_mac_address /
 * dev_attr_scan / dev_attr_beacon objects are defined elsewhere in
 * the driver */
static struct attribute *rc_attrs[] = {
	&dev_attr_mac_address.attr,
	&dev_attr_scan.attr,
	&dev_attr_beacon.attr,
	NULL,
};

static struct attribute_group rc_attr_group = {
	.attrs = rc_attrs,
};
132 | |||
/*
 * Registration of sysfs specific stuff
 *
 * Attaches the rc_attr_group files to the controller's device kobject.
 */
static int uwb_rc_sys_add(struct uwb_rc *rc)
{
	return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
140 | |||
141 | |||
/* Undo uwb_rc_sys_add(): remove the controller's sysfs attribute group */
static void __uwb_rc_sys_rm(struct uwb_rc *rc)
{
	sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
146 | |||
/**
 * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
 * @rc: the radio controller.
 *
 * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
 * then a random locally administered EUI-48 is generated and set on
 * the device. The probability of address collisions is sufficiently
 * unlikely (1/2^40 = 9.1e-13) that they're not checked for.
 */
static
int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_dev *uwb_dev = &rc->uwb_dev;
	char devname[UWB_ADDR_STRSIZE];
	struct uwb_mac_addr addr;

	result = uwb_rc_mac_addr_get(rc, &addr);
	if (result < 0) {
		dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
		return result;
	}

	if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
		addr.data[0] = 0x02; /* locally administered and unicast */
		get_random_bytes(&addr.data[1], sizeof(addr.data)-1);

		result = uwb_rc_mac_addr_set(rc, &addr);
		if (result < 0) {
			uwb_mac_addr_print(devname, sizeof(devname), &addr);
			dev_err(dev, "cannot set EUI-48 address %s: %d\n",
				devname, result);
			return result;
		}
	}
	/* Cache the (possibly newly generated) address on the uwb_dev */
	uwb_dev->mac_addr = addr;
	return 0;
}
186 | |||
187 | |||
188 | |||
/*
 * Bring the radio hardware and software subsystems to a known state:
 * reset the radio, assign addresses, then set up the IE and
 * reservation subsystems (torn down in reverse order on error).
 *
 * @returns 0 on success or the first negative error encountered.
 */
static int uwb_rc_setup(struct uwb_rc *rc)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;

	result = uwb_rc_reset(rc);
	if (result < 0) {
		dev_err(dev, "cannot reset UWB radio: %d\n", result);
		goto error;
	}
	result = uwb_rc_mac_addr_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
		goto error;
	}
	result = uwb_rc_dev_addr_assign(rc);
	if (result < 0) {
		dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
		goto error;
	}
	result = uwb_rc_ie_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup IE subsystem: %d\n", result);
		goto error_ie_setup;
	}
	result = uwb_rsv_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
		goto error_rsv_setup;
	}
	uwb_dbg_add_rc(rc);
	return 0;

error_rsv_setup:
	uwb_rc_ie_release(rc);
error_ie_setup:
error:
	return result;
}
228 | |||
229 | |||
/**
 * Register a new UWB radio controller
 *
 * Did you call uwb_rc_init() on your rc?
 *
 * We assume that this is being called with a > 0 refcount on
 * it [through ops->{get|put}_device(). We'll take our own, though.
 *
 * @parent_dev is our real device, the one that provides the actual UWB device
 *
 * @returns 0 on success; on error the hardware is stopped and pending
 * daemon events for this controller are flushed.
 */
int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
{
	int result;
	struct device *dev;
	char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];

	rc->index = uwb_rc_new_index();

	dev = &rc->uwb_dev.dev;
	dev_set_name(dev, "uwb%d", rc->index);

	rc->priv = priv;

	/* Fire up the hardware backend before touching it in setup */
	result = rc->start(rc);
	if (result < 0)
		goto error_rc_start;

	result = uwb_rc_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
		goto error_rc_setup;
	}

	/* NOTE(review): -EADDRNOTAVAIL is deliberately not treated as
	 * fatal here -- confirm why partial registration is tolerated. */
	result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
	if (result < 0 && result != -EADDRNOTAVAIL)
		goto error_dev_add;

	result = uwb_rc_sys_add(rc);
	if (result < 0) {
		dev_err(parent_dev, "cannot register UWB radio controller "
			"dev attributes: %d\n", result);
		goto error_sys_add;
	}

	uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
	uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
	dev_info(dev,
		 "new uwb radio controller (mac %s dev %s) on %s %s\n",
		 macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
	/* Only now is the controller visible to uwb_rc_get()/try_get() */
	rc->ready = 1;
	return 0;

error_sys_add:
	uwb_dev_rm(&rc->uwb_dev);
error_dev_add:
error_rc_setup:
	rc->stop(rc);
	uwbd_flush(rc);
error_rc_start:
	return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_add);
292 | |||
293 | |||
/* uwb_dev_for_each() helper: take each child device off the air */
static int uwb_dev_offair_helper(struct device *dev, void *priv)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);

	return __uwb_dev_offair(uwb_dev, uwb_dev->rc);
}
300 | |||
/*
 * Remove a Radio Controller; stop beaconing/scanning, disconnect all children
 */
void uwb_rc_rm(struct uwb_rc *rc)
{
	/* Make uwb_rc_get()/try_get() fail from here on */
	rc->ready = 0;

	uwb_dbg_del_rc(rc);
	uwb_rsv_cleanup(rc);
	uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE);
	if (rc->beaconing >= 0)
		uwb_rc_beacon(rc, -1, 0);	/* -1 == stop beaconing */
	if (rc->scan_type != UWB_SCAN_DISABLED)
		uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0);
	uwb_rc_reset(rc);

	/* Stop the hardware backend and drain pending daemon events */
	rc->stop(rc);
	uwbd_flush(rc);

	uwb_dev_lock(&rc->uwb_dev);
	rc->priv = NULL;
	rc->cmd = NULL;
	uwb_dev_unlock(&rc->uwb_dev);
	/* Take every neighbor device off the air under the beacon-cache
	 * lock, then remove our own sysfs files and uwb_dev */
	mutex_lock(&uwb_beca.mutex);
	uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
	__uwb_rc_sys_rm(rc);
	mutex_unlock(&uwb_beca.mutex);
	uwb_dev_rm(&rc->uwb_dev);
}
EXPORT_SYMBOL_GPL(uwb_rc_rm);
331 | |||
332 | static int find_rc_try_get(struct device *dev, void *data) | ||
333 | { | ||
334 | struct uwb_rc *target_rc = data; | ||
335 | struct uwb_rc *rc = dev_get_drvdata(dev); | ||
336 | |||
337 | if (rc == NULL) { | ||
338 | WARN_ON(1); | ||
339 | return 0; | ||
340 | } | ||
341 | if (rc == target_rc) { | ||
342 | if (rc->ready == 0) | ||
343 | return 0; | ||
344 | else | ||
345 | return 1; | ||
346 | } | ||
347 | return 0; | ||
348 | } | ||
349 | |||
/**
 * Given a radio controller descriptor, validate and refcount it
 *
 * @returns NULL if the rc does not exist or is quiescing; the ptr to
 *          it otherwise.
 *
 * NOTE(review): class_find_device() also takes a reference on the
 * embedded struct device; __uwb_rc_get() adds another -- verify the
 * class reference is accounted for by the eventual put path.
 */
struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, target_rc,
				find_rc_try_get);
	if (dev) {
		rc = dev_get_drvdata(dev);
		__uwb_rc_get(rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
370 | |||
/*
 * RC get for external refcount acquirers...
 *
 * Increments the refcount of the device and its backend modules
 *
 * NOTE(review): the rc->ready test is done without a lock; a
 * concurrent uwb_rc_rm() could clear it right after the check --
 * confirm callers are protected against that window.
 */
static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
{
	if (rc->ready == 0)
		return NULL;
	uwb_dev_get(&rc->uwb_dev);
	return rc;
}
383 | |||
384 | static int find_rc_grandpa(struct device *dev, void *data) | ||
385 | { | ||
386 | struct device *grandpa_dev = data; | ||
387 | struct uwb_rc *rc = dev_get_drvdata(dev); | ||
388 | |||
389 | if (rc->uwb_dev.dev.parent->parent == grandpa_dev) { | ||
390 | rc = uwb_rc_get(rc); | ||
391 | return 1; | ||
392 | } | ||
393 | return 0; | ||
394 | } | ||
395 | |||
/**
 * Locate and refcount a radio controller given a common grand-parent
 *
 * @grandpa_dev Pointer to the 'grandparent' device structure.
 * @returns NULL If the rc does not exist or is quiescing; the ptr to
 *          it otherwise, properly referenced.
 *
 * The Radio Control interface (or the UWB Radio Controller) is always
 * an interface of a device. The parent is the interface, the
 * grandparent is the device that encapsulates the interface.
 *
 * There is no need to lock around as the "grandpa" would be
 * refcounted by the target, and to remove the references, the
 * uwb_rc_class->sem would have to be taken--we hold it, ergo we
 * should be safe.
 */
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	/* find_rc_grandpa() takes the rc reference on a match */
	dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
				find_rc_grandpa);
	if (dev)
		rc = dev_get_drvdata(dev);
	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
424 | |||
425 | /** | ||
426 | * Find a radio controller by device address | ||
427 | * | ||
428 | * @returns the pointer to the radio controller, properly referenced | ||
429 | */ | ||
430 | static int find_rc_dev(struct device *dev, void *data) | ||
431 | { | ||
432 | struct uwb_dev_addr *addr = data; | ||
433 | struct uwb_rc *rc = dev_get_drvdata(dev); | ||
434 | |||
435 | if (rc == NULL) { | ||
436 | WARN_ON(1); | ||
437 | return 0; | ||
438 | } | ||
439 | if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) { | ||
440 | rc = uwb_rc_get(rc); | ||
441 | return 1; | ||
442 | } | ||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) | ||
447 | { | ||
448 | struct device *dev; | ||
449 | struct uwb_rc *rc = NULL; | ||
450 | |||
451 | dev = class_find_device(&uwb_rc_class, NULL, (void *)addr, | ||
452 | find_rc_dev); | ||
453 | if (dev) | ||
454 | rc = dev_get_drvdata(dev); | ||
455 | |||
456 | return rc; | ||
457 | } | ||
458 | EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev); | ||
459 | |||
/**
 * Drop a reference on a radio controller
 *
 * This is the version that should be done by entities external to the
 * UWB Radio Control stack (ie: clients of the API).
 */
void uwb_rc_put(struct uwb_rc *rc)
{
	__uwb_rc_put(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_put);
471 | |||
/*
 * Print the current IE set of a radio controller into @buf.
 *
 * Fetches the IE information from the hardware with uwb_rc_get_ie()
 * (which allocates @ie_info; freed here) and hex-dumps each IE via
 * uwb_ie_for_each()/uwb_ie_dump_hex().
 *
 * @returns bytes written to @buf, or a negative error from
 * uwb_rc_get_ie().
 */
ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size)
{
	ssize_t result;
	struct uwb_rc_evt_get_ie *ie_info;
	struct uwb_buf_ctx ctx;

	result = uwb_rc_get_ie(uwb_rc, &ie_info);
	if (result < 0)
		goto error_get_ie;
	ctx.buf = buf;
	ctx.size = size;
	ctx.bytes = 0;
	/* result currently holds the total event size; the IE payload
	   follows the fixed header */
	uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx,
			ie_info->IEData, result - sizeof(*ie_info));
	result = ctx.bytes;
	kfree(ie_info);
error_get_ie:
	return result;
}
495 | |||
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c new file mode 100644 index 000000000000..9b4eb64327ac --- /dev/null +++ b/drivers/uwb/neh.c | |||
@@ -0,0 +1,616 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: Radio Control Interface (WUSB[8]) | ||
3 | * Notification and Event Handling | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI | ||
24 | * card delivers a stream of notifications and events to the | ||
25 | * notification and event endpoint or area. This code takes care of | ||
26 | * getting a buffer with that data, breaking it up in separate | ||
27 | * notifications and events and then deliver those. | ||
28 | * | ||
29 | * Events are answers to commands and they carry a context ID that | ||
30 | * associates them to the command. Notifications are that, | ||
31 | * notifications, they come out of the blue and have a context ID of | ||
32 | * zero. Think of the context ID kind of like a handler. The | ||
33 | * uwb_rc_neh_* code deals with managing context IDs. | ||
34 | * | ||
35 | * This is why you require a handle to operate on a UWB host. When you | ||
36 | * open a handle a context ID is assigned to you. | ||
37 | * | ||
38 | * So, the way it is done is: | ||
39 | * | ||
40 | * 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id) | ||
41 | * 2. Issue command [rc->cmd(rc, ...)] | ||
42 | * 3. Arm the timeout timer [uwb_rc_neh_arm()] | ||
43 | * 4, Release the reference to the neh [uwb_rc_neh_put()] | ||
44 | * 5. Wait for the callback | ||
45 | * 6. Command result (RCEB) is passed to the callback | ||
46 | * | ||
47 | * If (2) fails, you should remove the handle [uwb_rc_neh_rm()] | ||
48 | * instead of arming the timer. | ||
49 | * | ||
50 | * Handles are for using in *serialized* code, single thread. | ||
51 | * | ||
52 | * When the notification/event comes, the IRQ handler/endpoint | ||
53 | * callback passes the data read to uwb_rc_neh_grok() which will break | ||
54 | * it up in a discrete series of events, look up who is listening for | ||
55 | * them and execute the pertinent callbacks. | ||
56 | * | ||
57 | * If the reader detects an error while reading the data stream, call | ||
58 | * uwb_rc_neh_error(). | ||
59 | * | ||
60 | * CONSTRAINTS/ASSUMPTIONS: | ||
61 | * | ||
62 | * - Most notifications/events are small (less than .5k), copying | ||
63 | * around is ok. | ||
64 | * | ||
65 | * - Notifications/events are ALWAYS smaller than PAGE_SIZE | ||
66 | * | ||
67 | * - Notifications/events always come in a single piece (ie: a buffer | ||
68 | * will always contain entire notifications/events). | ||
69 | * | ||
70 | * - we cannot know in advance how long each event is (because they | ||
71 | * lack a length field in their header--smart move by the standards | ||
72 | * body, btw). So we need a facility to get the event size given the | ||
73 | * header. This is what the EST code does (notif/Event Size | ||
74 | * Tables), check nest.c--as well, you can associate the size to | ||
75 | * the handle [w/ neh->extra_size()]. | ||
76 | * | ||
77 | * - Most notifications/events are fixed size; only a few are variable | ||
78 | * size (NEST takes care of that). | ||
79 | * | ||
80 | * - Listeners of events expect them, so they usually provide a | ||
81 | * buffer, as they know the size. Listeners to notifications don't, | ||
82 | * so we allocate their buffers dynamically. | ||
83 | */ | ||
84 | #include <linux/kernel.h> | ||
85 | #include <linux/timer.h> | ||
86 | #include <linux/err.h> | ||
87 | |||
88 | #include "uwb-internal.h" | ||
89 | #define D_LOCAL 0 | ||
90 | #include <linux/uwb/debug.h> | ||
91 | |||
/*
 * UWB Radio Controller Notification/Event Handle
 *
 * Represents an entity waiting for an event coming from the UWB Radio
 * Controller with a given context id (context) and type (evt_type and
 * evt). On reception of the notification/event, the callback (cb) is
 * called with the event.
 *
 * If the timer expires before the event is received, the callback is
 * called with -ETIMEDOUT as the event size.
 */
struct uwb_rc_neh {
	struct kref kref;		/* refcount; freed in uwb_rc_neh_release() */

	struct uwb_rc *rc;		/* radio controller this handle waits on */
	u8 evt_type;			/* expected RCEB bEventType */
	__le16 evt;			/* expected RCEB wEvent (little endian) */
	u8 context;			/* context ID; 0 means "not active" */
	uwb_rc_cmd_cb_f cb;		/* called on event reception or timeout */
	void *arg;			/* opaque argument passed to @cb */

	struct timer_list timer;	/* command timeout timer (uwb_rc_neh_arm()) */
	struct list_head list_node;	/* entry in rc->neh_list */
};
116 | |||
117 | static void uwb_rc_neh_timer(unsigned long arg); | ||
118 | |||
119 | static void uwb_rc_neh_release(struct kref *kref) | ||
120 | { | ||
121 | struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref); | ||
122 | |||
123 | kfree(neh); | ||
124 | } | ||
125 | |||
/* Take an additional reference on @neh. */
static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
{
	kref_get(&neh->kref);
}
130 | |||
/**
 * uwb_rc_neh_put - release reference to a neh
 * @neh: the neh
 *
 * Frees the handle (via uwb_rc_neh_release()) when the last reference
 * is dropped.
 */
void uwb_rc_neh_put(struct uwb_rc_neh *neh)
{
	kref_put(&neh->kref, uwb_rc_neh_release);
}
139 | |||
140 | |||
141 | /** | ||
142 | * Assigns @neh a context id from @rc's pool | ||
143 | * | ||
144 | * @rc: UWB Radio Controller descriptor; @rc->neh_lock taken | ||
145 | * @neh: Notification/Event Handle | ||
146 | * @returns 0 if context id was assigned ok; < 0 errno on error (if | ||
147 | * all the context IDs are taken). | ||
148 | * | ||
149 | * (assumes @wa is locked). | ||
150 | * | ||
151 | * NOTE: WUSB spec reserves context ids 0x00 for notifications and | ||
152 | * 0xff is invalid, so they must not be used. Initialization | ||
153 | * fills up those two in the bitmap so they are not allocated. | ||
154 | * | ||
155 | * We spread the allocation around to reduce the posiblity of two | ||
156 | * consecutive opened @neh's getting the same context ID assigned (to | ||
157 | * avoid surprises with late events that timed out long time ago). So | ||
158 | * first we search from where @rc->ctx_roll is, if not found, we | ||
159 | * search from zero. | ||
160 | */ | ||
161 | static | ||
162 | int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh) | ||
163 | { | ||
164 | int result; | ||
165 | result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX, | ||
166 | rc->ctx_roll++); | ||
167 | if (result < UWB_RC_CTX_MAX) | ||
168 | goto found; | ||
169 | result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX); | ||
170 | if (result < UWB_RC_CTX_MAX) | ||
171 | goto found; | ||
172 | return -ENFILE; | ||
173 | found: | ||
174 | set_bit(result, rc->ctx_bm); | ||
175 | neh->context = result; | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | |||
180 | /** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */ | ||
181 | static | ||
182 | void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh) | ||
183 | { | ||
184 | struct device *dev = &rc->uwb_dev.dev; | ||
185 | if (neh->context == 0) | ||
186 | return; | ||
187 | if (test_bit(neh->context, rc->ctx_bm) == 0) { | ||
188 | dev_err(dev, "context %u not set in bitmap\n", | ||
189 | neh->context); | ||
190 | WARN_ON(1); | ||
191 | } | ||
192 | clear_bit(neh->context, rc->ctx_bm); | ||
193 | neh->context = 0; | ||
194 | } | ||
195 | |||
196 | /** | ||
197 | * uwb_rc_neh_add - add a neh for a radio controller command | ||
198 | * @rc: the radio controller | ||
199 | * @cmd: the radio controller command | ||
200 | * @expected_type: the type of the expected response event | ||
201 | * @expected_event: the expected event ID | ||
202 | * @cb: callback for when the event is received | ||
203 | * @arg: argument for the callback | ||
204 | * | ||
205 | * Creates a neh and adds it to the list of those waiting for an | ||
206 | * event. A context ID will be assigned to the command. | ||
207 | */ | ||
208 | struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd, | ||
209 | u8 expected_type, u16 expected_event, | ||
210 | uwb_rc_cmd_cb_f cb, void *arg) | ||
211 | { | ||
212 | int result; | ||
213 | unsigned long flags; | ||
214 | struct device *dev = &rc->uwb_dev.dev; | ||
215 | struct uwb_rc_neh *neh; | ||
216 | |||
217 | neh = kzalloc(sizeof(*neh), GFP_KERNEL); | ||
218 | if (neh == NULL) { | ||
219 | result = -ENOMEM; | ||
220 | goto error_kzalloc; | ||
221 | } | ||
222 | |||
223 | kref_init(&neh->kref); | ||
224 | INIT_LIST_HEAD(&neh->list_node); | ||
225 | init_timer(&neh->timer); | ||
226 | neh->timer.function = uwb_rc_neh_timer; | ||
227 | neh->timer.data = (unsigned long)neh; | ||
228 | |||
229 | neh->rc = rc; | ||
230 | neh->evt_type = expected_type; | ||
231 | neh->evt = cpu_to_le16(expected_event); | ||
232 | neh->cb = cb; | ||
233 | neh->arg = arg; | ||
234 | |||
235 | spin_lock_irqsave(&rc->neh_lock, flags); | ||
236 | result = __uwb_rc_ctx_get(rc, neh); | ||
237 | if (result >= 0) { | ||
238 | cmd->bCommandContext = neh->context; | ||
239 | list_add_tail(&neh->list_node, &rc->neh_list); | ||
240 | uwb_rc_neh_get(neh); | ||
241 | } | ||
242 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
243 | if (result < 0) | ||
244 | goto error_ctx_get; | ||
245 | |||
246 | return neh; | ||
247 | |||
248 | error_ctx_get: | ||
249 | kfree(neh); | ||
250 | error_kzalloc: | ||
251 | dev_err(dev, "cannot open handle to radio controller: %d\n", result); | ||
252 | return ERR_PTR(result); | ||
253 | } | ||
254 | |||
/*
 * Deactivate a neh: stop its timeout timer, return its context ID to
 * the pool and unlink it from the pending list.
 *
 * @rc->neh_lock must be held. The caller inherits the list's
 * reference and must drop it with uwb_rc_neh_put() (or via
 * uwb_rc_neh_cb()).
 */
static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	del_timer(&neh->timer);
	__uwb_rc_ctx_put(rc, neh);
	list_del(&neh->list_node);
}
261 | |||
262 | /** | ||
263 | * uwb_rc_neh_rm - remove a neh. | ||
264 | * @rc: the radio controller | ||
265 | * @neh: the neh to remove | ||
266 | * | ||
267 | * Remove an active neh immediately instead of waiting for the event | ||
268 | * (or a time out). | ||
269 | */ | ||
270 | void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) | ||
271 | { | ||
272 | unsigned long flags; | ||
273 | |||
274 | spin_lock_irqsave(&rc->neh_lock, flags); | ||
275 | __uwb_rc_neh_rm(rc, neh); | ||
276 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
277 | |||
278 | uwb_rc_neh_put(neh); | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * uwb_rc_neh_arm - arm an event handler timeout timer | ||
283 | * | ||
284 | * @rc: UWB Radio Controller | ||
285 | * @neh: Notification/event handler for @rc | ||
286 | * | ||
287 | * The timer is only armed if the neh is active. | ||
288 | */ | ||
289 | void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh) | ||
290 | { | ||
291 | unsigned long flags; | ||
292 | |||
293 | spin_lock_irqsave(&rc->neh_lock, flags); | ||
294 | if (neh->context) | ||
295 | mod_timer(&neh->timer, | ||
296 | jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS)); | ||
297 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
298 | } | ||
299 | |||
300 | static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size) | ||
301 | { | ||
302 | (*neh->cb)(neh->rc, neh->arg, rceb, size); | ||
303 | uwb_rc_neh_put(neh); | ||
304 | } | ||
305 | |||
306 | static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb) | ||
307 | { | ||
308 | return neh->evt_type == rceb->bEventType | ||
309 | && neh->evt == rceb->wEvent | ||
310 | && neh->context == rceb->bEventContext; | ||
311 | } | ||
312 | |||
/**
 * Find the handle waiting for a RC Radio Control Event
 *
 * @rc: UWB Radio Controller
 * @rceb: Pointer to the RCEB of the received event
 *
 * Searches @rc's list of pending handles for one matching @rceb's
 * type, event ID and context ID. If found, the neh is removed from
 * the list (timer stopped, context ID released) before it is
 * returned, so the caller inherits the list's reference (normally
 * dropped via uwb_rc_neh_cb()).
 *
 * Returns the matching neh, or NULL if nobody is waiting for @rceb.
 */
static
struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
				     const struct uwb_rceb *rceb)
{
	struct uwb_rc_neh *neh = NULL, *h;
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);

	list_for_each_entry(h, &rc->neh_list, list_node) {
		if (uwb_rc_neh_match(h, rceb)) {
			neh = h;
			break;
		}
	}

	if (neh)
		__uwb_rc_neh_rm(rc, neh);

	spin_unlock_irqrestore(&rc->neh_lock, flags);

	return neh;
}
350 | |||
351 | |||
352 | /** | ||
353 | * Process notifications coming from the radio control interface | ||
354 | * | ||
355 | * @rc: UWB Radio Control Interface descriptor | ||
356 | * @neh: Notification/Event Handler @neh->ptr points to | ||
357 | * @uwb_evt->buffer. | ||
358 | * | ||
359 | * This function is called by the event/notif handling subsystem when | ||
360 | * notifications arrive (hwarc_probe() arms a notification/event handle | ||
361 | * that calls back this function for every received notification; this | ||
362 | * function then will rearm itself). | ||
363 | * | ||
364 | * Notification data buffers are dynamically allocated by the NEH | ||
365 | * handling code in neh.c [uwb_rc_neh_lookup()]. What is actually | ||
366 | * allocated is space to contain the notification data. | ||
367 | * | ||
368 | * Buffers are prefixed with a Radio Control Event Block (RCEB) as | ||
369 | * defined by the WUSB Wired-Adapter Radio Control interface. We | ||
370 | * just use it for the notification code. | ||
371 | * | ||
372 | * On each case statement we just transcode endianess of the different | ||
373 | * fields. We declare a pointer to a RCI definition of an event, and | ||
374 | * then to a UWB definition of the same event (which are the same, | ||
375 | * remember). Event if we use different pointers | ||
376 | */ | ||
377 | static | ||
378 | void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size) | ||
379 | { | ||
380 | struct device *dev = &rc->uwb_dev.dev; | ||
381 | struct uwb_event *uwb_evt; | ||
382 | |||
383 | if (size == -ESHUTDOWN) | ||
384 | return; | ||
385 | if (size < 0) { | ||
386 | dev_err(dev, "ignoring event with error code %zu\n", | ||
387 | size); | ||
388 | return; | ||
389 | } | ||
390 | |||
391 | uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC); | ||
392 | if (unlikely(uwb_evt == NULL)) { | ||
393 | dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n", | ||
394 | rceb->bEventType, le16_to_cpu(rceb->wEvent), | ||
395 | rceb->bEventContext); | ||
396 | return; | ||
397 | } | ||
398 | uwb_evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */ | ||
399 | uwb_evt->ts_jiffies = jiffies; | ||
400 | uwb_evt->type = UWB_EVT_TYPE_NOTIF; | ||
401 | uwb_evt->notif.size = size; | ||
402 | uwb_evt->notif.rceb = rceb; | ||
403 | |||
404 | switch (le16_to_cpu(rceb->wEvent)) { | ||
405 | /* Trap some vendor specific events | ||
406 | * | ||
407 | * FIXME: move this to handling in ptc-est, where we | ||
408 | * register a NULL event handler for these two guys | ||
409 | * using the Intel IDs. | ||
410 | */ | ||
411 | case 0x0103: | ||
412 | dev_info(dev, "FIXME: DEVICE ADD\n"); | ||
413 | return; | ||
414 | case 0x0104: | ||
415 | dev_info(dev, "FIXME: DEVICE RM\n"); | ||
416 | return; | ||
417 | default: | ||
418 | break; | ||
419 | } | ||
420 | |||
421 | uwbd_event_queue(uwb_evt); | ||
422 | } | ||
423 | |||
424 | static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size) | ||
425 | { | ||
426 | struct device *dev = &rc->uwb_dev.dev; | ||
427 | struct uwb_rc_neh *neh; | ||
428 | struct uwb_rceb *notif; | ||
429 | |||
430 | if (rceb->bEventContext == 0) { | ||
431 | notif = kmalloc(size, GFP_ATOMIC); | ||
432 | if (notif) { | ||
433 | memcpy(notif, rceb, size); | ||
434 | uwb_rc_notif(rc, notif, size); | ||
435 | } else | ||
436 | dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n", | ||
437 | rceb->bEventType, le16_to_cpu(rceb->wEvent), | ||
438 | rceb->bEventContext, size); | ||
439 | } else { | ||
440 | neh = uwb_rc_neh_lookup(rc, rceb); | ||
441 | if (neh) | ||
442 | uwb_rc_neh_cb(neh, rceb, size); | ||
443 | else | ||
444 | dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", | ||
445 | rceb->bEventType, le16_to_cpu(rceb->wEvent), | ||
446 | rceb->bEventContext, size); | ||
447 | } | ||
448 | } | ||
449 | |||
/**
 * Given a buffer with one or more UWB RC events/notifications, break
 * them up and dispatch them.
 *
 * @rc: UWB Radio Controller
 * @buf: Buffer with the stream of notifications/events
 * @buf_size: Amount of data in the buffer
 *
 * Note each notification/event always starts with a 'struct
 * uwb_rceb', so the minimum size is 4 bytes.
 *
 * The device may pass us events formatted differently than expected.
 * These are first filtered (rc->filter_event), potentially creating a
 * new event in a new memory location. If a new event is created by
 * the filter it is also freed here (needtofree == 1).
 *
 * For each notif/event, tries to guess the size looking at the EST
 * tables, then looks for a neh that is waiting for that event and if
 * found, copies the payload to the neh's buffer and calls it back. If
 * not, the data is ignored.
 *
 * Note that if we can't find a size description in the EST tables, we
 * still might find a size in the 'neh' handle in uwb_rc_neh_lookup().
 *
 * Assumptions:
 *
 *   @rc->neh_lock is NOT taken
 *
 * We keep track of various sizes here:
 * size:      amount of buffer left to process. The buffer may contain
 *            events that are not formatted as WHCI.
 * real_size: the actual space taken by this event in the buffer.
 *            We need to keep track of the real size of an event to be
 *            able to advance the buffer correctly.
 * event_size: the size of the event as expected by the core layer
 *            [OR] the size of the event after filtering. If the
 *            filtering created a new event in a new memory location
 *            then this is effectively the size of the new event
 *            buffer.
 */
void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
{
	struct device *dev = &rc->uwb_dev.dev;
	void *itr;
	struct uwb_rceb *rceb;
	size_t size, real_size, event_size;
	int needtofree;

	d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size);
	d_printf(2, dev, "groking event block: %zu bytes\n", buf_size);
	itr = buf;
	size = buf_size;
	while (size > 0) {
		if (size < sizeof(*rceb)) {
			dev_err(dev, "not enough data in event buffer to "
				"process incoming events (%zu left, minimum is "
				"%zu)\n", size, sizeof(*rceb));
			break;
		}

		rceb = itr;
		if (rc->filter_event) {
			/* -ENOANO from the filter means "not filtered,
			 * process normally"; any other negative value
			 * is a hard error. */
			needtofree = rc->filter_event(rc, &rceb, size,
						      &real_size, &event_size);
			if (needtofree < 0 && needtofree != -ENOANO) {
				dev_err(dev, "BUG: Unable to filter event "
					"(0x%02x/%04x/%02x) from "
					"device. \n", rceb->bEventType,
					le16_to_cpu(rceb->wEvent),
					rceb->bEventContext);
				break;
			}
		} else
			needtofree = -ENOANO;
		/* do real processing if there was no filtering or the
		 * filtering didn't act */
		if (needtofree == -ENOANO) {
			ssize_t ret = uwb_est_find_size(rc, rceb, size);
			if (ret < 0)
				break;
			if (ret > size) {
				dev_err(dev, "BUG: hw sent incomplete event "
					"0x%02x/%04x/%02x (%zd bytes), only got "
					"%zu bytes. We don't handle that.\n",
					rceb->bEventType, le16_to_cpu(rceb->wEvent),
					rceb->bEventContext, ret, size);
				break;
			}
			real_size = event_size = ret;
		}
		uwb_rc_neh_grok_event(rc, rceb, event_size);

		/* needtofree == 1: the filter allocated a replacement
		 * event buffer which we must release. */
		if (needtofree == 1)
			kfree(rceb);

		itr += real_size;
		size -= real_size;
		d_printf(2, dev, "consumed %zd bytes, %zu left\n",
			 event_size, size);
	}
	d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size);
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
553 | |||
554 | |||
555 | /** | ||
556 | * The entity that reads from the device notification/event channel has | ||
557 | * detected an error. | ||
558 | * | ||
559 | * @rc: UWB Radio Controller | ||
560 | * @error: Errno error code | ||
561 | * | ||
562 | */ | ||
563 | void uwb_rc_neh_error(struct uwb_rc *rc, int error) | ||
564 | { | ||
565 | struct uwb_rc_neh *neh, *next; | ||
566 | unsigned long flags; | ||
567 | |||
568 | BUG_ON(error >= 0); | ||
569 | spin_lock_irqsave(&rc->neh_lock, flags); | ||
570 | list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { | ||
571 | __uwb_rc_neh_rm(rc, neh); | ||
572 | uwb_rc_neh_cb(neh, NULL, error); | ||
573 | } | ||
574 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
575 | } | ||
576 | EXPORT_SYMBOL_GPL(uwb_rc_neh_error); | ||
577 | |||
578 | |||
579 | static void uwb_rc_neh_timer(unsigned long arg) | ||
580 | { | ||
581 | struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg; | ||
582 | struct uwb_rc *rc = neh->rc; | ||
583 | unsigned long flags; | ||
584 | |||
585 | spin_lock_irqsave(&rc->neh_lock, flags); | ||
586 | __uwb_rc_neh_rm(rc, neh); | ||
587 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
588 | |||
589 | uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); | ||
590 | } | ||
591 | |||
592 | /** Initializes the @rc's neh subsystem | ||
593 | */ | ||
594 | void uwb_rc_neh_create(struct uwb_rc *rc) | ||
595 | { | ||
596 | spin_lock_init(&rc->neh_lock); | ||
597 | INIT_LIST_HEAD(&rc->neh_list); | ||
598 | set_bit(0, rc->ctx_bm); /* 0 is reserved (see [WUSB] table 8-65) */ | ||
599 | set_bit(0xff, rc->ctx_bm); /* and 0xff is invalid */ | ||
600 | rc->ctx_roll = 1; | ||
601 | } | ||
602 | |||
603 | |||
604 | /** Release's the @rc's neh subsystem */ | ||
605 | void uwb_rc_neh_destroy(struct uwb_rc *rc) | ||
606 | { | ||
607 | unsigned long flags; | ||
608 | struct uwb_rc_neh *neh, *next; | ||
609 | |||
610 | spin_lock_irqsave(&rc->neh_lock, flags); | ||
611 | list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { | ||
612 | __uwb_rc_neh_rm(rc, neh); | ||
613 | uwb_rc_neh_put(neh); | ||
614 | } | ||
615 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
616 | } | ||
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c new file mode 100644 index 000000000000..1afb38eacb9a --- /dev/null +++ b/drivers/uwb/pal.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * UWB PAL support. | ||
3 | * | ||
4 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/uwb.h> | ||
20 | |||
21 | #include "uwb-internal.h" | ||
22 | |||
/**
 * uwb_pal_init - initialize a UWB PAL
 * @pal: the PAL to initialize
 *
 * Must be called before uwb_pal_register().
 */
void uwb_pal_init(struct uwb_pal *pal)
{
	INIT_LIST_HEAD(&pal->node);
}
EXPORT_SYMBOL_GPL(uwb_pal_init);
32 | |||
33 | /** | ||
34 | * uwb_pal_register - register a UWB PAL | ||
35 | * @rc: the radio controller the PAL will be using | ||
36 | * @pal: the PAL | ||
37 | * | ||
38 | * The PAL must be initialized with uwb_pal_init(). | ||
39 | */ | ||
40 | int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) | ||
41 | { | ||
42 | int ret; | ||
43 | |||
44 | if (pal->device) { | ||
45 | ret = sysfs_create_link(&pal->device->kobj, | ||
46 | &rc->uwb_dev.dev.kobj, "uwb_rc"); | ||
47 | if (ret < 0) | ||
48 | return ret; | ||
49 | ret = sysfs_create_link(&rc->uwb_dev.dev.kobj, | ||
50 | &pal->device->kobj, pal->name); | ||
51 | if (ret < 0) { | ||
52 | sysfs_remove_link(&pal->device->kobj, "uwb_rc"); | ||
53 | return ret; | ||
54 | } | ||
55 | } | ||
56 | |||
57 | spin_lock(&rc->pal_lock); | ||
58 | list_add(&pal->node, &rc->pals); | ||
59 | spin_unlock(&rc->pal_lock); | ||
60 | |||
61 | return 0; | ||
62 | } | ||
63 | EXPORT_SYMBOL_GPL(uwb_pal_register); | ||
64 | |||
/**
 * uwb_pal_unregister - unregister a UWB PAL
 * @rc: the radio controller the PAL was using
 * @pal: the PAL
 *
 * Removes the PAL from the radio controller's list and tears down the
 * sysfs cross-links created by uwb_pal_register().
 */
void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal)
{
	spin_lock(&rc->pal_lock);
	list_del(&pal->node);
	spin_unlock(&rc->pal_lock);

	if (pal->device) {
		sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
		sysfs_remove_link(&pal->device->kobj, "uwb_rc");
	}
}
EXPORT_SYMBOL_GPL(uwb_pal_unregister);
82 | |||
83 | /** | ||
84 | * uwb_rc_pal_init - initialize the PAL related parts of a radio controller | ||
85 | * @rc: the radio controller | ||
86 | */ | ||
87 | void uwb_rc_pal_init(struct uwb_rc *rc) | ||
88 | { | ||
89 | spin_lock_init(&rc->pal_lock); | ||
90 | INIT_LIST_HEAD(&rc->pals); | ||
91 | } | ||
diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c new file mode 100644 index 000000000000..8de856fa7958 --- /dev/null +++ b/drivers/uwb/reset.c | |||
@@ -0,0 +1,362 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * UWB basic command support and radio reset | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: | ||
24 | * | ||
25 | * - docs | ||
26 | * | ||
27 | * - Now we are serializing (using the uwb_dev->mutex) the command | ||
28 | * execution; it should be parallelized as much as possible some | ||
29 | * day. | ||
30 | */ | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/err.h> | ||
33 | |||
34 | #include "uwb-internal.h" | ||
35 | #define D_LOCAL 0 | ||
36 | #include <linux/uwb/debug.h> | ||
37 | |||
/**
 * Command result codes (WUSB1.0[T8-69]), indexed by result code.
 *
 * Code 255 ("time out") is handled separately in uwb_rc_strerror()
 * as it is not contiguous with this table.
 */
static
const char *__strerror[] = {
	"success",
	"failure",
	"hardware failure",
	"no more slots",
	"beacon is too large",
	"invalid parameter",
	"unsupported power level",
	"time out (wa) or invalid ie data (whci)",
	"beacon size exceeded",
	"cancelled",
	"invalid state",
	"invalid size",
	"ack not received",	/* fixed typo: was "recieved" */
	"no more asie notification",
};
58 | |||
59 | |||
60 | /** Return a string matching the given error code */ | ||
61 | const char *uwb_rc_strerror(unsigned code) | ||
62 | { | ||
63 | if (code == 255) | ||
64 | return "time out"; | ||
65 | if (code >= ARRAY_SIZE(__strerror)) | ||
66 | return "unknown error"; | ||
67 | return __strerror[code]; | ||
68 | } | ||
69 | |||
70 | int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, | ||
71 | struct uwb_rccb *cmd, size_t cmd_size, | ||
72 | u8 expected_type, u16 expected_event, | ||
73 | uwb_rc_cmd_cb_f cb, void *arg) | ||
74 | { | ||
75 | struct device *dev = &rc->uwb_dev.dev; | ||
76 | struct uwb_rc_neh *neh; | ||
77 | int needtofree = 0; | ||
78 | int result; | ||
79 | |||
80 | uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */ | ||
81 | if (rc->priv == NULL) { | ||
82 | uwb_dev_unlock(&rc->uwb_dev); | ||
83 | return -ESHUTDOWN; | ||
84 | } | ||
85 | |||
86 | if (rc->filter_cmd) { | ||
87 | needtofree = rc->filter_cmd(rc, &cmd, &cmd_size); | ||
88 | if (needtofree < 0 && needtofree != -ENOANO) { | ||
89 | dev_err(dev, "%s: filter error: %d\n", | ||
90 | cmd_name, needtofree); | ||
91 | uwb_dev_unlock(&rc->uwb_dev); | ||
92 | return needtofree; | ||
93 | } | ||
94 | } | ||
95 | |||
96 | neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg); | ||
97 | if (IS_ERR(neh)) { | ||
98 | result = PTR_ERR(neh); | ||
99 | goto out; | ||
100 | } | ||
101 | |||
102 | result = rc->cmd(rc, cmd, cmd_size); | ||
103 | uwb_dev_unlock(&rc->uwb_dev); | ||
104 | if (result < 0) | ||
105 | uwb_rc_neh_rm(rc, neh); | ||
106 | else | ||
107 | uwb_rc_neh_arm(rc, neh); | ||
108 | uwb_rc_neh_put(neh); | ||
109 | out: | ||
110 | if (needtofree == 1) | ||
111 | kfree(cmd); | ||
112 | return result < 0 ? result : 0; | ||
113 | } | ||
114 | EXPORT_SYMBOL_GPL(uwb_rc_cmd_async); | ||
115 | |||
/* Completion context shared between __uwb_rc_cmd() and its callback. */
struct uwb_rc_cmd_done_params {
	struct completion completion;	/* signalled when the reply arrives */
	struct uwb_rceb *reply;		/* caller's buffer, or kmalloc'ed copy */
	ssize_t reply_size;		/* in: buffer size; out: bytes or -errno */
};
121 | |||
122 | static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg, | ||
123 | struct uwb_rceb *reply, ssize_t reply_size) | ||
124 | { | ||
125 | struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg; | ||
126 | |||
127 | if (reply_size > 0) { | ||
128 | if (p->reply) | ||
129 | reply_size = min(p->reply_size, reply_size); | ||
130 | else | ||
131 | p->reply = kmalloc(reply_size, GFP_ATOMIC); | ||
132 | |||
133 | if (p->reply) | ||
134 | memcpy(p->reply, reply, reply_size); | ||
135 | else | ||
136 | reply_size = -ENOMEM; | ||
137 | } | ||
138 | p->reply_size = reply_size; | ||
139 | complete(&p->completion); | ||
140 | } | ||
141 | |||
142 | |||
/**
 * Generic function for issuing commands to the Radio Control Interface
 *
 * @rc: UWB Radio Control descriptor
 * @cmd_name: Name of the command being issued (for error messages)
 * @cmd: Pointer to rccb structure containing the command;
 * normally you embed this structure as the first member of
 * the full command structure.
 * @cmd_size: Size of the whole command buffer pointed to by @cmd.
 * @reply: Pointer to where to store the reply
 * @reply_size: @reply's size
 * @expected_type: Expected type in the return event
 * @expected_event: Expected event code in the return event
 * @preply: Here a pointer to where the event data is received will
 * be stored. Once done with the data, free with kfree().
 *
 * This function is generic; it works for commands that return a fixed
 * and known size or for commands that return a variable amount of data.
 *
 * If a buffer is provided, that is used, although it could be chopped
 * to the maximum size of the buffer. If the buffer is NULL, then one
 * be allocated in *preply with the whole contents of the reply.
 *
 * @rc needs to be referenced
 */
static
ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
		     struct uwb_rccb *cmd, size_t cmd_size,
		     struct uwb_rceb *reply, size_t reply_size,
		     u8 expected_type, u16 expected_event,
		     struct uwb_rceb **preply)
{
	ssize_t result = 0;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_cmd_done_params params;

	/* params must be fully set up before the async submit: the
	 * callback can fire as soon as uwb_rc_cmd_async() returns. */
	init_completion(&params.completion);
	params.reply = reply;
	params.reply_size = reply_size;

	result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size,
				  expected_type, expected_event,
				  uwb_rc_cmd_done, &params);
	if (result)
		return result;

	/* Block until uwb_rc_cmd_done() has filled in params. */
	wait_for_completion(&params.completion);

	/* params.reply may point at a buffer kmalloc'ed by the callback
	 * when the caller passed reply == NULL; hand it back via preply. */
	if (preply)
		*preply = params.reply;

	if (params.reply_size < 0)
		dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x "
			"reception failed: %d\n", cmd_name,
			expected_type, expected_event, cmd->bCommandContext,
			(int)params.reply_size);
	return params.reply_size;
}
201 | |||
202 | |||
/**
 * Generic function for issuing commands to the Radio Control Interface
 *
 * @rc: UWB Radio Control descriptor
 * @cmd_name: Name of the command being issued (for error messages)
 * @cmd: Pointer to rccb structure containing the command;
 * normally you embed this structure as the first member of
 * the full command structure.
 * @cmd_size: Size of the whole command buffer pointed to by @cmd.
 * @reply: Pointer to the beginning of the confirmation event
 * buffer. Normally bigger than an 'struct hwarc_rceb'.
 * You need to fill out reply->bEventType and reply->wEvent (in
 * cpu order) as the function will use them to verify the
 * confirmation event.
 * @reply_size: Size of the reply buffer
 *
 * The function checks that the length returned in the reply is at
 * least as big as @reply_size; if not, it will be deemed an error and
 * -EIO returned.
 *
 * @rc needs to be referenced
 */
ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
		   struct uwb_rccb *cmd, size_t cmd_size,
		   struct uwb_rceb *reply, size_t reply_size)
{
	struct device *dev = &rc->uwb_dev.dev;
	ssize_t result;

	result = __uwb_rc_cmd(rc, cmd_name,
			      cmd, cmd_size, reply, reply_size,
			      reply->bEventType, reply->wEvent, NULL);

	/* result > 0 guarantees the ssize_t vs size_t comparison below
	 * is done on a non-negative value, so no sign-conversion issue. */
	if (result > 0 && result < reply_size) {
		dev_err(dev, "%s: not enough data returned for decoding reply "
			"(%zu bytes received vs at least %zu needed)\n",
			cmd_name, result, reply_size);
		result = -EIO;
	}
	return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_cmd);
245 | |||
246 | |||
247 | /** | ||
248 | * Generic function for issuing commands to the Radio Control | ||
249 | * Interface that return an unknown amount of data | ||
250 | * | ||
251 | * @rc: UWB Radio Control descriptor | ||
252 | * @cmd_name: Name of the command being issued (for error messages) | ||
253 | * @cmd: Pointer to rccb structure containing the command; | ||
254 | * normally you embed this structure as the first member of | ||
255 | * the full command structure. | ||
256 | * @cmd_size: Size of the whole command buffer pointed to by @cmd. | ||
257 | * @expected_type: Expected type in the return event | ||
258 | * @expected_event: Expected event code in the return event | ||
259 | * @preply: Here a pointer to where the event data is received will | ||
260 | * be stored. Once done with the data, free with kfree(). | ||
261 | * | ||
262 | * The function checks that the length returned in the reply is at | ||
263 | * least as big as a 'struct uwb_rceb *'; if not, it will be deemed an | ||
264 | * error and -EIO returned. | ||
265 | * | ||
266 | * @rc needs to be referenced | ||
267 | */ | ||
268 | ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, | ||
269 | struct uwb_rccb *cmd, size_t cmd_size, | ||
270 | u8 expected_type, u16 expected_event, | ||
271 | struct uwb_rceb **preply) | ||
272 | { | ||
273 | return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0, | ||
274 | expected_type, expected_event, preply); | ||
275 | } | ||
276 | EXPORT_SYMBOL_GPL(uwb_rc_vcmd); | ||
277 | |||
278 | |||
/**
 * Reset a UWB Host Controller (and all radio settings)
 *
 * @rc: Host Controller descriptor
 * @returns: 0 if ok, < 0 errno code on error
 *
 * We put the command on kmalloc'ed memory as some arches cannot do
 * USB from the stack. The reply event is copied from an stage buffer,
 * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details.
 */
int uwb_rc_reset(struct uwb_rc *rc)
{
	int result = -ENOMEM;	/* default for the kzalloc failure path */
	struct uwb_rc_evt_confirm reply;
	struct uwb_rccb *cmd;
	size_t cmd_size = sizeof(*cmd);

	/* uwb_dev.mutex serializes command execution (see file header). */
	mutex_lock(&rc->uwb_dev.mutex);
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_kzalloc;
	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
	/* uwb_rc_cmd() verifies the confirmation event against these. */
	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
	reply.rceb.wEvent = UWB_RC_CMD_RESET;
	result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size,
			    &reply.rceb, sizeof(reply));
	if (result < 0)
		goto error_cmd;
	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
		dev_err(&rc->uwb_dev.dev,
			"RESET: command execution failed: %s (%d)\n",
			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
		result = -EIO;
	}
error_cmd:
	kfree(cmd);
error_kzalloc:
	mutex_unlock(&rc->uwb_dev.mutex);
	return result;
}
320 | |||
/*
 * Handle a UWB_EVT_MSG_RESET message queued by uwb_rc_reset_all():
 * perform the actual hardware reset via the rc->reset() hook.
 *
 * Returns 0 if the owning module is gone (nothing to do), otherwise
 * the result of rc->reset().
 */
int uwbd_msg_handle_reset(struct uwb_event *evt)
{
	struct uwb_rc *rc = evt->rc;
	int ret;

	/* Need to prevent the RC hardware module going away while in
	   the rc->reset() call. */
	if (!try_module_get(rc->owner))
		return 0;

	dev_info(&rc->uwb_dev.dev, "resetting radio controller\n");
	ret = rc->reset(rc);
	if (ret)
		dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret);

	module_put(rc->owner);
	return ret;
}
339 | |||
/**
 * uwb_rc_reset_all - request a reset of the radio controller and PALs
 * @rc: the radio controller of the hardware device to be reset.
 *
 * The full hardware reset of the radio controller and all the PALs
 * will be scheduled.  The reset runs later in uwbd context via
 * uwbd_msg_handle_reset(); this function only queues the request.
 *
 * GFP_ATOMIC because this may be called from atomic context; on
 * allocation failure the reset request is silently dropped.
 */
void uwb_rc_reset_all(struct uwb_rc *rc)
{
	struct uwb_event *evt;

	evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC);
	if (unlikely(evt == NULL))
		return;

	evt->rc = __uwb_rc_get(rc);	/* will be put by uwbd's uwbd_event_handle() */
	evt->ts_jiffies = jiffies;
	evt->type = UWB_EVT_TYPE_MSG;
	evt->message = UWB_EVT_MSG_RESET;

	uwbd_event_queue(evt);
}
EXPORT_SYMBOL_GPL(uwb_rc_reset_all);
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c new file mode 100644 index 000000000000..bae16204576d --- /dev/null +++ b/drivers/uwb/rsv.c | |||
@@ -0,0 +1,680 @@ | |||
1 | /* | ||
2 | * UWB reservation management. | ||
3 | * | ||
4 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/uwb.h> | ||
21 | |||
22 | #include "uwb-internal.h" | ||
23 | |||
24 | static void uwb_rsv_timer(unsigned long arg); | ||
25 | |||
/* Human-readable names for enum uwb_rsv_state, indexed by state. */
static const char *rsv_states[] = {
	[UWB_RSV_STATE_NONE]          = "none",
	[UWB_RSV_STATE_O_INITIATED]   = "initiated",
	[UWB_RSV_STATE_O_PENDING]     = "pending",
	[UWB_RSV_STATE_O_MODIFIED]    = "modified",
	[UWB_RSV_STATE_O_ESTABLISHED] = "established",
	[UWB_RSV_STATE_T_ACCEPTED]    = "accepted",
	[UWB_RSV_STATE_T_DENIED]      = "denied",
	[UWB_RSV_STATE_T_PENDING]     = "pending",
};
36 | |||
/* Human-readable names for enum uwb_drp_type, indexed by type. */
static const char *rsv_types[] = {
	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
	[UWB_DRP_TYPE_HARD]     = "hard",
	[UWB_DRP_TYPE_SOFT]     = "soft",
	[UWB_DRP_TYPE_PRIVATE]  = "private",
	[UWB_DRP_TYPE_PCA]      = "pca",
};
44 | |||
45 | /** | ||
46 | * uwb_rsv_state_str - return a string for a reservation state | ||
47 | * @state: the reservation state. | ||
48 | */ | ||
49 | const char *uwb_rsv_state_str(enum uwb_rsv_state state) | ||
50 | { | ||
51 | if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST) | ||
52 | return "unknown"; | ||
53 | return rsv_states[state]; | ||
54 | } | ||
55 | EXPORT_SYMBOL_GPL(uwb_rsv_state_str); | ||
56 | |||
57 | /** | ||
58 | * uwb_rsv_type_str - return a string for a reservation type | ||
59 | * @type: the reservation type | ||
60 | */ | ||
61 | const char *uwb_rsv_type_str(enum uwb_drp_type type) | ||
62 | { | ||
63 | if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA) | ||
64 | return "invalid"; | ||
65 | return rsv_types[type]; | ||
66 | } | ||
67 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); | ||
68 | |||
/* Log a reservation's owner, target and current state at debug level. */
static void uwb_rsv_dump(struct uwb_rsv *rsv)
{
	struct device *dev = &rsv->rc->uwb_dev.dev;
	struct uwb_dev_addr devaddr;
	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
	/* The target is either a uwb_dev or a bare DevAddr; normalize
	 * to a DevAddr for printing. */
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		devaddr = rsv->target.dev->dev_addr;
	else
		devaddr = rsv->target.devaddr;
	uwb_dev_addr_print(target, sizeof(target), &devaddr);

	dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state));
}
84 | |||
85 | /* | ||
86 | * Get a free stream index for a reservation. | ||
87 | * | ||
88 | * If the target is a DevAddr (e.g., a WUSB cluster reservation) then | ||
89 | * the stream is allocated from a pool of per-RC stream indexes, | ||
90 | * otherwise a unique stream index for the target is selected. | ||
91 | */ | ||
92 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | ||
93 | { | ||
94 | struct uwb_rc *rc = rsv->rc; | ||
95 | unsigned long *streams_bm; | ||
96 | int stream; | ||
97 | |||
98 | switch (rsv->target.type) { | ||
99 | case UWB_RSV_TARGET_DEV: | ||
100 | streams_bm = rsv->target.dev->streams; | ||
101 | break; | ||
102 | case UWB_RSV_TARGET_DEVADDR: | ||
103 | streams_bm = rc->uwb_dev.streams; | ||
104 | break; | ||
105 | default: | ||
106 | return -EINVAL; | ||
107 | } | ||
108 | |||
109 | stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS); | ||
110 | if (stream >= UWB_NUM_STREAMS) | ||
111 | return -EBUSY; | ||
112 | |||
113 | rsv->stream = stream; | ||
114 | set_bit(stream, streams_bm); | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | ||
120 | { | ||
121 | struct uwb_rc *rc = rsv->rc; | ||
122 | unsigned long *streams_bm; | ||
123 | |||
124 | switch (rsv->target.type) { | ||
125 | case UWB_RSV_TARGET_DEV: | ||
126 | streams_bm = rsv->target.dev->streams; | ||
127 | break; | ||
128 | case UWB_RSV_TARGET_DEVADDR: | ||
129 | streams_bm = rc->uwb_dev.streams; | ||
130 | break; | ||
131 | default: | ||
132 | return; | ||
133 | } | ||
134 | |||
135 | clear_bit(rsv->stream, streams_bm); | ||
136 | } | ||
137 | |||
/*
 * Generate a MAS allocation with a single row component.
 *
 * @first_mas: first MAS (row offset) within each zone
 * @mas_per_zone: number of consecutive MAS to take in each zone
 * @zs, @ze: first and last zone
 *
 * Builds a column of mas_per_zone bits at (zone zs, row first_mas),
 * then ORs it into the result, shifting one zone to the right each
 * iteration.
 *
 * NOTE(review): the loop runs z = zs..ze *inclusive*; the caller in
 * uwb_rsv_alloc_mas() passes ze == UWB_NUM_ZONES, so the final
 * iteration shifts col entirely past the end of the bitmap and ORs in
 * zero bits.  Harmless as bitmap_shift_left drops overflowing bits,
 * but looks like an off-by-one — confirm intended.
 */
static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas,
				  int first_mas, int mas_per_zone,
				  int zs, int ze)
{
	struct uwb_mas_bm col;
	int z;

	bitmap_zero(mas->bm, UWB_NUM_MAS);
	bitmap_zero(col.bm, UWB_NUM_MAS);
	bitmap_fill(col.bm, mas_per_zone);
	bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS);

	for (z = zs; z <= ze; z++) {
		bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS);
		bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}
}
158 | |||
/*
 * Allocate some MAS for this reservation based on current local
 * availability, the reservation parameters (max_mas, min_mas,
 * sparsity), and the WiMedia rules for MAS allocations.
 *
 * Returns -EBUSY if insufficient free MAS are available.
 *
 * FIXME: to simplify this, only safe reservations with a single row
 * component in zones 1 to 15 are tried (zone 0 is skipped to avoid
 * problems with the MAS reserved for the BP).
 *
 * [ECMA-368] section B.2.
 */
static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv)
{
	/* Maximum number of "safe" MAS per row for each row index
	 * ([ECMA-368] B.2); row 0 allows 8, the last row only 1. */
	static const int safe_mas_in_row[UWB_NUM_ZONES] = {
		8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
	};
	int n, r;
	struct uwb_mas_bm mas;
	bool found = false;

	/*
	 * Search all valid safe allocations until either: too few MAS
	 * are available; or the smallest allocation with sufficient
	 * MAS is found.
	 *
	 * The top of the zones are preferred, so space for larger
	 * allocations is available in the bottom of the zone (e.g., a
	 * 15 MAS allocation should start in row 14 leaving space for
	 * a 120 MAS allocation at row 0).
	 */
	for (n = safe_mas_in_row[0]; n >= 1; n--) {
		int num_mas;

		/* n MAS in each of the 15 usable zones (zone 0 skipped). */
		num_mas = n * (UWB_NUM_ZONES - 1);
		if (num_mas < rsv->min_mas)
			break;
		if (found && num_mas < rsv->max_mas)
			break;

		/* Try rows from the top of the zone downwards. */
		for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) {
			if (safe_mas_in_row[r] < n)
				continue;
			uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES);
			if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) {
				found = true;
				break;
			}
		}
	}

	if (!found)
		return -EBUSY;

	bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
	return 0;
}
217 | |||
/*
 * (Re)arm the reservation's expiry timer according to its current
 * state, or delete it if no timeout applies.
 */
static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
{
	int sframes = UWB_MAX_LOST_BEACONS;

	/*
	 * Multicast reservations can become established within 1
	 * super frame and should not be terminated if no response is
	 * received.
	 */
	if (rsv->is_multicast) {
		if (rsv->state == UWB_RSV_STATE_O_INITIATED)
			sframes = 1;
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
			sframes = 0;	/* no timeout once established */
	}

	rsv->expired = false;
	if (sframes > 0) {
		/*
		 * Add an additional 2 superframes to account for the
		 * time to send the SET DRP IE command.
		 */
		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
	} else
		del_timer(&rsv->timer);
}
245 | |||
/*
 * Update a reservations state, and schedule an update of the
 * transmitted DRP IEs.
 *
 * Marks the DRP IE stale (ie_valid = false) so it is regenerated,
 * restrokes the expiry timer for the new state, and kicks the radio
 * controller's DRP IE update work.
 */
static void uwb_rsv_state_update(struct uwb_rsv *rsv,
				 enum uwb_rsv_state new_state)
{
	rsv->state = new_state;
	rsv->ie_valid = false;

	uwb_rsv_dump(rsv);

	uwb_rsv_stroke_timer(rsv);
	uwb_rsv_sched_update(rsv->rc);
}
261 | |||
262 | static void uwb_rsv_callback(struct uwb_rsv *rsv) | ||
263 | { | ||
264 | if (rsv->callback) | ||
265 | rsv->callback(rsv); | ||
266 | } | ||
267 | |||
/*
 * Transition a reservation to a new state, performing the state's
 * entry actions (MAS availability bookkeeping, stream release, PAL
 * callback).  A "transition" to the current state only restrokes the
 * timer for stable states.
 */
void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
	if (rsv->state == new_state) {
		switch (rsv->state) {
		case UWB_RSV_STATE_O_ESTABLISHED:
		case UWB_RSV_STATE_T_ACCEPTED:
		case UWB_RSV_STATE_NONE:
			uwb_rsv_stroke_timer(rsv);
			break;
		default:
			/* Expecting a state transition so leave timer
			   as-is. */
			break;
		}
		return;
	}

	switch (new_state) {
	case UWB_RSV_STATE_NONE:
		/* Terminating: give back the MAS and the stream index,
		 * then notify the PAL. */
		uwb_drp_avail_release(rsv->rc, &rsv->mas);
		uwb_rsv_put_stream(rsv);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_INITIATED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
		break;
	case UWB_RSV_STATE_O_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		/* Pending reservation confirmed: commit the MAS. */
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_ACCEPTED:
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_DENIED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
		break;
	default:
		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
			uwb_rsv_state_str(new_state), new_state);
	}
}
316 | |||
/*
 * Allocate and minimally initialize a reservation: list nodes, the
 * expiry timer (uwb_rsv_timer) and the back-pointer to @rc.
 *
 * Returns NULL on allocation failure.  The caller owns the result and
 * frees it with uwb_rsv_free().
 */
static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv;

	rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
	if (!rsv)
		return NULL;

	INIT_LIST_HEAD(&rsv->rc_node);
	INIT_LIST_HEAD(&rsv->pal_node);
	init_timer(&rsv->timer);
	rsv->timer.function = uwb_rsv_timer;
	rsv->timer.data     = (unsigned long)rsv;

	rsv->rc = rc;

	return rsv;
}
335 | |||
/*
 * Drop the owner (and, for device targets, the target) references
 * taken while the reservation was live, then free it.
 */
static void uwb_rsv_free(struct uwb_rsv *rsv)
{
	uwb_dev_put(rsv->owner);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		uwb_dev_put(rsv->target.dev);
	kfree(rsv);
}
343 | |||
/**
 * uwb_rsv_create - allocate and initialize a UWB reservation structure
 * @rc: the radio controller
 * @cb: callback to use when the reservation completes or terminates
 * @pal_priv: data private to the PAL to be passed in the callback
 *
 * Returns NULL on allocation failure.
 *
 * The callback is called when the state of the reservation changes from:
 *
 *   - pending to accepted
 *   - pending to denied
 *   - accepted to terminated
 *   - pending to terminated
 */
struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;

	return rsv;
}
EXPORT_SYMBOL_GPL(uwb_rsv_create);
371 | |||
/*
 * Terminate (if active), unlink and free a reservation.
 * Caller must hold rc->rsvs_mutex (see uwb_rsv_destroy()).
 */
void uwb_rsv_remove(struct uwb_rsv *rsv)
{
	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
	/* Timer must be fully stopped before freeing rsv. */
	del_timer_sync(&rsv->timer);
	list_del(&rsv->rc_node);
	uwb_rsv_free(rsv);
}
380 | |||
/**
 * uwb_rsv_destroy - free a UWB reservation structure
 * @rsv: the reservation to free
 *
 * The reservation will be terminated if it is pending or established.
 */
void uwb_rsv_destroy(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);
	uwb_rsv_remove(rsv);
	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
396 | |||
/**
 * uwb_rsv_establish - start a reservation establishment
 * @rsv: the reservation
 *
 * The PAL should fill in @rsv's owner, target, type, max_mas,
 * min_mas, sparsity and is_multicast fields. If the target is a
 * uwb_dev it must be referenced.
 *
 * The reservation's callback will be called when the reservation is
 * accepted, denied or times out.
 *
 * Returns 0 on success, -EBUSY/-EINVAL from stream or MAS allocation
 * on failure.
 */
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	int ret;

	mutex_lock(&rc->rsvs_mutex);

	ret = uwb_rsv_get_stream(rsv);
	if (ret)
		goto out;

	ret = uwb_rsv_alloc_mas(rsv);
	if (ret) {
		/* Undo the stream allocation on failure. */
		uwb_rsv_put_stream(rsv);
		goto out;
	}

	list_add_tail(&rsv->rc_node, &rc->reservations);
	rsv->owner = &rc->uwb_dev;
	uwb_dev_get(rsv->owner);	/* released by uwb_rsv_free() */
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
out:
	mutex_unlock(&rc->rsvs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(uwb_rsv_establish);
434 | |||
/**
 * uwb_rsv_modify - modify an already established reservation
 * @rsv: the reservation to modify
 * @max_mas: new maximum MAS to reserve
 * @min_mas: new minimum MAS to reserve
 * @sparsity: new sparsity to use
 *
 * Not yet implemented; always returns -ENOSYS.
 *
 * FIXME: implement this once there are PALs that use it.
 */
int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(uwb_rsv_modify);
449 | |||
/**
 * uwb_rsv_terminate - terminate an established reservation
 * @rsv: the reservation to terminate
 *
 * A reservation is terminated by removing the DRP IE from the beacon,
 * the other end will consider the reservation to be terminated when
 * it does not see the DRP IE for at least mMaxLostBeacons.
 *
 * If applicable, the reference to the target uwb_dev will be released.
 */
void uwb_rsv_terminate(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	/* UWB_RSV_STATE_NONE releases the MAS and the stream index. */
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
471 | |||
/**
 * uwb_rsv_accept - accept a new reservation from a peer
 * @rsv: the reservation
 * @cb: call back for reservation changes
 * @pal_priv: data to be passed in the above call back
 *
 * Reservation requests from peers are denied unless a PAL accepts it
 * by calling this function.
 *
 * Sets the state directly (no uwb_rsv_set_state()); the caller —
 * uwb_rsv_new_target() via the PAL's new_rsv hook — performs the full
 * transition afterwards.
 */
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
{
	rsv->callback = cb;
	rsv->pal_priv = pal_priv;
	rsv->state    = UWB_RSV_STATE_T_ACCEPTED;
}
EXPORT_SYMBOL_GPL(uwb_rsv_accept);
488 | |||
489 | /* | ||
490 | * Is a received DRP IE for this reservation? | ||
491 | */ | ||
492 | static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src, | ||
493 | struct uwb_ie_drp *drp_ie) | ||
494 | { | ||
495 | struct uwb_dev_addr *rsv_src; | ||
496 | int stream; | ||
497 | |||
498 | stream = uwb_ie_drp_stream_index(drp_ie); | ||
499 | |||
500 | if (rsv->stream != stream) | ||
501 | return false; | ||
502 | |||
503 | switch (rsv->target.type) { | ||
504 | case UWB_RSV_TARGET_DEVADDR: | ||
505 | return rsv->stream == stream; | ||
506 | case UWB_RSV_TARGET_DEV: | ||
507 | if (uwb_ie_drp_owner(drp_ie)) | ||
508 | rsv_src = &rsv->owner->dev_addr; | ||
509 | else | ||
510 | rsv_src = &rsv->target.dev->dev_addr; | ||
511 | return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0; | ||
512 | } | ||
513 | return false; | ||
514 | } | ||
515 | |||
516 | static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | ||
517 | struct uwb_dev *src, | ||
518 | struct uwb_ie_drp *drp_ie) | ||
519 | { | ||
520 | struct uwb_rsv *rsv; | ||
521 | struct uwb_pal *pal; | ||
522 | enum uwb_rsv_state state; | ||
523 | |||
524 | rsv = uwb_rsv_alloc(rc); | ||
525 | if (!rsv) | ||
526 | return NULL; | ||
527 | |||
528 | rsv->rc = rc; | ||
529 | rsv->owner = src; | ||
530 | uwb_dev_get(rsv->owner); | ||
531 | rsv->target.type = UWB_RSV_TARGET_DEV; | ||
532 | rsv->target.dev = &rc->uwb_dev; | ||
533 | rsv->type = uwb_ie_drp_type(drp_ie); | ||
534 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); | ||
535 | set_bit(rsv->stream, rsv->owner->streams); | ||
536 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); | ||
537 | |||
538 | /* | ||
539 | * See if any PALs are interested in this reservation. If not, | ||
540 | * deny the request. | ||
541 | */ | ||
542 | rsv->state = UWB_RSV_STATE_T_DENIED; | ||
543 | spin_lock(&rc->pal_lock); | ||
544 | list_for_each_entry(pal, &rc->pals, node) { | ||
545 | if (pal->new_rsv) | ||
546 | pal->new_rsv(rsv); | ||
547 | if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) | ||
548 | break; | ||
549 | } | ||
550 | spin_unlock(&rc->pal_lock); | ||
551 | |||
552 | list_add_tail(&rsv->rc_node, &rc->reservations); | ||
553 | state = rsv->state; | ||
554 | rsv->state = UWB_RSV_STATE_NONE; | ||
555 | uwb_rsv_set_state(rsv, state); | ||
556 | |||
557 | return rsv; | ||
558 | } | ||
559 | |||
560 | /** | ||
561 | * uwb_rsv_find - find a reservation for a received DRP IE. | ||
562 | * @rc: the radio controller | ||
563 | * @src: source of the DRP IE | ||
564 | * @drp_ie: the DRP IE | ||
565 | * | ||
566 | * If the reservation cannot be found and the DRP IE is from a peer | ||
567 | * attempting to establish a new reservation, create a new reservation | ||
568 | * and add it to the list. | ||
569 | */ | ||
570 | struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, | ||
571 | struct uwb_ie_drp *drp_ie) | ||
572 | { | ||
573 | struct uwb_rsv *rsv; | ||
574 | |||
575 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
576 | if (uwb_rsv_match(rsv, src, drp_ie)) | ||
577 | return rsv; | ||
578 | } | ||
579 | |||
580 | if (uwb_ie_drp_owner(drp_ie)) | ||
581 | return uwb_rsv_new_target(rc, src, drp_ie); | ||
582 | |||
583 | return NULL; | ||
584 | } | ||
585 | |||
586 | /* | ||
587 | * Go through all the reservations and check for timeouts and (if | ||
588 | * necessary) update their DRP IEs. | ||
589 | * | ||
590 | * FIXME: look at building the SET_DRP_IE command here rather than | ||
591 | * having to rescan the list in uwb_rc_send_all_drp_ie(). | ||
592 | */ | ||
593 | static bool uwb_rsv_update_all(struct uwb_rc *rc) | ||
594 | { | ||
595 | struct uwb_rsv *rsv, *t; | ||
596 | bool ie_updated = false; | ||
597 | |||
598 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | ||
599 | if (rsv->expired) | ||
600 | uwb_drp_handle_timeout(rsv); | ||
601 | if (!rsv->ie_valid) { | ||
602 | uwb_drp_ie_update(rsv); | ||
603 | ie_updated = true; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | return ie_updated; | ||
608 | } | ||
609 | |||
610 | void uwb_rsv_sched_update(struct uwb_rc *rc) | ||
611 | { | ||
612 | queue_work(rc->rsv_workq, &rc->rsv_update_work); | ||
613 | } | ||
614 | |||
615 | /* | ||
616 | * Update DRP IEs and, if necessary, the DRP Availability IE and send | ||
617 | * the updated IEs to the radio controller. | ||
618 | */ | ||
619 | static void uwb_rsv_update_work(struct work_struct *work) | ||
620 | { | ||
621 | struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); | ||
622 | bool ie_updated; | ||
623 | |||
624 | mutex_lock(&rc->rsvs_mutex); | ||
625 | |||
626 | ie_updated = uwb_rsv_update_all(rc); | ||
627 | |||
628 | if (!rc->drp_avail.ie_valid) { | ||
629 | uwb_drp_avail_ie_update(rc); | ||
630 | ie_updated = true; | ||
631 | } | ||
632 | |||
633 | if (ie_updated) | ||
634 | uwb_rc_send_all_drp_ie(rc); | ||
635 | |||
636 | mutex_unlock(&rc->rsvs_mutex); | ||
637 | } | ||
638 | |||
639 | static void uwb_rsv_timer(unsigned long arg) | ||
640 | { | ||
641 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; | ||
642 | |||
643 | rsv->expired = true; | ||
644 | uwb_rsv_sched_update(rsv->rc); | ||
645 | } | ||
646 | |||
647 | void uwb_rsv_init(struct uwb_rc *rc) | ||
648 | { | ||
649 | INIT_LIST_HEAD(&rc->reservations); | ||
650 | mutex_init(&rc->rsvs_mutex); | ||
651 | INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | ||
652 | |||
653 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); | ||
654 | } | ||
655 | |||
656 | int uwb_rsv_setup(struct uwb_rc *rc) | ||
657 | { | ||
658 | char name[16]; | ||
659 | |||
660 | snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev)); | ||
661 | rc->rsv_workq = create_singlethread_workqueue(name); | ||
662 | if (rc->rsv_workq == NULL) | ||
663 | return -ENOMEM; | ||
664 | |||
665 | return 0; | ||
666 | } | ||
667 | |||
668 | void uwb_rsv_cleanup(struct uwb_rc *rc) | ||
669 | { | ||
670 | struct uwb_rsv *rsv, *t; | ||
671 | |||
672 | mutex_lock(&rc->rsvs_mutex); | ||
673 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | ||
674 | uwb_rsv_remove(rsv); | ||
675 | } | ||
676 | mutex_unlock(&rc->rsvs_mutex); | ||
677 | |||
678 | cancel_work_sync(&rc->rsv_update_work); | ||
679 | destroy_workqueue(rc->rsv_workq); | ||
680 | } | ||
diff --git a/drivers/uwb/scan.c b/drivers/uwb/scan.c new file mode 100644 index 000000000000..2d270748f32b --- /dev/null +++ b/drivers/uwb/scan.c | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Scanning management | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * | ||
24 | * FIXME: docs | ||
25 | * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal | ||
26 | * with each other. Currently seems that START_BEACON while | ||
27 | * SCAN_ONLY will cancel the scan, so we need to update the | ||
28 | * state here. Clarification request sent by email on | ||
29 | * 10/05/2005. | ||
30 | * 10/28/2005 No clear answer heard--maybe we'll hack the API | ||
31 | * so that when we start beaconing, if the HC is | ||
32 | * scanning in a mode not compatible with beaconing | ||
33 | * we just fail. | ||
34 | */ | ||
35 | |||
36 | #include <linux/device.h> | ||
37 | #include <linux/err.h> | ||
38 | #include "uwb-internal.h" | ||
39 | |||
40 | |||
41 | /** | ||
42 | * Start/stop scanning in a radio controller | ||
43 | * | ||
 44 | * @rc: UWB Radio Controller | ||
45 | * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12] | ||
46 | * @type: Type of scanning to do. | ||
47 | * @bpst_offset: value at which to start scanning (if type == | ||
48 | * UWB_SCAN_ONLY_STARTTIME) | ||
49 | * @returns: 0 if ok, < 0 errno code on error | ||
50 | * | ||
51 | * We put the command on kmalloc'ed memory as some arches cannot do | ||
 52 | * USB from the stack. The reply event is copied from a stage buffer, | ||
53 | * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. | ||
54 | */ | ||
55 | int uwb_rc_scan(struct uwb_rc *rc, | ||
56 | unsigned channel, enum uwb_scan_type type, | ||
57 | unsigned bpst_offset) | ||
58 | { | ||
59 | int result; | ||
60 | struct uwb_rc_cmd_scan *cmd; | ||
61 | struct uwb_rc_evt_confirm reply; | ||
62 | |||
63 | result = -ENOMEM; | ||
64 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | ||
65 | if (cmd == NULL) | ||
66 | goto error_kzalloc; | ||
67 | mutex_lock(&rc->uwb_dev.mutex); | ||
68 | cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; | ||
69 | cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN); | ||
70 | cmd->bChannelNumber = channel; | ||
71 | cmd->bScanState = type; | ||
72 | cmd->wStartTime = cpu_to_le16(bpst_offset); | ||
73 | reply.rceb.bEventType = UWB_RC_CET_GENERAL; | ||
74 | reply.rceb.wEvent = UWB_RC_CMD_SCAN; | ||
75 | result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd), | ||
76 | &reply.rceb, sizeof(reply)); | ||
77 | if (result < 0) | ||
78 | goto error_cmd; | ||
79 | if (reply.bResultCode != UWB_RC_RES_SUCCESS) { | ||
80 | dev_err(&rc->uwb_dev.dev, | ||
81 | "SCAN: command execution failed: %s (%d)\n", | ||
82 | uwb_rc_strerror(reply.bResultCode), reply.bResultCode); | ||
83 | result = -EIO; | ||
84 | goto error_cmd; | ||
85 | } | ||
86 | rc->scanning = channel; | ||
87 | rc->scan_type = type; | ||
88 | error_cmd: | ||
89 | mutex_unlock(&rc->uwb_dev.mutex); | ||
90 | kfree(cmd); | ||
91 | error_kzalloc: | ||
92 | return result; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * Print scanning state | ||
97 | */ | ||
98 | static ssize_t uwb_rc_scan_show(struct device *dev, | ||
99 | struct device_attribute *attr, char *buf) | ||
100 | { | ||
101 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
102 | struct uwb_rc *rc = uwb_dev->rc; | ||
103 | ssize_t result; | ||
104 | |||
105 | mutex_lock(&rc->uwb_dev.mutex); | ||
106 | result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type); | ||
107 | mutex_unlock(&rc->uwb_dev.mutex); | ||
108 | return result; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * | ||
113 | */ | ||
114 | static ssize_t uwb_rc_scan_store(struct device *dev, | ||
115 | struct device_attribute *attr, | ||
116 | const char *buf, size_t size) | ||
117 | { | ||
118 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | ||
119 | struct uwb_rc *rc = uwb_dev->rc; | ||
120 | unsigned channel; | ||
121 | unsigned type; | ||
122 | unsigned bpst_offset = 0; | ||
123 | ssize_t result = -EINVAL; | ||
124 | |||
125 | result = sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset); | ||
126 | if (result >= 2 && type < UWB_SCAN_TOP) | ||
127 | result = uwb_rc_scan(rc, channel, type, bpst_offset); | ||
128 | |||
129 | return result < 0 ? result : size; | ||
130 | } | ||
131 | |||
132 | /** Radio Control sysfs interface (declaration) */ | ||
133 | DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store); | ||
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c new file mode 100644 index 000000000000..2d8d62d9f53e --- /dev/null +++ b/drivers/uwb/umc-bus.c | |||
@@ -0,0 +1,218 @@ | |||
1 | /* | ||
2 | * Bus for UWB Multi-interface Controller capabilities. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This file is released under the GNU GPL v2. | ||
7 | */ | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/sysfs.h> | ||
10 | #include <linux/workqueue.h> | ||
11 | #include <linux/uwb/umc.h> | ||
12 | #include <linux/pci.h> | ||
13 | |||
14 | static int umc_bus_unbind_helper(struct device *dev, void *data) | ||
15 | { | ||
16 | struct device *parent = data; | ||
17 | |||
18 | if (dev->parent == parent && dev->driver) | ||
19 | device_release_driver(dev); | ||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | /** | ||
24 | * umc_controller_reset - reset the whole UMC controller | ||
25 | * @umc: the UMC device for the radio controller. | ||
26 | * | ||
27 | * Drivers will be unbound from all UMC devices belonging to the | ||
28 | * controller and then the radio controller will be rebound. The | ||
29 | * radio controller is expected to do a full hardware reset when it is | ||
30 | * probed. | ||
31 | * | ||
32 | * If this is called while a probe() or remove() is in progress it | ||
33 | * will return -EAGAIN and not perform the reset. | ||
34 | */ | ||
35 | int umc_controller_reset(struct umc_dev *umc) | ||
36 | { | ||
37 | struct device *parent = umc->dev.parent; | ||
38 | int ret; | ||
39 | |||
40 | if (down_trylock(&parent->sem)) | ||
41 | return -EAGAIN; | ||
42 | bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper); | ||
43 | ret = device_attach(&umc->dev); | ||
44 | if (ret == 1) | ||
45 | ret = 0; | ||
46 | up(&parent->sem); | ||
47 | |||
48 | return ret; | ||
49 | } | ||
50 | EXPORT_SYMBOL_GPL(umc_controller_reset); | ||
51 | |||
52 | /** | ||
53 | * umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device. | ||
54 | * @umc_drv: umc driver with match_data pointing to a zero-terminated | ||
55 | * table of pci_device_id's. | ||
56 | * @umc: umc device whose parent is to be matched. | ||
57 | */ | ||
58 | int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc) | ||
59 | { | ||
60 | const struct pci_device_id *id_table = umc_drv->match_data; | ||
61 | struct pci_dev *pci; | ||
62 | |||
63 | if (umc->dev.parent->bus != &pci_bus_type) | ||
64 | return 0; | ||
65 | |||
66 | pci = to_pci_dev(umc->dev.parent); | ||
67 | return pci_match_id(id_table, pci) != NULL; | ||
68 | } | ||
69 | EXPORT_SYMBOL_GPL(umc_match_pci_id); | ||
70 | |||
71 | static int umc_bus_rescan_helper(struct device *dev, void *data) | ||
72 | { | ||
73 | int ret = 0; | ||
74 | |||
75 | if (!dev->driver) | ||
76 | ret = device_attach(dev); | ||
77 | |||
78 | return ret < 0 ? ret : 0; | ||
79 | } | ||
80 | |||
81 | static void umc_bus_rescan(void) | ||
82 | { | ||
83 | int err; | ||
84 | |||
85 | /* | ||
86 | * We can't use bus_rescan_devices() here as it deadlocks when | ||
87 | * it tries to retake the dev->parent semaphore. | ||
88 | */ | ||
89 | err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper); | ||
90 | if (err < 0) | ||
91 | printk(KERN_WARNING "%s: rescan of bus failed: %d\n", | ||
92 | KBUILD_MODNAME, err); | ||
93 | } | ||
94 | |||
95 | static int umc_bus_match(struct device *dev, struct device_driver *drv) | ||
96 | { | ||
97 | struct umc_dev *umc = to_umc_dev(dev); | ||
98 | struct umc_driver *umc_driver = to_umc_driver(drv); | ||
99 | |||
100 | if (umc->cap_id == umc_driver->cap_id) { | ||
101 | if (umc_driver->match) | ||
102 | return umc_driver->match(umc_driver, umc); | ||
103 | else | ||
104 | return 1; | ||
105 | } | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | static int umc_device_probe(struct device *dev) | ||
110 | { | ||
111 | struct umc_dev *umc; | ||
112 | struct umc_driver *umc_driver; | ||
113 | int err; | ||
114 | |||
115 | umc_driver = to_umc_driver(dev->driver); | ||
116 | umc = to_umc_dev(dev); | ||
117 | |||
118 | get_device(dev); | ||
119 | err = umc_driver->probe(umc); | ||
120 | if (err) | ||
121 | put_device(dev); | ||
122 | else | ||
123 | umc_bus_rescan(); | ||
124 | |||
125 | return err; | ||
126 | } | ||
127 | |||
128 | static int umc_device_remove(struct device *dev) | ||
129 | { | ||
130 | struct umc_dev *umc; | ||
131 | struct umc_driver *umc_driver; | ||
132 | |||
133 | umc_driver = to_umc_driver(dev->driver); | ||
134 | umc = to_umc_dev(dev); | ||
135 | |||
136 | umc_driver->remove(umc); | ||
137 | put_device(dev); | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static int umc_device_suspend(struct device *dev, pm_message_t state) | ||
142 | { | ||
143 | struct umc_dev *umc; | ||
144 | struct umc_driver *umc_driver; | ||
145 | int err = 0; | ||
146 | |||
147 | umc = to_umc_dev(dev); | ||
148 | |||
149 | if (dev->driver) { | ||
150 | umc_driver = to_umc_driver(dev->driver); | ||
151 | if (umc_driver->suspend) | ||
152 | err = umc_driver->suspend(umc, state); | ||
153 | } | ||
154 | return err; | ||
155 | } | ||
156 | |||
157 | static int umc_device_resume(struct device *dev) | ||
158 | { | ||
159 | struct umc_dev *umc; | ||
160 | struct umc_driver *umc_driver; | ||
161 | int err = 0; | ||
162 | |||
163 | umc = to_umc_dev(dev); | ||
164 | |||
165 | if (dev->driver) { | ||
166 | umc_driver = to_umc_driver(dev->driver); | ||
167 | if (umc_driver->resume) | ||
168 | err = umc_driver->resume(umc); | ||
169 | } | ||
170 | return err; | ||
171 | } | ||
172 | |||
173 | static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
174 | { | ||
175 | struct umc_dev *umc = to_umc_dev(dev); | ||
176 | |||
177 | return sprintf(buf, "0x%02x\n", umc->cap_id); | ||
178 | } | ||
179 | |||
180 | static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
181 | { | ||
182 | struct umc_dev *umc = to_umc_dev(dev); | ||
183 | |||
184 | return sprintf(buf, "0x%04x\n", umc->version); | ||
185 | } | ||
186 | |||
187 | static struct device_attribute umc_dev_attrs[] = { | ||
188 | __ATTR_RO(capability_id), | ||
189 | __ATTR_RO(version), | ||
190 | __ATTR_NULL, | ||
191 | }; | ||
192 | |||
193 | struct bus_type umc_bus_type = { | ||
194 | .name = "umc", | ||
195 | .match = umc_bus_match, | ||
196 | .probe = umc_device_probe, | ||
197 | .remove = umc_device_remove, | ||
198 | .suspend = umc_device_suspend, | ||
199 | .resume = umc_device_resume, | ||
200 | .dev_attrs = umc_dev_attrs, | ||
201 | }; | ||
202 | EXPORT_SYMBOL_GPL(umc_bus_type); | ||
203 | |||
204 | static int __init umc_bus_init(void) | ||
205 | { | ||
206 | return bus_register(&umc_bus_type); | ||
207 | } | ||
208 | module_init(umc_bus_init); | ||
209 | |||
210 | static void __exit umc_bus_exit(void) | ||
211 | { | ||
212 | bus_unregister(&umc_bus_type); | ||
213 | } | ||
214 | module_exit(umc_bus_exit); | ||
215 | |||
216 | MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus"); | ||
217 | MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); | ||
218 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c new file mode 100644 index 000000000000..aa44e1c1a102 --- /dev/null +++ b/drivers/uwb/umc-dev.c | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * UWB Multi-interface Controller device management. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This file is released under the GNU GPL v2. | ||
7 | */ | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/uwb/umc.h> | ||
10 | #define D_LOCAL 0 | ||
11 | #include <linux/uwb/debug.h> | ||
12 | |||
13 | static void umc_device_release(struct device *dev) | ||
14 | { | ||
15 | struct umc_dev *umc = to_umc_dev(dev); | ||
16 | |||
17 | kfree(umc); | ||
18 | } | ||
19 | |||
20 | /** | ||
21 | * umc_device_create - allocate a child UMC device | ||
22 | * @parent: parent of the new UMC device. | ||
23 | * @n: index of the new device. | ||
24 | * | ||
25 | * The new UMC device will have a bus ID of the parent with '-n' | ||
26 | * appended. | ||
27 | */ | ||
28 | struct umc_dev *umc_device_create(struct device *parent, int n) | ||
29 | { | ||
30 | struct umc_dev *umc; | ||
31 | |||
32 | umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); | ||
33 | if (umc) { | ||
34 | snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d", | ||
35 | parent->bus_id, n); | ||
36 | umc->dev.parent = parent; | ||
37 | umc->dev.bus = &umc_bus_type; | ||
38 | umc->dev.release = umc_device_release; | ||
39 | |||
40 | umc->dev.dma_mask = parent->dma_mask; | ||
41 | } | ||
42 | return umc; | ||
43 | } | ||
44 | EXPORT_SYMBOL_GPL(umc_device_create); | ||
45 | |||
46 | /** | ||
47 | * umc_device_register - register a UMC device | ||
48 | * @umc: pointer to the UMC device | ||
49 | * | ||
50 | * The memory resource for the UMC device is acquired and the device | ||
51 | * registered with the system. | ||
52 | */ | ||
53 | int umc_device_register(struct umc_dev *umc) | ||
54 | { | ||
55 | int err; | ||
56 | |||
57 | d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc); | ||
58 | |||
59 | err = request_resource(umc->resource.parent, &umc->resource); | ||
60 | if (err < 0) { | ||
61 | dev_err(&umc->dev, "can't allocate resource range " | ||
62 | "%016Lx to %016Lx: %d\n", | ||
63 | (unsigned long long)umc->resource.start, | ||
64 | (unsigned long long)umc->resource.end, | ||
65 | err); | ||
66 | goto error_request_resource; | ||
67 | } | ||
68 | |||
69 | err = device_register(&umc->dev); | ||
70 | if (err < 0) | ||
71 | goto error_device_register; | ||
72 | d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc); | ||
73 | return 0; | ||
74 | |||
75 | error_device_register: | ||
76 | release_resource(&umc->resource); | ||
77 | error_request_resource: | ||
78 | d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err); | ||
79 | return err; | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(umc_device_register); | ||
82 | |||
83 | /** | ||
84 | * umc_device_unregister - unregister a UMC device | ||
85 | * @umc: pointer to the UMC device | ||
86 | * | ||
 87 | * First we unregister the device, make sure the driver can do its | ||
88 | * resource release thing and then we try to release any left over | ||
89 | * resources. We take a ref to the device, to make sure it doesn't | ||
 90 | * disappear under our feet. | ||
91 | */ | ||
92 | void umc_device_unregister(struct umc_dev *umc) | ||
93 | { | ||
94 | struct device *dev; | ||
95 | if (!umc) | ||
96 | return; | ||
97 | dev = get_device(&umc->dev); | ||
98 | d_fnstart(3, dev, "(umc_dev %p)\n", umc); | ||
99 | device_unregister(&umc->dev); | ||
100 | release_resource(&umc->resource); | ||
101 | d_fnend(3, dev, "(umc_dev %p) = void\n", umc); | ||
102 | put_device(dev); | ||
103 | } | ||
104 | EXPORT_SYMBOL_GPL(umc_device_unregister); | ||
diff --git a/drivers/uwb/umc-drv.c b/drivers/uwb/umc-drv.c new file mode 100644 index 000000000000..367b5eb85d60 --- /dev/null +++ b/drivers/uwb/umc-drv.c | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * UWB Multi-interface Controller driver management. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This file is released under the GNU GPL v2. | ||
7 | */ | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/uwb/umc.h> | ||
10 | |||
11 | int __umc_driver_register(struct umc_driver *umc_drv, struct module *module, | ||
12 | const char *mod_name) | ||
13 | { | ||
14 | umc_drv->driver.name = umc_drv->name; | ||
15 | umc_drv->driver.owner = module; | ||
16 | umc_drv->driver.mod_name = mod_name; | ||
17 | umc_drv->driver.bus = &umc_bus_type; | ||
18 | |||
19 | return driver_register(&umc_drv->driver); | ||
20 | } | ||
21 | EXPORT_SYMBOL_GPL(__umc_driver_register); | ||
22 | |||
23 | /** | ||
 24 | * umc_driver_unregister - unregister a UMC capability driver. | ||
25 | * @umc_drv: pointer to the driver. | ||
26 | */ | ||
27 | void umc_driver_unregister(struct umc_driver *umc_drv) | ||
28 | { | ||
29 | driver_unregister(&umc_drv->driver); | ||
30 | } | ||
31 | EXPORT_SYMBOL_GPL(umc_driver_unregister); | ||
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c new file mode 100644 index 000000000000..6d232c35d07d --- /dev/null +++ b/drivers/uwb/uwb-debug.c | |||
@@ -0,0 +1,367 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Debug support | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: doc | ||
24 | */ | ||
25 | |||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/notifier.h> | ||
30 | #include <linux/device.h> | ||
31 | #include <linux/debugfs.h> | ||
32 | #include <linux/uaccess.h> | ||
33 | #include <linux/seq_file.h> | ||
34 | |||
35 | #include <linux/uwb/debug-cmd.h> | ||
36 | #define D_LOCAL 0 | ||
37 | #include <linux/uwb/debug.h> | ||
38 | |||
39 | #include "uwb-internal.h" | ||
40 | |||
41 | void dump_bytes(struct device *dev, const void *_buf, size_t rsize) | ||
42 | { | ||
43 | const char *buf = _buf; | ||
44 | char line[32]; | ||
45 | size_t offset = 0; | ||
46 | int cnt, cnt2; | ||
47 | for (cnt = 0; cnt < rsize; cnt += 8) { | ||
48 | size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8; | ||
49 | for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) { | ||
50 | offset += scnprintf(line + offset, sizeof(line) - offset, | ||
51 | "%02x ", buf[cnt + cnt2] & 0xff); | ||
52 | } | ||
53 | if (dev) | ||
54 | dev_info(dev, "%s\n", line); | ||
55 | else | ||
56 | printk(KERN_INFO "%s\n", line); | ||
57 | } | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(dump_bytes); | ||
60 | |||
61 | /* | ||
62 | * Debug interface | ||
63 | * | ||
64 | * Per radio controller debugfs files (in uwb/uwbN/): | ||
65 | * | ||
66 | * command: Flexible command interface (see <linux/uwb/debug-cmd.h>). | ||
67 | * | ||
68 | * reservations: information on reservations. | ||
69 | * | ||
70 | * accept: Set to true (Y or 1) to accept reservation requests from | ||
71 | * peers. | ||
72 | * | ||
73 | * drp_avail: DRP availability information. | ||
74 | */ | ||
75 | |||
76 | struct uwb_dbg { | ||
77 | struct uwb_pal pal; | ||
78 | |||
79 | u32 accept; | ||
80 | struct list_head rsvs; | ||
81 | |||
82 | struct dentry *root_d; | ||
83 | struct dentry *command_f; | ||
84 | struct dentry *reservations_f; | ||
85 | struct dentry *accept_f; | ||
86 | struct dentry *drp_avail_f; | ||
87 | }; | ||
88 | |||
89 | static struct dentry *root_dir; | ||
90 | |||
91 | static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) | ||
92 | { | ||
93 | struct uwb_rc *rc = rsv->rc; | ||
94 | struct device *dev = &rc->uwb_dev.dev; | ||
95 | struct uwb_dev_addr devaddr; | ||
96 | char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; | ||
97 | |||
98 | uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); | ||
99 | if (rsv->target.type == UWB_RSV_TARGET_DEV) | ||
100 | devaddr = rsv->target.dev->dev_addr; | ||
101 | else | ||
102 | devaddr = rsv->target.devaddr; | ||
103 | uwb_dev_addr_print(target, sizeof(target), &devaddr); | ||
104 | |||
105 | dev_dbg(dev, "debug: rsv %s -> %s: %s\n", | ||
106 | owner, target, uwb_rsv_state_str(rsv->state)); | ||
107 | } | ||
108 | |||
109 | static int cmd_rsv_establish(struct uwb_rc *rc, | ||
110 | struct uwb_dbg_cmd_rsv_establish *cmd) | ||
111 | { | ||
112 | struct uwb_mac_addr macaddr; | ||
113 | struct uwb_rsv *rsv; | ||
114 | struct uwb_dev *target; | ||
115 | int ret; | ||
116 | |||
117 | memcpy(&macaddr, cmd->target, sizeof(macaddr)); | ||
118 | target = uwb_dev_get_by_macaddr(rc, &macaddr); | ||
119 | if (target == NULL) | ||
120 | return -ENODEV; | ||
121 | |||
122 | rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL); | ||
123 | if (rsv == NULL) { | ||
124 | uwb_dev_put(target); | ||
125 | return -ENOMEM; | ||
126 | } | ||
127 | |||
128 | rsv->owner = &rc->uwb_dev; | ||
129 | rsv->target.type = UWB_RSV_TARGET_DEV; | ||
130 | rsv->target.dev = target; | ||
131 | rsv->type = cmd->type; | ||
132 | rsv->max_mas = cmd->max_mas; | ||
133 | rsv->min_mas = cmd->min_mas; | ||
134 | rsv->sparsity = cmd->sparsity; | ||
135 | |||
136 | ret = uwb_rsv_establish(rsv); | ||
137 | if (ret) | ||
138 | uwb_rsv_destroy(rsv); | ||
139 | else | ||
140 | list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); | ||
141 | |||
142 | return ret; | ||
143 | } | ||
144 | |||
145 | static int cmd_rsv_terminate(struct uwb_rc *rc, | ||
146 | struct uwb_dbg_cmd_rsv_terminate *cmd) | ||
147 | { | ||
148 | struct uwb_rsv *rsv, *found = NULL; | ||
149 | int i = 0; | ||
150 | |||
151 | list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { | ||
152 | if (i == cmd->index) { | ||
153 | found = rsv; | ||
154 | break; | ||
155 | } | ||
156 | } | ||
157 | if (!found) | ||
158 | return -EINVAL; | ||
159 | |||
160 | list_del(&found->pal_node); | ||
161 | uwb_rsv_terminate(found); | ||
162 | |||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | static int command_open(struct inode *inode, struct file *file) | ||
167 | { | ||
168 | file->private_data = inode->i_private; | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static ssize_t command_write(struct file *file, const char __user *buf, | ||
174 | size_t len, loff_t *off) | ||
175 | { | ||
176 | struct uwb_rc *rc = file->private_data; | ||
177 | struct uwb_dbg_cmd cmd; | ||
178 | int ret; | ||
179 | |||
180 | if (len != sizeof(struct uwb_dbg_cmd)) | ||
181 | return -EINVAL; | ||
182 | |||
183 | if (copy_from_user(&cmd, buf, len) != 0) | ||
184 | return -EFAULT; | ||
185 | |||
186 | switch (cmd.type) { | ||
187 | case UWB_DBG_CMD_RSV_ESTABLISH: | ||
188 | ret = cmd_rsv_establish(rc, &cmd.rsv_establish); | ||
189 | break; | ||
190 | case UWB_DBG_CMD_RSV_TERMINATE: | ||
191 | ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); | ||
192 | break; | ||
193 | default: | ||
194 | return -EINVAL; | ||
195 | } | ||
196 | |||
197 | return ret < 0 ? ret : len; | ||
198 | } | ||
199 | |||
200 | static struct file_operations command_fops = { | ||
201 | .open = command_open, | ||
202 | .write = command_write, | ||
203 | .read = NULL, | ||
204 | .llseek = no_llseek, | ||
205 | .owner = THIS_MODULE, | ||
206 | }; | ||
207 | |||
208 | static int reservations_print(struct seq_file *s, void *p) | ||
209 | { | ||
210 | struct uwb_rc *rc = s->private; | ||
211 | struct uwb_rsv *rsv; | ||
212 | |||
213 | mutex_lock(&rc->rsvs_mutex); | ||
214 | |||
215 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
216 | struct uwb_dev_addr devaddr; | ||
217 | char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; | ||
218 | bool is_owner; | ||
219 | char buf[72]; | ||
220 | |||
221 | uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); | ||
222 | if (rsv->target.type == UWB_RSV_TARGET_DEV) { | ||
223 | devaddr = rsv->target.dev->dev_addr; | ||
224 | is_owner = &rc->uwb_dev == rsv->owner; | ||
225 | } else { | ||
226 | devaddr = rsv->target.devaddr; | ||
227 | is_owner = true; | ||
228 | } | ||
229 | uwb_dev_addr_print(target, sizeof(target), &devaddr); | ||
230 | |||
231 | seq_printf(s, "%c %s -> %s: %s\n", | ||
232 | is_owner ? 'O' : 'T', | ||
233 | owner, target, uwb_rsv_state_str(rsv->state)); | ||
234 | seq_printf(s, " stream: %d type: %s\n", | ||
235 | rsv->stream, uwb_rsv_type_str(rsv->type)); | ||
236 | bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); | ||
237 | seq_printf(s, " %s\n", buf); | ||
238 | } | ||
239 | |||
240 | mutex_unlock(&rc->rsvs_mutex); | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static int reservations_open(struct inode *inode, struct file *file) | ||
246 | { | ||
247 | return single_open(file, reservations_print, inode->i_private); | ||
248 | } | ||
249 | |||
250 | static struct file_operations reservations_fops = { | ||
251 | .open = reservations_open, | ||
252 | .read = seq_read, | ||
253 | .llseek = seq_lseek, | ||
254 | .release = single_release, | ||
255 | .owner = THIS_MODULE, | ||
256 | }; | ||
257 | |||
258 | static int drp_avail_print(struct seq_file *s, void *p) | ||
259 | { | ||
260 | struct uwb_rc *rc = s->private; | ||
261 | char buf[72]; | ||
262 | |||
263 | bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.global, UWB_NUM_MAS); | ||
264 | seq_printf(s, "global: %s\n", buf); | ||
265 | bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.local, UWB_NUM_MAS); | ||
266 | seq_printf(s, "local: %s\n", buf); | ||
267 | bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.pending, UWB_NUM_MAS); | ||
268 | seq_printf(s, "pending: %s\n", buf); | ||
269 | |||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | static int drp_avail_open(struct inode *inode, struct file *file) | ||
274 | { | ||
275 | return single_open(file, drp_avail_print, inode->i_private); | ||
276 | } | ||
277 | |||
278 | static struct file_operations drp_avail_fops = { | ||
279 | .open = drp_avail_open, | ||
280 | .read = seq_read, | ||
281 | .llseek = seq_lseek, | ||
282 | .release = single_release, | ||
283 | .owner = THIS_MODULE, | ||
284 | }; | ||
285 | |||
286 | static void uwb_dbg_new_rsv(struct uwb_rsv *rsv) | ||
287 | { | ||
288 | struct uwb_rc *rc = rsv->rc; | ||
289 | |||
290 | if (rc->dbg->accept) | ||
291 | uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL); | ||
292 | } | ||
293 | |||
294 | /** | ||
295 | * uwb_dbg_add_rc - add a debug interface for a radio controller | ||
296 | * @rc: the radio controller | ||
297 | */ | ||
298 | void uwb_dbg_add_rc(struct uwb_rc *rc) | ||
299 | { | ||
300 | rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL); | ||
301 | if (rc->dbg == NULL) | ||
302 | return; | ||
303 | |||
304 | INIT_LIST_HEAD(&rc->dbg->rsvs); | ||
305 | |||
306 | uwb_pal_init(&rc->dbg->pal); | ||
307 | rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; | ||
308 | uwb_pal_register(rc, &rc->dbg->pal); | ||
309 | if (root_dir) { | ||
310 | rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), | ||
311 | root_dir); | ||
312 | rc->dbg->command_f = debugfs_create_file("command", 0200, | ||
313 | rc->dbg->root_d, rc, | ||
314 | &command_fops); | ||
315 | rc->dbg->reservations_f = debugfs_create_file("reservations", 0444, | ||
316 | rc->dbg->root_d, rc, | ||
317 | &reservations_fops); | ||
318 | rc->dbg->accept_f = debugfs_create_bool("accept", 0644, | ||
319 | rc->dbg->root_d, | ||
320 | &rc->dbg->accept); | ||
321 | rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444, | ||
322 | rc->dbg->root_d, rc, | ||
323 | &drp_avail_fops); | ||
324 | } | ||
325 | } | ||
326 | |||
327 | /** | ||
 328 | * uwb_dbg_del_rc - remove a radio controller's debug interface | ||
329 | * @rc: the radio controller | ||
330 | */ | ||
331 | void uwb_dbg_del_rc(struct uwb_rc *rc) | ||
332 | { | ||
333 | struct uwb_rsv *rsv, *t; | ||
334 | |||
335 | if (rc->dbg == NULL) | ||
336 | return; | ||
337 | |||
338 | list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { | ||
339 | uwb_rsv_destroy(rsv); | ||
340 | } | ||
341 | |||
342 | uwb_pal_unregister(rc, &rc->dbg->pal); | ||
343 | |||
344 | if (root_dir) { | ||
345 | debugfs_remove(rc->dbg->drp_avail_f); | ||
346 | debugfs_remove(rc->dbg->accept_f); | ||
347 | debugfs_remove(rc->dbg->reservations_f); | ||
348 | debugfs_remove(rc->dbg->command_f); | ||
349 | debugfs_remove(rc->dbg->root_d); | ||
350 | } | ||
351 | } | ||
352 | |||
353 | /** | ||
 354 | * uwb_dbg_init - initialize the debug interface sub-module | ||
355 | */ | ||
356 | void uwb_dbg_init(void) | ||
357 | { | ||
358 | root_dir = debugfs_create_dir("uwb", NULL); | ||
359 | } | ||
360 | |||
361 | /** | ||
362 | * uwb_dbg_exit - clean-up the debug interface sub-module | ||
363 | */ | ||
364 | void uwb_dbg_exit(void) | ||
365 | { | ||
366 | debugfs_remove(root_dir); | ||
367 | } | ||
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h new file mode 100644 index 000000000000..2ad307d12961 --- /dev/null +++ b/drivers/uwb/uwb-internal.h | |||
@@ -0,0 +1,305 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * UWB internal API | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * This contains most of the internal API for UWB. This is stuff used | ||
23 | * across the stack that of course, is of no interest to the rest. | ||
24 | * | ||
25 | * Some parts might end up going public (like uwb_rc_*())... | ||
26 | */ | ||
27 | |||
28 | #ifndef __UWB_INTERNAL_H__ | ||
29 | #define __UWB_INTERNAL_H__ | ||
30 | |||
31 | #include <linux/version.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/device.h> | ||
34 | #include <linux/uwb.h> | ||
35 | #include <linux/mutex.h> | ||
36 | |||
37 | struct uwb_beca_e; | ||
38 | |||
39 | /* General device API */ | ||
40 | extern void uwb_dev_init(struct uwb_dev *uwb_dev); | ||
41 | extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *); | ||
42 | extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev, | ||
43 | struct uwb_rc *parent_rc); | ||
44 | extern void uwb_dev_rm(struct uwb_dev *uwb_dev); | ||
45 | extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *); | ||
46 | extern void uwbd_dev_offair(struct uwb_beca_e *); | ||
47 | void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event); | ||
48 | |||
49 | /* General UWB Radio Controller Internal API */ | ||
50 | extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *); | ||
51 | static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc) | ||
52 | { | ||
53 | uwb_dev_get(&rc->uwb_dev); | ||
54 | return rc; | ||
55 | } | ||
56 | |||
57 | static inline void __uwb_rc_put(struct uwb_rc *rc) | ||
58 | { | ||
59 | uwb_dev_put(&rc->uwb_dev); | ||
60 | } | ||
61 | |||
62 | extern int uwb_rc_reset(struct uwb_rc *rc); | ||
63 | extern int uwb_rc_beacon(struct uwb_rc *rc, | ||
64 | int channel, unsigned bpst_offset); | ||
65 | extern int uwb_rc_scan(struct uwb_rc *rc, | ||
66 | unsigned channel, enum uwb_scan_type type, | ||
67 | unsigned bpst_offset); | ||
68 | extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); | ||
69 | extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t); | ||
70 | extern void uwb_rc_ie_init(struct uwb_rc *); | ||
71 | extern void uwb_rc_ie_init(struct uwb_rc *); | ||
72 | extern ssize_t uwb_rc_ie_setup(struct uwb_rc *); | ||
73 | extern void uwb_rc_ie_release(struct uwb_rc *); | ||
74 | extern int uwb_rc_ie_add(struct uwb_rc *, | ||
75 | const struct uwb_ie_hdr *, size_t); | ||
76 | extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); | ||
77 | |||
78 | extern const char *uwb_rc_strerror(unsigned code); | ||
79 | |||
80 | /* | ||
81 | * Time to wait for a response to an RC command. | ||
82 | * | ||
83 | * Some commands can take a long time to response. e.g., START_BEACON | ||
84 | * may scan for several superframes before joining an existing beacon | ||
85 | * group and this can take around 600 ms. | ||
86 | */ | ||
87 | #define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */ | ||
88 | |||
89 | /* | ||
90 | * Notification/Event Handlers | ||
91 | */ | ||
92 | |||
93 | struct uwb_rc_neh; | ||
94 | |||
95 | void uwb_rc_neh_create(struct uwb_rc *rc); | ||
96 | void uwb_rc_neh_destroy(struct uwb_rc *rc); | ||
97 | |||
98 | struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd, | ||
99 | u8 expected_type, u16 expected_event, | ||
100 | uwb_rc_cmd_cb_f cb, void *arg); | ||
101 | void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh); | ||
102 | void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh); | ||
103 | void uwb_rc_neh_put(struct uwb_rc_neh *neh); | ||
104 | |||
105 | /* Event size tables */ | ||
106 | extern int uwb_est_create(void); | ||
107 | extern void uwb_est_destroy(void); | ||
108 | |||
109 | |||
110 | /* | ||
111 | * UWB Events & management daemon | ||
112 | */ | ||
113 | |||
114 | /** | ||
115 | * enum uwb_event_type - types of UWB management daemon events | ||
116 | * | ||
117 | * The UWB management daemon (uwbd) can receive two types of events: | ||
118 | * UWB_EVT_TYPE_NOTIF - notification from the radio controller. | ||
119 | * UWB_EVT_TYPE_MSG - a simple message. | ||
120 | */ | ||
121 | enum uwb_event_type { | ||
122 | UWB_EVT_TYPE_NOTIF, | ||
123 | UWB_EVT_TYPE_MSG, | ||
124 | }; | ||
125 | |||
126 | /** | ||
127 | * struct uwb_event_notif - an event for a radio controller notification | ||
128 | * @size: Size of the buffer (ie: Guaranteed to contain at least | ||
129 | * a full 'struct uwb_rceb') | ||
130 | * @rceb: Pointer to a kmalloced() event payload | ||
131 | */ | ||
132 | struct uwb_event_notif { | ||
133 | size_t size; | ||
134 | struct uwb_rceb *rceb; | ||
135 | }; | ||
136 | |||
137 | /** | ||
138 | * enum uwb_event_message - an event for a message for asynchronous processing | ||
139 | * | ||
140 | * UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware. | ||
141 | */ | ||
142 | enum uwb_event_message { | ||
143 | UWB_EVT_MSG_RESET, | ||
144 | }; | ||
145 | |||
146 | /** | ||
147 | * UWB Event | ||
148 | * @rc: Radio controller that emitted the event (referenced) | ||
149 | * @ts_jiffies: Timestamp, when was it received | ||
150 | * @type: This event's type. | ||
151 | */ | ||
152 | struct uwb_event { | ||
153 | struct list_head list_node; | ||
154 | struct uwb_rc *rc; | ||
155 | unsigned long ts_jiffies; | ||
156 | enum uwb_event_type type; | ||
157 | union { | ||
158 | struct uwb_event_notif notif; | ||
159 | enum uwb_event_message message; | ||
160 | }; | ||
161 | }; | ||
162 | |||
163 | extern void uwbd_start(void); | ||
164 | extern void uwbd_stop(void); | ||
165 | extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); | ||
166 | extern void uwbd_event_queue(struct uwb_event *); | ||
167 | void uwbd_flush(struct uwb_rc *rc); | ||
168 | |||
169 | /* UWB event handlers */ | ||
170 | extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); | ||
171 | extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); | ||
172 | extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); | ||
173 | extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *); | ||
174 | extern int uwbd_evt_handle_rc_drp(struct uwb_event *); | ||
175 | extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *); | ||
176 | |||
177 | int uwbd_msg_handle_reset(struct uwb_event *evt); | ||
178 | |||
179 | |||
180 | /* | ||
181 | * Address management | ||
182 | */ | ||
183 | int uwb_rc_dev_addr_assign(struct uwb_rc *rc); | ||
184 | int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt); | ||
185 | |||
186 | /* | ||
187 | * UWB Beacon Cache | ||
188 | * | ||
189 | * Each beacon we received is kept in a cache--when we receive that | ||
190 | * beacon consistently, that means there is a new device that we have | ||
191 | * to add to the system. | ||
192 | */ | ||
193 | |||
194 | extern unsigned long beacon_timeout_ms; | ||
195 | |||
196 | /** Beacon cache list */ | ||
197 | struct uwb_beca { | ||
198 | struct list_head list; | ||
199 | size_t entries; | ||
200 | struct mutex mutex; | ||
201 | }; | ||
202 | |||
203 | extern struct uwb_beca uwb_beca; | ||
204 | |||
205 | /** | ||
206 | * Beacon cache entry | ||
207 | * | ||
208 | * @jiffies_refresh: last time a beacon was received that refreshed | ||
209 | * this cache entry. | ||
210 | * @uwb_dev: device connected to this beacon. This pointer is not | ||
211 | * safe, you need to get it with uwb_dev_try_get() | ||
212 | * | ||
213 | * @hits: how many time we have seen this beacon since last time we | ||
214 | * cleared it | ||
215 | */ | ||
216 | struct uwb_beca_e { | ||
217 | struct mutex mutex; | ||
218 | struct kref refcnt; | ||
219 | struct list_head node; | ||
220 | struct uwb_mac_addr *mac_addr; | ||
221 | struct uwb_dev_addr dev_addr; | ||
222 | u8 hits; | ||
223 | unsigned long ts_jiffies; | ||
224 | struct uwb_dev *uwb_dev; | ||
225 | struct uwb_rc_evt_beacon *be; | ||
226 | struct stats lqe_stats, rssi_stats; /* radio statistics */ | ||
227 | }; | ||
228 | struct uwb_beacon_frame; | ||
229 | extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, | ||
230 | char *, size_t); | ||
231 | extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *, | ||
232 | struct uwb_beacon_frame *, | ||
233 | unsigned long); | ||
234 | |||
235 | extern void uwb_bce_kfree(struct kref *_bce); | ||
236 | static inline void uwb_bce_get(struct uwb_beca_e *bce) | ||
237 | { | ||
238 | kref_get(&bce->refcnt); | ||
239 | } | ||
240 | static inline void uwb_bce_put(struct uwb_beca_e *bce) | ||
241 | { | ||
242 | kref_put(&bce->refcnt, uwb_bce_kfree); | ||
243 | } | ||
244 | extern void uwb_beca_purge(void); | ||
245 | extern void uwb_beca_release(void); | ||
246 | |||
247 | struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, | ||
248 | const struct uwb_dev_addr *devaddr); | ||
249 | struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, | ||
250 | const struct uwb_mac_addr *macaddr); | ||
251 | |||
252 | /* -- UWB Sysfs representation */ | ||
253 | extern struct class uwb_rc_class; | ||
254 | extern struct device_attribute dev_attr_mac_address; | ||
255 | extern struct device_attribute dev_attr_beacon; | ||
256 | extern struct device_attribute dev_attr_scan; | ||
257 | |||
258 | /* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */ | ||
259 | void uwb_rsv_init(struct uwb_rc *rc); | ||
260 | int uwb_rsv_setup(struct uwb_rc *rc); | ||
261 | void uwb_rsv_cleanup(struct uwb_rc *rc); | ||
262 | |||
263 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); | ||
264 | void uwb_rsv_remove(struct uwb_rsv *rsv); | ||
265 | struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, | ||
266 | struct uwb_ie_drp *drp_ie); | ||
267 | void uwb_rsv_sched_update(struct uwb_rc *rc); | ||
268 | |||
269 | void uwb_drp_handle_timeout(struct uwb_rsv *rsv); | ||
270 | int uwb_drp_ie_update(struct uwb_rsv *rsv); | ||
271 | void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); | ||
272 | |||
273 | void uwb_drp_avail_init(struct uwb_rc *rc); | ||
274 | int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); | ||
275 | void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); | ||
276 | void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); | ||
277 | void uwb_drp_avail_ie_update(struct uwb_rc *rc); | ||
278 | |||
279 | /* -- PAL support */ | ||
280 | void uwb_rc_pal_init(struct uwb_rc *rc); | ||
281 | |||
282 | /* -- Misc */ | ||
283 | |||
284 | extern ssize_t uwb_mac_frame_hdr_print(char *, size_t, | ||
285 | const struct uwb_mac_frame_hdr *); | ||
286 | |||
287 | /* -- Debug interface */ | ||
288 | void uwb_dbg_init(void); | ||
289 | void uwb_dbg_exit(void); | ||
290 | void uwb_dbg_add_rc(struct uwb_rc *rc); | ||
291 | void uwb_dbg_del_rc(struct uwb_rc *rc); | ||
292 | |||
293 | /* Workarounds for version specific stuff */ | ||
294 | |||
295 | static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) | ||
296 | { | ||
297 | down(&uwb_dev->dev.sem); | ||
298 | } | ||
299 | |||
300 | static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev) | ||
301 | { | ||
302 | up(&uwb_dev->dev.sem); | ||
303 | } | ||
304 | |||
305 | #endif /* #ifndef __UWB_INTERNAL_H__ */ | ||
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c new file mode 100644 index 000000000000..78908416e42c --- /dev/null +++ b/drivers/uwb/uwbd.c | |||
@@ -0,0 +1,410 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Neighborhood Management Daemon | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * This daemon takes care of maintaining information that describes the | ||
24 | * UWB neighborhood that the radios in this machine can see. It also | ||
25 | * keeps a tab of which devices are visible, makes sure each HC sits | ||
26 | * on a different channel to avoid interfering, etc. | ||
27 | * | ||
28 | * Different drivers (radio controller, device, any API in general) | ||
29 | * communicate with this daemon through an event queue. Daemon wakes | ||
30 | * up, takes a list of events and handles them one by one; handling | ||
31 | * function is extracted from a table based on the event's type and | ||
32 | * subtype. Events are freed only if the handling function says so. | ||
33 | * | ||
34 | * . Lock protecting the event list has to be a spinlock and locked | ||
35 | * with IRQSAVE because it might be called from an interrupt | ||
36 | * context (ie: when events arrive and the notification drops | ||
37 | * down from the ISR). | ||
38 | * | ||
39 | * . UWB radio controller drivers queue events to the daemon using | ||
40 | * uwbd_event_queue(). They just get the event, chew it to make it | ||
41 | * look like UWBD likes it and pass it in a buffer allocated with | ||
42 | * uwb_event_alloc(). | ||
43 | * | ||
44 | * EVENTS | ||
45 | * | ||
46 | * Events have a type, a subtype, a length, some other stuff and the | ||
47 | * data blob, which depends on the event. The header is 'struct | ||
48 | * uwb_event'; for payloads, see 'struct uwbd_evt_*'. | ||
49 | * | ||
50 | * EVENT HANDLER TABLES | ||
51 | * | ||
52 | * To find a handling function for an event, the type is used to index | ||
53 | * a subtype-table in the type-table. The subtype-table is indexed | ||
54 | * with the subtype to get the function that handles the event. Start | ||
55 | * with the main type-table 'uwbd_evt_type_handler'. | ||
56 | * | ||
57 | * DEVICES | ||
58 | * | ||
59 | * Devices are created when a bunch of beacons have been received and | ||
60 | * it is established that the device has stable radio presence. CREATED | ||
61 | * only, not configured. Devices are ONLY configured when an | ||
62 | * Application-Specific IE Probe is received, in which the device | ||
63 | * declares which Protocol ID it groks. Then the device is CONFIGURED | ||
64 | * (and the driver->probe() stuff of the device model is invoked). | ||
65 | * | ||
66 | * Devices are considered disconnected when a certain number of | ||
67 | * beacons are not received in an amount of time. | ||
68 | * | ||
69 | * Handler functions are called normally uwbd_evt_handle_*(). | ||
70 | */ | ||
71 | |||
72 | #include <linux/kthread.h> | ||
73 | #include <linux/module.h> | ||
74 | #include <linux/freezer.h> | ||
75 | #include "uwb-internal.h" | ||
76 | |||
77 | #define D_LOCAL 1 | ||
78 | #include <linux/uwb/debug.h> | ||
79 | |||
80 | |||
81 | /** | ||
82 | * UWBD Event handler function signature | ||
83 | * | ||
84 | * Return !0 if the event needs not to be freed (ie the handler | ||
85 | * takes/took care of it). 0 means the daemon code will free the | ||
86 | * event. | ||
87 | * | ||
88 | * @evt->rc is already referenced and guaranteed to exist. See | ||
89 | * uwb_evt_handle(). | ||
90 | */ | ||
91 | typedef int (*uwbd_evt_handler_f)(struct uwb_event *); | ||
92 | |||
93 | /** | ||
94 | * Properties of a UWBD event | ||
95 | * | ||
96 | * @handler: the function that will handle this event | ||
97 | * @name: text name of event | ||
98 | */ | ||
99 | struct uwbd_event { | ||
100 | uwbd_evt_handler_f handler; | ||
101 | const char *name; | ||
102 | }; | ||
103 | |||
104 | /** Table of handlers for and properties of the UWBD Radio Control Events */ | ||
105 | static | ||
106 | struct uwbd_event uwbd_events[] = { | ||
107 | [UWB_RC_EVT_BEACON] = { | ||
108 | .handler = uwbd_evt_handle_rc_beacon, | ||
109 | .name = "BEACON_RECEIVED" | ||
110 | }, | ||
111 | [UWB_RC_EVT_BEACON_SIZE] = { | ||
112 | .handler = uwbd_evt_handle_rc_beacon_size, | ||
113 | .name = "BEACON_SIZE_CHANGE" | ||
114 | }, | ||
115 | [UWB_RC_EVT_BPOIE_CHANGE] = { | ||
116 | .handler = uwbd_evt_handle_rc_bpoie_change, | ||
117 | .name = "BPOIE_CHANGE" | ||
118 | }, | ||
119 | [UWB_RC_EVT_BP_SLOT_CHANGE] = { | ||
120 | .handler = uwbd_evt_handle_rc_bp_slot_change, | ||
121 | .name = "BP_SLOT_CHANGE" | ||
122 | }, | ||
123 | [UWB_RC_EVT_DRP_AVAIL] = { | ||
124 | .handler = uwbd_evt_handle_rc_drp_avail, | ||
125 | .name = "DRP_AVAILABILITY_CHANGE" | ||
126 | }, | ||
127 | [UWB_RC_EVT_DRP] = { | ||
128 | .handler = uwbd_evt_handle_rc_drp, | ||
129 | .name = "DRP" | ||
130 | }, | ||
131 | [UWB_RC_EVT_DEV_ADDR_CONFLICT] = { | ||
132 | .handler = uwbd_evt_handle_rc_dev_addr_conflict, | ||
133 | .name = "DEV_ADDR_CONFLICT", | ||
134 | }, | ||
135 | }; | ||
136 | |||
137 | |||
138 | |||
139 | struct uwbd_evt_type_handler { | ||
140 | const char *name; | ||
141 | struct uwbd_event *uwbd_events; | ||
142 | size_t size; | ||
143 | }; | ||
144 | |||
145 | #define UWBD_EVT_TYPE_HANDLER(n,a) { \ | ||
146 | .name = (n), \ | ||
147 | .uwbd_events = (a), \ | ||
148 | .size = sizeof(a)/sizeof((a)[0]) \ | ||
149 | } | ||
150 | |||
151 | |||
152 | /** Table of handlers for each UWBD Event type. */ | ||
153 | static | ||
154 | struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = { | ||
155 | [UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events) | ||
156 | }; | ||
157 | |||
158 | static const | ||
159 | size_t uwbd_evt_type_handlers_len = | ||
160 | sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]); | ||
161 | |||
162 | static const struct uwbd_event uwbd_message_handlers[] = { | ||
163 | [UWB_EVT_MSG_RESET] = { | ||
164 | .handler = uwbd_msg_handle_reset, | ||
165 | .name = "reset", | ||
166 | }, | ||
167 | }; | ||
168 | |||
169 | static DEFINE_MUTEX(uwbd_event_mutex); | ||
170 | |||
171 | /** | ||
172 | * Handle an URC event passed to the UWB Daemon | ||
173 | * | ||
174 | * @evt: the event to handle | ||
175 | * @returns: 0 if the event can be kfreed, !0 on the contrary | ||
176 | * (somebody else took ownership) [coincidentally, returning | ||
177 | * a <0 errno code will free it :)]. | ||
178 | * | ||
179 | * Looks up the two indirection tables (one for the type, one for the | ||
180 | * subtype) to decide which function handles it and then calls the | ||
181 | * handler. | ||
182 | * | ||
183 | * The event structure passed to the event handler has the radio | ||
184 | * controller in @evt->rc referenced. The reference will be dropped | ||
185 | * once the handler returns, so if it needs it for longer (async), | ||
186 | * it'll need to take another one. | ||
187 | */ | ||
188 | static | ||
189 | int uwbd_event_handle_urc(struct uwb_event *evt) | ||
190 | { | ||
191 | struct uwbd_evt_type_handler *type_table; | ||
192 | uwbd_evt_handler_f handler; | ||
193 | u8 type, context; | ||
194 | u16 event; | ||
195 | |||
196 | type = evt->notif.rceb->bEventType; | ||
197 | event = le16_to_cpu(evt->notif.rceb->wEvent); | ||
198 | context = evt->notif.rceb->bEventContext; | ||
199 | |||
200 | if (type >= uwbd_evt_type_handlers_len) { /* >=: index 'len' is one past the end */ | ||
201 | printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type); | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | type_table = &uwbd_evt_type_handlers[type]; | ||
205 | if (type_table->uwbd_events == NULL) { | ||
206 | printk(KERN_ERR "UWBD: event type %u: unknown\n", type); | ||
207 | return -EINVAL; | ||
208 | } | ||
209 | if (event >= type_table->size) { /* >=: index 'size' is one past the end */ | ||
210 | printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n", | ||
211 | type_table->name, event); | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | handler = type_table->uwbd_events[event].handler; | ||
215 | if (handler == NULL) { | ||
216 | printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event); | ||
217 | return -EINVAL; | ||
218 | } | ||
219 | return (*handler)(evt); | ||
220 | } | ||
221 | |||
222 | static void uwbd_event_handle_message(struct uwb_event *evt) | ||
223 | { | ||
224 | struct uwb_rc *rc; | ||
225 | int result; | ||
226 | |||
227 | rc = evt->rc; | ||
228 | |||
229 | if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) { | ||
230 | dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message); | ||
231 | return; | ||
232 | } | ||
233 | |||
234 | /* If this is a reset event we need to drop the | ||
235 | * uwbd_event_mutex or it deadlocks when the reset handler | ||
236 | * attempts to flush the uwbd events. */ | ||
237 | if (evt->message == UWB_EVT_MSG_RESET) | ||
238 | mutex_unlock(&uwbd_event_mutex); | ||
239 | |||
240 | result = uwbd_message_handlers[evt->message].handler(evt); | ||
241 | if (result < 0) | ||
242 | dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", | ||
243 | uwbd_message_handlers[evt->message].name, result); | ||
244 | |||
245 | if (evt->message == UWB_EVT_MSG_RESET) | ||
246 | mutex_lock(&uwbd_event_mutex); | ||
247 | } | ||
248 | |||
249 | static void uwbd_event_handle(struct uwb_event *evt) | ||
250 | { | ||
251 | struct uwb_rc *rc; | ||
252 | int should_keep; | ||
253 | |||
254 | rc = evt->rc; | ||
255 | |||
256 | if (rc->ready) { | ||
257 | switch (evt->type) { | ||
258 | case UWB_EVT_TYPE_NOTIF: | ||
259 | should_keep = uwbd_event_handle_urc(evt); | ||
260 | if (should_keep <= 0) | ||
261 | kfree(evt->notif.rceb); | ||
262 | break; | ||
263 | case UWB_EVT_TYPE_MSG: | ||
264 | uwbd_event_handle_message(evt); | ||
265 | break; | ||
266 | default: | ||
267 | dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type); | ||
268 | break; | ||
269 | } | ||
270 | } | ||
271 | |||
272 | __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ | ||
273 | } | ||
274 | /* The UWB Daemon */ | ||
275 | |||
276 | |||
277 | /** Daemon's PID: used to decide if we can queue or not */ | ||
278 | static int uwbd_pid; | ||
279 | /** Daemon's task struct for managing the kthread */ | ||
280 | static struct task_struct *uwbd_task; | ||
281 | /** Daemon's waitqueue for waiting for new events */ | ||
282 | static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq); | ||
283 | /** Daemon's list of events; we queue/dequeue here */ | ||
284 | static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list); | ||
285 | /** Daemon's list lock to protect concurrent access */ | ||
286 | static DEFINE_SPINLOCK(uwbd_event_list_lock); | ||
287 | |||
288 | |||
289 | /** | ||
290 | * UWB Daemon | ||
291 | * | ||
292 | * Listens to all UWB notifications and takes care to track the state | ||
293 | * of the UWB neighborhood for the kernel. When we do a run, we | ||
294 | * spinlock, move the list to a private copy and release the | ||
295 | * lock. Hold it as little as possible. Not a conflict: it is | ||
296 | * guaranteed we own the events in the private list. | ||
297 | * | ||
298 | * FIXME: should change so we don't have a 1HZ timer all the time, but | ||
299 | * only if there are devices. | ||
300 | */ | ||
301 | static int uwbd(void *unused) | ||
302 | { | ||
303 | unsigned long flags; | ||
304 | struct list_head list = LIST_HEAD_INIT(list); | ||
305 | struct uwb_event *evt, *nxt; | ||
306 | int should_stop = 0; | ||
307 | while (1) { | ||
308 | wait_event_interruptible_timeout( | ||
309 | uwbd_wq, | ||
310 | !list_empty(&uwbd_event_list) | ||
311 | || (should_stop = kthread_should_stop()), | ||
312 | HZ); | ||
313 | if (should_stop) | ||
314 | break; | ||
315 | try_to_freeze(); | ||
316 | |||
317 | mutex_lock(&uwbd_event_mutex); | ||
318 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | ||
319 | list_splice_init(&uwbd_event_list, &list); | ||
320 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | ||
321 | list_for_each_entry_safe(evt, nxt, &list, list_node) { | ||
322 | list_del(&evt->list_node); | ||
323 | uwbd_event_handle(evt); | ||
324 | kfree(evt); | ||
325 | } | ||
326 | mutex_unlock(&uwbd_event_mutex); | ||
327 | |||
328 | uwb_beca_purge(); /* Purge devices that left */ | ||
329 | } | ||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | |||
334 | /** Start the UWB daemon */ | ||
335 | void uwbd_start(void) | ||
336 | { | ||
337 | uwbd_task = kthread_run(uwbd, NULL, "uwbd"); | ||
338 | if (uwbd_task == NULL) | ||
339 | printk(KERN_ERR "UWB: Cannot start management daemon; " | ||
340 | "UWB won't work\n"); | ||
341 | else | ||
342 | uwbd_pid = uwbd_task->pid; | ||
343 | } | ||
344 | |||
345 | /* Stop the UWB daemon and free any unprocessed events */ | ||
346 | void uwbd_stop(void) | ||
347 | { | ||
348 | unsigned long flags; | ||
349 | struct uwb_event *evt, *nxt; | ||
350 | kthread_stop(uwbd_task); | ||
351 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | ||
352 | uwbd_pid = 0; | ||
353 | list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { | ||
354 | if (evt->type == UWB_EVT_TYPE_NOTIF) | ||
355 | kfree(evt->notif.rceb); | ||
356 | kfree(evt); | ||
357 | } | ||
358 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | ||
359 | uwb_beca_release(); | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Queue an event for the management daemon | ||
364 | * | ||
365 | * When some lower layer receives an event, it uses this function to | ||
366 | * push it forward to the UWB daemon. | ||
367 | * | ||
368 | * Once you pass the event, you don't own it any more, but the daemon | ||
369 | * does. It will uwb_event_free() it when done, so make sure you | ||
370 | * uwb_event_alloc()ed it or bad things will happen. | ||
371 | * | ||
372 | * If the daemon is not running, we just free the event. | ||
373 | */ | ||
374 | void uwbd_event_queue(struct uwb_event *evt) | ||
375 | { | ||
376 | unsigned long flags; | ||
377 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | ||
378 | if (uwbd_pid != 0) { | ||
379 | list_add(&evt->list_node, &uwbd_event_list); | ||
380 | wake_up_all(&uwbd_wq); | ||
381 | } else { | ||
382 | __uwb_rc_put(evt->rc); | ||
383 | if (evt->type == UWB_EVT_TYPE_NOTIF) | ||
384 | kfree(evt->notif.rceb); | ||
385 | kfree(evt); | ||
386 | } | ||
387 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | ||
388 | return; | ||
389 | } | ||
390 | |||
391 | void uwbd_flush(struct uwb_rc *rc) | ||
392 | { | ||
393 | struct uwb_event *evt, *nxt; | ||
394 | |||
395 | mutex_lock(&uwbd_event_mutex); | ||
396 | |||
397 | spin_lock_irq(&uwbd_event_list_lock); | ||
398 | list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { | ||
399 | if (evt->rc == rc) { | ||
400 | __uwb_rc_put(rc); | ||
401 | list_del(&evt->list_node); | ||
402 | if (evt->type == UWB_EVT_TYPE_NOTIF) | ||
403 | kfree(evt->notif.rceb); | ||
404 | kfree(evt); | ||
405 | } | ||
406 | } | ||
407 | spin_unlock_irq(&uwbd_event_list_lock); | ||
408 | |||
409 | mutex_unlock(&uwbd_event_mutex); | ||
410 | } | ||
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c new file mode 100644 index 000000000000..1711deadb114 --- /dev/null +++ b/drivers/uwb/whc-rc.c | |||
@@ -0,0 +1,520 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3]) | ||
3 | * Radio Control command/event transport to the UWB stack | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * Initialize and hook up the Radio Control interface. | ||
24 | * | ||
25 | * For each device probed, creates an 'struct whcrc' which contains | ||
26 | * just the representation of the UWB Radio Controller, and the logic | ||
27 | * for reading notifications and passing them to the UWB Core. | ||
28 | * | ||
29 | * So we initialize all of those, register the UWB Radio Controller | ||
30 | * and setup the notification/event handle to pipe the notifications | ||
31 | * to the UWB management Daemon. | ||
32 | * | ||
33 | * Once uwb_rc_add() is called, the UWB stack takes control, resets | ||
34 | * the radio and readies the device to take commands the UWB | ||
35 | * API/user-space. | ||
36 | * | ||
37 | * Note this driver is just a transport driver; the commands are | ||
38 | * formed at the UWB stack and given to this driver who will deliver | ||
39 | * them to the hw and transfer the replies/notifications back to the | ||
40 | * UWB stack through the UWB daemon (UWBD). | ||
41 | */ | ||
42 | #include <linux/version.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <linux/module.h> | ||
45 | #include <linux/pci.h> | ||
46 | #include <linux/dma-mapping.h> | ||
47 | #include <linux/interrupt.h> | ||
48 | #include <linux/workqueue.h> | ||
49 | #include <linux/uwb.h> | ||
50 | #include <linux/uwb/whci.h> | ||
51 | #include <linux/uwb/umc.h> | ||
52 | #include "uwb-internal.h" | ||
53 | |||
54 | #define D_LOCAL 0 | ||
55 | #include <linux/uwb/debug.h> | ||
56 | |||
57 | /** | ||
58 | * Descriptor for an instance of the UWB Radio Control Driver that | ||
59 | * attaches to the URC interface of the WHCI PCI card. | ||
60 | * | ||
61 | * Unless there is a lock specific to the 'data members', all access | ||
62 | * is protected by uwb_rc->mutex. | ||
63 | */ | ||
64 | struct whcrc { | ||
65 | struct umc_dev *umc_dev; | ||
66 | struct uwb_rc *uwb_rc; /* UWB host controller */ | ||
67 | |||
68 | unsigned long area; | ||
69 | void __iomem *rc_base; | ||
70 | size_t rc_len; | ||
71 | spinlock_t irq_lock; | ||
72 | |||
73 | void *evt_buf, *cmd_buf; | ||
74 | dma_addr_t evt_dma_buf, cmd_dma_buf; | ||
75 | wait_queue_head_t cmd_wq; | ||
76 | struct work_struct event_work; | ||
77 | }; | ||
78 | |||
79 | /** | ||
80 | * Execute an UWB RC command on WHCI/RC | ||
81 | * | ||
82 | * @rc: Instance of a Radio Controller that is a whcrc | ||
83 | * @cmd: Buffer containing the RCCB and payload to execute | ||
84 | * @cmd_size: Size of the command buffer. | ||
85 | * | ||
86 | * We copy the command into whcrc->cmd_buf (as it is pretty and | ||
 87 | * aligned and physically contiguous) and then press the right keys in | ||
88 | * the controller's URCCMD register to get it to read it. We might | ||
89 | * have to wait for the cmd_sem to be open to us. | ||
90 | * | ||
91 | * NOTE: rc's mutex has to be locked | ||
92 | */ | ||
93 | static int whcrc_cmd(struct uwb_rc *uwb_rc, | ||
94 | const struct uwb_rccb *cmd, size_t cmd_size) | ||
95 | { | ||
96 | int result = 0; | ||
97 | struct whcrc *whcrc = uwb_rc->priv; | ||
98 | struct device *dev = &whcrc->umc_dev->dev; | ||
99 | u32 urccmd; | ||
100 | |||
101 | d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size); | ||
102 | might_sleep(); | ||
103 | |||
104 | if (cmd_size >= 4096) { | ||
105 | result = -E2BIG; | ||
106 | goto error; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * If the URC is halted, then the hardware has reset itself. | ||
111 | * Attempt to recover by restarting the device and then return | ||
112 | * an error as it's likely that the current command isn't | ||
113 | * valid for a newly started RC. | ||
114 | */ | ||
115 | if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { | ||
116 | dev_err(dev, "requesting reset of halted radio controller\n"); | ||
117 | uwb_rc_reset_all(uwb_rc); | ||
118 | result = -EIO; | ||
119 | goto error; | ||
120 | } | ||
121 | |||
122 | result = wait_event_timeout(whcrc->cmd_wq, | ||
123 | !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); | ||
124 | if (result == 0) { | ||
125 | dev_err(dev, "device is not ready to execute commands\n"); | ||
126 | result = -ETIMEDOUT; | ||
127 | goto error; | ||
128 | } | ||
129 | |||
130 | memmove(whcrc->cmd_buf, cmd, cmd_size); | ||
131 | le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR); | ||
132 | |||
133 | spin_lock(&whcrc->irq_lock); | ||
134 | urccmd = le_readl(whcrc->rc_base + URCCMD); | ||
135 | urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK); | ||
136 | le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size, | ||
137 | whcrc->rc_base + URCCMD); | ||
138 | spin_unlock(&whcrc->irq_lock); | ||
139 | |||
140 | error: | ||
141 | d_fnend(3, dev, "(%p, %p, %zu) = %d\n", | ||
142 | uwb_rc, cmd, cmd_size, result); | ||
143 | return result; | ||
144 | } | ||
145 | |||
146 | static int whcrc_reset(struct uwb_rc *rc) | ||
147 | { | ||
148 | struct whcrc *whcrc = rc->priv; | ||
149 | |||
150 | return umc_controller_reset(whcrc->umc_dev); | ||
151 | } | ||
152 | |||
153 | /** | ||
154 | * Reset event reception mechanism and tell hw we are ready to get more | ||
155 | * | ||
156 | * We have read all the events in the event buffer, so we are ready to | ||
157 | * reset it to the beginning. | ||
158 | * | ||
159 | * This is only called during initialization or after an event buffer | ||
160 | * has been retired. This means we can be sure that event processing | ||
161 | * is disabled and it's safe to update the URCEVTADDR register. | ||
162 | * | ||
163 | * There's no need to wait for the event processing to start as the | ||
164 | * URC will not clear URCCMD_ACTIVE until (internal) event buffer | ||
165 | * space is available. | ||
166 | */ | ||
167 | static | ||
168 | void whcrc_enable_events(struct whcrc *whcrc) | ||
169 | { | ||
170 | struct device *dev = &whcrc->umc_dev->dev; | ||
171 | u32 urccmd; | ||
172 | |||
173 | d_fnstart(4, dev, "(whcrc %p)\n", whcrc); | ||
174 | |||
175 | le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); | ||
176 | |||
177 | spin_lock(&whcrc->irq_lock); | ||
178 | urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; | ||
179 | le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); | ||
180 | spin_unlock(&whcrc->irq_lock); | ||
181 | |||
182 | d_fnend(4, dev, "(whcrc %p) = void\n", whcrc); | ||
183 | } | ||
184 | |||
185 | static void whcrc_event_work(struct work_struct *work) | ||
186 | { | ||
187 | struct whcrc *whcrc = container_of(work, struct whcrc, event_work); | ||
188 | struct device *dev = &whcrc->umc_dev->dev; | ||
189 | size_t size; | ||
190 | u64 urcevtaddr; | ||
191 | |||
192 | urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); | ||
193 | size = urcevtaddr & URCEVTADDR_OFFSET_MASK; | ||
194 | |||
195 | d_printf(3, dev, "received %zu octet event\n", size); | ||
196 | d_dump(4, dev, whcrc->evt_buf, size > 32 ? 32 : size); | ||
197 | |||
198 | uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); | ||
199 | whcrc_enable_events(whcrc); | ||
200 | } | ||
201 | |||
202 | /** | ||
203 | * Catch interrupts? | ||
204 | * | ||
 205 | * We ack immediately (and expect the hw to do the right thing and | ||
206 | * raise another IRQ if things have changed :) | ||
207 | */ | ||
208 | static | ||
209 | irqreturn_t whcrc_irq_cb(int irq, void *_whcrc) | ||
210 | { | ||
211 | struct whcrc *whcrc = _whcrc; | ||
212 | struct device *dev = &whcrc->umc_dev->dev; | ||
213 | u32 urcsts; | ||
214 | |||
215 | urcsts = le_readl(whcrc->rc_base + URCSTS); | ||
216 | if (!(urcsts & URCSTS_INT_MASK)) | ||
217 | return IRQ_NONE; | ||
218 | le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); | ||
219 | |||
220 | d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n", | ||
221 | le_readl(whcrc->rc_base + URCSTS), urcsts); | ||
222 | |||
223 | if (urcsts & URCSTS_HSE) { | ||
224 | dev_err(dev, "host system error -- hardware halted\n"); | ||
225 | /* FIXME: do something sensible here */ | ||
226 | goto out; | ||
227 | } | ||
228 | if (urcsts & URCSTS_ER) { | ||
229 | d_printf(3, dev, "ER: event ready\n"); | ||
230 | schedule_work(&whcrc->event_work); | ||
231 | } | ||
232 | if (urcsts & URCSTS_RCI) { | ||
233 | d_printf(3, dev, "RCI: ready to execute another command\n"); | ||
234 | wake_up_all(&whcrc->cmd_wq); | ||
235 | } | ||
236 | out: | ||
237 | return IRQ_HANDLED; | ||
238 | } | ||
239 | |||
240 | |||
241 | /** | ||
242 | * Initialize a UMC RC interface: map regions, get (shared) IRQ | ||
243 | */ | ||
244 | static | ||
245 | int whcrc_setup_rc_umc(struct whcrc *whcrc) | ||
246 | { | ||
247 | int result = 0; | ||
248 | struct device *dev = &whcrc->umc_dev->dev; | ||
249 | struct umc_dev *umc_dev = whcrc->umc_dev; | ||
250 | |||
251 | whcrc->area = umc_dev->resource.start; | ||
252 | whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; | ||
253 | result = -EBUSY; | ||
254 | if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) | ||
255 | == NULL) { | ||
256 | dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", | ||
257 | whcrc->rc_len, whcrc->area, result); | ||
258 | goto error_request_region; | ||
259 | } | ||
260 | |||
261 | whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len); | ||
262 | if (whcrc->rc_base == NULL) { | ||
263 | dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n", | ||
264 | whcrc->rc_len, whcrc->area, result); | ||
265 | goto error_ioremap_nocache; | ||
266 | } | ||
267 | |||
268 | result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED, | ||
269 | KBUILD_MODNAME, whcrc); | ||
270 | if (result < 0) { | ||
271 | dev_err(dev, "can't allocate IRQ %d: %d\n", | ||
272 | umc_dev->irq, result); | ||
273 | goto error_request_irq; | ||
274 | } | ||
275 | |||
276 | result = -ENOMEM; | ||
277 | whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, | ||
278 | &whcrc->cmd_dma_buf, GFP_KERNEL); | ||
279 | if (whcrc->cmd_buf == NULL) { | ||
280 | dev_err(dev, "Can't allocate cmd transfer buffer\n"); | ||
281 | goto error_cmd_buffer; | ||
282 | } | ||
283 | |||
284 | whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, | ||
285 | &whcrc->evt_dma_buf, GFP_KERNEL); | ||
286 | if (whcrc->evt_buf == NULL) { | ||
287 | dev_err(dev, "Can't allocate evt transfer buffer\n"); | ||
288 | goto error_evt_buffer; | ||
289 | } | ||
290 | d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n", | ||
291 | whcrc->rc_len, whcrc->rc_base, umc_dev->irq); | ||
292 | return 0; | ||
293 | |||
294 | error_evt_buffer: | ||
295 | dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, | ||
296 | whcrc->cmd_dma_buf); | ||
297 | error_cmd_buffer: | ||
298 | free_irq(umc_dev->irq, whcrc); | ||
299 | error_request_irq: | ||
300 | iounmap(whcrc->rc_base); | ||
301 | error_ioremap_nocache: | ||
302 | release_mem_region(whcrc->area, whcrc->rc_len); | ||
303 | error_request_region: | ||
304 | return result; | ||
305 | } | ||
306 | |||
307 | |||
308 | /** | ||
309 | * Release RC's UMC resources | ||
310 | */ | ||
311 | static | ||
312 | void whcrc_release_rc_umc(struct whcrc *whcrc) | ||
313 | { | ||
314 | struct umc_dev *umc_dev = whcrc->umc_dev; | ||
315 | |||
316 | dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf, | ||
317 | whcrc->evt_dma_buf); | ||
318 | dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, | ||
319 | whcrc->cmd_dma_buf); | ||
320 | free_irq(umc_dev->irq, whcrc); | ||
321 | iounmap(whcrc->rc_base); | ||
322 | release_mem_region(whcrc->area, whcrc->rc_len); | ||
323 | } | ||
324 | |||
325 | |||
326 | /** | ||
327 | * whcrc_start_rc - start a WHCI radio controller | ||
328 | * @whcrc: the radio controller to start | ||
329 | * | ||
330 | * Reset the UMC device, start the radio controller, enable events and | ||
331 | * finally enable interrupts. | ||
332 | */ | ||
333 | static int whcrc_start_rc(struct uwb_rc *rc) | ||
334 | { | ||
335 | struct whcrc *whcrc = rc->priv; | ||
336 | int result = 0; | ||
337 | struct device *dev = &whcrc->umc_dev->dev; | ||
338 | unsigned long start, duration; | ||
339 | |||
340 | /* Reset the thing */ | ||
341 | le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); | ||
342 | if (d_test(3)) | ||
343 | start = jiffies; | ||
344 | if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, | ||
345 | 5000, "device to reset at init") < 0) { | ||
346 | result = -EBUSY; | ||
347 | goto error; | ||
348 | } else if (d_test(3)) { | ||
349 | duration = jiffies - start; | ||
350 | if (duration > msecs_to_jiffies(40)) | ||
351 | dev_err(dev, "Device took %ums to " | ||
352 | "reset. MAX expected: 40ms\n", | ||
353 | jiffies_to_msecs(duration)); | ||
354 | } | ||
355 | |||
356 | /* Set the event buffer, start the controller (enable IRQs later) */ | ||
357 | le_writel(0, whcrc->rc_base + URCINTR); | ||
358 | le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); | ||
359 | result = -ETIMEDOUT; | ||
360 | if (d_test(3)) | ||
361 | start = jiffies; | ||
362 | if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, | ||
363 | 5000, "device to start") < 0) | ||
364 | goto error; | ||
365 | if (d_test(3)) { | ||
366 | duration = jiffies - start; | ||
367 | if (duration > msecs_to_jiffies(40)) | ||
368 | dev_err(dev, "Device took %ums to start. " | ||
369 | "MAX expected: 40ms\n", | ||
370 | jiffies_to_msecs(duration)); | ||
371 | } | ||
372 | whcrc_enable_events(whcrc); | ||
373 | result = 0; | ||
374 | le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); | ||
375 | error: | ||
376 | return result; | ||
377 | } | ||
378 | |||
379 | |||
380 | /** | ||
381 | * whcrc_stop_rc - stop a WHCI radio controller | ||
382 | * @whcrc: the radio controller to stop | ||
383 | * | ||
384 | * Disable interrupts and cancel any pending event processing work | ||
385 | * before clearing the Run/Stop bit. | ||
386 | */ | ||
387 | static | ||
388 | void whcrc_stop_rc(struct uwb_rc *rc) | ||
389 | { | ||
390 | struct whcrc *whcrc = rc->priv; | ||
391 | struct umc_dev *umc_dev = whcrc->umc_dev; | ||
392 | |||
393 | le_writel(0, whcrc->rc_base + URCINTR); | ||
394 | cancel_work_sync(&whcrc->event_work); | ||
395 | |||
396 | le_writel(0, whcrc->rc_base + URCCMD); | ||
397 | whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, | ||
398 | URCSTS_HALTED, 0, 40, "URCSTS.HALTED"); | ||
399 | } | ||
400 | |||
401 | static void whcrc_init(struct whcrc *whcrc) | ||
402 | { | ||
403 | spin_lock_init(&whcrc->irq_lock); | ||
404 | init_waitqueue_head(&whcrc->cmd_wq); | ||
405 | INIT_WORK(&whcrc->event_work, whcrc_event_work); | ||
406 | } | ||
407 | |||
408 | /** | ||
409 | * Initialize the radio controller. | ||
410 | * | ||
411 | * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the | ||
412 | * IRQ handler we use that to determine if the hw is ready to | ||
413 | * handle events. Looks like a race condition, but it really is | ||
414 | * not. | ||
415 | */ | ||
416 | static | ||
417 | int whcrc_probe(struct umc_dev *umc_dev) | ||
418 | { | ||
419 | int result; | ||
420 | struct uwb_rc *uwb_rc; | ||
421 | struct whcrc *whcrc; | ||
422 | struct device *dev = &umc_dev->dev; | ||
423 | |||
424 | d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev); | ||
425 | result = -ENOMEM; | ||
426 | uwb_rc = uwb_rc_alloc(); | ||
427 | if (uwb_rc == NULL) { | ||
428 | dev_err(dev, "unable to allocate RC instance\n"); | ||
429 | goto error_rc_alloc; | ||
430 | } | ||
431 | whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL); | ||
432 | if (whcrc == NULL) { | ||
433 | dev_err(dev, "unable to allocate WHC-RC instance\n"); | ||
434 | goto error_alloc; | ||
435 | } | ||
436 | whcrc_init(whcrc); | ||
437 | whcrc->umc_dev = umc_dev; | ||
438 | |||
439 | result = whcrc_setup_rc_umc(whcrc); | ||
440 | if (result < 0) { | ||
441 | dev_err(dev, "Can't setup RC UMC interface: %d\n", result); | ||
442 | goto error_setup_rc_umc; | ||
443 | } | ||
444 | whcrc->uwb_rc = uwb_rc; | ||
445 | |||
446 | uwb_rc->owner = THIS_MODULE; | ||
447 | uwb_rc->cmd = whcrc_cmd; | ||
448 | uwb_rc->reset = whcrc_reset; | ||
449 | uwb_rc->start = whcrc_start_rc; | ||
450 | uwb_rc->stop = whcrc_stop_rc; | ||
451 | |||
452 | result = uwb_rc_add(uwb_rc, dev, whcrc); | ||
453 | if (result < 0) | ||
454 | goto error_rc_add; | ||
455 | umc_set_drvdata(umc_dev, whcrc); | ||
456 | d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev); | ||
457 | return 0; | ||
458 | |||
459 | error_rc_add: | ||
460 | whcrc_release_rc_umc(whcrc); | ||
461 | error_setup_rc_umc: | ||
462 | kfree(whcrc); | ||
463 | error_alloc: | ||
464 | uwb_rc_put(uwb_rc); | ||
465 | error_rc_alloc: | ||
466 | d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result); | ||
467 | return result; | ||
468 | } | ||
469 | |||
470 | /** | ||
471 | * Clean up the radio control resources | ||
472 | * | ||
473 | * When we up the command semaphore, everybody possibly held trying to | ||
474 | * execute a command should be granted entry and then they'll see the | ||
475 | * host is quiescing and up it (so it will chain to the next waiter). | ||
476 | * This should not happen (in any case), as we can only remove when | ||
477 | * there are no handles open... | ||
478 | */ | ||
479 | static void whcrc_remove(struct umc_dev *umc_dev) | ||
480 | { | ||
481 | struct whcrc *whcrc = umc_get_drvdata(umc_dev); | ||
482 | struct uwb_rc *uwb_rc = whcrc->uwb_rc; | ||
483 | |||
484 | umc_set_drvdata(umc_dev, NULL); | ||
485 | uwb_rc_rm(uwb_rc); | ||
486 | whcrc_release_rc_umc(whcrc); | ||
487 | kfree(whcrc); | ||
488 | uwb_rc_put(uwb_rc); | ||
489 | d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc); | ||
490 | } | ||
491 | |||
492 | /* PCI device ID's that we handle [so it gets loaded] */ | ||
493 | static struct pci_device_id whcrc_id_table[] = { | ||
494 | { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, | ||
495 | { /* empty last entry */ } | ||
496 | }; | ||
497 | MODULE_DEVICE_TABLE(pci, whcrc_id_table); | ||
498 | |||
499 | static struct umc_driver whcrc_driver = { | ||
500 | .name = "whc-rc", | ||
501 | .cap_id = UMC_CAP_ID_WHCI_RC, | ||
502 | .probe = whcrc_probe, | ||
503 | .remove = whcrc_remove, | ||
504 | }; | ||
505 | |||
506 | static int __init whcrc_driver_init(void) | ||
507 | { | ||
508 | return umc_driver_register(&whcrc_driver); | ||
509 | } | ||
510 | module_init(whcrc_driver_init); | ||
511 | |||
512 | static void __exit whcrc_driver_exit(void) | ||
513 | { | ||
514 | umc_driver_unregister(&whcrc_driver); | ||
515 | } | ||
516 | module_exit(whcrc_driver_exit); | ||
517 | |||
518 | MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); | ||
519 | MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver"); | ||
520 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c new file mode 100644 index 000000000000..3df2388f908f --- /dev/null +++ b/drivers/uwb/whci.c | |||
@@ -0,0 +1,269 @@ | |||
1 | /* | ||
2 | * WHCI UWB Multi-interface Controller enumerator. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This file is released under the GNU GPL v2. | ||
7 | */ | ||
8 | #include <linux/delay.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/dma-mapping.h> | ||
12 | #include <linux/uwb/whci.h> | ||
13 | #include <linux/uwb/umc.h> | ||
14 | |||
15 | struct whci_card { | ||
16 | struct pci_dev *pci; | ||
17 | void __iomem *uwbbase; | ||
18 | u8 n_caps; | ||
19 | struct umc_dev *devs[0]; | ||
20 | }; | ||
21 | |||
22 | |||
23 | /* Fix faulty HW :( */ | ||
24 | static | ||
25 | u64 whci_capdata_quirks(struct whci_card *card, u64 capdata) | ||
26 | { | ||
27 | u64 capdata_orig = capdata; | ||
28 | struct pci_dev *pci_dev = card->pci; | ||
29 | if (pci_dev->vendor == PCI_VENDOR_ID_INTEL | ||
30 | && (pci_dev->device == 0x0c3b || pci_dev->device == 0004) | ||
31 | && pci_dev->class == 0x0d1010) { | ||
32 | switch (UWBCAPDATA_TO_CAP_ID(capdata)) { | ||
33 | /* WLP capability has 0x100 bytes of aperture */ | ||
34 | case 0x80: | ||
35 | capdata |= 0x40 << 8; break; | ||
36 | /* WUSB capability has 0x80 bytes of aperture | ||
37 | * and ID is 1 */ | ||
38 | case 0x02: | ||
39 | capdata &= ~0xffff; | ||
40 | capdata |= 0x2001; | ||
41 | break; | ||
42 | } | ||
43 | } | ||
44 | if (capdata_orig != capdata) | ||
45 | dev_warn(&pci_dev->dev, | ||
46 | "PCI v%04x d%04x c%06x#%02x: " | ||
47 | "corrected capdata from %016Lx to %016Lx\n", | ||
48 | pci_dev->vendor, pci_dev->device, pci_dev->class, | ||
49 | (unsigned)UWBCAPDATA_TO_CAP_ID(capdata), | ||
50 | (unsigned long long)capdata_orig, | ||
51 | (unsigned long long)capdata); | ||
52 | return capdata; | ||
53 | } | ||
54 | |||
55 | |||
56 | /** | ||
57 | * whci_wait_for - wait for a WHCI register to be set | ||
58 | * | ||
59 | * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'. | ||
60 | */ | ||
61 | int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result, | ||
62 | unsigned long max_ms, const char *tag) | ||
63 | { | ||
64 | unsigned t = 0; | ||
65 | u32 val; | ||
66 | for (;;) { | ||
67 | val = le_readl(reg); | ||
68 | if ((val & mask) == result) | ||
69 | break; | ||
70 | msleep(10); | ||
71 | if (t >= max_ms) { | ||
72 | dev_err(dev, "timed out waiting for %s ", tag); | ||
73 | return -ETIMEDOUT; | ||
74 | } | ||
75 | t += 10; | ||
76 | } | ||
77 | return 0; | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(whci_wait_for); | ||
80 | |||
81 | |||
82 | /* | ||
83 | * NOTE: the capinfo and capdata registers are slightly different | ||
84 | * (size and cap-id fields). So for cap #0, we need to fill | ||
85 | * in. Size comes from the size of the register block | ||
86 | * (statically calculated); cap_id comes from nowhere, we use | ||
87 | * zero, that is reserved, for the radio controller, because | ||
88 | * none was defined at the spec level. | ||
89 | */ | ||
90 | static int whci_add_cap(struct whci_card *card, int n) | ||
91 | { | ||
92 | struct umc_dev *umc; | ||
93 | u64 capdata; | ||
94 | int bar, err; | ||
95 | |||
96 | umc = umc_device_create(&card->pci->dev, n); | ||
97 | if (umc == NULL) | ||
98 | return -ENOMEM; | ||
99 | |||
100 | capdata = le_readq(card->uwbbase + UWBCAPDATA(n)); | ||
101 | |||
102 | bar = UWBCAPDATA_TO_BAR(capdata) << 1; | ||
103 | |||
104 | capdata = whci_capdata_quirks(card, capdata); | ||
 105 | /* Capability 0 is the radio controller. Its size is 32 | ||
106 | * bytes (WHCI0.95[2.3, T2-9]). */ | ||
107 | umc->version = UWBCAPDATA_TO_VERSION(capdata); | ||
108 | umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata); | ||
109 | umc->bar = bar; | ||
110 | umc->resource.start = pci_resource_start(card->pci, bar) | ||
111 | + UWBCAPDATA_TO_OFFSET(capdata); | ||
112 | umc->resource.end = umc->resource.start | ||
113 | + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; | ||
114 | umc->resource.name = umc->dev.bus_id; | ||
115 | umc->resource.flags = card->pci->resource[bar].flags; | ||
116 | umc->resource.parent = &card->pci->resource[bar]; | ||
117 | umc->irq = card->pci->irq; | ||
118 | |||
119 | err = umc_device_register(umc); | ||
120 | if (err < 0) | ||
121 | goto error; | ||
122 | card->devs[n] = umc; | ||
123 | return 0; | ||
124 | |||
125 | error: | ||
126 | kfree(umc); | ||
127 | return err; | ||
128 | } | ||
129 | |||
130 | static void whci_del_cap(struct whci_card *card, int n) | ||
131 | { | ||
132 | struct umc_dev *umc = card->devs[n]; | ||
133 | |||
134 | if (umc != NULL) | ||
135 | umc_device_unregister(umc); | ||
136 | } | ||
137 | |||
138 | static int whci_n_caps(struct pci_dev *pci) | ||
139 | { | ||
140 | void __iomem *uwbbase; | ||
141 | u64 capinfo; | ||
142 | |||
143 | uwbbase = pci_iomap(pci, 0, 8); | ||
144 | if (!uwbbase) | ||
145 | return -ENOMEM; | ||
146 | capinfo = le_readq(uwbbase + UWBCAPINFO); | ||
147 | pci_iounmap(pci, uwbbase); | ||
148 | |||
149 | return UWBCAPINFO_TO_N_CAPS(capinfo); | ||
150 | } | ||
151 | |||
152 | static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id) | ||
153 | { | ||
154 | struct whci_card *card; | ||
155 | int err, n_caps, n; | ||
156 | |||
157 | err = pci_enable_device(pci); | ||
158 | if (err < 0) | ||
159 | goto error; | ||
160 | pci_enable_msi(pci); | ||
161 | pci_set_master(pci); | ||
162 | err = -ENXIO; | ||
163 | if (!pci_set_dma_mask(pci, DMA_64BIT_MASK)) | ||
164 | pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK); | ||
165 | else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK)) | ||
166 | pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK); | ||
167 | else | ||
168 | goto error_dma; | ||
169 | |||
170 | err = n_caps = whci_n_caps(pci); | ||
171 | if (n_caps < 0) | ||
172 | goto error_ncaps; | ||
173 | |||
174 | err = -ENOMEM; | ||
175 | card = kzalloc(sizeof(struct whci_card) | ||
176 | + sizeof(struct whci_dev *) * (n_caps + 1), | ||
177 | GFP_KERNEL); | ||
178 | if (card == NULL) | ||
179 | goto error_kzalloc; | ||
180 | card->pci = pci; | ||
181 | card->n_caps = n_caps; | ||
182 | |||
183 | err = -EBUSY; | ||
184 | if (!request_mem_region(pci_resource_start(pci, 0), | ||
185 | UWBCAPDATA_SIZE(card->n_caps), | ||
186 | "whci (capability data)")) | ||
187 | goto error_request_memregion; | ||
188 | err = -ENOMEM; | ||
189 | card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps)); | ||
190 | if (!card->uwbbase) | ||
191 | goto error_iomap; | ||
192 | |||
193 | /* Add each capability. */ | ||
194 | for (n = 0; n <= card->n_caps; n++) { | ||
195 | err = whci_add_cap(card, n); | ||
196 | if (err < 0 && n == 0) { | ||
197 | dev_err(&pci->dev, "cannot bind UWB radio controller:" | ||
198 | " %d\n", err); | ||
199 | goto error_bind; | ||
200 | } | ||
201 | if (err < 0) | ||
202 | dev_warn(&pci->dev, "warning: cannot bind capability " | ||
203 | "#%u: %d\n", n, err); | ||
204 | } | ||
205 | pci_set_drvdata(pci, card); | ||
206 | return 0; | ||
207 | |||
208 | error_bind: | ||
209 | pci_iounmap(pci, card->uwbbase); | ||
210 | error_iomap: | ||
211 | release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps)); | ||
212 | error_request_memregion: | ||
213 | kfree(card); | ||
214 | error_kzalloc: | ||
215 | error_ncaps: | ||
216 | error_dma: | ||
217 | pci_disable_msi(pci); | ||
218 | pci_disable_device(pci); | ||
219 | error: | ||
220 | return err; | ||
221 | } | ||
222 | |||
223 | static void whci_remove(struct pci_dev *pci) | ||
224 | { | ||
225 | struct whci_card *card = pci_get_drvdata(pci); | ||
226 | int n; | ||
227 | |||
228 | pci_set_drvdata(pci, NULL); | ||
229 | /* Unregister each capability in reverse (so the master device | ||
230 | * is unregistered last). */ | ||
231 | for (n = card->n_caps; n >= 0 ; n--) | ||
232 | whci_del_cap(card, n); | ||
233 | pci_iounmap(pci, card->uwbbase); | ||
234 | release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps)); | ||
235 | kfree(card); | ||
236 | pci_disable_msi(pci); | ||
237 | pci_disable_device(pci); | ||
238 | } | ||
239 | |||
240 | static struct pci_device_id whci_id_table[] = { | ||
241 | { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, | ||
242 | { 0 }, | ||
243 | }; | ||
244 | MODULE_DEVICE_TABLE(pci, whci_id_table); | ||
245 | |||
246 | |||
247 | static struct pci_driver whci_driver = { | ||
248 | .name = "whci", | ||
249 | .id_table = whci_id_table, | ||
250 | .probe = whci_probe, | ||
251 | .remove = whci_remove, | ||
252 | }; | ||
253 | |||
254 | static int __init whci_init(void) | ||
255 | { | ||
256 | return pci_register_driver(&whci_driver); | ||
257 | } | ||
258 | |||
259 | static void __exit whci_exit(void) | ||
260 | { | ||
261 | pci_unregister_driver(&whci_driver); | ||
262 | } | ||
263 | |||
264 | module_init(whci_init); | ||
265 | module_exit(whci_exit); | ||
266 | |||
267 | MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator"); | ||
268 | MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); | ||
269 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/uwb/wlp/Makefile b/drivers/uwb/wlp/Makefile new file mode 100644 index 000000000000..c72c11db5b1b --- /dev/null +++ b/drivers/uwb/wlp/Makefile | |||
@@ -0,0 +1,10 @@ | |||
1 | obj-$(CONFIG_UWB_WLP) := wlp.o | ||
2 | |||
3 | wlp-objs := \ | ||
4 | driver.o \ | ||
5 | eda.o \ | ||
6 | messages.o \ | ||
7 | sysfs.o \ | ||
8 | txrx.o \ | ||
9 | wlp-lc.o \ | ||
10 | wss-lc.o | ||
diff --git a/drivers/uwb/wlp/driver.c b/drivers/uwb/wlp/driver.c new file mode 100644 index 000000000000..cb8d699b6a67 --- /dev/null +++ b/drivers/uwb/wlp/driver.c | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * | ||
4 | * Copyright (C) 2007 Intel Corporation | ||
5 | * Reinette Chatre <reinette.chatre@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Life cycle of WLP substack | ||
23 | * | ||
24 | * FIXME: Docs | ||
25 | */ | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | |||
29 | static int __init wlp_subsys_init(void) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | module_init(wlp_subsys_init); | ||
34 | |||
35 | static void __exit wlp_subsys_exit(void) | ||
36 | { | ||
37 | return; | ||
38 | } | ||
39 | module_exit(wlp_subsys_exit); | ||
40 | |||
41 | MODULE_AUTHOR("Reinette Chatre <reinette.chatre@intel.com>"); | ||
42 | MODULE_DESCRIPTION("WiMedia Logical Link Control Protocol (WLP)"); | ||
43 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/uwb/wlp/eda.c b/drivers/uwb/wlp/eda.c new file mode 100644 index 000000000000..cdfe8dfc4340 --- /dev/null +++ b/drivers/uwb/wlp/eda.c | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | * WUSB Wire Adapter: WLP interface | ||
3 | * Ethernet to device address cache | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * We need to be able to map ethernet addresses to device addresses | ||
24 | * and back because there is no explicit relationship between the eth | ||
25 | * addresses used in the ETH frames and the device addresses (no, it | ||
26 | * would not have been simpler to force as ETH address the MBOA MAC | ||
27 | * address...no, not at all :). | ||
28 | * | ||
29 | * A device has one MBOA MAC address and one device address. It is possible | ||
30 | * for a device to have more than one virtual MAC address (although a | ||
31 | * virtual address can be the same as the MBOA MAC address). The device | ||
32 | * address is guaranteed to be unique among the devices in the extended | ||
33 | * beacon group (see ECMA 17.1.1). We thus use the device address as index | ||
34 | * to this cache. We do allow searching based on virtual address as this | ||
35 | * is how Ethernet frames will be addressed. | ||
36 | * | ||
37 | * We need to support virtual EUI-48. Although, right now the virtual | ||
38 | * EUI-48 will always be the same as the MAC SAP address. The EDA cache | ||
39 | * entry thus contains a MAC SAP address as well as the virtual address | ||
40 | * (used to map the network stack address to a neighbor). When we move | ||
41 | * to support more than one virtual MAC on a host then this organization | ||
42 | * will have to change. Perhaps a neighbor has a list of WSSs, each with a | ||
43 | * tag and virtual EUI-48. | ||
44 | * | ||
45 | * On data transmission | ||
46 | * it is used to determine if the neighbor is connected and what WSS it | ||
47 | * belongs to. With this we know what tag to add to the WLP frame. Storing | ||
48 | * the WSS in the EDA cache may be overkill because we only support one | ||
49 | * WSS. Hopefully we will support more than one WSS at some point. | ||
50 | * On data reception it is used to determine the WSS based on | ||
51 | * the tag and address of the transmitting neighbor. | ||
52 | */ | ||
53 | |||
54 | #define D_LOCAL 5 | ||
55 | #include <linux/netdevice.h> | ||
56 | #include <linux/uwb/debug.h> | ||
57 | #include <linux/etherdevice.h> | ||
58 | #include <linux/wlp.h> | ||
59 | #include "wlp-internal.h" | ||
60 | |||
61 | |||
62 | /* FIXME: cache is not purged, only on device close */ | ||
63 | |||
64 | /* FIXME: does not scale, change to dynamic array */ | ||
65 | |||
/*
 * Initialize the EDA cache
 *
 * Sets up the empty entry list and the spinlock that protects it.
 * Call when the interface is being brought up.
 *
 * NOTE: Keep it as a separate function as the implementation will
 * change and be more complex.
 */
void wlp_eda_init(struct wlp_eda *eda)
{
	INIT_LIST_HEAD(&eda->cache);
	spin_lock_init(&eda->lock);
}
81 | |||
/*
 * Release the EDA cache
 *
 * Frees every cached entry.  Called when the interface is brought
 * down.  The lock is held for the whole purge so concurrent lookups
 * cannot see a half-torn-down list.
 */
void wlp_eda_release(struct wlp_eda *eda)
{
	unsigned long flags;
	struct wlp_eda_node *itr, *next;

	spin_lock_irqsave(&eda->lock, flags);
	list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
		list_del(&itr->list_node);
		kfree(itr);
	}
	spin_unlock_irqrestore(&eda->lock, flags);
}
101 | |||
102 | /* | ||
103 | * Add an address mapping | ||
104 | * | ||
105 | * @returns 0 if ok, < 0 errno code on error | ||
106 | * | ||
107 | * An address mapping is initially created when the neighbor device is seen | ||
108 | * for the first time (it is "onair"). At this time the neighbor is not | ||
109 | * connected or associated with a WSS so we only populate the Ethernet and | ||
110 | * Device address fields. | ||
111 | * | ||
112 | */ | ||
113 | int wlp_eda_create_node(struct wlp_eda *eda, | ||
114 | const unsigned char eth_addr[ETH_ALEN], | ||
115 | const struct uwb_dev_addr *dev_addr) | ||
116 | { | ||
117 | int result = 0; | ||
118 | struct wlp_eda_node *itr; | ||
119 | unsigned long flags; | ||
120 | |||
121 | BUG_ON(dev_addr == NULL || eth_addr == NULL); | ||
122 | spin_lock_irqsave(&eda->lock, flags); | ||
123 | list_for_each_entry(itr, &eda->cache, list_node) { | ||
124 | if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { | ||
125 | printk(KERN_ERR "EDA cache already contains entry " | ||
126 | "for neighbor %02x:%02x\n", | ||
127 | dev_addr->data[1], dev_addr->data[0]); | ||
128 | result = -EEXIST; | ||
129 | goto out_unlock; | ||
130 | } | ||
131 | } | ||
132 | itr = kzalloc(sizeof(*itr), GFP_ATOMIC); | ||
133 | if (itr != NULL) { | ||
134 | memcpy(itr->eth_addr, eth_addr, sizeof(itr->eth_addr)); | ||
135 | itr->dev_addr = *dev_addr; | ||
136 | list_add(&itr->list_node, &eda->cache); | ||
137 | } else | ||
138 | result = -ENOMEM; | ||
139 | out_unlock: | ||
140 | spin_unlock_irqrestore(&eda->lock, flags); | ||
141 | return result; | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Remove entry from EDA cache | ||
146 | * | ||
147 | * This is done when the device goes off air. | ||
148 | */ | ||
149 | void wlp_eda_rm_node(struct wlp_eda *eda, const struct uwb_dev_addr *dev_addr) | ||
150 | { | ||
151 | struct wlp_eda_node *itr, *next; | ||
152 | unsigned long flags; | ||
153 | |||
154 | spin_lock_irqsave(&eda->lock, flags); | ||
155 | list_for_each_entry_safe(itr, next, &eda->cache, list_node) { | ||
156 | if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { | ||
157 | list_del(&itr->list_node); | ||
158 | kfree(itr); | ||
159 | break; | ||
160 | } | ||
161 | } | ||
162 | spin_unlock_irqrestore(&eda->lock, flags); | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * Update an address mapping | ||
167 | * | ||
168 | * @returns 0 if ok, < 0 errno code on error | ||
169 | */ | ||
170 | int wlp_eda_update_node(struct wlp_eda *eda, | ||
171 | const struct uwb_dev_addr *dev_addr, | ||
172 | struct wlp_wss *wss, | ||
173 | const unsigned char virt_addr[ETH_ALEN], | ||
174 | const u8 tag, const enum wlp_wss_connect state) | ||
175 | { | ||
176 | int result = -ENOENT; | ||
177 | struct wlp_eda_node *itr; | ||
178 | unsigned long flags; | ||
179 | |||
180 | spin_lock_irqsave(&eda->lock, flags); | ||
181 | list_for_each_entry(itr, &eda->cache, list_node) { | ||
182 | if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { | ||
183 | /* Found it, update it */ | ||
184 | itr->wss = wss; | ||
185 | memcpy(itr->virt_addr, virt_addr, | ||
186 | sizeof(itr->virt_addr)); | ||
187 | itr->tag = tag; | ||
188 | itr->state = state; | ||
189 | result = 0; | ||
190 | goto out_unlock; | ||
191 | } | ||
192 | } | ||
193 | /* Not found */ | ||
194 | out_unlock: | ||
195 | spin_unlock_irqrestore(&eda->lock, flags); | ||
196 | return result; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * Update only state field of an address mapping | ||
201 | * | ||
202 | * @returns 0 if ok, < 0 errno code on error | ||
203 | */ | ||
204 | int wlp_eda_update_node_state(struct wlp_eda *eda, | ||
205 | const struct uwb_dev_addr *dev_addr, | ||
206 | const enum wlp_wss_connect state) | ||
207 | { | ||
208 | int result = -ENOENT; | ||
209 | struct wlp_eda_node *itr; | ||
210 | unsigned long flags; | ||
211 | |||
212 | spin_lock_irqsave(&eda->lock, flags); | ||
213 | list_for_each_entry(itr, &eda->cache, list_node) { | ||
214 | if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { | ||
215 | /* Found it, update it */ | ||
216 | itr->state = state; | ||
217 | result = 0; | ||
218 | goto out_unlock; | ||
219 | } | ||
220 | } | ||
221 | /* Not found */ | ||
222 | out_unlock: | ||
223 | spin_unlock_irqrestore(&eda->lock, flags); | ||
224 | return result; | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * Return contents of EDA cache entry | ||
229 | * | ||
230 | * @dev_addr: index to EDA cache | ||
231 | * @eda_entry: pointer to where contents of EDA cache will be copied | ||
232 | */ | ||
233 | int wlp_copy_eda_node(struct wlp_eda *eda, struct uwb_dev_addr *dev_addr, | ||
234 | struct wlp_eda_node *eda_entry) | ||
235 | { | ||
236 | int result = -ENOENT; | ||
237 | struct wlp_eda_node *itr; | ||
238 | unsigned long flags; | ||
239 | |||
240 | spin_lock_irqsave(&eda->lock, flags); | ||
241 | list_for_each_entry(itr, &eda->cache, list_node) { | ||
242 | if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { | ||
243 | *eda_entry = *itr; | ||
244 | result = 0; | ||
245 | goto out_unlock; | ||
246 | } | ||
247 | } | ||
248 | /* Not found */ | ||
249 | out_unlock: | ||
250 | spin_unlock_irqrestore(&eda->lock, flags); | ||
251 | return result; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Execute function for every element in the cache | ||
256 | * | ||
257 | * @function: function to execute on element of cache (must be atomic) | ||
258 | * @priv: private data of function | ||
259 | * @returns: result of first function that failed, or last function | ||
260 | * executed if no function failed. | ||
261 | * | ||
262 | * Stop executing when function returns error for any element in cache. | ||
263 | * | ||
264 | * IMPORTANT: We are using a spinlock here: the function executed on each | ||
265 | * element has to be atomic. | ||
266 | */ | ||
267 | int wlp_eda_for_each(struct wlp_eda *eda, wlp_eda_for_each_f function, | ||
268 | void *priv) | ||
269 | { | ||
270 | int result = 0; | ||
271 | struct wlp *wlp = container_of(eda, struct wlp, eda); | ||
272 | struct wlp_eda_node *entry; | ||
273 | unsigned long flags; | ||
274 | |||
275 | spin_lock_irqsave(&eda->lock, flags); | ||
276 | list_for_each_entry(entry, &eda->cache, list_node) { | ||
277 | result = (*function)(wlp, entry, priv); | ||
278 | if (result < 0) | ||
279 | break; | ||
280 | } | ||
281 | spin_unlock_irqrestore(&eda->lock, flags); | ||
282 | return result; | ||
283 | } | ||
284 | |||
/*
 * Execute function for single element in the cache (return dev addr)
 *
 * @virt_addr: index into EDA cache used to determine which element to
 *             execute the function on
 * @dev_addr: device address of element in cache will be returned using
 *            @dev_addr
 * @function: function to execute on element of cache (must be atomic)
 * @priv: private data of function
 * @returns: result of function, or -ENODEV (rate-limited error log)
 *           if no cache entry has a matching virtual address
 *
 * IMPORTANT: We are using a spinlock here: the function executed on the
 * element has to be atomic.
 */
int wlp_eda_for_virtual(struct wlp_eda *eda,
			const unsigned char virt_addr[ETH_ALEN],
			struct uwb_dev_addr *dev_addr,
			wlp_eda_for_each_f function,
			void *priv)
{
	int result = 0;
	struct wlp *wlp = container_of(eda, struct wlp, eda);
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_eda_node *itr;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&eda->lock, flags);
	list_for_each_entry(itr, &eda->cache, list_node) {
		if (!memcmp(itr->virt_addr, virt_addr,
			    sizeof(itr->virt_addr))) {
			/* First match wins; callback runs under the lock. */
			d_printf(6, dev, "EDA: looking for "
				 "%02x:%02x:%02x:%02x:%02x:%02x hit %02x:%02x "
				 "wss %p tag 0x%02x state %u\n",
				 virt_addr[0], virt_addr[1],
				 virt_addr[2], virt_addr[3],
				 virt_addr[4], virt_addr[5],
				 itr->dev_addr.data[1],
				 itr->dev_addr.data[0], itr->wss,
				 itr->tag, itr->state);
			result = (*function)(wlp, itr, priv);
			*dev_addr = itr->dev_addr;
			found = 1;
			break;
		} else
			d_printf(6, dev, "EDA: looking for "
				 "%02x:%02x:%02x:%02x:%02x:%02x "
				 "against "
				 "%02x:%02x:%02x:%02x:%02x:%02x miss\n",
				 virt_addr[0], virt_addr[1],
				 virt_addr[2], virt_addr[3],
				 virt_addr[4], virt_addr[5],
				 itr->virt_addr[0], itr->virt_addr[1],
				 itr->virt_addr[2], itr->virt_addr[3],
				 itr->virt_addr[4], itr->virt_addr[5]);
	}
	if (!found) {
		/* Rate-limited: this path can be hit per transmitted frame. */
		if (printk_ratelimit())
			dev_err(dev, "EDA: Eth addr %02x:%02x:%02x"
				":%02x:%02x:%02x not found.\n",
				virt_addr[0], virt_addr[1],
				virt_addr[2], virt_addr[3],
				virt_addr[4], virt_addr[5]);
		result = -ENODEV;
	}
	spin_unlock_irqrestore(&eda->lock, flags);
	return result;
}
353 | |||
/* Printable names for enum wlp_wss_connect, indexed by value. */
static const char *__wlp_wss_connect_state[] = {
	"WLP_WSS_UNCONNECTED",
	"WLP_WSS_CONNECTED",
	"WLP_WSS_CONNECT_FAILED",
};

/* Map a wlp_wss_connect value to its printable name. */
static const char *wlp_wss_connect_state_str(unsigned id)
{
	return (id < ARRAY_SIZE(__wlp_wss_connect_state))
		? __wlp_wss_connect_state[id]
		: "unknown WSS connection state";
}
365 | |||
/*
 * View EDA cache from user space
 *
 * A debugging feature to give user visibility into the EDA cache. Also
 * used to display members of WSS to user (called from wlp_wss_members_show())
 *
 * @buf: sysfs output buffer (at most PAGE_SIZE bytes are written)
 * @returns: number of characters written to @buf
 */
ssize_t wlp_eda_show(struct wlp *wlp, char *buf)
{
	ssize_t result = 0;
	struct wlp_eda_node *entry;
	unsigned long flags;
	struct wlp_eda *eda = &wlp->eda;
	spin_lock_irqsave(&eda->lock, flags);
	result = scnprintf(buf, PAGE_SIZE, "#eth_addr dev_addr wss_ptr "
			   "tag state virt_addr\n");
	list_for_each_entry(entry, &eda->cache, list_node) {
		result += scnprintf(buf + result, PAGE_SIZE - result,
				    "%02x:%02x:%02x:%02x:%02x:%02x %02x:%02x "
				    "%p 0x%02x %s "
				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
				    entry->eth_addr[0], entry->eth_addr[1],
				    entry->eth_addr[2], entry->eth_addr[3],
				    entry->eth_addr[4], entry->eth_addr[5],
				    entry->dev_addr.data[1],
				    entry->dev_addr.data[0], entry->wss,
				    entry->tag,
				    wlp_wss_connect_state_str(entry->state),
				    entry->virt_addr[0], entry->virt_addr[1],
				    entry->virt_addr[2], entry->virt_addr[3],
				    entry->virt_addr[4], entry->virt_addr[5]);
		/* scnprintf() never overruns; stop once the page is full. */
		if (result >= PAGE_SIZE)
			break;
	}
	spin_unlock_irqrestore(&eda->lock, flags);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_eda_show);
403 | |||
404 | /* | ||
405 | * Add new EDA cache entry based on user input in sysfs | ||
406 | * | ||
407 | * Should only be used for debugging. | ||
408 | * | ||
409 | * The WSS is assumed to be the only WSS supported. This needs to be | ||
410 | * redesigned when we support more than one WSS. | ||
411 | */ | ||
412 | ssize_t wlp_eda_store(struct wlp *wlp, const char *buf, size_t size) | ||
413 | { | ||
414 | ssize_t result; | ||
415 | struct wlp_eda *eda = &wlp->eda; | ||
416 | u8 eth_addr[6]; | ||
417 | struct uwb_dev_addr dev_addr; | ||
418 | u8 tag; | ||
419 | unsigned state; | ||
420 | |||
421 | result = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx " | ||
422 | "%02hhx:%02hhx %02hhx %u\n", | ||
423 | ð_addr[0], ð_addr[1], | ||
424 | ð_addr[2], ð_addr[3], | ||
425 | ð_addr[4], ð_addr[5], | ||
426 | &dev_addr.data[1], &dev_addr.data[0], &tag, &state); | ||
427 | switch (result) { | ||
428 | case 6: /* no dev addr specified -- remove entry NOT IMPLEMENTED */ | ||
429 | /*result = wlp_eda_rm(eda, eth_addr, &dev_addr);*/ | ||
430 | result = -ENOSYS; | ||
431 | break; | ||
432 | case 10: | ||
433 | state = state >= 1 ? 1 : 0; | ||
434 | result = wlp_eda_create_node(eda, eth_addr, &dev_addr); | ||
435 | if (result < 0 && result != -EEXIST) | ||
436 | goto error; | ||
437 | /* Set virtual addr to be same as MAC */ | ||
438 | result = wlp_eda_update_node(eda, &dev_addr, &wlp->wss, | ||
439 | eth_addr, tag, state); | ||
440 | if (result < 0) | ||
441 | goto error; | ||
442 | break; | ||
443 | default: /* bad format */ | ||
444 | result = -EINVAL; | ||
445 | } | ||
446 | error: | ||
447 | return result < 0 ? result : size; | ||
448 | } | ||
449 | EXPORT_SYMBOL_GPL(wlp_eda_store); | ||
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c new file mode 100644 index 000000000000..a64cb8241713 --- /dev/null +++ b/drivers/uwb/wlp/messages.c | |||
@@ -0,0 +1,1946 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * Message construction and parsing | ||
4 | * | ||
5 | * Copyright (C) 2007 Intel Corporation | ||
6 | * Reinette Chatre <reinette.chatre@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | */ | ||
25 | |||
26 | #include <linux/wlp.h> | ||
27 | #define D_LOCAL 6 | ||
28 | #include <linux/uwb/debug.h> | ||
29 | #include "wlp-internal.h" | ||
30 | |||
31 | static | ||
32 | const char *__wlp_assoc_frame[] = { | ||
33 | [WLP_ASSOC_D1] = "WLP_ASSOC_D1", | ||
34 | [WLP_ASSOC_D2] = "WLP_ASSOC_D2", | ||
35 | [WLP_ASSOC_M1] = "WLP_ASSOC_M1", | ||
36 | [WLP_ASSOC_M2] = "WLP_ASSOC_M2", | ||
37 | [WLP_ASSOC_M3] = "WLP_ASSOC_M3", | ||
38 | [WLP_ASSOC_M4] = "WLP_ASSOC_M4", | ||
39 | [WLP_ASSOC_M5] = "WLP_ASSOC_M5", | ||
40 | [WLP_ASSOC_M6] = "WLP_ASSOC_M6", | ||
41 | [WLP_ASSOC_M7] = "WLP_ASSOC_M7", | ||
42 | [WLP_ASSOC_M8] = "WLP_ASSOC_M8", | ||
43 | [WLP_ASSOC_F0] = "WLP_ASSOC_F0", | ||
44 | [WLP_ASSOC_E1] = "WLP_ASSOC_E1", | ||
45 | [WLP_ASSOC_E2] = "WLP_ASSOC_E2", | ||
46 | [WLP_ASSOC_C1] = "WLP_ASSOC_C1", | ||
47 | [WLP_ASSOC_C2] = "WLP_ASSOC_C2", | ||
48 | [WLP_ASSOC_C3] = "WLP_ASSOC_C3", | ||
49 | [WLP_ASSOC_C4] = "WLP_ASSOC_C4", | ||
50 | }; | ||
51 | |||
52 | static const char *wlp_assoc_frame_str(unsigned id) | ||
53 | { | ||
54 | if (id >= ARRAY_SIZE(__wlp_assoc_frame)) | ||
55 | return "unknown association frame"; | ||
56 | return __wlp_assoc_frame[id]; | ||
57 | } | ||
58 | |||
/* Human-readable strings for the WLP association error codes, indexed
 * by the enum wlp_assc_error value. */
static const char *__wlp_assc_error[] = {
	"none",
	"Authenticator Failure",
	"Rogue activity suspected",
	"Device busy",
	"Setup Locked",
	"Registrar not ready",
	"Invalid WSS selection",
	"Message timeout",
	"Enrollment session timeout",
	"Device password invalid",
	"Unsupported version",
	"Internal error",
	"Undefined error",
	"Numeric comparison failure",
	"Waiting for user input",
};

/* Map an association error code to its printable description. */
static const char *wlp_assc_error_str(unsigned id)
{
	return (id < ARRAY_SIZE(__wlp_assc_error))
		? __wlp_assc_error[id]
		: "unknown WLP association error";
}
83 | |||
/* Fill an attribute header: type and value length, both stored
 * little-endian on the wire (per the WLP spec). */
static inline void wlp_set_attr_hdr(struct wlp_attr_hdr *hdr, unsigned type,
				    size_t len)
{
	hdr->type = cpu_to_le16(type);
	hdr->length = cpu_to_le16(len);
}
90 | |||
/*
 * Populate fields of a constant sized attribute
 *
 * @returns: total size of attribute including size of new value
 *
 * We have two instances of this function (wlp_pset and wlp_set): one takes
 * the value as a parameter, the other takes a pointer to the value as
 * parameter. They thus only differ in how the value is assigned to the
 * attribute.
 *
 * We use sizeof(*attr) - sizeof(struct wlp_attr_hdr) instead of
 * sizeof(type) to be able to use this same code for the structures that
 * contain 8bit enum values and be able to deal with pointer types.
 */
/* wlp_set(): generate wlp_set_<name>() taking the value by value. */
#define wlp_set(type, type_code, name)					\
static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value)	\
{									\
	d_fnstart(6, NULL, "(attribute %p)\n", attr);			\
	wlp_set_attr_hdr(&attr->hdr, type_code,				\
			 sizeof(*attr) - sizeof(struct wlp_attr_hdr));	\
	attr->name = value;						\
	d_dump(6, NULL, attr, sizeof(*attr));				\
	d_fnend(6, NULL, "(attribute %p)\n", attr);			\
	return sizeof(*attr);						\
}

/* wlp_pset(): same generator, but the value is passed by pointer and
 * dereferenced on assignment (for struct-typed attribute values). */
#define wlp_pset(type, type_code, name)					\
static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value)	\
{									\
	d_fnstart(6, NULL, "(attribute %p)\n", attr);			\
	wlp_set_attr_hdr(&attr->hdr, type_code,				\
			 sizeof(*attr) - sizeof(struct wlp_attr_hdr));	\
	attr->name = *value;						\
	d_dump(6, NULL, attr, sizeof(*attr));				\
	d_fnend(6, NULL, "(attribute %p)\n", attr);			\
	return sizeof(*attr);						\
}
128 | |||
/**
 * Populate fields of a variable attribute
 *
 * @returns: total size of attribute including size of new value
 *
 * Provided with a pointer to the memory area reserved for the
 * attribute structure, the field is populated with the value. The
 * reserved memory has to contain enough space for the value (the
 * caller guarantees this; memcpy() below copies @len bytes blindly).
 */
#define wlp_vset(type, type_code, name)					\
static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value,	\
			     size_t len)				\
{									\
	d_fnstart(6, NULL, "(attribute %p)\n", attr);			\
	wlp_set_attr_hdr(&attr->hdr, type_code, len);			\
	memcpy(attr->name, value, len);					\
	d_dump(6, NULL, attr, sizeof(*attr) + len);			\
	d_fnend(6, NULL, "(attribute %p)\n", attr);			\
	return sizeof(*attr) + len;					\
}
149 | |||
/* Generated setters for the WLP attributes: wlp_vset for variable-length
 * (string) values, wlp_set for plain scalar/enum values, wlp_pset for
 * struct values passed by pointer. */
wlp_vset(char *, WLP_ATTR_DEV_NAME, dev_name)
wlp_vset(char *, WLP_ATTR_MANUF, manufacturer)
wlp_set(enum wlp_assoc_type, WLP_ATTR_MSG_TYPE, msg_type)
wlp_vset(char *, WLP_ATTR_MODEL_NAME, model_name)
wlp_vset(char *, WLP_ATTR_MODEL_NR, model_nr)
wlp_vset(char *, WLP_ATTR_SERIAL, serial)
wlp_vset(char *, WLP_ATTR_WSS_NAME, wss_name)
wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_E, uuid_e)
wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_R, uuid_r)
wlp_pset(struct wlp_uuid *, WLP_ATTR_WSSID, wssid)
wlp_pset(struct wlp_dev_type *, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
/*wlp_pset(struct wlp_dev_type *, WLP_ATTR_SEC_DEV_TYPE, sec_dev_type)*/
wlp_set(u8, WLP_ATTR_WLP_VER, version)
wlp_set(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
wlp_set(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
wlp_set(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
wlp_set(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_BCAST, wss_bcast)
wlp_pset(struct wlp_nonce *, WLP_ATTR_ENRL_NONCE, enonce)
wlp_pset(struct wlp_nonce *, WLP_ATTR_REG_NONCE, rnonce)
wlp_set(u8, WLP_ATTR_WSS_TAG, wss_tag)
wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_VIRT, wss_virt)
172 | |||
173 | /** | ||
174 | * Fill in the WSS information attributes | ||
175 | * | ||
176 | * We currently only support one WSS, and this is assumed in this function | ||
177 | * that can populate only one WSS information attribute. | ||
178 | */ | ||
179 | static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr, | ||
180 | struct wlp_wss *wss) | ||
181 | { | ||
182 | size_t datalen; | ||
183 | void *ptr = attr->wss_info; | ||
184 | size_t used = sizeof(*attr); | ||
185 | d_fnstart(6, NULL, "(attribute %p)\n", attr); | ||
186 | datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); | ||
187 | wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); | ||
188 | used = wlp_set_wssid(ptr, &wss->wssid); | ||
189 | used += wlp_set_wss_name(ptr + used, wss->name, strlen(wss->name)); | ||
190 | used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); | ||
191 | used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); | ||
192 | used += wlp_set_wss_bcast(ptr + used, &wss->bcast); | ||
193 | d_dump(6, NULL, attr, sizeof(*attr) + datalen); | ||
194 | d_fnend(6, NULL, "(attribute %p, used %d)\n", | ||
195 | attr, (int)(sizeof(*attr) + used)); | ||
196 | return sizeof(*attr) + used; | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * Verify attribute header | ||
201 | * | ||
202 | * @hdr: Pointer to attribute header that will be verified. | ||
203 | * @type: Expected attribute type. | ||
204 | * @len: Expected length of attribute value (excluding header). | ||
205 | * | ||
206 | * Most attribute values have a known length even when they do have a | ||
207 | * length field. This knowledge can be used via this function to verify | ||
208 | * that the length field matches the expected value. | ||
209 | */ | ||
210 | static int wlp_check_attr_hdr(struct wlp *wlp, struct wlp_attr_hdr *hdr, | ||
211 | enum wlp_attr_type type, unsigned len) | ||
212 | { | ||
213 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
214 | |||
215 | if (le16_to_cpu(hdr->type) != type) { | ||
216 | dev_err(dev, "WLP: unexpected header type. Expected " | ||
217 | "%u, got %u.\n", type, le16_to_cpu(hdr->type)); | ||
218 | return -EINVAL; | ||
219 | } | ||
220 | if (le16_to_cpu(hdr->length) != len) { | ||
221 | dev_err(dev, "WLP: unexpected length in header. Expected " | ||
222 | "%u, got %u.\n", len, le16_to_cpu(hdr->length)); | ||
223 | return -EINVAL; | ||
224 | } | ||
225 | return 0; | ||
226 | } | ||
227 | |||
/**
 * Check if header of WSS information attribute valid
 *
 * @returns: length of WSS attributes (value of length attribute field) if
 *           valid WSS information attribute found
 *           -ENODATA if no WSS information attribute found
 *           -EIO other error occurred
 *
 * The WSS information attribute is optional. The function will be provided
 * with a pointer to data that could _potentially_ be a WSS information
 * attribute. If a valid WSS information attribute is found it will return
 * the length of its value (>= 0); if no WSS information attribute is
 * found it will return -ENODATA, and another error will be returned if it
 * is a WSS information attribute, but some parsing failure occurred.
 */
static int wlp_check_wss_info_attr_hdr(struct wlp *wlp,
				       struct wlp_attr_hdr *hdr, size_t buflen)
{
	struct device *dev = &wlp->rc->uwb_dev.dev;
	size_t len;
	int result = 0;

	if (buflen < sizeof(*hdr)) {
		dev_err(dev, "WLP: Not enough space in buffer to parse"
			" WSS information attribute header.\n");
		result = -EIO;
		goto out;
	}
	if (le16_to_cpu(hdr->type) != WLP_ATTR_WSS_INFO) {
		/* WSS information is optional */
		result = -ENODATA;
		goto out;
	}
	len = le16_to_cpu(hdr->length);
	if (buflen < sizeof(*hdr) + len) {
		dev_err(dev, "WLP: Not enough space in buffer to parse "
			"variable data. Got %d, expected %d.\n",
			(int)buflen, (int)(sizeof(*hdr) + len));
		result = -EIO;
		goto out;
	}
	result = len;
out:
	return result;
}
273 | |||
274 | |||
/**
 * Get value of attribute from fixed size attribute field.
 *
 * @attr: Pointer to attribute field.
 * @value: Pointer to variable in which attribute value will be placed.
 * @buflen: Size of buffer in which attribute field (including header)
 *          can be found.
 * @returns: Amount of given buffer consumed by parsing for this
 *           attribute; -EINVAL on a negative @buflen or header
 *           mismatch, -EIO on a truncated buffer.
 *
 * The size and type of the value is known by the type of the attribute.
 */
#define wlp_get(type, type_code, name)					\
ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr,	\
		       type *value, ssize_t buflen)			\
{									\
	struct device *dev = &wlp->rc->uwb_dev.dev;			\
	if (buflen < 0)							\
		return -EINVAL;						\
	if (buflen < sizeof(*attr)) {					\
		dev_err(dev, "WLP: Not enough space in buffer to parse"	\
			" attribute field. Need %d, received %zu\n",	\
			(int)sizeof(*attr), buflen);			\
		return -EIO;						\
	}								\
	if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code,		\
			       sizeof(attr->name)) < 0) {		\
		dev_err(dev, "WLP: Header verification failed. \n");	\
		return -EINVAL;						\
	}								\
	*value = attr->name;						\
	return sizeof(*attr);						\
}

/* wlp_get_sparse(): like wlp_get(), but the generated getter is
 * file-local (static). */
#define wlp_get_sparse(type, type_code, name) \
	static wlp_get(type, type_code, name)
310 | |||
/**
 * Get value of attribute from variable sized attribute field.
 *
 * @max: The maximum size of this attribute. This value is dictated by
 *       the maximum value from the WLP specification.
 *
 * @attr: Pointer to attribute field.
 * @value: Pointer to variable that will contain the value. The memory
 *         must already have been allocated for this value.
 * @buflen: Size of buffer in which attribute field (including header)
 *          can be found.
 * @returns: Amount of given buffer consumed by parsing for this
 *           attribute; -EINVAL/-EFBIG/-EIO on a malformed, oversized,
 *           or truncated attribute.
 */
#define wlp_vget(type_val, type_code, name, max)			\
static ssize_t wlp_get_##name(struct wlp *wlp,				\
			      struct wlp_attr_##name *attr,		\
			      type_val *value, ssize_t buflen)		\
{									\
	struct device *dev = &wlp->rc->uwb_dev.dev;			\
	size_t len;							\
	if (buflen < 0)							\
		return -EINVAL;						\
	if (buflen < sizeof(*attr)) {					\
		dev_err(dev, "WLP: Not enough space in buffer to parse"	\
			" header.\n");					\
		return -EIO;						\
	}								\
	if (le16_to_cpu(attr->hdr.type) != type_code) {			\
		dev_err(dev, "WLP: Unexpected attribute type. Got %u, "	\
			"expected %u.\n", le16_to_cpu(attr->hdr.type),	\
			type_code);					\
		return -EINVAL;						\
	}								\
	len = le16_to_cpu(attr->hdr.length);				\
	if (len > max) {						\
		dev_err(dev, "WLP: Attribute larger than maximum "	\
			"allowed. Received %zu, max is %d.\n", len,	\
			(int)max);					\
		return -EFBIG;						\
	}								\
	if (buflen < sizeof(*attr) + len) {				\
		dev_err(dev, "WLP: Not enough space in buffer to parse "\
			"variable data.\n");				\
		return -EIO;						\
	}								\
	memcpy(value, (void *) attr + sizeof(*attr), len);		\
	return sizeof(*attr) + len;					\
}
359 | |||
/* Fixed-size attribute accessors: each invocation below defines a
 * wlp_get_<name>() parser for the named attribute type.  The
 * "_sparse" variants are static (file local). */
wlp_get(u8, WLP_ATTR_WLP_VER, version)
wlp_get_sparse(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
wlp_get_sparse(struct wlp_dev_type, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
wlp_get_sparse(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_E, uuid_e)
wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_R, uuid_r)
wlp_get(struct wlp_uuid, WLP_ATTR_WSSID, wssid)
wlp_get_sparse(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
wlp_get_sparse(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_BCAST, wss_bcast)
wlp_get_sparse(u8, WLP_ATTR_WSS_TAG, wss_tag)
wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_VIRT, wss_virt)
wlp_get_sparse(struct wlp_nonce, WLP_ATTR_ENRL_NONCE, enonce)
wlp_get_sparse(struct wlp_nonce, WLP_ATTR_REG_NONCE, rnonce)

/* Variable-size (string) attribute accessors.
 *
 * The buffers for the device info attributes can be found in the
 * wlp_device_info struct. These buffers contain one byte more than the
 * max allowed by the spec - this is done to be able to add the
 * terminating \0 for user display. This terminating byte is not required
 * in the actual attribute field (because it has a length field) so the
 * maximum allowed for this value is one less than its size in the
 * structure.
 */
wlp_vget(char, WLP_ATTR_WSS_NAME, wss_name,
	 FIELD_SIZEOF(struct wlp_wss, name) - 1)
wlp_vget(char, WLP_ATTR_DEV_NAME, dev_name,
	 FIELD_SIZEOF(struct wlp_device_info, name) - 1)
wlp_vget(char, WLP_ATTR_MANUF, manufacturer,
	 FIELD_SIZEOF(struct wlp_device_info, manufacturer) - 1)
wlp_vget(char, WLP_ATTR_MODEL_NAME, model_name,
	 FIELD_SIZEOF(struct wlp_device_info, model_name) - 1)
wlp_vget(char, WLP_ATTR_MODEL_NR, model_nr,
	 FIELD_SIZEOF(struct wlp_device_info, model_nr) - 1)
wlp_vget(char, WLP_ATTR_SERIAL, serial,
	 FIELD_SIZEOF(struct wlp_device_info, serial) - 1)
395 | |||
396 | /** | ||
397 | * Retrieve WSS Name, Accept enroll, Secure status, Broadcast from WSS info | ||
398 | * | ||
399 | * @attr: pointer to WSS name attribute in WSS information attribute field | ||
400 | * @info: structure that will be populated with data from WSS information | ||
401 | * field (WSS name, Accept enroll, secure status, broadcast address) | ||
402 | * @buflen: size of buffer | ||
403 | * | ||
404 | * Although the WSSID attribute forms part of the WSS info attribute it is | ||
405 | * retrieved separately and stored in a different location. | ||
406 | */ | ||
407 | static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | ||
408 | struct wlp_attr_hdr *attr, | ||
409 | struct wlp_wss_tmp_info *info, | ||
410 | ssize_t buflen) | ||
411 | { | ||
412 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
413 | void *ptr = attr; | ||
414 | size_t used = 0; | ||
415 | ssize_t result = -EINVAL; | ||
416 | |||
417 | d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n"); | ||
418 | result = wlp_get_wss_name(wlp, ptr, info->name, buflen); | ||
419 | if (result < 0) { | ||
420 | dev_err(dev, "WLP: unable to obtain WSS name from " | ||
421 | "WSS info in D2 message.\n"); | ||
422 | goto error_parse; | ||
423 | } | ||
424 | used += result; | ||
425 | d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n"); | ||
426 | result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, | ||
427 | buflen - used); | ||
428 | if (result < 0) { | ||
429 | dev_err(dev, "WLP: unable to obtain accepting " | ||
430 | "enrollment from WSS info in D2 message.\n"); | ||
431 | goto error_parse; | ||
432 | } | ||
433 | if (info->accept_enroll != 0 && info->accept_enroll != 1) { | ||
434 | dev_err(dev, "WLP: invalid value for accepting " | ||
435 | "enrollment in D2 message.\n"); | ||
436 | result = -EINVAL; | ||
437 | goto error_parse; | ||
438 | } | ||
439 | used += result; | ||
440 | d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n"); | ||
441 | result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, | ||
442 | buflen - used); | ||
443 | if (result < 0) { | ||
444 | dev_err(dev, "WLP: unable to obtain secure " | ||
445 | "status from WSS info in D2 message.\n"); | ||
446 | goto error_parse; | ||
447 | } | ||
448 | if (info->sec_status != 0 && info->sec_status != 1) { | ||
449 | dev_err(dev, "WLP: invalid value for secure " | ||
450 | "status in D2 message.\n"); | ||
451 | result = -EINVAL; | ||
452 | goto error_parse; | ||
453 | } | ||
454 | used += result; | ||
455 | d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n"); | ||
456 | result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, | ||
457 | buflen - used); | ||
458 | if (result < 0) { | ||
459 | dev_err(dev, "WLP: unable to obtain broadcast " | ||
460 | "address from WSS info in D2 message.\n"); | ||
461 | goto error_parse; | ||
462 | } | ||
463 | used += result; | ||
464 | result = used; | ||
465 | error_parse: | ||
466 | return result; | ||
467 | } | ||
468 | |||
469 | /** | ||
470 | * Create a new WSSID entry for the neighbor, allocate temporary storage | ||
471 | * | ||
472 | * Each neighbor can have many WSS active. We maintain a list of WSSIDs | ||
473 | * advertised by neighbor. During discovery we also cache information about | ||
474 | * these WSS in temporary storage. | ||
475 | * | ||
476 | * The temporary storage will be removed after it has been used (eg. | ||
477 | * displayed to user), the wssid element will be removed from the list when | ||
478 | * the neighbor is rediscovered or when it disappears. | ||
479 | */ | ||
480 | static struct wlp_wssid_e *wlp_create_wssid_e(struct wlp *wlp, | ||
481 | struct wlp_neighbor_e *neighbor) | ||
482 | { | ||
483 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
484 | struct wlp_wssid_e *wssid_e; | ||
485 | |||
486 | wssid_e = kzalloc(sizeof(*wssid_e), GFP_KERNEL); | ||
487 | if (wssid_e == NULL) { | ||
488 | dev_err(dev, "WLP: unable to allocate memory " | ||
489 | "for WSS information.\n"); | ||
490 | goto error_alloc; | ||
491 | } | ||
492 | wssid_e->info = kzalloc(sizeof(struct wlp_wss_tmp_info), GFP_KERNEL); | ||
493 | if (wssid_e->info == NULL) { | ||
494 | dev_err(dev, "WLP: unable to allocate memory " | ||
495 | "for temporary WSS information.\n"); | ||
496 | kfree(wssid_e); | ||
497 | wssid_e = NULL; | ||
498 | goto error_alloc; | ||
499 | } | ||
500 | list_add(&wssid_e->node, &neighbor->wssid); | ||
501 | error_alloc: | ||
502 | return wssid_e; | ||
503 | } | ||
504 | |||
505 | /** | ||
506 | * Parse WSS information attribute | ||
507 | * | ||
508 | * @attr: pointer to WSS information attribute header | ||
509 | * @buflen: size of buffer in which WSS information attribute appears | ||
510 | * @wssid: will place wssid from WSS info attribute in this location | ||
511 | * @wss_info: will place other information from WSS information attribute | ||
512 | * in this location | ||
513 | * | ||
514 | * memory for @wssid and @wss_info must be allocated when calling this | ||
515 | */ | ||
516 | static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr, | ||
517 | size_t buflen, struct wlp_uuid *wssid, | ||
518 | struct wlp_wss_tmp_info *wss_info) | ||
519 | { | ||
520 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
521 | ssize_t result; | ||
522 | size_t len; | ||
523 | size_t used = 0; | ||
524 | void *ptr; | ||
525 | |||
526 | result = wlp_check_wss_info_attr_hdr(wlp, (struct wlp_attr_hdr *)attr, | ||
527 | buflen); | ||
528 | if (result < 0) | ||
529 | goto out; | ||
530 | len = result; | ||
531 | used = sizeof(*attr); | ||
532 | ptr = attr; | ||
533 | d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n"); | ||
534 | result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used); | ||
535 | if (result < 0) { | ||
536 | dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); | ||
537 | goto out; | ||
538 | } | ||
539 | used += result; | ||
540 | result = wlp_get_wss_info_attrs(wlp, ptr + used, wss_info, | ||
541 | buflen - used); | ||
542 | if (result < 0) { | ||
543 | dev_err(dev, "WLP: unable to obtain WSS information " | ||
544 | "from WSS information attributes. \n"); | ||
545 | goto out; | ||
546 | } | ||
547 | used += result; | ||
548 | if (len + sizeof(*attr) != used) { | ||
549 | dev_err(dev, "WLP: Amount of data parsed does not " | ||
550 | "match length field. Parsed %zu, length " | ||
551 | "field %zu. \n", used, len); | ||
552 | result = -EINVAL; | ||
553 | goto out; | ||
554 | } | ||
555 | result = used; | ||
556 | d_printf(6, dev, "WLP: Successfully parsed WLP information " | ||
557 | "attribute. used %zu bytes\n", used); | ||
558 | out: | ||
559 | return result; | ||
560 | } | ||
561 | |||
562 | /** | ||
563 | * Retrieve WSS info from association frame | ||
564 | * | ||
565 | * @attr: pointer to WSS information attribute | ||
566 | * @neighbor: ptr to neighbor being discovered, NULL if enrollment in | ||
567 | * progress | ||
568 | * @wss: ptr to WSS being enrolled in, NULL if discovery in progress | ||
569 | * @buflen: size of buffer in which WSS information appears | ||
570 | * | ||
571 | * The WSS information attribute appears in the D2 association message. | ||
572 | * This message is used in two ways: to discover all neighbors or to enroll | ||
573 | * into a WSS activated by a neighbor. During discovery we only want to | ||
574 | * store the WSS info in a cache, to be deleted right after it has been | ||
575 | * used (eg. displayed to the user). During enrollment we store the WSS | ||
576 | * information for the lifetime of enrollment. | ||
577 | * | ||
578 | * During discovery we are interested in all WSS information, during | ||
579 | * enrollment we are only interested in the WSS being enrolled in. Even so, | ||
580 | * when in enrollment we keep parsing the message after finding the WSS of | ||
581 | * interest, this simplifies the calling routine in that it can be sure | ||
582 | * that all WSS information attributes have been parsed out of the message. | ||
583 | * | ||
584 | * Association frame is process with nbmutex held. The list access is safe. | ||
585 | */ | ||
586 | static ssize_t wlp_get_all_wss_info(struct wlp *wlp, | ||
587 | struct wlp_attr_wss_info *attr, | ||
588 | struct wlp_neighbor_e *neighbor, | ||
589 | struct wlp_wss *wss, ssize_t buflen) | ||
590 | { | ||
591 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
592 | size_t used = 0; | ||
593 | ssize_t result = -EINVAL; | ||
594 | struct wlp_attr_wss_info *cur; | ||
595 | struct wlp_uuid wssid; | ||
596 | struct wlp_wss_tmp_info wss_info; | ||
597 | unsigned enroll; /* 0 - discovery to cache, 1 - enrollment */ | ||
598 | struct wlp_wssid_e *wssid_e; | ||
599 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
600 | |||
601 | d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n", | ||
602 | wlp, attr, neighbor, wss, (int)buflen); | ||
603 | if (buflen < 0) | ||
604 | goto out; | ||
605 | |||
606 | if (neighbor != NULL && wss == NULL) | ||
607 | enroll = 0; /* discovery */ | ||
608 | else if (wss != NULL && neighbor == NULL) | ||
609 | enroll = 1; /* enrollment */ | ||
610 | else | ||
611 | goto out; | ||
612 | |||
613 | cur = attr; | ||
614 | while (buflen - used > 0) { | ||
615 | memset(&wss_info, 0, sizeof(wss_info)); | ||
616 | cur = (void *)cur + used; | ||
617 | result = wlp_get_wss_info(wlp, cur, buflen - used, &wssid, | ||
618 | &wss_info); | ||
619 | if (result == -ENODATA) { | ||
620 | result = used; | ||
621 | goto out; | ||
622 | } else if (result < 0) { | ||
623 | dev_err(dev, "WLP: Unable to parse WSS information " | ||
624 | "from WSS information attribute. \n"); | ||
625 | result = -EINVAL; | ||
626 | goto error_parse; | ||
627 | } | ||
628 | if (enroll && !memcmp(&wssid, &wss->wssid, sizeof(wssid))) { | ||
629 | if (wss_info.accept_enroll != 1) { | ||
630 | dev_err(dev, "WLP: Requested WSS does " | ||
631 | "not accept enrollment.\n"); | ||
632 | result = -EINVAL; | ||
633 | goto out; | ||
634 | } | ||
635 | memcpy(wss->name, wss_info.name, sizeof(wss->name)); | ||
636 | wss->bcast = wss_info.bcast; | ||
637 | wss->secure_status = wss_info.sec_status; | ||
638 | wss->accept_enroll = wss_info.accept_enroll; | ||
639 | wss->state = WLP_WSS_STATE_PART_ENROLLED; | ||
640 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); | ||
641 | d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n", | ||
642 | buf); | ||
643 | } else { | ||
644 | wssid_e = wlp_create_wssid_e(wlp, neighbor); | ||
645 | if (wssid_e == NULL) { | ||
646 | dev_err(dev, "WLP: Cannot create new WSSID " | ||
647 | "entry for neighbor %02x:%02x.\n", | ||
648 | neighbor->uwb_dev->dev_addr.data[1], | ||
649 | neighbor->uwb_dev->dev_addr.data[0]); | ||
650 | result = -ENOMEM; | ||
651 | goto out; | ||
652 | } | ||
653 | wssid_e->wssid = wssid; | ||
654 | *wssid_e->info = wss_info; | ||
655 | } | ||
656 | used += result; | ||
657 | } | ||
658 | result = used; | ||
659 | error_parse: | ||
660 | if (result < 0 && !enroll) /* this was a discovery */ | ||
661 | wlp_remove_neighbor_tmp_info(neighbor); | ||
662 | out: | ||
663 | d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, " | ||
664 | "result %d \n", wlp, attr, neighbor, wss, (int)buflen, | ||
665 | (int)result); | ||
666 | return result; | ||
667 | |||
668 | } | ||
669 | |||
/**
 * Parse WSS information attributes into cache for discovery
 *
 * @attr: the first WSS information attribute in message
 * @neighbor: the neighbor whose cache will be populated
 * @buflen: size of the input buffer
 *
 * Convenience wrapper around wlp_get_all_wss_info() for the discovery
 * case (wss == NULL).  Returns bytes consumed or a negative errno.
 */
static ssize_t wlp_get_wss_info_to_cache(struct wlp *wlp,
					 struct wlp_attr_wss_info *attr,
					 struct wlp_neighbor_e *neighbor,
					 ssize_t buflen)
{
	return wlp_get_all_wss_info(wlp, attr, neighbor, NULL, buflen);
}
684 | |||
/**
 * Parse WSS information attributes into WSS struct for enrollment
 *
 * @attr: the first WSS information attribute in message
 * @wss: the WSS that will be enrolled
 * @buflen: size of the input buffer
 *
 * Convenience wrapper around wlp_get_all_wss_info() for the enrollment
 * case (neighbor == NULL).  Returns bytes consumed or a negative errno.
 */
static ssize_t wlp_get_wss_info_to_enroll(struct wlp *wlp,
					  struct wlp_attr_wss_info *attr,
					  struct wlp_wss *wss, ssize_t buflen)
{
	return wlp_get_all_wss_info(wlp, attr, NULL, wss, buflen);
}
698 | |||
699 | /** | ||
700 | * Construct a D1 association frame | ||
701 | * | ||
702 | * We use the radio control functions to determine the values of the device | ||
703 | * properties. These are of variable length and the total space needed is | ||
704 | * tallied first before we start constructing the message. The radio | ||
705 | * control functions return strings that are terminated with \0. This | ||
706 | * character should not be included in the message (there is a length field | ||
707 | * accompanying it in the attribute). | ||
708 | */ | ||
709 | static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | ||
710 | struct sk_buff **skb) | ||
711 | { | ||
712 | |||
713 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
714 | int result = 0; | ||
715 | struct wlp_device_info *info; | ||
716 | size_t used = 0; | ||
717 | struct wlp_frame_assoc *_d1; | ||
718 | struct sk_buff *_skb; | ||
719 | void *d1_itr; | ||
720 | |||
721 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
722 | if (wlp->dev_info == NULL) { | ||
723 | result = __wlp_setup_device_info(wlp); | ||
724 | if (result < 0) { | ||
725 | dev_err(dev, "WLP: Unable to setup device " | ||
726 | "information for D1 message.\n"); | ||
727 | goto error; | ||
728 | } | ||
729 | } | ||
730 | info = wlp->dev_info; | ||
731 | d_printf(6, dev, "Local properties:\n" | ||
732 | "Device name (%d bytes): %s\n" | ||
733 | "Model name (%d bytes): %s\n" | ||
734 | "Manufacturer (%d bytes): %s\n" | ||
735 | "Model number (%d bytes): %s\n" | ||
736 | "Serial number (%d bytes): %s\n" | ||
737 | "Primary device type: \n" | ||
738 | " Category: %d \n" | ||
739 | " OUI: %02x:%02x:%02x \n" | ||
740 | " OUI Subdivision: %u \n", | ||
741 | (int)strlen(info->name), info->name, | ||
742 | (int)strlen(info->model_name), info->model_name, | ||
743 | (int)strlen(info->manufacturer), info->manufacturer, | ||
744 | (int)strlen(info->model_nr), info->model_nr, | ||
745 | (int)strlen(info->serial), info->serial, | ||
746 | info->prim_dev_type.category, | ||
747 | info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], | ||
748 | info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); | ||
749 | _skb = dev_alloc_skb(sizeof(*_d1) | ||
750 | + sizeof(struct wlp_attr_uuid_e) | ||
751 | + sizeof(struct wlp_attr_wss_sel_mthd) | ||
752 | + sizeof(struct wlp_attr_dev_name) | ||
753 | + strlen(info->name) | ||
754 | + sizeof(struct wlp_attr_manufacturer) | ||
755 | + strlen(info->manufacturer) | ||
756 | + sizeof(struct wlp_attr_model_name) | ||
757 | + strlen(info->model_name) | ||
758 | + sizeof(struct wlp_attr_model_nr) | ||
759 | + strlen(info->model_nr) | ||
760 | + sizeof(struct wlp_attr_serial) | ||
761 | + strlen(info->serial) | ||
762 | + sizeof(struct wlp_attr_prim_dev_type) | ||
763 | + sizeof(struct wlp_attr_wlp_assc_err)); | ||
764 | if (_skb == NULL) { | ||
765 | dev_err(dev, "WLP: Cannot allocate memory for association " | ||
766 | "message.\n"); | ||
767 | result = -ENOMEM; | ||
768 | goto error; | ||
769 | } | ||
770 | _d1 = (void *) _skb->data; | ||
771 | d_printf(6, dev, "D1 starts at %p \n", _d1); | ||
772 | _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | ||
773 | _d1->hdr.type = WLP_FRAME_ASSOCIATION; | ||
774 | _d1->type = WLP_ASSOC_D1; | ||
775 | |||
776 | wlp_set_version(&_d1->version, WLP_VERSION); | ||
777 | wlp_set_msg_type(&_d1->msg_type, WLP_ASSOC_D1); | ||
778 | d1_itr = _d1->attr; | ||
779 | used = wlp_set_uuid_e(d1_itr, &wlp->uuid); | ||
780 | used += wlp_set_wss_sel_mthd(d1_itr + used, WLP_WSS_REG_SELECT); | ||
781 | used += wlp_set_dev_name(d1_itr + used, info->name, | ||
782 | strlen(info->name)); | ||
783 | used += wlp_set_manufacturer(d1_itr + used, info->manufacturer, | ||
784 | strlen(info->manufacturer)); | ||
785 | used += wlp_set_model_name(d1_itr + used, info->model_name, | ||
786 | strlen(info->model_name)); | ||
787 | used += wlp_set_model_nr(d1_itr + used, info->model_nr, | ||
788 | strlen(info->model_nr)); | ||
789 | used += wlp_set_serial(d1_itr + used, info->serial, | ||
790 | strlen(info->serial)); | ||
791 | used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); | ||
792 | used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); | ||
793 | skb_put(_skb, sizeof(*_d1) + used); | ||
794 | d_printf(6, dev, "D1 message:\n"); | ||
795 | d_dump(6, dev, _d1, sizeof(*_d1) | ||
796 | + sizeof(struct wlp_attr_uuid_e) | ||
797 | + sizeof(struct wlp_attr_wss_sel_mthd) | ||
798 | + sizeof(struct wlp_attr_dev_name) | ||
799 | + strlen(info->name) | ||
800 | + sizeof(struct wlp_attr_manufacturer) | ||
801 | + strlen(info->manufacturer) | ||
802 | + sizeof(struct wlp_attr_model_name) | ||
803 | + strlen(info->model_name) | ||
804 | + sizeof(struct wlp_attr_model_nr) | ||
805 | + strlen(info->model_nr) | ||
806 | + sizeof(struct wlp_attr_serial) | ||
807 | + strlen(info->serial) | ||
808 | + sizeof(struct wlp_attr_prim_dev_type) | ||
809 | + sizeof(struct wlp_attr_wlp_assc_err)); | ||
810 | *skb = _skb; | ||
811 | error: | ||
812 | d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); | ||
813 | return result; | ||
814 | } | ||
815 | |||
/**
 * Construct a D2 association frame
 *
 * @uuid_e: enrollee UUID echoed back in the D2 frame.
 * @skb: on success *@skb points to the newly allocated frame; caller
 *       owns it.
 *
 * We use the radio control functions to determine the values of the device
 * properties. These are of variable length and the total space needed is
 * tallied first before we start constructing the message. The radio
 * control functions return strings that are terminated with \0. This
 * character should not be included in the message (there is a length field
 * accompanying it in the attribute).
 *
 * Returns 0 on success or a negative errno.
 */
static
int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss,
		       struct sk_buff **skb, struct wlp_uuid *uuid_e)
{

	struct device *dev = &wlp->rc->uwb_dev.dev;
	int result = 0;
	struct wlp_device_info *info;
	size_t used = 0;
	struct wlp_frame_assoc *_d2;
	struct sk_buff *_skb;
	void *d2_itr;
	size_t mem_needed;

	d_fnstart(6, dev, "wlp %p\n", wlp);
	if (wlp->dev_info == NULL) {
		/* Lazily populate the cached device properties. */
		result = __wlp_setup_device_info(wlp);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to setup device "
				"information for D2 message.\n");
			goto error;
		}
	}
	info = wlp->dev_info;
	d_printf(6, dev, "Local properties:\n"
		 "Device name (%d bytes): %s\n"
		 "Model name (%d bytes): %s\n"
		 "Manufacturer (%d bytes): %s\n"
		 "Model number (%d bytes): %s\n"
		 "Serial number (%d bytes): %s\n"
		 "Primary device type: \n"
		 " Category: %d \n"
		 " OUI: %02x:%02x:%02x \n"
		 " OUI Subdivision: %u \n",
		 (int)strlen(info->name), info->name,
		 (int)strlen(info->model_name), info->model_name,
		 (int)strlen(info->manufacturer), info->manufacturer,
		 (int)strlen(info->model_nr), info->model_nr,
		 (int)strlen(info->serial), info->serial,
		 info->prim_dev_type.category,
		 info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
		 info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
	/* Tally the frame size; strlen() excludes the '\0' terminators,
	 * which are not sent on the wire. */
	mem_needed = sizeof(*_d2)
		+ sizeof(struct wlp_attr_uuid_e)
		+ sizeof(struct wlp_attr_uuid_r)
		+ sizeof(struct wlp_attr_dev_name)
		+ strlen(info->name)
		+ sizeof(struct wlp_attr_manufacturer)
		+ strlen(info->manufacturer)
		+ sizeof(struct wlp_attr_model_name)
		+ strlen(info->model_name)
		+ sizeof(struct wlp_attr_model_nr)
		+ strlen(info->model_nr)
		+ sizeof(struct wlp_attr_serial)
		+ strlen(info->serial)
		+ sizeof(struct wlp_attr_prim_dev_type)
		+ sizeof(struct wlp_attr_wlp_assc_err);
	/* The WSS information attribute is only included when the local
	 * WSS is active. */
	if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
		mem_needed += sizeof(struct wlp_attr_wss_info)
			      + sizeof(struct wlp_wss_info)
			      + strlen(wlp->wss.name);
	_skb = dev_alloc_skb(mem_needed);
	if (_skb == NULL) {
		dev_err(dev, "WLP: Cannot allocate memory for association "
			"message.\n");
		result = -ENOMEM;
		goto error;
	}
	_d2 = (void *) _skb->data;
	d_printf(6, dev, "D2 starts at %p \n", _d2);
	_d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
	_d2->hdr.type = WLP_FRAME_ASSOCIATION;
	_d2->type = WLP_ASSOC_D2;

	wlp_set_version(&_d2->version, WLP_VERSION);
	wlp_set_msg_type(&_d2->msg_type, WLP_ASSOC_D2);
	d2_itr = _d2->attr;
	used = wlp_set_uuid_e(d2_itr, uuid_e);
	used += wlp_set_uuid_r(d2_itr + used, &wlp->uuid);
	if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
		used += wlp_set_wss_info(d2_itr + used, &wlp->wss);
	used += wlp_set_dev_name(d2_itr + used, info->name,
				 strlen(info->name));
	used += wlp_set_manufacturer(d2_itr + used, info->manufacturer,
				     strlen(info->manufacturer));
	used += wlp_set_model_name(d2_itr + used, info->model_name,
				   strlen(info->model_name));
	used += wlp_set_model_nr(d2_itr + used, info->model_nr,
				 strlen(info->model_nr));
	used += wlp_set_serial(d2_itr + used, info->serial,
			       strlen(info->serial));
	used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type);
	used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE);
	skb_put(_skb, sizeof(*_d2) + used);
	d_printf(6, dev, "D2 message:\n");
	d_dump(6, dev, _d2, mem_needed);
	*skb = _skb;
error:
	d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
	return result;
}
927 | |||
928 | /** | ||
929 | * Allocate memory for and populate fields of F0 association frame | ||
930 | * | ||
931 | * Currently (while focusing on unsecure enrollment) we ignore the | ||
932 | * nonce's that could be placed in the message. Only the error field is | ||
933 | * populated by the value provided by the caller. | ||
934 | */ | ||
935 | static | ||
936 | int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb, | ||
937 | enum wlp_assc_error error) | ||
938 | { | ||
939 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
940 | int result = -ENOMEM; | ||
941 | struct { | ||
942 | struct wlp_frame_assoc f0_hdr; | ||
943 | struct wlp_attr_enonce enonce; | ||
944 | struct wlp_attr_rnonce rnonce; | ||
945 | struct wlp_attr_wlp_assc_err assc_err; | ||
946 | } *f0; | ||
947 | struct sk_buff *_skb; | ||
948 | struct wlp_nonce tmp; | ||
949 | |||
950 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
951 | _skb = dev_alloc_skb(sizeof(*f0)); | ||
952 | if (_skb == NULL) { | ||
953 | dev_err(dev, "WLP: Unable to allocate memory for F0 " | ||
954 | "association frame. \n"); | ||
955 | goto error_alloc; | ||
956 | } | ||
957 | f0 = (void *) _skb->data; | ||
958 | d_printf(6, dev, "F0 starts at %p \n", f0); | ||
959 | f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | ||
960 | f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | ||
961 | f0->f0_hdr.type = WLP_ASSOC_F0; | ||
962 | wlp_set_version(&f0->f0_hdr.version, WLP_VERSION); | ||
963 | wlp_set_msg_type(&f0->f0_hdr.msg_type, WLP_ASSOC_F0); | ||
964 | memset(&tmp, 0, sizeof(tmp)); | ||
965 | wlp_set_enonce(&f0->enonce, &tmp); | ||
966 | wlp_set_rnonce(&f0->rnonce, &tmp); | ||
967 | wlp_set_wlp_assc_err(&f0->assc_err, error); | ||
968 | skb_put(_skb, sizeof(*f0)); | ||
969 | *skb = _skb; | ||
970 | result = 0; | ||
971 | error_alloc: | ||
972 | d_fnend(6, dev, "wlp %p, result %d \n", wlp, result); | ||
973 | return result; | ||
974 | } | ||
975 | |||
976 | /** | ||
977 | * Parse F0 frame | ||
978 | * | ||
979 | * We just retrieve the values and print it as an error to the user. | ||
980 | * Calling function already knows an error occured (F0 indicates error), so | ||
981 | * we just parse the content as debug for higher layers. | ||
982 | */ | ||
983 | int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb) | ||
984 | { | ||
985 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
986 | struct wlp_frame_assoc *f0 = (void *) skb->data; | ||
987 | void *ptr = skb->data; | ||
988 | size_t len = skb->len; | ||
989 | size_t used; | ||
990 | ssize_t result; | ||
991 | struct wlp_nonce enonce, rnonce; | ||
992 | enum wlp_assc_error assc_err; | ||
993 | char enonce_buf[WLP_WSS_NONCE_STRSIZE]; | ||
994 | char rnonce_buf[WLP_WSS_NONCE_STRSIZE]; | ||
995 | |||
996 | used = sizeof(*f0); | ||
997 | result = wlp_get_enonce(wlp, ptr + used, &enonce, len - used); | ||
998 | if (result < 0) { | ||
999 | dev_err(dev, "WLP: unable to obtain Enrollee nonce " | ||
1000 | "attribute from F0 message.\n"); | ||
1001 | goto error_parse; | ||
1002 | } | ||
1003 | used += result; | ||
1004 | result = wlp_get_rnonce(wlp, ptr + used, &rnonce, len - used); | ||
1005 | if (result < 0) { | ||
1006 | dev_err(dev, "WLP: unable to obtain Registrar nonce " | ||
1007 | "attribute from F0 message.\n"); | ||
1008 | goto error_parse; | ||
1009 | } | ||
1010 | used += result; | ||
1011 | result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used); | ||
1012 | if (result < 0) { | ||
1013 | dev_err(dev, "WLP: unable to obtain WLP Association error " | ||
1014 | "attribute from F0 message.\n"); | ||
1015 | goto error_parse; | ||
1016 | } | ||
1017 | wlp_wss_nonce_print(enonce_buf, sizeof(enonce_buf), &enonce); | ||
1018 | wlp_wss_nonce_print(rnonce_buf, sizeof(rnonce_buf), &rnonce); | ||
1019 | dev_err(dev, "WLP: Received F0 error frame from neighbor. Enrollee " | ||
1020 | "nonce: %s, Registrar nonce: %s, WLP Association error: %s.\n", | ||
1021 | enonce_buf, rnonce_buf, wlp_assc_error_str(assc_err)); | ||
1022 | result = 0; | ||
1023 | error_parse: | ||
1024 | return result; | ||
1025 | } | ||
1026 | |||
1027 | /** | ||
1028 | * Retrieve variable device information from association message | ||
1029 | * | ||
1030 | * The device information parsed is not required in any message. This | ||
1031 | * routine will thus not fail if an attribute is not present. | ||
1032 | * The attributes are expected in a certain order, even if all are not | ||
1033 | * present. The "attribute type" value is used to ensure the attributes | ||
1034 | * are parsed in the correct order. | ||
1035 | * | ||
1036 | * If an error is encountered during parsing the function will return an | ||
1037 | * error code, when this happens the given device_info structure may be | ||
1038 | * partially filled. | ||
1039 | */ | ||
1040 | static | ||
1041 | int wlp_get_variable_info(struct wlp *wlp, void *data, | ||
1042 | struct wlp_device_info *dev_info, ssize_t len) | ||
1043 | { | ||
1044 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1045 | size_t used = 0; | ||
1046 | struct wlp_attr_hdr *hdr; | ||
1047 | ssize_t result = 0; | ||
1048 | unsigned last = 0; | ||
1049 | |||
1050 | while (len - used > 0) { | ||
1051 | if (len - used < sizeof(*hdr)) { | ||
1052 | dev_err(dev, "WLP: Partial data in frame, cannot " | ||
1053 | "parse. \n"); | ||
1054 | goto error_parse; | ||
1055 | } | ||
1056 | hdr = data + used; | ||
1057 | switch (le16_to_cpu(hdr->type)) { | ||
1058 | case WLP_ATTR_MANUF: | ||
1059 | if (last >= WLP_ATTR_MANUF) { | ||
1060 | dev_err(dev, "WLP: Incorrect order of " | ||
1061 | "attribute values in D1 msg.\n"); | ||
1062 | goto error_parse; | ||
1063 | } | ||
1064 | result = wlp_get_manufacturer(wlp, data + used, | ||
1065 | dev_info->manufacturer, | ||
1066 | len - used); | ||
1067 | if (result < 0) { | ||
1068 | dev_err(dev, "WLP: Unable to obtain " | ||
1069 | "Manufacturer attribute from D1 " | ||
1070 | "message.\n"); | ||
1071 | goto error_parse; | ||
1072 | } | ||
1073 | last = WLP_ATTR_MANUF; | ||
1074 | used += result; | ||
1075 | break; | ||
1076 | case WLP_ATTR_MODEL_NAME: | ||
1077 | if (last >= WLP_ATTR_MODEL_NAME) { | ||
1078 | dev_err(dev, "WLP: Incorrect order of " | ||
1079 | "attribute values in D1 msg.\n"); | ||
1080 | goto error_parse; | ||
1081 | } | ||
1082 | result = wlp_get_model_name(wlp, data + used, | ||
1083 | dev_info->model_name, | ||
1084 | len - used); | ||
1085 | if (result < 0) { | ||
1086 | dev_err(dev, "WLP: Unable to obtain Model " | ||
1087 | "name attribute from D1 message.\n"); | ||
1088 | goto error_parse; | ||
1089 | } | ||
1090 | last = WLP_ATTR_MODEL_NAME; | ||
1091 | used += result; | ||
1092 | break; | ||
1093 | case WLP_ATTR_MODEL_NR: | ||
1094 | if (last >= WLP_ATTR_MODEL_NR) { | ||
1095 | dev_err(dev, "WLP: Incorrect order of " | ||
1096 | "attribute values in D1 msg.\n"); | ||
1097 | goto error_parse; | ||
1098 | } | ||
1099 | result = wlp_get_model_nr(wlp, data + used, | ||
1100 | dev_info->model_nr, | ||
1101 | len - used); | ||
1102 | if (result < 0) { | ||
1103 | dev_err(dev, "WLP: Unable to obtain Model " | ||
1104 | "number attribute from D1 message.\n"); | ||
1105 | goto error_parse; | ||
1106 | } | ||
1107 | last = WLP_ATTR_MODEL_NR; | ||
1108 | used += result; | ||
1109 | break; | ||
1110 | case WLP_ATTR_SERIAL: | ||
1111 | if (last >= WLP_ATTR_SERIAL) { | ||
1112 | dev_err(dev, "WLP: Incorrect order of " | ||
1113 | "attribute values in D1 msg.\n"); | ||
1114 | goto error_parse; | ||
1115 | } | ||
1116 | result = wlp_get_serial(wlp, data + used, | ||
1117 | dev_info->serial, len - used); | ||
1118 | if (result < 0) { | ||
1119 | dev_err(dev, "WLP: Unable to obtain Serial " | ||
1120 | "number attribute from D1 message.\n"); | ||
1121 | goto error_parse; | ||
1122 | } | ||
1123 | last = WLP_ATTR_SERIAL; | ||
1124 | used += result; | ||
1125 | break; | ||
1126 | case WLP_ATTR_PRI_DEV_TYPE: | ||
1127 | if (last >= WLP_ATTR_PRI_DEV_TYPE) { | ||
1128 | dev_err(dev, "WLP: Incorrect order of " | ||
1129 | "attribute values in D1 msg.\n"); | ||
1130 | goto error_parse; | ||
1131 | } | ||
1132 | result = wlp_get_prim_dev_type(wlp, data + used, | ||
1133 | &dev_info->prim_dev_type, | ||
1134 | len - used); | ||
1135 | if (result < 0) { | ||
1136 | dev_err(dev, "WLP: Unable to obtain Primary " | ||
1137 | "device type attribute from D1 " | ||
1138 | "message.\n"); | ||
1139 | goto error_parse; | ||
1140 | } | ||
1141 | dev_info->prim_dev_type.category = | ||
1142 | le16_to_cpu(dev_info->prim_dev_type.category); | ||
1143 | dev_info->prim_dev_type.subID = | ||
1144 | le16_to_cpu(dev_info->prim_dev_type.subID); | ||
1145 | last = WLP_ATTR_PRI_DEV_TYPE; | ||
1146 | used += result; | ||
1147 | break; | ||
1148 | default: | ||
1149 | /* This is not variable device information. */ | ||
1150 | goto out; | ||
1151 | break; | ||
1152 | } | ||
1153 | } | ||
1154 | out: | ||
1155 | return used; | ||
1156 | error_parse: | ||
1157 | return -EINVAL; | ||
1158 | } | ||
1159 | |||
1160 | /** | ||
1161 | * Parse incoming D1 frame, populate attribute values | ||
1162 | * | ||
1163 | * Caller provides pointers to memory already allocated for attributes | ||
1164 | * expected in the D1 frame. These variables will be populated. | ||
1165 | */ | ||
1166 | static | ||
1167 | int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb, | ||
1168 | struct wlp_uuid *uuid_e, | ||
1169 | enum wlp_wss_sel_mthd *sel_mthd, | ||
1170 | struct wlp_device_info *dev_info, | ||
1171 | enum wlp_assc_error *assc_err) | ||
1172 | { | ||
1173 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1174 | struct wlp_frame_assoc *d1 = (void *) skb->data; | ||
1175 | void *ptr = skb->data; | ||
1176 | size_t len = skb->len; | ||
1177 | size_t used; | ||
1178 | ssize_t result; | ||
1179 | |||
1180 | used = sizeof(*d1); | ||
1181 | result = wlp_get_uuid_e(wlp, ptr + used, uuid_e, len - used); | ||
1182 | if (result < 0) { | ||
1183 | dev_err(dev, "WLP: unable to obtain UUID-E attribute from D1 " | ||
1184 | "message.\n"); | ||
1185 | goto error_parse; | ||
1186 | } | ||
1187 | used += result; | ||
1188 | result = wlp_get_wss_sel_mthd(wlp, ptr + used, sel_mthd, len - used); | ||
1189 | if (result < 0) { | ||
1190 | dev_err(dev, "WLP: unable to obtain WSS selection method " | ||
1191 | "from D1 message.\n"); | ||
1192 | goto error_parse; | ||
1193 | } | ||
1194 | used += result; | ||
1195 | result = wlp_get_dev_name(wlp, ptr + used, dev_info->name, | ||
1196 | len - used); | ||
1197 | if (result < 0) { | ||
1198 | dev_err(dev, "WLP: unable to obtain Device Name from D1 " | ||
1199 | "message.\n"); | ||
1200 | goto error_parse; | ||
1201 | } | ||
1202 | used += result; | ||
1203 | result = wlp_get_variable_info(wlp, ptr + used, dev_info, len - used); | ||
1204 | if (result < 0) { | ||
1205 | dev_err(dev, "WLP: unable to obtain Device Information from " | ||
1206 | "D1 message.\n"); | ||
1207 | goto error_parse; | ||
1208 | } | ||
1209 | used += result; | ||
1210 | result = wlp_get_wlp_assc_err(wlp, ptr + used, assc_err, len - used); | ||
1211 | if (result < 0) { | ||
1212 | dev_err(dev, "WLP: unable to obtain WLP Association Error " | ||
1213 | "Information from D1 message.\n"); | ||
1214 | goto error_parse; | ||
1215 | } | ||
1216 | result = 0; | ||
1217 | error_parse: | ||
1218 | return result; | ||
1219 | } | ||
1220 | /** | ||
1221 | * Handle incoming D1 frame | ||
1222 | * | ||
1223 | * The frame has already been verified to contain an Association header with | ||
1224 | * the correct version number. Parse the incoming frame, construct and send | ||
1225 | * a D2 frame in response. | ||
1226 | * | ||
1227 | * It is not clear what to do with most fields in the incoming D1 frame. We | ||
1228 | * retrieve and discard the information here for now. | ||
1229 | */ | ||
1230 | void wlp_handle_d1_frame(struct work_struct *ws) | ||
1231 | { | ||
1232 | struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws, | ||
1233 | struct wlp_assoc_frame_ctx, | ||
1234 | ws); | ||
1235 | struct wlp *wlp = frame_ctx->wlp; | ||
1236 | struct wlp_wss *wss = &wlp->wss; | ||
1237 | struct sk_buff *skb = frame_ctx->skb; | ||
1238 | struct uwb_dev_addr *src = &frame_ctx->src; | ||
1239 | int result; | ||
1240 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1241 | struct wlp_uuid uuid_e; | ||
1242 | enum wlp_wss_sel_mthd sel_mthd = 0; | ||
1243 | struct wlp_device_info dev_info; | ||
1244 | enum wlp_assc_error assc_err; | ||
1245 | char uuid[WLP_WSS_UUID_STRSIZE]; | ||
1246 | struct sk_buff *resp = NULL; | ||
1247 | |||
1248 | /* Parse D1 frame */ | ||
1249 | d_fnstart(6, dev, "WLP: handle D1 frame. wlp = %p, skb = %p\n", | ||
1250 | wlp, skb); | ||
1251 | mutex_lock(&wss->mutex); | ||
1252 | mutex_lock(&wlp->mutex); /* to access wlp->uuid */ | ||
1253 | memset(&dev_info, 0, sizeof(dev_info)); | ||
1254 | result = wlp_parse_d1_frame(wlp, skb, &uuid_e, &sel_mthd, &dev_info, | ||
1255 | &assc_err); | ||
1256 | if (result < 0) { | ||
1257 | dev_err(dev, "WLP: Unable to parse incoming D1 frame.\n"); | ||
1258 | kfree_skb(skb); | ||
1259 | goto out; | ||
1260 | } | ||
1261 | wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e); | ||
1262 | d_printf(6, dev, "From D1 frame:\n" | ||
1263 | "UUID-E: %s\n" | ||
1264 | "Selection method: %d\n" | ||
1265 | "Device name (%d bytes): %s\n" | ||
1266 | "Model name (%d bytes): %s\n" | ||
1267 | "Manufacturer (%d bytes): %s\n" | ||
1268 | "Model number (%d bytes): %s\n" | ||
1269 | "Serial number (%d bytes): %s\n" | ||
1270 | "Primary device type: \n" | ||
1271 | " Category: %d \n" | ||
1272 | " OUI: %02x:%02x:%02x \n" | ||
1273 | " OUI Subdivision: %u \n", | ||
1274 | uuid, sel_mthd, | ||
1275 | (int)strlen(dev_info.name), dev_info.name, | ||
1276 | (int)strlen(dev_info.model_name), dev_info.model_name, | ||
1277 | (int)strlen(dev_info.manufacturer), dev_info.manufacturer, | ||
1278 | (int)strlen(dev_info.model_nr), dev_info.model_nr, | ||
1279 | (int)strlen(dev_info.serial), dev_info.serial, | ||
1280 | dev_info.prim_dev_type.category, | ||
1281 | dev_info.prim_dev_type.OUI[0], | ||
1282 | dev_info.prim_dev_type.OUI[1], | ||
1283 | dev_info.prim_dev_type.OUI[2], | ||
1284 | dev_info.prim_dev_type.OUIsubdiv); | ||
1285 | |||
1286 | kfree_skb(skb); | ||
1287 | if (!wlp_uuid_is_set(&wlp->uuid)) { | ||
1288 | dev_err(dev, "WLP: UUID is not set. Set via sysfs to " | ||
1289 | "proceed. Respong to D1 message with error F0.\n"); | ||
1290 | result = wlp_build_assoc_f0(wlp, &resp, | ||
1291 | WLP_ASSOC_ERROR_NOT_READY); | ||
1292 | if (result < 0) { | ||
1293 | dev_err(dev, "WLP: Unable to construct F0 message.\n"); | ||
1294 | goto out; | ||
1295 | } | ||
1296 | } else { | ||
1297 | /* Construct D2 frame */ | ||
1298 | result = wlp_build_assoc_d2(wlp, wss, &resp, &uuid_e); | ||
1299 | if (result < 0) { | ||
1300 | dev_err(dev, "WLP: Unable to construct D2 message.\n"); | ||
1301 | goto out; | ||
1302 | } | ||
1303 | } | ||
1304 | /* Send D2 frame */ | ||
1305 | BUG_ON(wlp->xmit_frame == NULL); | ||
1306 | result = wlp->xmit_frame(wlp, resp, src); | ||
1307 | if (result < 0) { | ||
1308 | dev_err(dev, "WLP: Unable to transmit D2 association " | ||
1309 | "message: %d\n", result); | ||
1310 | if (result == -ENXIO) | ||
1311 | dev_err(dev, "WLP: Is network interface up? \n"); | ||
1312 | /* We could try again ... */ | ||
1313 | dev_kfree_skb_any(resp); /* we need to free if tx fails */ | ||
1314 | } | ||
1315 | out: | ||
1316 | kfree(frame_ctx); | ||
1317 | mutex_unlock(&wlp->mutex); | ||
1318 | mutex_unlock(&wss->mutex); | ||
1319 | d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp); | ||
1320 | } | ||
1321 | |||
1322 | /** | ||
1323 | * Parse incoming D2 frame, create and populate temporary cache | ||
1324 | * | ||
1325 | * @skb: socket buffer in which D2 frame can be found | ||
1326 | * @neighbor: the neighbor that sent the D2 frame | ||
1327 | * | ||
1328 | * Will allocate memory for temporary storage of information learned during | ||
1329 | * discovery. | ||
1330 | */ | ||
1331 | int wlp_parse_d2_frame_to_cache(struct wlp *wlp, struct sk_buff *skb, | ||
1332 | struct wlp_neighbor_e *neighbor) | ||
1333 | { | ||
1334 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1335 | struct wlp_frame_assoc *d2 = (void *) skb->data; | ||
1336 | void *ptr = skb->data; | ||
1337 | size_t len = skb->len; | ||
1338 | size_t used; | ||
1339 | ssize_t result; | ||
1340 | struct wlp_uuid uuid_e; | ||
1341 | struct wlp_device_info *nb_info; | ||
1342 | enum wlp_assc_error assc_err; | ||
1343 | |||
1344 | used = sizeof(*d2); | ||
1345 | result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used); | ||
1346 | if (result < 0) { | ||
1347 | dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 " | ||
1348 | "message.\n"); | ||
1349 | goto error_parse; | ||
1350 | } | ||
1351 | if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) { | ||
1352 | dev_err(dev, "WLP: UUID-E in incoming D2 does not match " | ||
1353 | "local UUID sent in D1. \n"); | ||
1354 | goto error_parse; | ||
1355 | } | ||
1356 | used += result; | ||
1357 | result = wlp_get_uuid_r(wlp, ptr + used, &neighbor->uuid, len - used); | ||
1358 | if (result < 0) { | ||
1359 | dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 " | ||
1360 | "message.\n"); | ||
1361 | goto error_parse; | ||
1362 | } | ||
1363 | used += result; | ||
1364 | result = wlp_get_wss_info_to_cache(wlp, ptr + used, neighbor, | ||
1365 | len - used); | ||
1366 | if (result < 0) { | ||
1367 | dev_err(dev, "WLP: unable to obtain WSS information " | ||
1368 | "from D2 message.\n"); | ||
1369 | goto error_parse; | ||
1370 | } | ||
1371 | used += result; | ||
1372 | neighbor->info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL); | ||
1373 | if (neighbor->info == NULL) { | ||
1374 | dev_err(dev, "WLP: cannot allocate memory to store device " | ||
1375 | "info.\n"); | ||
1376 | result = -ENOMEM; | ||
1377 | goto error_parse; | ||
1378 | } | ||
1379 | nb_info = neighbor->info; | ||
1380 | result = wlp_get_dev_name(wlp, ptr + used, nb_info->name, | ||
1381 | len - used); | ||
1382 | if (result < 0) { | ||
1383 | dev_err(dev, "WLP: unable to obtain Device Name from D2 " | ||
1384 | "message.\n"); | ||
1385 | goto error_parse; | ||
1386 | } | ||
1387 | used += result; | ||
1388 | result = wlp_get_variable_info(wlp, ptr + used, nb_info, len - used); | ||
1389 | if (result < 0) { | ||
1390 | dev_err(dev, "WLP: unable to obtain Device Information from " | ||
1391 | "D2 message.\n"); | ||
1392 | goto error_parse; | ||
1393 | } | ||
1394 | used += result; | ||
1395 | result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used); | ||
1396 | if (result < 0) { | ||
1397 | dev_err(dev, "WLP: unable to obtain WLP Association Error " | ||
1398 | "Information from D2 message.\n"); | ||
1399 | goto error_parse; | ||
1400 | } | ||
1401 | if (assc_err != WLP_ASSOC_ERROR_NONE) { | ||
1402 | dev_err(dev, "WLP: neighbor device returned association " | ||
1403 | "error %d\n", assc_err); | ||
1404 | result = -EINVAL; | ||
1405 | goto error_parse; | ||
1406 | } | ||
1407 | result = 0; | ||
1408 | error_parse: | ||
1409 | if (result < 0) | ||
1410 | wlp_remove_neighbor_tmp_info(neighbor); | ||
1411 | return result; | ||
1412 | } | ||
1413 | |||
1414 | /** | ||
1415 | * Parse incoming D2 frame, populate attribute values of WSS bein enrolled in | ||
1416 | * | ||
1417 | * @wss: our WSS that will be enrolled | ||
1418 | * @skb: socket buffer in which D2 frame can be found | ||
1419 | * @neighbor: the neighbor that sent the D2 frame | ||
1420 | * @wssid: the wssid of the WSS in which we want to enroll | ||
1421 | * | ||
1422 | * Forms part of enrollment sequence. We are trying to enroll in WSS with | ||
1423 | * @wssid by using @neighbor as registrar. A D1 message was sent to | ||
1424 | * @neighbor and now we need to parse the D2 response. The neighbor's | ||
1425 | * response is searched for the requested WSS and if found (and it accepts | ||
1426 | * enrollment), we store the information. | ||
1427 | */ | ||
1428 | int wlp_parse_d2_frame_to_enroll(struct wlp_wss *wss, struct sk_buff *skb, | ||
1429 | struct wlp_neighbor_e *neighbor, | ||
1430 | struct wlp_uuid *wssid) | ||
1431 | { | ||
1432 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
1433 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1434 | void *ptr = skb->data; | ||
1435 | size_t len = skb->len; | ||
1436 | size_t used; | ||
1437 | ssize_t result; | ||
1438 | struct wlp_uuid uuid_e; | ||
1439 | struct wlp_uuid uuid_r; | ||
1440 | struct wlp_device_info nb_info; | ||
1441 | enum wlp_assc_error assc_err; | ||
1442 | char uuid_bufA[WLP_WSS_UUID_STRSIZE]; | ||
1443 | char uuid_bufB[WLP_WSS_UUID_STRSIZE]; | ||
1444 | |||
1445 | used = sizeof(struct wlp_frame_assoc); | ||
1446 | result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used); | ||
1447 | if (result < 0) { | ||
1448 | dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 " | ||
1449 | "message.\n"); | ||
1450 | goto error_parse; | ||
1451 | } | ||
1452 | if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) { | ||
1453 | dev_err(dev, "WLP: UUID-E in incoming D2 does not match " | ||
1454 | "local UUID sent in D1. \n"); | ||
1455 | goto error_parse; | ||
1456 | } | ||
1457 | used += result; | ||
1458 | result = wlp_get_uuid_r(wlp, ptr + used, &uuid_r, len - used); | ||
1459 | if (result < 0) { | ||
1460 | dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 " | ||
1461 | "message.\n"); | ||
1462 | goto error_parse; | ||
1463 | } | ||
1464 | if (memcmp(&uuid_r, &neighbor->uuid, sizeof(uuid_r))) { | ||
1465 | wlp_wss_uuid_print(uuid_bufA, sizeof(uuid_bufA), | ||
1466 | &neighbor->uuid); | ||
1467 | wlp_wss_uuid_print(uuid_bufB, sizeof(uuid_bufB), &uuid_r); | ||
1468 | dev_err(dev, "WLP: UUID of neighbor does not match UUID " | ||
1469 | "learned during discovery. Originally discovered: %s, " | ||
1470 | "now from D2 message: %s\n", uuid_bufA, uuid_bufB); | ||
1471 | result = -EINVAL; | ||
1472 | goto error_parse; | ||
1473 | } | ||
1474 | used += result; | ||
1475 | wss->wssid = *wssid; | ||
1476 | result = wlp_get_wss_info_to_enroll(wlp, ptr + used, wss, len - used); | ||
1477 | if (result < 0) { | ||
1478 | dev_err(dev, "WLP: unable to obtain WSS information " | ||
1479 | "from D2 message.\n"); | ||
1480 | goto error_parse; | ||
1481 | } | ||
1482 | if (wss->state != WLP_WSS_STATE_PART_ENROLLED) { | ||
1483 | dev_err(dev, "WLP: D2 message did not contain information " | ||
1484 | "for successful enrollment. \n"); | ||
1485 | result = -EINVAL; | ||
1486 | goto error_parse; | ||
1487 | } | ||
1488 | used += result; | ||
1489 | /* Place device information on stack to continue parsing of message */ | ||
1490 | result = wlp_get_dev_name(wlp, ptr + used, nb_info.name, | ||
1491 | len - used); | ||
1492 | if (result < 0) { | ||
1493 | dev_err(dev, "WLP: unable to obtain Device Name from D2 " | ||
1494 | "message.\n"); | ||
1495 | goto error_parse; | ||
1496 | } | ||
1497 | used += result; | ||
1498 | result = wlp_get_variable_info(wlp, ptr + used, &nb_info, len - used); | ||
1499 | if (result < 0) { | ||
1500 | dev_err(dev, "WLP: unable to obtain Device Information from " | ||
1501 | "D2 message.\n"); | ||
1502 | goto error_parse; | ||
1503 | } | ||
1504 | used += result; | ||
1505 | result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used); | ||
1506 | if (result < 0) { | ||
1507 | dev_err(dev, "WLP: unable to obtain WLP Association Error " | ||
1508 | "Information from D2 message.\n"); | ||
1509 | goto error_parse; | ||
1510 | } | ||
1511 | if (assc_err != WLP_ASSOC_ERROR_NONE) { | ||
1512 | dev_err(dev, "WLP: neighbor device returned association " | ||
1513 | "error %d\n", assc_err); | ||
1514 | if (wss->state == WLP_WSS_STATE_PART_ENROLLED) { | ||
1515 | dev_err(dev, "WLP: Enrolled in WSS (should not " | ||
1516 | "happen according to spec). Undoing. \n"); | ||
1517 | wlp_wss_reset(wss); | ||
1518 | } | ||
1519 | result = -EINVAL; | ||
1520 | goto error_parse; | ||
1521 | } | ||
1522 | result = 0; | ||
1523 | error_parse: | ||
1524 | return result; | ||
1525 | } | ||
1526 | |||
1527 | /** | ||
1528 | * Parse C3/C4 frame into provided variables | ||
1529 | * | ||
1530 | * @wssid: will point to copy of wssid retrieved from C3/C4 frame | ||
1531 | * @tag: will point to copy of tag retrieved from C3/C4 frame | ||
1532 | * @virt_addr: will point to copy of virtual address retrieved from C3/C4 | ||
1533 | * frame. | ||
1534 | * | ||
1535 | * Calling function has to allocate memory for these values. | ||
1536 | * | ||
1537 | * skb contains a valid C3/C4 frame, return the individual fields of this | ||
1538 | * frame in the provided variables. | ||
1539 | */ | ||
1540 | int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb, | ||
1541 | struct wlp_uuid *wssid, u8 *tag, | ||
1542 | struct uwb_mac_addr *virt_addr) | ||
1543 | { | ||
1544 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1545 | int result; | ||
1546 | void *ptr = skb->data; | ||
1547 | size_t len = skb->len; | ||
1548 | size_t used; | ||
1549 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
1550 | struct wlp_frame_assoc *assoc = ptr; | ||
1551 | |||
1552 | d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); | ||
1553 | used = sizeof(*assoc); | ||
1554 | result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); | ||
1555 | if (result < 0) { | ||
1556 | dev_err(dev, "WLP: unable to obtain WSSID attribute from " | ||
1557 | "%s message.\n", wlp_assoc_frame_str(assoc->type)); | ||
1558 | goto error_parse; | ||
1559 | } | ||
1560 | used += result; | ||
1561 | result = wlp_get_wss_tag(wlp, ptr + used, tag, len - used); | ||
1562 | if (result < 0) { | ||
1563 | dev_err(dev, "WLP: unable to obtain WSS tag attribute from " | ||
1564 | "%s message.\n", wlp_assoc_frame_str(assoc->type)); | ||
1565 | goto error_parse; | ||
1566 | } | ||
1567 | used += result; | ||
1568 | result = wlp_get_wss_virt(wlp, ptr + used, virt_addr, len - used); | ||
1569 | if (result < 0) { | ||
1570 | dev_err(dev, "WLP: unable to obtain WSS virtual address " | ||
1571 | "attribute from %s message.\n", | ||
1572 | wlp_assoc_frame_str(assoc->type)); | ||
1573 | goto error_parse; | ||
1574 | } | ||
1575 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
1576 | d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt " | ||
1577 | "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag, | ||
1578 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
1579 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); | ||
1580 | |||
1581 | error_parse: | ||
1582 | d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); | ||
1583 | return result; | ||
1584 | } | ||
1585 | |||
1586 | /** | ||
1587 | * Allocate memory for and populate fields of C1 or C2 association frame | ||
1588 | * | ||
1589 | * The C1 and C2 association frames appear identical - except for the type. | ||
1590 | */ | ||
1591 | static | ||
1592 | int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss, | ||
1593 | struct sk_buff **skb, enum wlp_assoc_type type) | ||
1594 | { | ||
1595 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1596 | int result = -ENOMEM; | ||
1597 | struct { | ||
1598 | struct wlp_frame_assoc c_hdr; | ||
1599 | struct wlp_attr_wssid wssid; | ||
1600 | } *c; | ||
1601 | struct sk_buff *_skb; | ||
1602 | |||
1603 | d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); | ||
1604 | _skb = dev_alloc_skb(sizeof(*c)); | ||
1605 | if (_skb == NULL) { | ||
1606 | dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " | ||
1607 | "association frame. \n"); | ||
1608 | goto error_alloc; | ||
1609 | } | ||
1610 | c = (void *) _skb->data; | ||
1611 | d_printf(6, dev, "C1/C2 starts at %p \n", c); | ||
1612 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | ||
1613 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | ||
1614 | c->c_hdr.type = type; | ||
1615 | wlp_set_version(&c->c_hdr.version, WLP_VERSION); | ||
1616 | wlp_set_msg_type(&c->c_hdr.msg_type, type); | ||
1617 | wlp_set_wssid(&c->wssid, &wss->wssid); | ||
1618 | skb_put(_skb, sizeof(*c)); | ||
1619 | d_printf(6, dev, "C1/C2 message:\n"); | ||
1620 | d_dump(6, dev, c, sizeof(*c)); | ||
1621 | *skb = _skb; | ||
1622 | result = 0; | ||
1623 | error_alloc: | ||
1624 | d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); | ||
1625 | return result; | ||
1626 | } | ||
1627 | |||
1628 | |||
1629 | static | ||
1630 | int wlp_build_assoc_c1(struct wlp *wlp, struct wlp_wss *wss, | ||
1631 | struct sk_buff **skb) | ||
1632 | { | ||
1633 | return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C1); | ||
1634 | } | ||
1635 | |||
1636 | static | ||
1637 | int wlp_build_assoc_c2(struct wlp *wlp, struct wlp_wss *wss, | ||
1638 | struct sk_buff **skb) | ||
1639 | { | ||
1640 | return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C2); | ||
1641 | } | ||
1642 | |||
1643 | |||
1644 | /** | ||
1645 | * Allocate memory for and populate fields of C3 or C4 association frame | ||
1646 | * | ||
1647 | * The C3 and C4 association frames appear identical - except for the type. | ||
1648 | */ | ||
1649 | static | ||
1650 | int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss, | ||
1651 | struct sk_buff **skb, enum wlp_assoc_type type) | ||
1652 | { | ||
1653 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1654 | int result = -ENOMEM; | ||
1655 | struct { | ||
1656 | struct wlp_frame_assoc c_hdr; | ||
1657 | struct wlp_attr_wssid wssid; | ||
1658 | struct wlp_attr_wss_tag wss_tag; | ||
1659 | struct wlp_attr_wss_virt wss_virt; | ||
1660 | } *c; | ||
1661 | struct sk_buff *_skb; | ||
1662 | |||
1663 | d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); | ||
1664 | _skb = dev_alloc_skb(sizeof(*c)); | ||
1665 | if (_skb == NULL) { | ||
1666 | dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " | ||
1667 | "association frame. \n"); | ||
1668 | goto error_alloc; | ||
1669 | } | ||
1670 | c = (void *) _skb->data; | ||
1671 | d_printf(6, dev, "C3/C4 starts at %p \n", c); | ||
1672 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | ||
1673 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | ||
1674 | c->c_hdr.type = type; | ||
1675 | wlp_set_version(&c->c_hdr.version, WLP_VERSION); | ||
1676 | wlp_set_msg_type(&c->c_hdr.msg_type, type); | ||
1677 | wlp_set_wssid(&c->wssid, &wss->wssid); | ||
1678 | wlp_set_wss_tag(&c->wss_tag, wss->tag); | ||
1679 | wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); | ||
1680 | skb_put(_skb, sizeof(*c)); | ||
1681 | d_printf(6, dev, "C3/C4 message:\n"); | ||
1682 | d_dump(6, dev, c, sizeof(*c)); | ||
1683 | *skb = _skb; | ||
1684 | result = 0; | ||
1685 | error_alloc: | ||
1686 | d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); | ||
1687 | return result; | ||
1688 | } | ||
1689 | |||
1690 | static | ||
1691 | int wlp_build_assoc_c3(struct wlp *wlp, struct wlp_wss *wss, | ||
1692 | struct sk_buff **skb) | ||
1693 | { | ||
1694 | return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C3); | ||
1695 | } | ||
1696 | |||
1697 | static | ||
1698 | int wlp_build_assoc_c4(struct wlp *wlp, struct wlp_wss *wss, | ||
1699 | struct sk_buff **skb) | ||
1700 | { | ||
1701 | return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C4); | ||
1702 | } | ||
1703 | |||
1704 | |||
/*
 * wlp_send_assoc - generate a wlp_send_assoc_<type>() helper
 *
 * Each generated function builds the association frame of the given
 * @type with wlp_build_assoc_<type>() and transmits it to @dev_addr via
 * the wlp->xmit_frame callback.  On transmit failure the skb is freed
 * here; on success ownership of the skb passes to the transmit path.
 * Returns 0 on success, negative error code otherwise.
 */
#define wlp_send_assoc(type, id)					\
static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss,	\
				 struct uwb_dev_addr *dev_addr)		\
{									\
	struct device *dev = &wlp->rc->uwb_dev.dev;			\
	int result;							\
	struct sk_buff *skb = NULL;					\
	d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n",	\
		  wlp, wss, dev_addr->data[1], dev_addr->data[0]);	\
	d_printf(6, dev, "WLP: Constructing %s frame. \n",		\
		 wlp_assoc_frame_str(id));				\
	/* Build the frame */						\
	result = wlp_build_assoc_##type(wlp, wss, &skb);		\
	if (result < 0) {						\
		dev_err(dev, "WLP: Unable to construct %s association "	\
			"frame: %d\n", wlp_assoc_frame_str(id), result);\
		goto error_build_assoc;					\
	}								\
	/* Send the frame */						\
	d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n",	\
		 wlp_assoc_frame_str(id),				\
		 dev_addr->data[1], dev_addr->data[0]);			\
	BUG_ON(wlp->xmit_frame == NULL);				\
	result = wlp->xmit_frame(wlp, skb, dev_addr);			\
	if (result < 0) {						\
		dev_err(dev, "WLP: Unable to transmit %s association "	\
			"message: %d\n", wlp_assoc_frame_str(id),	\
			result);					\
		if (result == -ENXIO)					\
			dev_err(dev, "WLP: Is network interface "	\
				"up? \n");				\
		goto error_xmit;					\
	}								\
	return 0;							\
error_xmit:								\
	/* We could try again ... */					\
	dev_kfree_skb_any(skb);/*we need to free if tx fails*/		\
error_build_assoc:							\
	d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n",	\
		wlp, wss, dev_addr->data[1], dev_addr->data[0]);	\
	return result;							\
}

/* Only D1, C1 and C3 association frames are initiated locally. */
wlp_send_assoc(d1, WLP_ASSOC_D1)
wlp_send_assoc(c1, WLP_ASSOC_C1)
wlp_send_assoc(c3, WLP_ASSOC_C3)
1751 | |||
1752 | int wlp_send_assoc_frame(struct wlp *wlp, struct wlp_wss *wss, | ||
1753 | struct uwb_dev_addr *dev_addr, | ||
1754 | enum wlp_assoc_type type) | ||
1755 | { | ||
1756 | int result = 0; | ||
1757 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1758 | switch (type) { | ||
1759 | case WLP_ASSOC_D1: | ||
1760 | result = wlp_send_assoc_d1(wlp, wss, dev_addr); | ||
1761 | break; | ||
1762 | case WLP_ASSOC_C1: | ||
1763 | result = wlp_send_assoc_c1(wlp, wss, dev_addr); | ||
1764 | break; | ||
1765 | case WLP_ASSOC_C3: | ||
1766 | result = wlp_send_assoc_c3(wlp, wss, dev_addr); | ||
1767 | break; | ||
1768 | default: | ||
1769 | dev_err(dev, "WLP: Received request to send unknown " | ||
1770 | "association message.\n"); | ||
1771 | result = -EINVAL; | ||
1772 | break; | ||
1773 | } | ||
1774 | return result; | ||
1775 | } | ||
1776 | |||
/**
 * Handle incoming C1 frame
 *
 * The frame has already been verified to contain an Association header with
 * the correct version number. Parse the incoming frame, construct and send
 * a C2 frame in response.
 *
 * Runs from a workqueue; frees frame_ctx and its skb before returning.
 */
void wlp_handle_c1_frame(struct work_struct *ws)
{
	struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
						  struct wlp_assoc_frame_ctx,
						  ws);
	struct wlp *wlp = frame_ctx->wlp;
	struct wlp_wss *wss = &wlp->wss;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_frame_assoc *c1 = (void *) frame_ctx->skb->data;
	unsigned int len = frame_ctx->skb->len;
	struct uwb_dev_addr *src = &frame_ctx->src;
	int result;
	struct wlp_uuid wssid;
	char buf[WLP_WSS_UUID_STRSIZE];
	struct sk_buff *resp = NULL;

	/* Parse C1 frame */
	d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n",
		  wlp, c1);
	mutex_lock(&wss->mutex);
	/* The WSSID attribute immediately follows the association header. */
	result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid,
			       len - sizeof(*c1));
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n");
		goto out;
	}
	wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
	d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf);
	if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
	    && wss->state == WLP_WSS_STATE_ACTIVE) {
		/* The requested WSS is ours and active - accept with C2. */
		d_printf(6, dev, "WSSID from C1 frame is known locally "
			 "and is active\n");
		/* Construct C2 frame */
		result = wlp_build_assoc_c2(wlp, wss, &resp);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to construct C2 message.\n");
			goto out;
		}
	} else {
		/* Unknown or inactive WSS - reject with error frame F0. */
		d_printf(6, dev, "WSSID from C1 frame is not known locally "
			 "or is not active\n");
		/* Construct F0 frame */
		result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to construct F0 message.\n");
			goto out;
		}
	}
	/* Send C2 frame */
	d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n",
		 src->data[1], src->data[0]);
	BUG_ON(wlp->xmit_frame == NULL);
	result = wlp->xmit_frame(wlp, resp, src);
	if (result < 0) {
		dev_err(dev, "WLP: Unable to transmit response association "
			"message: %d\n", result);
		if (result == -ENXIO)
			dev_err(dev, "WLP: Is network interface up? \n");
		/* We could try again ... */
		dev_kfree_skb_any(resp); /* we need to free if tx fails */
	}
out:
	kfree_skb(frame_ctx->skb);
	kfree(frame_ctx);
	mutex_unlock(&wss->mutex);
	d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp);
}
1851 | |||
/**
 * Handle incoming C3 frame
 *
 * The frame has already been verified to contain an Association header with
 * the correct version number. Parse the incoming frame, construct and send
 * a C4 frame in response. If the C3 frame identifies a WSS that is locally
 * active then we connect to this neighbor (add it to our EDA cache).
 *
 * Runs from a workqueue; frees frame_ctx and its skb before returning.
 */
void wlp_handle_c3_frame(struct work_struct *ws)
{
	struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
						  struct wlp_assoc_frame_ctx,
						  ws);
	struct wlp *wlp = frame_ctx->wlp;
	struct wlp_wss *wss = &wlp->wss;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct sk_buff *skb = frame_ctx->skb;
	struct uwb_dev_addr *src = &frame_ctx->src;
	int result;
	char buf[WLP_WSS_UUID_STRSIZE];
	struct sk_buff *resp = NULL;
	struct wlp_uuid wssid;
	u8 tag;
	struct uwb_mac_addr virt_addr;

	/* Parse C3 frame */
	d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
		  wlp, skb);
	mutex_lock(&wss->mutex);
	result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain values from C3 frame.\n");
		goto out;
	}
	wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
	d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf);
	if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
	    && wss->state >= WLP_WSS_STATE_ACTIVE) {
		d_printf(6, dev, "WSSID from C3 frame is known locally "
			 "and is active\n");
		/* Record the neighbor's tag and virtual address so data
		 * frames can be exchanged with it. */
		result = wlp_eda_update_node(&wlp->eda, src, wss,
					     (void *) virt_addr.data, tag,
					     WLP_WSS_CONNECTED);
		if (result < 0) {
			/* Internal failure - report error F0 to the peer. */
			dev_err(dev, "WLP: Unable to update EDA cache "
				"with new connected neighbor information.\n");
			result = wlp_build_assoc_f0(wlp, &resp,
						    WLP_ASSOC_ERROR_INT);
			if (result < 0) {
				dev_err(dev, "WLP: Unable to construct F0 "
					"message.\n");
				goto out;
			}
		} else {
			wss->state = WLP_WSS_STATE_CONNECTED;
			/* Construct C4 frame */
			result = wlp_build_assoc_c4(wlp, wss, &resp);
			if (result < 0) {
				dev_err(dev, "WLP: Unable to construct C4 "
					"message.\n");
				goto out;
			}
		}
	} else {
		/* Unknown or inactive WSS - reject with error frame F0. */
		d_printf(6, dev, "WSSID from C3 frame is not known locally "
			 "or is not active\n");
		/* Construct F0 frame */
		result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to construct F0 message.\n");
			goto out;
		}
	}
	/* Send C4 frame */
	d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n",
		 src->data[1], src->data[0]);
	BUG_ON(wlp->xmit_frame == NULL);
	result = wlp->xmit_frame(wlp, resp, src);
	if (result < 0) {
		dev_err(dev, "WLP: Unable to transmit response association "
			"message: %d\n", result);
		if (result == -ENXIO)
			dev_err(dev, "WLP: Is network interface up? \n");
		/* We could try again ... */
		dev_kfree_skb_any(resp); /* we need to free if tx fails */
	}
out:
	kfree_skb(frame_ctx->skb);
	kfree(frame_ctx);
	mutex_unlock(&wss->mutex);
	d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
		wlp, skb);
}
1945 | |||
1946 | |||
diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c new file mode 100644 index 000000000000..1bb9b1f97d47 --- /dev/null +++ b/drivers/uwb/wlp/sysfs.c | |||
@@ -0,0 +1,709 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * sysfs functions | ||
4 | * | ||
5 | * Copyright (C) 2007 Intel Corporation | ||
6 | * Reinette Chatre <reinette.chatre@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: Docs | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/wlp.h> | ||
28 | #include "wlp-internal.h" | ||
29 | |||
30 | static | ||
31 | size_t wlp_wss_wssid_e_print(char *buf, size_t bufsize, | ||
32 | struct wlp_wssid_e *wssid_e) | ||
33 | { | ||
34 | size_t used = 0; | ||
35 | used += scnprintf(buf, bufsize, " WSS: "); | ||
36 | used += wlp_wss_uuid_print(buf + used, bufsize - used, | ||
37 | &wssid_e->wssid); | ||
38 | |||
39 | if (wssid_e->info != NULL) { | ||
40 | used += scnprintf(buf + used, bufsize - used, " "); | ||
41 | used += uwb_mac_addr_print(buf + used, bufsize - used, | ||
42 | &wssid_e->info->bcast); | ||
43 | used += scnprintf(buf + used, bufsize - used, " %u %u %s\n", | ||
44 | wssid_e->info->accept_enroll, | ||
45 | wssid_e->info->sec_status, | ||
46 | wssid_e->info->name); | ||
47 | } | ||
48 | return used; | ||
49 | } | ||
50 | |||
51 | /** | ||
52 | * Print out information learned from neighbor discovery | ||
53 | * | ||
54 | * Some fields being printed may not be included in the device discovery | ||
55 | * information (it is not mandatory). We are thus careful how the | ||
56 | * information is printed to ensure it is clear to the user what field is | ||
57 | * being referenced. | ||
58 | * The information being printed is for one time use - temporary storage is | ||
59 | * cleaned after it is printed. | ||
60 | * | ||
61 | * Ideally sysfs output should be on one line. The information printed here | ||
62 | * contain a few strings so it will be hard to parse if they are all | ||
63 | * printed on the same line - without agreeing on a standard field | ||
64 | * separator. | ||
65 | */ | ||
66 | static | ||
67 | ssize_t wlp_wss_neighborhood_print_remove(struct wlp *wlp, char *buf, | ||
68 | size_t bufsize) | ||
69 | { | ||
70 | size_t used = 0; | ||
71 | struct wlp_neighbor_e *neighb; | ||
72 | struct wlp_wssid_e *wssid_e; | ||
73 | |||
74 | mutex_lock(&wlp->nbmutex); | ||
75 | used = scnprintf(buf, bufsize, "#Neighbor information\n" | ||
76 | "#uuid dev_addr\n" | ||
77 | "# Device Name:\n# Model Name:\n# Manufacturer:\n" | ||
78 | "# Model Nr:\n# Serial:\n" | ||
79 | "# Pri Dev type: CategoryID OUI OUISubdiv " | ||
80 | "SubcategoryID\n" | ||
81 | "# WSS: WSSID WSS_name accept_enroll sec_status " | ||
82 | "bcast\n" | ||
83 | "# WSS: WSSID WSS_name accept_enroll sec_status " | ||
84 | "bcast\n\n"); | ||
85 | list_for_each_entry(neighb, &wlp->neighbors, node) { | ||
86 | if (bufsize - used <= 0) | ||
87 | goto out; | ||
88 | used += wlp_wss_uuid_print(buf + used, bufsize - used, | ||
89 | &neighb->uuid); | ||
90 | buf[used++] = ' '; | ||
91 | used += uwb_dev_addr_print(buf + used, bufsize - used, | ||
92 | &neighb->uwb_dev->dev_addr); | ||
93 | if (neighb->info != NULL) | ||
94 | used += scnprintf(buf + used, bufsize - used, | ||
95 | "\n Device Name: %s\n" | ||
96 | " Model Name: %s\n" | ||
97 | " Manufacturer:%s \n" | ||
98 | " Model Nr: %s\n" | ||
99 | " Serial: %s\n" | ||
100 | " Pri Dev type: " | ||
101 | "%u %02x:%02x:%02x %u %u\n", | ||
102 | neighb->info->name, | ||
103 | neighb->info->model_name, | ||
104 | neighb->info->manufacturer, | ||
105 | neighb->info->model_nr, | ||
106 | neighb->info->serial, | ||
107 | neighb->info->prim_dev_type.category, | ||
108 | neighb->info->prim_dev_type.OUI[0], | ||
109 | neighb->info->prim_dev_type.OUI[1], | ||
110 | neighb->info->prim_dev_type.OUI[2], | ||
111 | neighb->info->prim_dev_type.OUIsubdiv, | ||
112 | neighb->info->prim_dev_type.subID); | ||
113 | list_for_each_entry(wssid_e, &neighb->wssid, node) { | ||
114 | used += wlp_wss_wssid_e_print(buf + used, | ||
115 | bufsize - used, | ||
116 | wssid_e); | ||
117 | } | ||
118 | buf[used++] = '\n'; | ||
119 | wlp_remove_neighbor_tmp_info(neighb); | ||
120 | } | ||
121 | |||
122 | |||
123 | out: | ||
124 | mutex_unlock(&wlp->nbmutex); | ||
125 | return used; | ||
126 | } | ||
127 | |||
128 | |||
129 | /** | ||
130 | * Show properties of all WSS in neighborhood. | ||
131 | * | ||
132 | * Will trigger a complete discovery of WSS activated by this device and | ||
133 | * its neighbors. | ||
134 | */ | ||
135 | ssize_t wlp_neighborhood_show(struct wlp *wlp, char *buf) | ||
136 | { | ||
137 | wlp_discover(wlp); | ||
138 | return wlp_wss_neighborhood_print_remove(wlp, buf, PAGE_SIZE); | ||
139 | } | ||
140 | EXPORT_SYMBOL_GPL(wlp_neighborhood_show); | ||
141 | |||
142 | static | ||
143 | ssize_t __wlp_wss_properties_show(struct wlp_wss *wss, char *buf, | ||
144 | size_t bufsize) | ||
145 | { | ||
146 | ssize_t result; | ||
147 | |||
148 | result = wlp_wss_uuid_print(buf, bufsize, &wss->wssid); | ||
149 | result += scnprintf(buf + result, bufsize - result, " "); | ||
150 | result += uwb_mac_addr_print(buf + result, bufsize - result, | ||
151 | &wss->bcast); | ||
152 | result += scnprintf(buf + result, bufsize - result, | ||
153 | " 0x%02x %u ", wss->hash, wss->secure_status); | ||
154 | result += wlp_wss_key_print(buf + result, bufsize - result, | ||
155 | wss->master_key); | ||
156 | result += scnprintf(buf + result, bufsize - result, " 0x%02x ", | ||
157 | wss->tag); | ||
158 | result += uwb_mac_addr_print(buf + result, bufsize - result, | ||
159 | &wss->virtual_addr); | ||
160 | result += scnprintf(buf + result, bufsize - result, " %s", wss->name); | ||
161 | result += scnprintf(buf + result, bufsize - result, | ||
162 | "\n\n#WSSID\n#WSS broadcast address\n" | ||
163 | "#WSS hash\n#WSS secure status\n" | ||
164 | "#WSS master key\n#WSS local tag\n" | ||
165 | "#WSS local virtual EUI-48\n#WSS name\n"); | ||
166 | return result; | ||
167 | } | ||
168 | |||
169 | /** | ||
170 | * Show which WSS is activated. | ||
171 | */ | ||
172 | ssize_t wlp_wss_activate_show(struct wlp_wss *wss, char *buf) | ||
173 | { | ||
174 | int result = 0; | ||
175 | |||
176 | if (mutex_lock_interruptible(&wss->mutex)) | ||
177 | goto out; | ||
178 | if (wss->state >= WLP_WSS_STATE_ACTIVE) | ||
179 | result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE); | ||
180 | else | ||
181 | result = scnprintf(buf, PAGE_SIZE, "No local WSS active.\n"); | ||
182 | result += scnprintf(buf + result, PAGE_SIZE - result, | ||
183 | "\n\n" | ||
184 | "# echo WSSID SECURE_STATUS ACCEPT_ENROLLMENT " | ||
185 | "NAME #create new WSS\n" | ||
186 | "# echo WSSID [DEV ADDR] #enroll in and activate " | ||
187 | "existing WSS, can request registrar\n" | ||
188 | "#\n" | ||
189 | "# WSSID is a 16 byte hex array. Eg. 12 A3 3B ... \n" | ||
190 | "# SECURE_STATUS 0 - unsecure, 1 - secure (default)\n" | ||
191 | "# ACCEPT_ENROLLMENT 0 - no, 1 - yes (default)\n" | ||
192 | "# NAME is the text string identifying the WSS\n" | ||
193 | "# DEV ADDR is the device address of neighbor " | ||
194 | "that should be registrar. Eg. 32:AB\n"); | ||
195 | |||
196 | mutex_unlock(&wss->mutex); | ||
197 | out: | ||
198 | return result; | ||
199 | |||
200 | } | ||
201 | EXPORT_SYMBOL_GPL(wlp_wss_activate_show); | ||
202 | |||
203 | /** | ||
204 | * Create/activate a new WSS or enroll/activate in neighboring WSS | ||
205 | * | ||
206 | * The user can provide the WSSID of a WSS in which it wants to enroll. | ||
207 | * Only the WSSID is necessary if the WSS have been discovered before. If | ||
208 | * the WSS has not been discovered before, or the user wants to use a | ||
209 | * particular neighbor as its registrar, then the user can also provide a | ||
210 | * device address or the neighbor that will be used as registrar. | ||
211 | * | ||
212 | * A new WSS is created when the user provides a WSSID, secure status, and | ||
213 | * WSS name. | ||
214 | */ | ||
215 | ssize_t wlp_wss_activate_store(struct wlp_wss *wss, | ||
216 | const char *buf, size_t size) | ||
217 | { | ||
218 | ssize_t result = -EINVAL; | ||
219 | struct wlp_uuid wssid; | ||
220 | struct uwb_dev_addr dev; | ||
221 | struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; | ||
222 | char name[65]; | ||
223 | unsigned sec_status, accept; | ||
224 | memset(name, 0, sizeof(name)); | ||
225 | result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx " | ||
226 | "%02hhx %02hhx %02hhx %02hhx " | ||
227 | "%02hhx %02hhx %02hhx %02hhx " | ||
228 | "%02hhx %02hhx %02hhx %02hhx " | ||
229 | "%02hhx:%02hhx", | ||
230 | &wssid.data[0] , &wssid.data[1], | ||
231 | &wssid.data[2] , &wssid.data[3], | ||
232 | &wssid.data[4] , &wssid.data[5], | ||
233 | &wssid.data[6] , &wssid.data[7], | ||
234 | &wssid.data[8] , &wssid.data[9], | ||
235 | &wssid.data[10], &wssid.data[11], | ||
236 | &wssid.data[12], &wssid.data[13], | ||
237 | &wssid.data[14], &wssid.data[15], | ||
238 | &dev.data[1], &dev.data[0]); | ||
239 | if (result == 16 || result == 17) { | ||
240 | result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx " | ||
241 | "%02hhx %02hhx %02hhx %02hhx " | ||
242 | "%02hhx %02hhx %02hhx %02hhx " | ||
243 | "%02hhx %02hhx %02hhx %02hhx " | ||
244 | "%u %u %64c", | ||
245 | &wssid.data[0] , &wssid.data[1], | ||
246 | &wssid.data[2] , &wssid.data[3], | ||
247 | &wssid.data[4] , &wssid.data[5], | ||
248 | &wssid.data[6] , &wssid.data[7], | ||
249 | &wssid.data[8] , &wssid.data[9], | ||
250 | &wssid.data[10], &wssid.data[11], | ||
251 | &wssid.data[12], &wssid.data[13], | ||
252 | &wssid.data[14], &wssid.data[15], | ||
253 | &sec_status, &accept, name); | ||
254 | if (result == 16) | ||
255 | result = wlp_wss_enroll_activate(wss, &wssid, &bcast); | ||
256 | else if (result == 19) { | ||
257 | sec_status = sec_status == 0 ? 0 : 1; | ||
258 | accept = accept == 0 ? 0 : 1; | ||
259 | /* We read name using %c, so the newline needs to be | ||
260 | * removed */ | ||
261 | if (strlen(name) != sizeof(name) - 1) | ||
262 | name[strlen(name) - 1] = '\0'; | ||
263 | result = wlp_wss_create_activate(wss, &wssid, name, | ||
264 | sec_status, accept); | ||
265 | } else | ||
266 | result = -EINVAL; | ||
267 | } else if (result == 18) | ||
268 | result = wlp_wss_enroll_activate(wss, &wssid, &dev); | ||
269 | else | ||
270 | result = -EINVAL; | ||
271 | return result < 0 ? result : size; | ||
272 | } | ||
273 | EXPORT_SYMBOL_GPL(wlp_wss_activate_store); | ||
274 | |||
275 | /** | ||
276 | * Show the UUID of this host | ||
277 | */ | ||
278 | ssize_t wlp_uuid_show(struct wlp *wlp, char *buf) | ||
279 | { | ||
280 | ssize_t result = 0; | ||
281 | |||
282 | mutex_lock(&wlp->mutex); | ||
283 | result = wlp_wss_uuid_print(buf, PAGE_SIZE, &wlp->uuid); | ||
284 | buf[result++] = '\n'; | ||
285 | mutex_unlock(&wlp->mutex); | ||
286 | return result; | ||
287 | } | ||
288 | EXPORT_SYMBOL_GPL(wlp_uuid_show); | ||
289 | |||
290 | /** | ||
291 | * Store a new UUID for this host | ||
292 | * | ||
293 | * According to the spec this should be encoded as an octet string in the | ||
294 | * order the octets are shown in string representation in RFC 4122 (WLP | ||
295 | * 0.99 [Table 6]) | ||
296 | * | ||
297 | * We do not check value provided by user. | ||
298 | */ | ||
299 | ssize_t wlp_uuid_store(struct wlp *wlp, const char *buf, size_t size) | ||
300 | { | ||
301 | ssize_t result; | ||
302 | struct wlp_uuid uuid; | ||
303 | |||
304 | mutex_lock(&wlp->mutex); | ||
305 | result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx " | ||
306 | "%02hhx %02hhx %02hhx %02hhx " | ||
307 | "%02hhx %02hhx %02hhx %02hhx " | ||
308 | "%02hhx %02hhx %02hhx %02hhx ", | ||
309 | &uuid.data[0] , &uuid.data[1], | ||
310 | &uuid.data[2] , &uuid.data[3], | ||
311 | &uuid.data[4] , &uuid.data[5], | ||
312 | &uuid.data[6] , &uuid.data[7], | ||
313 | &uuid.data[8] , &uuid.data[9], | ||
314 | &uuid.data[10], &uuid.data[11], | ||
315 | &uuid.data[12], &uuid.data[13], | ||
316 | &uuid.data[14], &uuid.data[15]); | ||
317 | if (result != 16) { | ||
318 | result = -EINVAL; | ||
319 | goto error; | ||
320 | } | ||
321 | wlp->uuid = uuid; | ||
322 | error: | ||
323 | mutex_unlock(&wlp->mutex); | ||
324 | return result < 0 ? result : size; | ||
325 | } | ||
326 | EXPORT_SYMBOL_GPL(wlp_uuid_store); | ||
327 | |||
/**
 * Show contents of members of device information structure
 *
 * wlp_dev_info_show(type) expands to an exported sysfs show routine,
 * wlp_dev_<type>_show(), that prints string member <type> of
 * wlp->dev_info followed by a newline.  The device information
 * structure is set up lazily on first access via
 * __wlp_setup_device_info(); all access is serialized by wlp->mutex.
 */
#define wlp_dev_info_show(type)						\
ssize_t wlp_dev_##type##_show(struct wlp *wlp, char *buf)		\
{									\
	ssize_t result = 0;						\
	mutex_lock(&wlp->mutex);					\
	if (wlp->dev_info == NULL) {					\
		result = __wlp_setup_device_info(wlp);			\
		if (result < 0)						\
			goto out;					\
	}								\
	result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->dev_info->type);\
out:									\
	mutex_unlock(&wlp->mutex);					\
	return result;							\
}									\
EXPORT_SYMBOL_GPL(wlp_dev_##type##_show);

/* Generate the show routines for each device-information string. */
wlp_dev_info_show(name)
wlp_dev_info_show(model_name)
wlp_dev_info_show(model_nr)
wlp_dev_info_show(manufacturer)
wlp_dev_info_show(serial)
353 | |||
/**
 * Store contents of members of device information structure
 *
 * wlp_dev_info_store(type, len) expands to an exported sysfs store
 * routine, wlp_dev_<type>_store(), that reads at most @len characters
 * from the user buffer into wlp->dev_info-><type> (allocated lazily via
 * __wlp_alloc_device_info()).  The destination is zeroed before the
 * sscanf, so the copied text ends up NUL terminated provided fewer than
 * sizeof(field) characters are read.
 *
 * NOTE(review): "%<len>c" does not NUL terminate by itself - this
 * relies on each dev_info field being at least len+1 bytes; confirm
 * against the struct wlp_device_info definition.
 */
#define wlp_dev_info_store(type, len)					\
ssize_t wlp_dev_##type##_store(struct wlp *wlp, const char *buf, size_t size)\
{									\
	ssize_t result;							\
	char format[10];						\
	mutex_lock(&wlp->mutex);					\
	if (wlp->dev_info == NULL) {					\
		result = __wlp_alloc_device_info(wlp);			\
		if (result < 0)						\
			goto out;					\
	}								\
	memset(wlp->dev_info->type, 0, sizeof(wlp->dev_info->type));	\
	sprintf(format, "%%%uc", len);					\
	result = sscanf(buf, format, wlp->dev_info->type);		\
out:									\
	mutex_unlock(&wlp->mutex);					\
	return result < 0 ? result : size;				\
}									\
EXPORT_SYMBOL_GPL(wlp_dev_##type##_store);

/* Generate the store routines; second argument is the max length read. */
wlp_dev_info_store(name, 32)
wlp_dev_info_store(manufacturer, 64)
wlp_dev_info_store(model_name, 32)
wlp_dev_info_store(model_nr, 32)
wlp_dev_info_store(serial, 32)
382 | |||
/* Printable names for the WLP primary device type categories, indexed
 * by the WLP_DEV_CAT_* enumeration values.  Entries between
 * WLP_DEV_CAT_TELEPHONE and WLP_DEV_CAT_OTHER (if any) are left NULL by
 * the designated initializers; wlp_dev_category_str() guards against
 * indexing them. */
static
const char *__wlp_dev_category[] = {
	[WLP_DEV_CAT_COMPUTER] = "Computer",
	[WLP_DEV_CAT_INPUT] = "Input device",
	[WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER] = "Printer, scanner, FAX, or "
					      "Copier",
	[WLP_DEV_CAT_CAMERA] = "Camera",
	[WLP_DEV_CAT_STORAGE] = "Storage Network",
	[WLP_DEV_CAT_INFRASTRUCTURE] = "Infrastructure",
	[WLP_DEV_CAT_DISPLAY] = "Display",
	[WLP_DEV_CAT_MULTIM] = "Multimedia device",
	[WLP_DEV_CAT_GAMING] = "Gaming device",
	[WLP_DEV_CAT_TELEPHONE] = "Telephone",
	[WLP_DEV_CAT_OTHER] = "Other",
};
398 | |||
399 | static | ||
400 | const char *wlp_dev_category_str(unsigned cat) | ||
401 | { | ||
402 | if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE) | ||
403 | || cat == WLP_DEV_CAT_OTHER) | ||
404 | return __wlp_dev_category[cat]; | ||
405 | return "unknown category"; | ||
406 | } | ||
407 | |||
408 | ssize_t wlp_dev_prim_category_show(struct wlp *wlp, char *buf) | ||
409 | { | ||
410 | ssize_t result = 0; | ||
411 | mutex_lock(&wlp->mutex); | ||
412 | if (wlp->dev_info == NULL) { | ||
413 | result = __wlp_setup_device_info(wlp); | ||
414 | if (result < 0) | ||
415 | goto out; | ||
416 | } | ||
417 | result = scnprintf(buf, PAGE_SIZE, "%s\n", | ||
418 | wlp_dev_category_str(wlp->dev_info->prim_dev_type.category)); | ||
419 | out: | ||
420 | mutex_unlock(&wlp->mutex); | ||
421 | return result; | ||
422 | } | ||
423 | EXPORT_SYMBOL_GPL(wlp_dev_prim_category_show); | ||
424 | |||
425 | ssize_t wlp_dev_prim_category_store(struct wlp *wlp, const char *buf, | ||
426 | size_t size) | ||
427 | { | ||
428 | ssize_t result; | ||
429 | u16 cat; | ||
430 | mutex_lock(&wlp->mutex); | ||
431 | if (wlp->dev_info == NULL) { | ||
432 | result = __wlp_alloc_device_info(wlp); | ||
433 | if (result < 0) | ||
434 | goto out; | ||
435 | } | ||
436 | result = sscanf(buf, "%hu", &cat); | ||
437 | if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE) | ||
438 | || cat == WLP_DEV_CAT_OTHER) | ||
439 | wlp->dev_info->prim_dev_type.category = cat; | ||
440 | else | ||
441 | result = -EINVAL; | ||
442 | out: | ||
443 | mutex_unlock(&wlp->mutex); | ||
444 | return result < 0 ? result : size; | ||
445 | } | ||
446 | EXPORT_SYMBOL_GPL(wlp_dev_prim_category_store); | ||
447 | |||
448 | ssize_t wlp_dev_prim_OUI_show(struct wlp *wlp, char *buf) | ||
449 | { | ||
450 | ssize_t result = 0; | ||
451 | mutex_lock(&wlp->mutex); | ||
452 | if (wlp->dev_info == NULL) { | ||
453 | result = __wlp_setup_device_info(wlp); | ||
454 | if (result < 0) | ||
455 | goto out; | ||
456 | } | ||
457 | result = scnprintf(buf, PAGE_SIZE, "%02x:%02x:%02x\n", | ||
458 | wlp->dev_info->prim_dev_type.OUI[0], | ||
459 | wlp->dev_info->prim_dev_type.OUI[1], | ||
460 | wlp->dev_info->prim_dev_type.OUI[2]); | ||
461 | out: | ||
462 | mutex_unlock(&wlp->mutex); | ||
463 | return result; | ||
464 | } | ||
465 | EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_show); | ||
466 | |||
467 | ssize_t wlp_dev_prim_OUI_store(struct wlp *wlp, const char *buf, size_t size) | ||
468 | { | ||
469 | ssize_t result; | ||
470 | u8 OUI[3]; | ||
471 | mutex_lock(&wlp->mutex); | ||
472 | if (wlp->dev_info == NULL) { | ||
473 | result = __wlp_alloc_device_info(wlp); | ||
474 | if (result < 0) | ||
475 | goto out; | ||
476 | } | ||
477 | result = sscanf(buf, "%hhx:%hhx:%hhx", | ||
478 | &OUI[0], &OUI[1], &OUI[2]); | ||
479 | if (result != 3) { | ||
480 | result = -EINVAL; | ||
481 | goto out; | ||
482 | } else | ||
483 | memcpy(wlp->dev_info->prim_dev_type.OUI, OUI, sizeof(OUI)); | ||
484 | out: | ||
485 | mutex_unlock(&wlp->mutex); | ||
486 | return result < 0 ? result : size; | ||
487 | } | ||
488 | EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_store); | ||
489 | |||
490 | |||
491 | ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *wlp, char *buf) | ||
492 | { | ||
493 | ssize_t result = 0; | ||
494 | mutex_lock(&wlp->mutex); | ||
495 | if (wlp->dev_info == NULL) { | ||
496 | result = __wlp_setup_device_info(wlp); | ||
497 | if (result < 0) | ||
498 | goto out; | ||
499 | } | ||
500 | result = scnprintf(buf, PAGE_SIZE, "%u\n", | ||
501 | wlp->dev_info->prim_dev_type.OUIsubdiv); | ||
502 | out: | ||
503 | mutex_unlock(&wlp->mutex); | ||
504 | return result; | ||
505 | } | ||
506 | EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_show); | ||
507 | |||
508 | ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *wlp, const char *buf, | ||
509 | size_t size) | ||
510 | { | ||
511 | ssize_t result; | ||
512 | unsigned sub; | ||
513 | u8 max_sub = ~0; | ||
514 | mutex_lock(&wlp->mutex); | ||
515 | if (wlp->dev_info == NULL) { | ||
516 | result = __wlp_alloc_device_info(wlp); | ||
517 | if (result < 0) | ||
518 | goto out; | ||
519 | } | ||
520 | result = sscanf(buf, "%u", &sub); | ||
521 | if (sub <= max_sub) | ||
522 | wlp->dev_info->prim_dev_type.OUIsubdiv = sub; | ||
523 | else | ||
524 | result = -EINVAL; | ||
525 | out: | ||
526 | mutex_unlock(&wlp->mutex); | ||
527 | return result < 0 ? result : size; | ||
528 | } | ||
529 | EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_store); | ||
530 | |||
531 | ssize_t wlp_dev_prim_subcat_show(struct wlp *wlp, char *buf) | ||
532 | { | ||
533 | ssize_t result = 0; | ||
534 | mutex_lock(&wlp->mutex); | ||
535 | if (wlp->dev_info == NULL) { | ||
536 | result = __wlp_setup_device_info(wlp); | ||
537 | if (result < 0) | ||
538 | goto out; | ||
539 | } | ||
540 | result = scnprintf(buf, PAGE_SIZE, "%u\n", | ||
541 | wlp->dev_info->prim_dev_type.subID); | ||
542 | out: | ||
543 | mutex_unlock(&wlp->mutex); | ||
544 | return result; | ||
545 | } | ||
546 | EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_show); | ||
547 | |||
548 | ssize_t wlp_dev_prim_subcat_store(struct wlp *wlp, const char *buf, | ||
549 | size_t size) | ||
550 | { | ||
551 | ssize_t result; | ||
552 | unsigned sub; | ||
553 | __le16 max_sub = ~0; | ||
554 | mutex_lock(&wlp->mutex); | ||
555 | if (wlp->dev_info == NULL) { | ||
556 | result = __wlp_alloc_device_info(wlp); | ||
557 | if (result < 0) | ||
558 | goto out; | ||
559 | } | ||
560 | result = sscanf(buf, "%u", &sub); | ||
561 | if (sub <= max_sub) | ||
562 | wlp->dev_info->prim_dev_type.subID = sub; | ||
563 | else | ||
564 | result = -EINVAL; | ||
565 | out: | ||
566 | mutex_unlock(&wlp->mutex); | ||
567 | return result < 0 ? result : size; | ||
568 | } | ||
569 | EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_store); | ||
570 | |||
/**
 * Subsystem implementation for interaction with individual WSS via sysfs
 *
 * Followed instructions for subsystem in Documentation/filesystems/sysfs.txt
 */

/* Map the embedded kobject / generic attribute back to the owning
 * struct wlp_wss / struct wlp_wss_attribute. */
#define kobj_to_wlp_wss(obj) container_of(obj, struct wlp_wss, kobj)
#define attr_to_wlp_wss_attr(_attr) \
		container_of(_attr, struct wlp_wss_attribute, attr)
580 | |||
581 | /** | ||
582 | * Sysfs subsystem: forward read calls | ||
583 | * | ||
584 | * Sysfs operation for forwarding read call to the show method of the | ||
585 | * attribute owner | ||
586 | */ | ||
587 | static | ||
588 | ssize_t wlp_wss_attr_show(struct kobject *kobj, struct attribute *attr, | ||
589 | char *buf) | ||
590 | { | ||
591 | struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr); | ||
592 | struct wlp_wss *wss = kobj_to_wlp_wss(kobj); | ||
593 | ssize_t ret = -EIO; | ||
594 | |||
595 | if (wss_attr->show) | ||
596 | ret = wss_attr->show(wss, buf); | ||
597 | return ret; | ||
598 | } | ||
599 | /** | ||
600 | * Sysfs subsystem: forward write calls | ||
601 | * | ||
602 | * Sysfs operation for forwarding write call to the store method of the | ||
603 | * attribute owner | ||
604 | */ | ||
605 | static | ||
606 | ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr, | ||
607 | const char *buf, size_t count) | ||
608 | { | ||
609 | struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr); | ||
610 | struct wlp_wss *wss = kobj_to_wlp_wss(kobj); | ||
611 | ssize_t ret = -EIO; | ||
612 | |||
613 | if (wss_attr->store) | ||
614 | ret = wss_attr->store(wss, buf, count); | ||
615 | return ret; | ||
616 | } | ||
617 | |||
/* Dispatch sysfs reads/writes to the wlp_wss_attribute handlers above. */
static
struct sysfs_ops wss_sysfs_ops = {
	.show	= wlp_wss_attr_show,
	.store	= wlp_wss_attr_store,
};

/* kobj_type for the kobject embedded in struct wlp_wss; the release
 * handler wlp_wss_release() runs when the last reference is dropped. */
struct kobj_type wss_ktype = {
	.release = wlp_wss_release,
	.sysfs_ops = &wss_sysfs_ops,
};
628 | |||
629 | |||
630 | /** | ||
631 | * Sysfs files for individual WSS | ||
632 | */ | ||
633 | |||
634 | /** | ||
635 | * Print static properties of this WSS | ||
636 | * | ||
637 | * The name of a WSS may not be null teminated. It's max size is 64 bytes | ||
638 | * so we copy it to a larger array just to make sure we print sane data. | ||
639 | */ | ||
640 | static ssize_t wlp_wss_properties_show(struct wlp_wss *wss, char *buf) | ||
641 | { | ||
642 | int result = 0; | ||
643 | |||
644 | if (mutex_lock_interruptible(&wss->mutex)) | ||
645 | goto out; | ||
646 | result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE); | ||
647 | mutex_unlock(&wss->mutex); | ||
648 | out: | ||
649 | return result; | ||
650 | } | ||
651 | WSS_ATTR(properties, S_IRUGO, wlp_wss_properties_show, NULL); | ||
652 | |||
653 | /** | ||
654 | * Print all connected members of this WSS | ||
655 | * The EDA cache contains all members of WSS neighborhood. | ||
656 | */ | ||
657 | static ssize_t wlp_wss_members_show(struct wlp_wss *wss, char *buf) | ||
658 | { | ||
659 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
660 | return wlp_eda_show(wlp, buf); | ||
661 | } | ||
662 | WSS_ATTR(members, S_IRUGO, wlp_wss_members_show, NULL); | ||
663 | |||
/* Printable names for the WSS state machine states, indexed by state. */
static
const char *__wlp_strstate[] = {
	"none",
	"partially enrolled",
	"enrolled",
	"active",
	"connected",
};

/* Map a WSS state to its printable name; out-of-range values get
 * "unknown state". */
static const char *wlp_wss_strstate(unsigned state)
{
	return state < ARRAY_SIZE(__wlp_strstate) ?
		__wlp_strstate[state] : "unknown state";
}
679 | |||
680 | /* | ||
681 | * Print current state of this WSS | ||
682 | */ | ||
683 | static ssize_t wlp_wss_state_show(struct wlp_wss *wss, char *buf) | ||
684 | { | ||
685 | int result = 0; | ||
686 | |||
687 | if (mutex_lock_interruptible(&wss->mutex)) | ||
688 | goto out; | ||
689 | result = scnprintf(buf, PAGE_SIZE, "%s\n", | ||
690 | wlp_wss_strstate(wss->state)); | ||
691 | mutex_unlock(&wss->mutex); | ||
692 | out: | ||
693 | return result; | ||
694 | } | ||
695 | WSS_ATTR(state, S_IRUGO, wlp_wss_state_show, NULL); | ||
696 | |||
697 | |||
/* Attributes created for every WSS directory; NULL terminated as
 * required by sysfs. */
static
struct attribute *wss_attrs[] = {
	&wss_attr_properties.attr,
	&wss_attr_members.attr,
	&wss_attr_state.attr,
	NULL,
};

struct attribute_group wss_attr_group = {
	.name = NULL, /* we want them in the same directory */
	.attrs = wss_attrs,
};
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c new file mode 100644 index 000000000000..c701bd1a2887 --- /dev/null +++ b/drivers/uwb/wlp/txrx.c | |||
@@ -0,0 +1,374 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * Message exchange infrastructure | ||
4 | * | ||
5 | * Copyright (C) 2007 Intel Corporation | ||
6 | * Reinette Chatre <reinette.chatre@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: Docs | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/etherdevice.h> | ||
28 | #include <linux/wlp.h> | ||
29 | #define D_LOCAL 5 | ||
30 | #include <linux/uwb/debug.h> | ||
31 | #include "wlp-internal.h" | ||
32 | |||
33 | |||
34 | /** | ||
35 | * Direct incoming association msg to correct parsing routine | ||
36 | * | ||
37 | * We only expect D1, E1, C1, C3 messages as new. All other incoming | ||
38 | * association messages should form part of an established session that is | ||
39 | * handled elsewhere. | ||
40 | * The handling of these messages often require calling sleeping functions | ||
41 | * - this cannot be done in interrupt context. We use the kernel's | ||
42 | * workqueue to handle these messages. | ||
43 | */ | ||
44 | static | ||
45 | void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | ||
46 | struct uwb_dev_addr *src) | ||
47 | { | ||
48 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
49 | struct wlp_frame_assoc *assoc = (void *) skb->data; | ||
50 | struct wlp_assoc_frame_ctx *frame_ctx; | ||
51 | d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); | ||
52 | frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); | ||
53 | if (frame_ctx == NULL) { | ||
54 | dev_err(dev, "WLP: Unable to allocate memory for association " | ||
55 | "frame handling.\n"); | ||
56 | kfree_skb(skb); | ||
57 | goto out; | ||
58 | } | ||
59 | frame_ctx->wlp = wlp; | ||
60 | frame_ctx->skb = skb; | ||
61 | frame_ctx->src = *src; | ||
62 | switch (assoc->type) { | ||
63 | case WLP_ASSOC_D1: | ||
64 | d_printf(5, dev, "Received a D1 frame.\n"); | ||
65 | INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); | ||
66 | schedule_work(&frame_ctx->ws); | ||
67 | break; | ||
68 | case WLP_ASSOC_E1: | ||
69 | d_printf(5, dev, "Received a E1 frame. FIXME?\n"); | ||
70 | kfree_skb(skb); /* Temporary until we handle it */ | ||
71 | kfree(frame_ctx); /* Temporary until we handle it */ | ||
72 | break; | ||
73 | case WLP_ASSOC_C1: | ||
74 | d_printf(5, dev, "Received a C1 frame.\n"); | ||
75 | INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); | ||
76 | schedule_work(&frame_ctx->ws); | ||
77 | break; | ||
78 | case WLP_ASSOC_C3: | ||
79 | d_printf(5, dev, "Received a C3 frame.\n"); | ||
80 | INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); | ||
81 | schedule_work(&frame_ctx->ws); | ||
82 | break; | ||
83 | default: | ||
84 | dev_err(dev, "Received unexpected association frame. " | ||
85 | "Type = %d \n", assoc->type); | ||
86 | kfree_skb(skb); | ||
87 | kfree(frame_ctx); | ||
88 | break; | ||
89 | } | ||
90 | out: | ||
91 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
92 | } | ||
93 | |||
94 | /** | ||
95 | * Process incoming association frame | ||
96 | * | ||
97 | * Although it could be possible to deal with some incoming association | ||
98 | * messages without creating a new session we are keeping things simple. We | ||
99 | * do not accept new association messages if there is a session in progress | ||
100 | * and the messages do not belong to that session. | ||
101 | * | ||
102 | * If an association message arrives that causes the creation of a session | ||
103 | * (WLP_ASSOC_E1) while we are in the process of creating a session then we | ||
104 | * rely on the neighbor mutex to protect the data. That is, the new session | ||
105 | * will not be started until the previous is completed. | ||
106 | */ | ||
107 | static | ||
108 | void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | ||
109 | struct uwb_dev_addr *src) | ||
110 | { | ||
111 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
112 | struct wlp_frame_assoc *assoc = (void *) skb->data; | ||
113 | struct wlp_session *session = wlp->session; | ||
114 | u8 version; | ||
115 | d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); | ||
116 | |||
117 | if (wlp_get_version(wlp, &assoc->version, &version, | ||
118 | sizeof(assoc->version)) < 0) | ||
119 | goto error; | ||
120 | if (version != WLP_VERSION) { | ||
121 | dev_err(dev, "Unsupported WLP version in association " | ||
122 | "message.\n"); | ||
123 | goto error; | ||
124 | } | ||
125 | if (session != NULL) { | ||
126 | /* Function that created this session is still holding the | ||
127 | * &wlp->mutex to protect this session. */ | ||
128 | if (assoc->type == session->exp_message || | ||
129 | assoc->type == WLP_ASSOC_F0) { | ||
130 | if (!memcmp(&session->neighbor_addr, src, | ||
131 | sizeof(*src))) { | ||
132 | session->data = skb; | ||
133 | (session->cb)(wlp); | ||
134 | } else { | ||
135 | dev_err(dev, "Received expected message from " | ||
136 | "unexpected source. Expected message " | ||
137 | "%d or F0 from %02x:%02x, but received " | ||
138 | "it from %02x:%02x. Dropping.\n", | ||
139 | session->exp_message, | ||
140 | session->neighbor_addr.data[1], | ||
141 | session->neighbor_addr.data[0], | ||
142 | src->data[1], src->data[0]); | ||
143 | goto error; | ||
144 | } | ||
145 | } else { | ||
146 | dev_err(dev, "Association already in progress. " | ||
147 | "Dropping.\n"); | ||
148 | goto error; | ||
149 | } | ||
150 | } else { | ||
151 | wlp_direct_assoc_frame(wlp, skb, src); | ||
152 | } | ||
153 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
154 | return; | ||
155 | error: | ||
156 | kfree_skb(skb); | ||
157 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * Verify incoming frame is from connected neighbor, prep to pass to WLP client | ||
162 | * | ||
163 | * Verification proceeds according to WLP 0.99 [7.3.1]. The source address | ||
164 | * is used to determine which neighbor is sending the frame and the WSS tag | ||
165 | * is used to know to which WSS the frame belongs (we only support one WSS | ||
166 | * so this test is straightforward). | ||
167 | * With the WSS found we need to ensure that we are connected before | ||
168 | * allowing the exchange of data frames. | ||
169 | */ | ||
170 | static | ||
171 | int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb, | ||
172 | struct uwb_dev_addr *src) | ||
173 | { | ||
174 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
175 | int result = -EINVAL; | ||
176 | struct wlp_eda_node eda_entry; | ||
177 | struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; | ||
178 | |||
179 | d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); | ||
180 | /*verify*/ | ||
181 | result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); | ||
182 | if (result < 0) { | ||
183 | if (printk_ratelimit()) | ||
184 | dev_err(dev, "WLP: Incoming frame is from unknown " | ||
185 | "neighbor %02x:%02x.\n", src->data[1], | ||
186 | src->data[0]); | ||
187 | goto out; | ||
188 | } | ||
189 | if (hdr->tag != eda_entry.tag) { | ||
190 | if (printk_ratelimit()) | ||
191 | dev_err(dev, "WLP: Tag of incoming frame from " | ||
192 | "%02x:%02x does not match expected tag. " | ||
193 | "Received 0x%02x, expected 0x%02x. \n", | ||
194 | src->data[1], src->data[0], hdr->tag, | ||
195 | eda_entry.tag); | ||
196 | result = -EINVAL; | ||
197 | goto out; | ||
198 | } | ||
199 | if (eda_entry.state != WLP_WSS_CONNECTED) { | ||
200 | if (printk_ratelimit()) | ||
201 | dev_err(dev, "WLP: Incoming frame from " | ||
202 | "%02x:%02x does is not from connected WSS.\n", | ||
203 | src->data[1], src->data[0]); | ||
204 | result = -EINVAL; | ||
205 | goto out; | ||
206 | } | ||
207 | /*prep*/ | ||
208 | skb_pull(skb, sizeof(*hdr)); | ||
209 | out: | ||
210 | d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); | ||
211 | return result; | ||
212 | } | ||
213 | |||
214 | /** | ||
215 | * Receive a WLP frame from device | ||
216 | * | ||
217 | * @returns: 1 if calling function should free the skb | ||
218 | * 0 if it successfully handled skb and freed it | ||
219 | * 0 if error occurred, will free skb in this case | ||
220 | */ | ||
221 | int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb, | ||
222 | struct uwb_dev_addr *src) | ||
223 | { | ||
224 | unsigned len = skb->len; | ||
225 | void *ptr = skb->data; | ||
226 | struct wlp_frame_hdr *hdr; | ||
227 | int result = 0; | ||
228 | |||
229 | d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len); | ||
230 | if (len < sizeof(*hdr)) { | ||
231 | dev_err(dev, "Not enough data to parse WLP header.\n"); | ||
232 | result = -EINVAL; | ||
233 | goto out; | ||
234 | } | ||
235 | hdr = ptr; | ||
236 | d_dump(6, dev, hdr, sizeof(*hdr)); | ||
237 | if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { | ||
238 | dev_err(dev, "Not a WLP frame type.\n"); | ||
239 | result = -EINVAL; | ||
240 | goto out; | ||
241 | } | ||
242 | switch (hdr->type) { | ||
243 | case WLP_FRAME_STANDARD: | ||
244 | if (len < sizeof(struct wlp_frame_std_abbrv_hdr)) { | ||
245 | dev_err(dev, "Not enough data to parse Standard " | ||
246 | "WLP header.\n"); | ||
247 | goto out; | ||
248 | } | ||
249 | result = wlp_verify_prep_rx_frame(wlp, skb, src); | ||
250 | if (result < 0) { | ||
251 | if (printk_ratelimit()) | ||
252 | dev_err(dev, "WLP: Verification of frame " | ||
253 | "from neighbor %02x:%02x failed.\n", | ||
254 | src->data[1], src->data[0]); | ||
255 | goto out; | ||
256 | } | ||
257 | result = 1; | ||
258 | break; | ||
259 | case WLP_FRAME_ABBREVIATED: | ||
260 | dev_err(dev, "Abbreviated frame received. FIXME?\n"); | ||
261 | kfree_skb(skb); | ||
262 | break; | ||
263 | case WLP_FRAME_CONTROL: | ||
264 | dev_err(dev, "Control frame received. FIXME?\n"); | ||
265 | kfree_skb(skb); | ||
266 | break; | ||
267 | case WLP_FRAME_ASSOCIATION: | ||
268 | if (len < sizeof(struct wlp_frame_assoc)) { | ||
269 | dev_err(dev, "Not enough data to parse Association " | ||
270 | "WLP header.\n"); | ||
271 | goto out; | ||
272 | } | ||
273 | d_printf(5, dev, "Association frame received.\n"); | ||
274 | wlp_receive_assoc_frame(wlp, skb, src); | ||
275 | break; | ||
276 | default: | ||
277 | dev_err(dev, "Invalid frame received.\n"); | ||
278 | result = -EINVAL; | ||
279 | break; | ||
280 | } | ||
281 | out: | ||
282 | if (result < 0) { | ||
283 | kfree_skb(skb); | ||
284 | result = 0; | ||
285 | } | ||
286 | d_fnend(6, dev, "skb (%p)\n", skb); | ||
287 | return result; | ||
288 | } | ||
289 | EXPORT_SYMBOL_GPL(wlp_receive_frame); | ||
290 | |||
291 | |||
292 | /** | ||
293 | * Verify frame from network stack, prepare for further transmission | ||
294 | * | ||
295 | * @skb: the socket buffer that needs to be prepared for transmission (it | ||
296 | * is in need of a WLP header). If this is a broadcast frame we take | ||
297 | * over the entire transmission. | ||
298 | * If it is a unicast the WSS connection should already be established | ||
299 | * and transmission will be done by the calling function. | ||
300 | * @dst: On return this will contain the device address to which the | ||
301 | * frame is destined. | ||
302 | * @returns: 0 on success no tx : WLP header successfully applied to skb buffer, | ||
303 | * calling function can proceed with tx | ||
304 | * 1 on success with tx : WLP will take over transmission of this | ||
305 | * frame | ||
306 | * <0 on error | ||
307 | * | ||
308 | * The network stack (WLP client) is attempting to transmit a frame. We can | ||
309 | * only transmit data if a local WSS is at least active (connection will be | ||
310 | * done here if this is a broadcast frame and neighbor also has the WSS | ||
311 | * active). | ||
312 | * | ||
313 | * The frame can be either broadcast or unicast. Broadcast in a WSS is | ||
314 | * supported via multicast, but we don't support multicast yet (until | ||
315 | * devices start to support MAB IEs). If a broadcast frame needs to be | ||
316 | * transmitted it is treated as a unicast frame to each neighbor. In this | ||
317 | * case the WLP takes over transmission of the skb and returns 1 | ||
318 | * to the caller to indicate so. Also, in this case, if a neighbor has the | ||
319 | * same WSS activated but is not connected then the WSS connection will be | ||
320 | * done at this time. The neighbor's virtual address will be learned at | ||
321 | * this time. | ||
322 | * | ||
323 | * The destination address in a unicast frame is the virtual address of the | ||
324 | * neighbor. This address only becomes known when a WSS connection is | ||
325 | * established. We thus rely on a broadcast frame to trigger the setup of | ||
326 | * WSS connections to all neighbors before we are able to send unicast | ||
327 | * frames to them. This seems reasonable as IP would usually use ARP first | ||
328 | * before any unicast frames are sent. | ||
329 | * | ||
330 | * If we are already connected to the neighbor (neighbor's virtual address | ||
331 | * is known) we just prepare the WLP header and the caller will continue to | ||
332 | * send the frame. | ||
333 | * | ||
334 | * A failure in this function usually indicates something that cannot be | ||
335 | * fixed automatically. So, if this function fails (@return < 0) the calling | ||
336 | * function should not retry to send the frame as it will very likely keep | ||
337 | * failing. | ||
338 | * | ||
339 | */ | ||
340 | int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | ||
341 | struct sk_buff *skb, struct uwb_dev_addr *dst) | ||
342 | { | ||
343 | int result = -EINVAL; | ||
344 | struct ethhdr *eth_hdr = (void *) skb->data; | ||
345 | |||
346 | d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb); | ||
347 | if (is_broadcast_ether_addr(eth_hdr->h_dest)) { | ||
348 | d_printf(6, dev, "WLP: handling broadcast frame. \n"); | ||
349 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); | ||
350 | if (result < 0) { | ||
351 | if (printk_ratelimit()) | ||
352 | dev_err(dev, "Unable to handle broadcast " | ||
353 | "frame from WLP client.\n"); | ||
354 | goto out; | ||
355 | } | ||
356 | dev_kfree_skb_irq(skb); | ||
357 | result = 1; | ||
358 | /* Frame will be transmitted by WLP. */ | ||
359 | } else { | ||
360 | d_printf(6, dev, "WLP: handling unicast frame. \n"); | ||
361 | result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, | ||
362 | wlp_wss_prep_hdr, skb); | ||
363 | if (unlikely(result < 0)) { | ||
364 | if (printk_ratelimit()) | ||
365 | dev_err(dev, "Unable to prepare " | ||
366 | "skb for transmission. \n"); | ||
367 | goto out; | ||
368 | } | ||
369 | } | ||
370 | out: | ||
371 | d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result); | ||
372 | return result; | ||
373 | } | ||
374 | EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame); | ||
diff --git a/drivers/uwb/wlp/wlp-internal.h b/drivers/uwb/wlp/wlp-internal.h new file mode 100644 index 000000000000..1c94fabfb1a7 --- /dev/null +++ b/drivers/uwb/wlp/wlp-internal.h | |||
@@ -0,0 +1,228 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * Internal API | ||
4 | * | ||
5 | * Copyright (C) 2007 Intel Corporation | ||
6 | * Reinette Chatre <reinette.chatre@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __WLP_INTERNAL_H__ | ||
25 | #define __WLP_INTERNAL_H__ | ||
26 | |||
27 | /** | ||
28 | * State of WSS connection | ||
29 | * | ||
30 | * A device needs to connect to a neighbor in an activated WSS before data | ||
31 | * can be transmitted. The spec also distinguishes between a new connection | ||
32 | * attempt and a connection attempt after previous connection attempts. The | ||
33 | * state WLP_WSS_CONNECT_FAILED is used for this scenario. See WLP 0.99 | ||
34 | * [7.2.6] | ||
35 | */ | ||
36 | enum wlp_wss_connect { | ||
37 | WLP_WSS_UNCONNECTED = 0, | ||
38 | WLP_WSS_CONNECTED, | ||
39 | WLP_WSS_CONNECT_FAILED, | ||
40 | }; | ||
41 | |||
42 | extern struct kobj_type wss_ktype; | ||
43 | extern struct attribute_group wss_attr_group; | ||
44 | |||
45 | extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t); | ||
46 | extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); | ||
47 | |||
48 | |||
49 | /* This should be changed to a dynamic array where entries are sorted | ||
50 | * by eth_addr and search is done in a binary form | ||
51 | * | ||
52 | * Although thinking twice about it: this technology's maximum reach | ||
53 | * is 10 meters...unless you want to pack too much stuff in around | ||
54 | * your radio controller/WLP device, the list will probably not be | ||
55 | * too big. | ||
56 | * | ||
57 | * In any case, there is probably some data structure in the kernel | ||
58 | * that we could reuse for that already. | ||
59 | * | ||
60 | * The below structure is really just good while we support one WSS per | ||
61 | * host. | ||
62 | */ | ||
63 | struct wlp_eda_node { | ||
64 | struct list_head list_node; | ||
65 | unsigned char eth_addr[ETH_ALEN]; | ||
66 | struct uwb_dev_addr dev_addr; | ||
67 | struct wlp_wss *wss; | ||
68 | unsigned char virt_addr[ETH_ALEN]; | ||
69 | u8 tag; | ||
70 | enum wlp_wss_connect state; | ||
71 | }; | ||
72 | |||
73 | typedef int (*wlp_eda_for_each_f)(struct wlp *, struct wlp_eda_node *, void *); | ||
74 | |||
75 | extern void wlp_eda_init(struct wlp_eda *); | ||
76 | extern void wlp_eda_release(struct wlp_eda *); | ||
77 | extern int wlp_eda_create_node(struct wlp_eda *, | ||
78 | const unsigned char eth_addr[ETH_ALEN], | ||
79 | const struct uwb_dev_addr *); | ||
80 | extern void wlp_eda_rm_node(struct wlp_eda *, const struct uwb_dev_addr *); | ||
81 | extern int wlp_eda_update_node(struct wlp_eda *, | ||
82 | const struct uwb_dev_addr *, | ||
83 | struct wlp_wss *, | ||
84 | const unsigned char virt_addr[ETH_ALEN], | ||
85 | const u8, const enum wlp_wss_connect); | ||
86 | extern int wlp_eda_update_node_state(struct wlp_eda *, | ||
87 | const struct uwb_dev_addr *, | ||
88 | const enum wlp_wss_connect); | ||
89 | |||
90 | extern int wlp_copy_eda_node(struct wlp_eda *, struct uwb_dev_addr *, | ||
91 | struct wlp_eda_node *); | ||
92 | extern int wlp_eda_for_each(struct wlp_eda *, wlp_eda_for_each_f , void *); | ||
93 | extern int wlp_eda_for_virtual(struct wlp_eda *, | ||
94 | const unsigned char eth_addr[ETH_ALEN], | ||
95 | struct uwb_dev_addr *, | ||
96 | wlp_eda_for_each_f , void *); | ||
97 | |||
98 | |||
99 | extern void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *); | ||
100 | |||
101 | extern size_t wlp_wss_key_print(char *, size_t, u8 *); | ||
102 | |||
103 | /* Function called when no more references to WSS exists */ | ||
104 | extern void wlp_wss_release(struct kobject *); | ||
105 | |||
106 | extern void wlp_wss_reset(struct wlp_wss *); | ||
107 | extern int wlp_wss_create_activate(struct wlp_wss *, struct wlp_uuid *, | ||
108 | char *, unsigned, unsigned); | ||
109 | extern int wlp_wss_enroll_activate(struct wlp_wss *, struct wlp_uuid *, | ||
110 | struct uwb_dev_addr *); | ||
111 | extern ssize_t wlp_discover(struct wlp *); | ||
112 | |||
113 | extern int wlp_enroll_neighbor(struct wlp *, struct wlp_neighbor_e *, | ||
114 | struct wlp_wss *, struct wlp_uuid *); | ||
115 | extern int wlp_wss_is_active(struct wlp *, struct wlp_wss *, | ||
116 | struct uwb_dev_addr *); | ||
117 | |||
118 | struct wlp_assoc_conn_ctx { | ||
119 | struct work_struct ws; | ||
120 | struct wlp *wlp; | ||
121 | struct sk_buff *skb; | ||
122 | struct wlp_eda_node eda_entry; | ||
123 | }; | ||
124 | |||
125 | |||
126 | extern int wlp_wss_connect_prep(struct wlp *, struct wlp_eda_node *, void *); | ||
127 | extern int wlp_wss_send_copy(struct wlp *, struct wlp_eda_node *, void *); | ||
128 | |||
129 | |||
130 | /* Message handling */ | ||
131 | struct wlp_assoc_frame_ctx { | ||
132 | struct work_struct ws; | ||
133 | struct wlp *wlp; | ||
134 | struct sk_buff *skb; | ||
135 | struct uwb_dev_addr src; | ||
136 | }; | ||
137 | |||
138 | extern int wlp_wss_prep_hdr(struct wlp *, struct wlp_eda_node *, void *); | ||
139 | extern void wlp_handle_d1_frame(struct work_struct *); | ||
140 | extern int wlp_parse_d2_frame_to_cache(struct wlp *, struct sk_buff *, | ||
141 | struct wlp_neighbor_e *); | ||
142 | extern int wlp_parse_d2_frame_to_enroll(struct wlp_wss *, struct sk_buff *, | ||
143 | struct wlp_neighbor_e *, | ||
144 | struct wlp_uuid *); | ||
145 | extern void wlp_handle_c1_frame(struct work_struct *); | ||
146 | extern void wlp_handle_c3_frame(struct work_struct *); | ||
147 | extern int wlp_parse_c3c4_frame(struct wlp *, struct sk_buff *, | ||
148 | struct wlp_uuid *, u8 *, | ||
149 | struct uwb_mac_addr *); | ||
150 | extern int wlp_parse_f0(struct wlp *, struct sk_buff *); | ||
151 | extern int wlp_send_assoc_frame(struct wlp *, struct wlp_wss *, | ||
152 | struct uwb_dev_addr *, enum wlp_assoc_type); | ||
153 | extern ssize_t wlp_get_version(struct wlp *, struct wlp_attr_version *, | ||
154 | u8 *, ssize_t); | ||
155 | extern ssize_t wlp_get_wssid(struct wlp *, struct wlp_attr_wssid *, | ||
156 | struct wlp_uuid *, ssize_t); | ||
157 | extern int __wlp_alloc_device_info(struct wlp *); | ||
158 | extern int __wlp_setup_device_info(struct wlp *); | ||
159 | |||
160 | extern struct wlp_wss_attribute wss_attribute_properties; | ||
161 | extern struct wlp_wss_attribute wss_attribute_members; | ||
162 | extern struct wlp_wss_attribute wss_attribute_state; | ||
163 | |||
164 | static inline | ||
165 | size_t wlp_wss_uuid_print(char *buf, size_t bufsize, struct wlp_uuid *uuid) | ||
166 | { | ||
167 | size_t result; | ||
168 | |||
169 | result = scnprintf(buf, bufsize, | ||
170 | "%02x:%02x:%02x:%02x:%02x:%02x:" | ||
171 | "%02x:%02x:%02x:%02x:%02x:%02x:" | ||
172 | "%02x:%02x:%02x:%02x", | ||
173 | uuid->data[0], uuid->data[1], | ||
174 | uuid->data[2], uuid->data[3], | ||
175 | uuid->data[4], uuid->data[5], | ||
176 | uuid->data[6], uuid->data[7], | ||
177 | uuid->data[8], uuid->data[9], | ||
178 | uuid->data[10], uuid->data[11], | ||
179 | uuid->data[12], uuid->data[13], | ||
180 | uuid->data[14], uuid->data[15]); | ||
181 | return result; | ||
182 | } | ||
183 | |||
184 | /** | ||
185 | * FIXME: How should a nonce be displayed? | ||
186 | */ | ||
187 | static inline | ||
188 | size_t wlp_wss_nonce_print(char *buf, size_t bufsize, struct wlp_nonce *nonce) | ||
189 | { | ||
190 | size_t result; | ||
191 | |||
192 | result = scnprintf(buf, bufsize, | ||
193 | "%02x %02x %02x %02x %02x %02x " | ||
194 | "%02x %02x %02x %02x %02x %02x " | ||
195 | "%02x %02x %02x %02x", | ||
196 | nonce->data[0], nonce->data[1], | ||
197 | nonce->data[2], nonce->data[3], | ||
198 | nonce->data[4], nonce->data[5], | ||
199 | nonce->data[6], nonce->data[7], | ||
200 | nonce->data[8], nonce->data[9], | ||
201 | nonce->data[10], nonce->data[11], | ||
202 | nonce->data[12], nonce->data[13], | ||
203 | nonce->data[14], nonce->data[15]); | ||
204 | return result; | ||
205 | } | ||
206 | |||
207 | |||
208 | static inline | ||
209 | void wlp_session_cb(struct wlp *wlp) | ||
210 | { | ||
211 | struct completion *completion = wlp->session->cb_priv; | ||
212 | complete(completion); | ||
213 | } | ||
214 | |||
215 | static inline | ||
216 | int wlp_uuid_is_set(struct wlp_uuid *uuid) | ||
217 | { | ||
218 | struct wlp_uuid zero_uuid = { .data = { 0x00, 0x00, 0x00, 0x00, | ||
219 | 0x00, 0x00, 0x00, 0x00, | ||
220 | 0x00, 0x00, 0x00, 0x00, | ||
221 | 0x00, 0x00, 0x00, 0x00} }; | ||
222 | |||
223 | if (!memcmp(uuid, &zero_uuid, sizeof(*uuid))) | ||
224 | return 0; | ||
225 | return 1; | ||
226 | } | ||
227 | |||
228 | #endif /* __WLP_INTERNAL_H__ */ | ||
diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c new file mode 100644 index 000000000000..0799402e73fb --- /dev/null +++ b/drivers/uwb/wlp/wlp-lc.c | |||
@@ -0,0 +1,585 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Intel Corporation | ||
5 | * Reinette Chatre <reinette.chatre@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * FIXME: docs | ||
23 | */ | ||
24 | |||
25 | #include <linux/wlp.h> | ||
26 | #define D_LOCAL 6 | ||
27 | #include <linux/uwb/debug.h> | ||
28 | #include "wlp-internal.h" | ||
29 | |||
30 | |||
31 | static | ||
32 | void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) | ||
33 | { | ||
34 | INIT_LIST_HEAD(&neighbor->wssid); | ||
35 | } | ||
36 | |||
37 | /** | ||
38 | * Create area for device information storage | ||
39 | * | ||
40 | * wlp->mutex must be held | ||
41 | */ | ||
42 | int __wlp_alloc_device_info(struct wlp *wlp) | ||
43 | { | ||
44 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
45 | BUG_ON(wlp->dev_info != NULL); | ||
46 | wlp->dev_info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL); | ||
47 | if (wlp->dev_info == NULL) { | ||
48 | dev_err(dev, "WLP: Unable to allocate memory for " | ||
49 | "device information.\n"); | ||
50 | return -ENOMEM; | ||
51 | } | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | |||
56 | /** | ||
57 | * Fill in device information using function provided by driver | ||
58 | * | ||
59 | * wlp->mutex must be held | ||
60 | */ | ||
61 | static | ||
62 | void __wlp_fill_device_info(struct wlp *wlp) | ||
63 | { | ||
64 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
65 | |||
66 | BUG_ON(wlp->fill_device_info == NULL); | ||
67 | d_printf(6, dev, "Retrieving device information " | ||
68 | "from device driver.\n"); | ||
69 | wlp->fill_device_info(wlp, wlp->dev_info); | ||
70 | } | ||
71 | |||
72 | /** | ||
73 | * Setup device information | ||
74 | * | ||
75 | * Allocate area for device information and populate it. | ||
76 | * | ||
77 | * wlp->mutex must be held | ||
78 | */ | ||
79 | int __wlp_setup_device_info(struct wlp *wlp) | ||
80 | { | ||
81 | int result; | ||
82 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
83 | |||
84 | result = __wlp_alloc_device_info(wlp); | ||
85 | if (result < 0) { | ||
86 | dev_err(dev, "WLP: Unable to allocate area for " | ||
87 | "device information.\n"); | ||
88 | return result; | ||
89 | } | ||
90 | __wlp_fill_device_info(wlp); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | /** | ||
95 | * Remove information about neighbor stored temporarily | ||
96 | * | ||
97 | * Information learned during discovery should only be stored when the | ||
98 | * device enrolls in the neighbor's WSS. We do need to store this | ||
99 | * information temporarily in order to present it to the user. | ||
100 | * | ||
101 | * We are only interested in keeping neighbor WSS information if that | ||
102 | * neighbor is accepting enrollment. | ||
103 | * | ||
104 | * should be called with wlp->nbmutex held | ||
105 | */ | ||
106 | void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *neighbor) | ||
107 | { | ||
108 | struct wlp_wssid_e *wssid_e, *next; | ||
109 | u8 keep; | ||
110 | if (!list_empty(&neighbor->wssid)) { | ||
111 | list_for_each_entry_safe(wssid_e, next, &neighbor->wssid, | ||
112 | node) { | ||
113 | if (wssid_e->info != NULL) { | ||
114 | keep = wssid_e->info->accept_enroll; | ||
115 | kfree(wssid_e->info); | ||
116 | wssid_e->info = NULL; | ||
117 | if (!keep) { | ||
118 | list_del(&wssid_e->node); | ||
119 | kfree(wssid_e); | ||
120 | } | ||
121 | } | ||
122 | } | ||
123 | } | ||
124 | if (neighbor->info != NULL) { | ||
125 | kfree(neighbor->info); | ||
126 | neighbor->info = NULL; | ||
127 | } | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * Populate WLP neighborhood cache with neighbor information | ||
132 | * | ||
133 | * A new neighbor is found. If it is discoverable then we add it to the | ||
134 | * neighborhood cache. | ||
135 | * | ||
136 | */ | ||
137 | static | ||
138 | int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev) | ||
139 | { | ||
140 | int result = 0; | ||
141 | int discoverable; | ||
142 | struct wlp_neighbor_e *neighbor; | ||
143 | |||
144 | d_fnstart(6, &dev->dev, "uwb %p \n", dev); | ||
145 | d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n", | ||
146 | dev->dev_addr.data[1], dev->dev_addr.data[0]); | ||
147 | /** | ||
148 | * FIXME: | ||
149 | * Use contents of WLP IE found in beacon cache to determine if | ||
150 | * neighbor is discoverable. | ||
151 | * The device does not support WLP IE yet so this still needs to be | ||
152 | * done. Until then we assume all devices are discoverable. | ||
153 | */ | ||
154 | discoverable = 1; /* will be changed when FIXME disappears */ | ||
155 | if (discoverable) { | ||
156 | /* Add neighbor to cache for discovery */ | ||
157 | neighbor = kzalloc(sizeof(*neighbor), GFP_KERNEL); | ||
158 | if (neighbor == NULL) { | ||
159 | dev_err(&dev->dev, "Unable to create memory for " | ||
160 | "new neighbor. \n"); | ||
161 | result = -ENOMEM; | ||
162 | goto error_no_mem; | ||
163 | } | ||
164 | wlp_neighbor_init(neighbor); | ||
165 | uwb_dev_get(dev); | ||
166 | neighbor->uwb_dev = dev; | ||
167 | list_add(&neighbor->node, &wlp->neighbors); | ||
168 | } | ||
169 | error_no_mem: | ||
170 | d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result); | ||
171 | return result; | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * Remove one neighbor from cache | ||
176 | */ | ||
177 | static | ||
178 | void __wlp_neighbor_release(struct wlp_neighbor_e *neighbor) | ||
179 | { | ||
180 | struct wlp_wssid_e *wssid_e, *next_wssid_e; | ||
181 | |||
182 | list_for_each_entry_safe(wssid_e, next_wssid_e, | ||
183 | &neighbor->wssid, node) { | ||
184 | list_del(&wssid_e->node); | ||
185 | kfree(wssid_e); | ||
186 | } | ||
187 | uwb_dev_put(neighbor->uwb_dev); | ||
188 | list_del(&neighbor->node); | ||
189 | kfree(neighbor); | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * Clear entire neighborhood cache. | ||
194 | */ | ||
195 | static | ||
196 | void __wlp_neighbors_release(struct wlp *wlp) | ||
197 | { | ||
198 | struct wlp_neighbor_e *neighbor, *next; | ||
199 | if (list_empty(&wlp->neighbors)) | ||
200 | return; | ||
201 | list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) { | ||
202 | __wlp_neighbor_release(neighbor); | ||
203 | } | ||
204 | } | ||
205 | |||
/**
 * Clear the neighborhood cache under the neighborhood lock.
 *
 * Locked wrapper around __wlp_neighbors_release(); used at teardown
 * (see wlp_remove()).
 */
static
void wlp_neighbors_release(struct wlp *wlp)
{
	mutex_lock(&wlp->nbmutex);
	__wlp_neighbors_release(wlp);
	mutex_unlock(&wlp->nbmutex);
}
213 | |||
214 | |||
215 | |||
/**
 * Send D1 message to neighbor, receive D2 message
 *
 * @neighbor: neighbor to which D1 message will be sent
 * @wss:      if not NULL, it is an enrollment request for this WSS
 * @wssid:    if wss not NULL, this is the wssid of the WSS in which we
 *            want to enroll
 *
 * A D1/D2 exchange is done for one of two reasons: discovery or
 * enrollment. If done for discovery the D1 message is sent to the neighbor
 * and the contents of the D2 response is stored in a temporary cache.
 * If done for enrollment the @wss and @wssid are provided also. In this
 * case the D1 message is sent to the neighbor, the D2 response is parsed
 * for enrollment of the WSS with wssid.
 *
 * Returns 0 on success, -ENXIO if the local UUID is unset, -ETIMEDOUT
 * if the neighbor does not answer, -EINVAL if the neighbor answered
 * with an error (F0) frame or the D2 parse failed.
 *
 * &wss->mutex is held
 */
static
int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
		      struct wlp_wss *wss, struct wlp_uuid *wssid)
{
	int result;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct wlp_session session;
	struct sk_buff *skb;
	struct wlp_frame_assoc *resp;
	struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;

	mutex_lock(&wlp->mutex);
	if (!wlp_uuid_is_set(&wlp->uuid)) {
		/* The D1 frame carries our UUID; refuse to start the
		 * exchange until the user configured one. */
		dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
			"proceed.\n");
		result = -ENXIO;
		goto out;
	}
	/* Send D1 association frame */
	result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_D1);
	if (result < 0) {
		dev_err(dev, "Unable to send D1 frame to neighbor "
			"%02x:%02x (%d)\n", dev_addr->data[1],
			dev_addr->data[0], result);
		d_printf(6, dev, "Add placeholders into buffer next to "
			 "neighbor information we have (dev address).\n");
		goto out;
	}
	/* Create session, wait for response. The session is completed
	 * (via wlp_session_cb) when a frame of the expected type
	 * arrives from this neighbor. */
	session.exp_message = WLP_ASSOC_D2;
	session.cb = wlp_session_cb;
	session.cb_priv = &completion;
	session.neighbor_addr = *dev_addr;
	/* Only one association session may be in flight at a time;
	 * &wlp->mutex (held here) enforces that. */
	BUG_ON(wlp->session != NULL);
	wlp->session = &session;
	/* Wait for D2/F0 frame */
	result = wait_for_completion_interruptible_timeout(&completion,
						WLP_PER_MSG_TIMEOUT * HZ);
	if (result == 0) {
		result = -ETIMEDOUT;
		dev_err(dev, "Timeout while sending D1 to neighbor "
			"%02x:%02x.\n", dev_addr->data[1],
			dev_addr->data[0]);
		goto error_session;
	}
	if (result < 0) {
		/* Interrupted by a signal. */
		dev_err(dev, "Unable to discover/enroll neighbor %02x:%02x.\n",
			dev_addr->data[1], dev_addr->data[0]);
		goto error_session;
	}
	/* Parse message in session->data: it will be either D2 or F0 */
	skb = session.data;
	resp = (void *) skb->data;
	d_printf(6, dev, "Received response to D1 frame. \n");
	d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len);

	if (resp->type == WLP_ASSOC_F0) {
		/* Neighbor answered with an error (F0) frame; the
		 * exchange failed regardless of whether we could parse
		 * the error details, hence the unconditional -EINVAL. */
		result = wlp_parse_f0(wlp, skb);
		if (result < 0)
			dev_err(dev, "WLP: Unable to parse F0 from neighbor "
				"%02x:%02x.\n", dev_addr->data[1],
				dev_addr->data[0]);
		result = -EINVAL;
		goto error_resp_parse;
	}
	if (wss == NULL) {
		/* Discovery */
		result = wlp_parse_d2_frame_to_cache(wlp, skb, neighbor);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to parse D2 message from "
				"neighbor %02x:%02x for discovery.\n",
				dev_addr->data[1], dev_addr->data[0]);
			goto error_resp_parse;
		}
	} else {
		/* Enrollment */
		result = wlp_parse_d2_frame_to_enroll(wss, skb, neighbor,
						      wssid);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to parse D2 message from "
				"neighbor %02x:%02x for enrollment.\n",
				dev_addr->data[1], dev_addr->data[0]);
			goto error_resp_parse;
		}
	}
error_resp_parse:
	kfree_skb(skb);
error_session:
	wlp->session = NULL;
out:
	mutex_unlock(&wlp->mutex);
	return result;
}
327 | |||
/**
 * Enroll into WSS of provided WSSID by using neighbor as registrar
 *
 * Runs a D1/D2 exchange with @neighbor for enrollment into the WSS
 * identified by @wssid, then promotes @wss from "partially enrolled"
 * (set by the D2 parser) to fully enrolled. Secure enrollment is not
 * implemented yet and fails with -EINVAL after resetting @wss.
 *
 * Note the goto layout: error paths before successful enrollment jump
 * to "out" (no cleanup needed), while the secure-WSS path jumps to
 * "error", which resets the partially enrolled @wss before returning.
 *
 * &wss->mutex is held
 */
int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
			struct wlp_wss *wss, struct wlp_uuid *wssid)
{
	int result = 0;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	char buf[WLP_WSS_UUID_STRSIZE];
	struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;
	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
	d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
		  wlp, neighbor, wss, wssid, buf);
	d_printf(6, dev, "Complete me.\n");
	result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid);
	if (result < 0) {
		dev_err(dev, "WLP: D1/D2 message exchange for enrollment "
			"failed. result = %d \n", result);
		goto out;
	}
	/* A successful D2 parse leaves the WSS partially enrolled. */
	if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
		dev_err(dev, "WLP: Unable to enroll into WSS %s using "
			"neighbor %02x:%02x. \n", buf,
			dev_addr->data[1], dev_addr->data[0]);
		result = -EINVAL;
		goto out;
	}
	if (wss->secure_status == WLP_WSS_SECURE) {
		/* Secure enrollment (key exchange) not implemented;
		 * undo the partial enrollment via the "error" path. */
		dev_err(dev, "FIXME: need to complete secure enrollment.\n");
		result = -EINVAL;
		goto error;
	} else {
		wss->state = WLP_WSS_STATE_ENROLLED;
		d_printf(2, dev, "WLP: Success Enrollment into unsecure WSS "
			 "%s using neighbor %02x:%02x. \n", buf,
			 dev_addr->data[1], dev_addr->data[0]);
	}

	d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
		wlp, neighbor, wss, wssid, buf);
out:
	return result;
error:
	wlp_wss_reset(wss);
	return result;
}
376 | |||
/**
 * Discover WSS information of neighbor's active WSS
 *
 * A D1/D2 exchange with NULL @wss/@wssid runs in discovery mode: the
 * contents of the neighbor's D2 response are stored in the neighbor
 * cache instead of being used for enrollment (see wlp_d1d2_exchange()).
 */
static
int wlp_discover_neighbor(struct wlp *wlp,
			  struct wlp_neighbor_e *neighbor)
{
	return wlp_d1d2_exchange(wlp, neighbor, NULL, NULL);
}
386 | |||
387 | |||
388 | /** | ||
389 | * Each neighbor in the neighborhood cache is discoverable. Discover it. | ||
390 | * | ||
391 | * Discovery is done through sending of D1 association frame and parsing | ||
392 | * the D2 association frame response. Only wssid from D2 will be included | ||
393 | * in neighbor cache, rest is just displayed to user and forgotten. | ||
394 | * | ||
395 | * The discovery is not done in parallel. This is simple and enables us to | ||
396 | * maintain only one association context. | ||
397 | * | ||
398 | * The discovery of one neighbor does not affect the other, but if the | ||
399 | * discovery of a neighbor fails it is removed from the neighborhood cache. | ||
400 | */ | ||
401 | static | ||
402 | int wlp_discover_all_neighbors(struct wlp *wlp) | ||
403 | { | ||
404 | int result = 0; | ||
405 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
406 | struct wlp_neighbor_e *neighbor, *next; | ||
407 | |||
408 | list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) { | ||
409 | result = wlp_discover_neighbor(wlp, neighbor); | ||
410 | if (result < 0) { | ||
411 | dev_err(dev, "WLP: Unable to discover neighbor " | ||
412 | "%02x:%02x, removing from neighborhood. \n", | ||
413 | neighbor->uwb_dev->dev_addr.data[1], | ||
414 | neighbor->uwb_dev->dev_addr.data[0]); | ||
415 | __wlp_neighbor_release(neighbor); | ||
416 | } | ||
417 | } | ||
418 | return result; | ||
419 | } | ||
420 | |||
/* uwb_dev_for_each() callback: add @dev to the WLP neighborhood cache
 * (priv is the struct wlp instance). */
static int wlp_add_neighbor_helper(struct device *dev, void *priv)
{
	return wlp_add_neighbor(priv, to_uwb_dev(dev));
}
428 | |||
429 | /** | ||
430 | * Discover WLP neighborhood | ||
431 | * | ||
432 | * Will send D1 association frame to all devices in beacon group that have | ||
433 | * discoverable bit set in WLP IE. D2 frames will be received, information | ||
434 | * displayed to user in @buf. Partial information (from D2 association | ||
435 | * frame) will be cached to assist with future association | ||
436 | * requests. | ||
437 | * | ||
438 | * The discovery of the WLP neighborhood is triggered by the user. This | ||
439 | * should occur infrequently and we thus free current cache and re-allocate | ||
440 | * memory if needed. | ||
441 | * | ||
442 | * If one neighbor fails during initial discovery (determining if it is a | ||
443 | * neighbor or not), we fail all - note that interaction with neighbor has | ||
444 | * not occured at this point so if a failure occurs we know something went wrong | ||
445 | * locally. We thus undo everything. | ||
446 | */ | ||
447 | ssize_t wlp_discover(struct wlp *wlp) | ||
448 | { | ||
449 | int result = 0; | ||
450 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
451 | |||
452 | d_fnstart(6, dev, "wlp %p \n", wlp); | ||
453 | mutex_lock(&wlp->nbmutex); | ||
454 | /* Clear current neighborhood cache. */ | ||
455 | __wlp_neighbors_release(wlp); | ||
456 | /* Determine which devices in neighborhood. Repopulate cache. */ | ||
457 | result = uwb_dev_for_each(wlp->rc, wlp_add_neighbor_helper, wlp); | ||
458 | if (result < 0) { | ||
459 | /* May have partial neighbor information, release all. */ | ||
460 | __wlp_neighbors_release(wlp); | ||
461 | goto error_dev_for_each; | ||
462 | } | ||
463 | /* Discover the properties of devices in neighborhood. */ | ||
464 | result = wlp_discover_all_neighbors(wlp); | ||
465 | /* In case of failure we still print our partial results. */ | ||
466 | if (result < 0) { | ||
467 | dev_err(dev, "Unable to fully discover neighborhood. \n"); | ||
468 | result = 0; | ||
469 | } | ||
470 | error_dev_for_each: | ||
471 | mutex_unlock(&wlp->nbmutex); | ||
472 | d_fnend(6, dev, "wlp %p \n", wlp); | ||
473 | return result; | ||
474 | } | ||
475 | |||
/**
 * Handle events from UWB stack
 *
 * We handle events conservatively. If a neighbor goes off the air we
 * remove it from the neighborhood. If an association process is in
 * progress this function will block waiting for the nbmutex to become
 * free. The association process will thus be allowed to complete before it
 * is removed.
 */
static
void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev,
		       enum uwb_notifs event)
{
	struct wlp *wlp = _wlp;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_neighbor_e *neighbor, *next;
	int result;
	switch (event) {
	case UWB_NOTIF_ONAIR:
		/* New device beaconing: track its MAC/device address
		 * mapping in the EDA cache so frames can be routed. */
		d_printf(6, dev, "UWB device %02x:%02x is onair\n",
			 uwb_dev->dev_addr.data[1],
			 uwb_dev->dev_addr.data[0]);
		result = wlp_eda_create_node(&wlp->eda,
					     uwb_dev->mac_addr.data,
					     &uwb_dev->dev_addr);
		if (result < 0)
			dev_err(dev, "WLP: Unable to add new neighbor "
				"%02x:%02x to EDA cache.\n",
				uwb_dev->dev_addr.data[1],
				uwb_dev->dev_addr.data[0]);
		break;
	case UWB_NOTIF_OFFAIR:
		/* Device vanished: drop it from the EDA cache and from
		 * the neighborhood cache (under the neighborhood lock,
		 * so an in-flight association completes first). */
		d_printf(6, dev, "UWB device %02x:%02x is offair\n",
			 uwb_dev->dev_addr.data[1],
			 uwb_dev->dev_addr.data[0]);
		wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr);
		mutex_lock(&wlp->nbmutex);
		list_for_each_entry_safe(neighbor, next, &wlp->neighbors,
					 node) {
			if (neighbor->uwb_dev == uwb_dev) {
				d_printf(6, dev, "Removing device from "
					 "neighborhood.\n");
				__wlp_neighbor_release(neighbor);
			}
		}
		mutex_unlock(&wlp->nbmutex);
		break;
	default:
		dev_err(dev, "don't know how to handle event %d from uwb\n",
			event);
	}
}
528 | |||
/**
 * wlp_setup - initialize a WLP instance for a radio controller
 * @wlp: WLP instance; the driver must have filled in the callbacks
 *       checked by the BUG_ONs below before calling
 * @rc:  UWB radio controller this WLP instance runs on
 *
 * Registers with the UWB stack for device on/off-air notifications and
 * registers as a PAL. If PAL registration fails, the notification
 * handler is deregistered again so no callbacks reach a dead instance.
 *
 * Returns 0 on success, negative error code on failure.
 */
int wlp_setup(struct wlp *wlp, struct uwb_rc *rc)
{
	struct device *dev = &rc->uwb_dev.dev;
	int result;

	d_fnstart(6, dev, "wlp %p\n", wlp);
	/* Mandatory driver-provided callbacks; crash loudly and early
	 * if one is missing. */
	BUG_ON(wlp->fill_device_info == NULL);
	BUG_ON(wlp->xmit_frame == NULL);
	BUG_ON(wlp->stop_queue == NULL);
	BUG_ON(wlp->start_queue == NULL);
	wlp->rc = rc;
	wlp_eda_init(&wlp->eda);/* Set up address cache */
	wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb;
	wlp->uwb_notifs_handler.data = wlp;
	uwb_notifs_register(rc, &wlp->uwb_notifs_handler);

	uwb_pal_init(&wlp->pal);
	result = uwb_pal_register(rc, &wlp->pal);
	if (result < 0)
		uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);

	d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_setup);
554 | |||
555 | void wlp_remove(struct wlp *wlp) | ||
556 | { | ||
557 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
558 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
559 | wlp_neighbors_release(wlp); | ||
560 | uwb_pal_unregister(wlp->rc, &wlp->pal); | ||
561 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); | ||
562 | wlp_eda_release(&wlp->eda); | ||
563 | mutex_lock(&wlp->mutex); | ||
564 | if (wlp->dev_info != NULL) | ||
565 | kfree(wlp->dev_info); | ||
566 | mutex_unlock(&wlp->mutex); | ||
567 | wlp->rc = NULL; | ||
568 | /* We have to use NULL here because this function can be called | ||
569 | * when the device disappeared. */ | ||
570 | d_fnend(6, NULL, "wlp %p\n", wlp); | ||
571 | } | ||
572 | EXPORT_SYMBOL_GPL(wlp_remove); | ||
573 | |||
/**
 * wlp_reset_all - reset the WLP hardware
 * @wlp: the WLP device to reset.
 *
 * This schedules a full hardware reset of the WLP device. The radio
 * controller and any other PALs will also be reset.
 *
 * Simply delegates to the radio controller's reset-all facility.
 */
void wlp_reset_all(struct wlp *wlp)
{
	uwb_rc_reset_all(wlp->rc);
}
EXPORT_SYMBOL_GPL(wlp_reset_all);
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c new file mode 100644 index 000000000000..96b18c9bd6e9 --- /dev/null +++ b/drivers/uwb/wlp/wss-lc.c | |||
@@ -0,0 +1,1055 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * | ||
4 | * Copyright (C) 2007 Intel Corporation | ||
5 | * Reinette Chatre <reinette.chatre@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Implementation of the WLP association protocol. | ||
23 | * | ||
24 | * FIXME: Docs | ||
25 | * | ||
26 | * A UWB network interface will configure a WSS through wlp_wss_setup() after | ||
27 | * the interface has been assigned a MAC address, typically after | ||
28 | * "ifconfig" has been called. When the interface goes down it should call | ||
29 | * wlp_wss_remove(). | ||
30 | * | ||
31 | * When the WSS is ready for use the user interacts via sysfs to create, | ||
32 | * discover, and activate WSS. | ||
33 | * | ||
34 | * wlp_wss_enroll_activate() | ||
35 | * | ||
36 | * wlp_wss_create_activate() | ||
37 | * wlp_wss_set_wssid_hash() | ||
38 | * wlp_wss_comp_wssid_hash() | ||
39 | * wlp_wss_sel_bcast_addr() | ||
40 | * wlp_wss_sysfs_add() | ||
41 | * | ||
42 | * Called when no more references to WSS exist: | ||
43 | * wlp_wss_release() | ||
44 | * wlp_wss_reset() | ||
45 | */ | ||
46 | |||
47 | #include <linux/etherdevice.h> /* for is_valid_ether_addr */ | ||
48 | #include <linux/skbuff.h> | ||
49 | #include <linux/wlp.h> | ||
50 | #define D_LOCAL 5 | ||
51 | #include <linux/uwb/debug.h> | ||
52 | #include "wlp-internal.h" | ||
53 | |||
54 | |||
55 | size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) | ||
56 | { | ||
57 | size_t result; | ||
58 | |||
59 | result = scnprintf(buf, bufsize, | ||
60 | "%02x %02x %02x %02x %02x %02x " | ||
61 | "%02x %02x %02x %02x %02x %02x " | ||
62 | "%02x %02x %02x %02x", | ||
63 | key[0], key[1], key[2], key[3], | ||
64 | key[4], key[5], key[6], key[7], | ||
65 | key[8], key[9], key[10], key[11], | ||
66 | key[12], key[13], key[14], key[15]); | ||
67 | return result; | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * Compute WSSID hash | ||
72 | * WLP Draft 0.99 [7.2.1] | ||
73 | * | ||
74 | * The WSSID hash for a WSSID is the result of an octet-wise exclusive-OR | ||
75 | * of all octets in the WSSID. | ||
76 | */ | ||
77 | static | ||
78 | u8 wlp_wss_comp_wssid_hash(struct wlp_uuid *wssid) | ||
79 | { | ||
80 | return wssid->data[0] ^ wssid->data[1] ^ wssid->data[2] | ||
81 | ^ wssid->data[3] ^ wssid->data[4] ^ wssid->data[5] | ||
82 | ^ wssid->data[6] ^ wssid->data[7] ^ wssid->data[8] | ||
83 | ^ wssid->data[9] ^ wssid->data[10] ^ wssid->data[11] | ||
84 | ^ wssid->data[12] ^ wssid->data[13] ^ wssid->data[14] | ||
85 | ^ wssid->data[15]; | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * Select a multicast EUI-48 for the WSS broadcast address. | ||
90 | * WLP Draft 0.99 [7.2.1] | ||
91 | * | ||
92 | * Selected based on the WiMedia Alliance OUI, 00-13-88, within the WLP | ||
93 | * range, [01-13-88-00-01-00, 01-13-88-00-01-FF] inclusive. | ||
94 | * | ||
95 | * This address is currently hardcoded. | ||
96 | * FIXME? | ||
97 | */ | ||
98 | static | ||
99 | struct uwb_mac_addr wlp_wss_sel_bcast_addr(struct wlp_wss *wss) | ||
100 | { | ||
101 | struct uwb_mac_addr bcast = { | ||
102 | .data = { 0x01, 0x13, 0x88, 0x00, 0x01, 0x00 } | ||
103 | }; | ||
104 | return bcast; | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * Clear the contents of the WSS structure - all except kobj, mutex, virtual | ||
109 | * | ||
110 | * We do not want to reinitialize - the internal kobj should not change as | ||
111 | * it still points to the parent received during setup. The mutex should | ||
112 | * remain also. We thus just reset values individually. | ||
113 | * The virutal address assigned to WSS will remain the same for the | ||
114 | * lifetime of the WSS. We only reset the fields that can change during its | ||
115 | * lifetime. | ||
116 | */ | ||
117 | void wlp_wss_reset(struct wlp_wss *wss) | ||
118 | { | ||
119 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
120 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
121 | d_fnstart(5, dev, "wss (%p) \n", wss); | ||
122 | memset(&wss->wssid, 0, sizeof(wss->wssid)); | ||
123 | wss->hash = 0; | ||
124 | memset(&wss->name[0], 0, sizeof(wss->name)); | ||
125 | memset(&wss->bcast, 0, sizeof(wss->bcast)); | ||
126 | wss->secure_status = WLP_WSS_UNSECURE; | ||
127 | memset(&wss->master_key[0], 0, sizeof(wss->master_key)); | ||
128 | wss->tag = 0; | ||
129 | wss->state = WLP_WSS_STATE_NONE; | ||
130 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * Create sysfs infrastructure for WSS | ||
135 | * | ||
136 | * The WSS is configured to have the interface as parent (see wlp_wss_setup()) | ||
137 | * a new sysfs directory that includes wssid as its name is created in the | ||
138 | * interface's sysfs directory. The group of files interacting with WSS are | ||
139 | * created also. | ||
140 | */ | ||
141 | static | ||
142 | int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str) | ||
143 | { | ||
144 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
145 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
146 | int result; | ||
147 | |||
148 | d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str); | ||
149 | result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); | ||
150 | if (result < 0) | ||
151 | return result; | ||
152 | wss->kobj.ktype = &wss_ktype; | ||
153 | result = kobject_init_and_add(&wss->kobj, | ||
154 | &wss_ktype, wss->kobj.parent, "wlp"); | ||
155 | if (result < 0) { | ||
156 | dev_err(dev, "WLP: Cannot register WSS kobject.\n"); | ||
157 | goto error_kobject_register; | ||
158 | } | ||
159 | result = sysfs_create_group(&wss->kobj, &wss_attr_group); | ||
160 | if (result < 0) { | ||
161 | dev_err(dev, "WLP: Cannot register WSS attributes: %d\n", | ||
162 | result); | ||
163 | goto error_sysfs_create_group; | ||
164 | } | ||
165 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
166 | return 0; | ||
167 | error_sysfs_create_group: | ||
168 | |||
169 | kobject_put(&wss->kobj); /* will free name if needed */ | ||
170 | return result; | ||
171 | error_kobject_register: | ||
172 | kfree(wss->kobj.name); | ||
173 | wss->kobj.name = NULL; | ||
174 | wss->kobj.ktype = NULL; | ||
175 | return result; | ||
176 | } | ||
177 | |||
178 | |||
179 | /** | ||
180 | * Release WSS | ||
181 | * | ||
182 | * No more references exist to this WSS. We should undo everything that was | ||
183 | * done in wlp_wss_create_activate() except removing the group. The group | ||
184 | * is not removed because an object can be unregistered before the group is | ||
185 | * created. We also undo any additional operations on the WSS after this | ||
186 | * (addition of members). | ||
187 | * | ||
188 | * If memory was allocated for the kobject's name then it will | ||
189 | * be freed by the kobject system during this time. | ||
190 | * | ||
191 | * The EDA cache is removed and reinitilized when the WSS is removed. We | ||
192 | * thus loose knowledge of members of this WSS at that time and need not do | ||
193 | * it here. | ||
194 | */ | ||
195 | void wlp_wss_release(struct kobject *kobj) | ||
196 | { | ||
197 | struct wlp_wss *wss = container_of(kobj, struct wlp_wss, kobj); | ||
198 | |||
199 | wlp_wss_reset(wss); | ||
200 | } | ||
201 | |||
202 | /** | ||
203 | * Enroll into a WSS using provided neighbor as registrar | ||
204 | * | ||
205 | * First search the neighborhood information to learn which neighbor is | ||
206 | * referred to, next proceed with enrollment. | ||
207 | * | ||
208 | * &wss->mutex is held | ||
209 | */ | ||
210 | static | ||
211 | int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid, | ||
212 | struct uwb_dev_addr *dest) | ||
213 | { | ||
214 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
215 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
216 | struct wlp_neighbor_e *neighbor; | ||
217 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
218 | int result = -ENXIO; | ||
219 | struct uwb_dev_addr *dev_addr; | ||
220 | |||
221 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
222 | d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n", | ||
223 | wss, buf, dest->data[1], dest->data[0]); | ||
224 | mutex_lock(&wlp->nbmutex); | ||
225 | list_for_each_entry(neighbor, &wlp->neighbors, node) { | ||
226 | dev_addr = &neighbor->uwb_dev->dev_addr; | ||
227 | if (!memcmp(dest, dev_addr, sizeof(*dest))) { | ||
228 | d_printf(5, dev, "Neighbor %02x:%02x is valid, " | ||
229 | "enrolling. \n", | ||
230 | dev_addr->data[1], dev_addr->data[0]); | ||
231 | result = wlp_enroll_neighbor(wlp, neighbor, wss, | ||
232 | wssid); | ||
233 | break; | ||
234 | } | ||
235 | } | ||
236 | if (result == -ENXIO) | ||
237 | dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", | ||
238 | dest->data[1], dest->data[0]); | ||
239 | mutex_unlock(&wlp->nbmutex); | ||
240 | d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n", | ||
241 | wss, buf, dest->data[1], dest->data[0], result); | ||
242 | return result; | ||
243 | } | ||
244 | |||
/**
 * Enroll into a WSS previously discovered
 *
 * User provides WSSID of WSS, search for neighbor that has this WSS
 * activated and attempt to enroll.
 *
 * Note: if enrollment with one matching neighbor fails, the inner
 * "break" moves on to try the next neighbor advertising the same
 * WSSID; only full success short-circuits via "goto out".
 *
 * &wss->mutex is held
 */
static
int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid)
{
	struct wlp *wlp = container_of(wss, struct wlp, wss);
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_neighbor_e *neighbor;
	struct wlp_wssid_e *wssid_e;
	char buf[WLP_WSS_UUID_STRSIZE];
	int result = -ENXIO;

	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
	d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf);
	mutex_lock(&wlp->nbmutex);
	list_for_each_entry(neighbor, &wlp->neighbors, node) {
		/* Scan the WSSIDs this neighbor advertised in its D2. */
		list_for_each_entry(wssid_e, &neighbor->wssid, node) {
			if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) {
				d_printf(5, dev, "Found WSSID %s in neighbor "
					 "%02x:%02x cache. \n", buf,
					 neighbor->uwb_dev->dev_addr.data[1],
					 neighbor->uwb_dev->dev_addr.data[0]);
				result = wlp_enroll_neighbor(wlp, neighbor,
							     wss, wssid);
				if (result == 0) /* enrollment success */
					goto out;
				break;
			}
		}
	}
out:
	/* result is still -ENXIO only if no neighbor advertised wssid. */
	if (result == -ENXIO)
		dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf);
	mutex_unlock(&wlp->nbmutex);
	d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result);
	return result;
}
288 | |||
289 | /** | ||
290 | * Enroll into WSS with provided WSSID, registrar may be provided | ||
291 | * | ||
292 | * @wss: out WSS that will be enrolled | ||
293 | * @wssid: wssid of neighboring WSS that we want to enroll in | ||
294 | * @devaddr: registrar can be specified, will be broadcast (ff:ff) if any | ||
295 | * neighbor can be used as registrar. | ||
296 | * | ||
297 | * &wss->mutex is held | ||
298 | */ | ||
299 | static | ||
300 | int wlp_wss_enroll(struct wlp_wss *wss, struct wlp_uuid *wssid, | ||
301 | struct uwb_dev_addr *devaddr) | ||
302 | { | ||
303 | int result; | ||
304 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
305 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
306 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
307 | struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; | ||
308 | |||
309 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
310 | if (wss->state != WLP_WSS_STATE_NONE) { | ||
311 | dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); | ||
312 | result = -EEXIST; | ||
313 | goto error; | ||
314 | } | ||
315 | if (!memcmp(&bcast, devaddr, sizeof(bcast))) { | ||
316 | d_printf(5, dev, "Request to enroll in discovered WSS " | ||
317 | "with WSSID %s \n", buf); | ||
318 | result = wlp_wss_enroll_discovered(wss, wssid); | ||
319 | } else { | ||
320 | d_printf(5, dev, "Request to enroll in WSSID %s with " | ||
321 | "registrar %02x:%02x\n", buf, devaddr->data[1], | ||
322 | devaddr->data[0]); | ||
323 | result = wlp_wss_enroll_target(wss, wssid, devaddr); | ||
324 | } | ||
325 | if (result < 0) { | ||
326 | dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", | ||
327 | buf, result); | ||
328 | goto error; | ||
329 | } | ||
330 | d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf); | ||
331 | result = wlp_wss_sysfs_add(wss, buf); | ||
332 | if (result < 0) { | ||
333 | dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); | ||
334 | wlp_wss_reset(wss); | ||
335 | } | ||
336 | error: | ||
337 | return result; | ||
338 | |||
339 | } | ||
340 | |||
/**
 * Activate given WSS
 *
 * Prior to activation a WSS must be enrolled. To activate a WSS a device
 * includes the WSS hash in the WLP IE in its beacon in each superframe.
 * WLP 0.99 [7.2.5].
 *
 * The WSS tag is also computed at this time. We only support one activated
 * WSS so we can use the hash as a tag - there will never be a conflict.
 *
 * We currently only support one activated WSS so only one WSS hash is
 * included in the WLP IE.
 */
static
int wlp_wss_activate(struct wlp_wss *wss)
{
	struct wlp *wlp = container_of(wss, struct wlp, wss);
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct uwb_rc *uwb_rc = wlp->rc;
	int result;
	/* On-the-wire layout of the WLP IE with a single trailing hash
	 * octet; do not reorder or pad these fields. */
	struct {
		struct wlp_ie wlp_ie;
		u8 hash; /* only include one hash */
	} ie_data;

	d_fnstart(5, dev, "Activating WSS %p. \n", wss);
	BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED);
	wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid);
	wss->tag = wss->hash;	/* single WSS: hash doubles as tag */
	memset(&ie_data, 0, sizeof(ie_data));
	ie_data.wlp_ie.hdr.element_id = UWB_IE_WLP;
	/* IE length excludes the generic IE header itself. */
	ie_data.wlp_ie.hdr.length = sizeof(ie_data) - sizeof(struct uwb_ie_hdr);
	wlp_ie_set_hash_length(&ie_data.wlp_ie, sizeof(ie_data.hash));
	ie_data.hash = wss->hash;
	result = uwb_rc_ie_add(uwb_rc, &ie_data.wlp_ie.hdr,
			       sizeof(ie_data));
	if (result < 0) {
		dev_err(dev, "WLP: Unable to add WLP IE to beacon. "
			"result = %d.\n", result);
		goto error_wlp_ie;
	}
	wss->state = WLP_WSS_STATE_ACTIVE;
	result = 0;
error_wlp_ie:
	d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result);
	return result;
}
388 | |||
389 | /** | ||
390 | * Enroll in and activate WSS identified by provided WSSID | ||
391 | * | ||
392 | * The neighborhood cache should contain a list of all neighbors and the | ||
393 | * WSS they have activated. Based on that cache we search which neighbor we | ||
394 | * can perform the association process with. The user also has option to | ||
395 | * specify which neighbor it prefers as registrar. | ||
396 | * Successful enrollment is followed by activation. | ||
397 | * Successful activation will create the sysfs directory containing | ||
398 | * specific information regarding this WSS. | ||
399 | */ | ||
400 | int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | ||
401 | struct uwb_dev_addr *devaddr) | ||
402 | { | ||
403 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
404 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
405 | int result = 0; | ||
406 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
407 | |||
408 | d_fnstart(5, dev, "Enrollment and activation requested. \n"); | ||
409 | mutex_lock(&wss->mutex); | ||
410 | result = wlp_wss_enroll(wss, wssid, devaddr); | ||
411 | if (result < 0) { | ||
412 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
413 | dev_err(dev, "WLP: Enrollment into WSS %s failed.\n", buf); | ||
414 | goto error_enroll; | ||
415 | } | ||
416 | result = wlp_wss_activate(wss); | ||
417 | if (result < 0) { | ||
418 | dev_err(dev, "WLP: Unable to activate WSS. Undoing enrollment " | ||
419 | "result = %d \n", result); | ||
420 | /* Undo enrollment */ | ||
421 | wlp_wss_reset(wss); | ||
422 | goto error_activate; | ||
423 | } | ||
424 | error_activate: | ||
425 | error_enroll: | ||
426 | mutex_unlock(&wss->mutex); | ||
427 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
428 | return result; | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * Create, enroll, and activate a new WSS | ||
433 | * | ||
434 | * @wssid: new wssid provided by user | ||
435 | * @name: WSS name requested by used. | ||
436 | * @sec_status: security status requested by user | ||
437 | * | ||
438 | * A user requested the creation of a new WSS. All operations are done | ||
439 | * locally. The new WSS will be stored locally, the hash will be included | ||
440 | * in the WLP IE, and the sysfs infrastructure for this WSS will be | ||
441 | * created. | ||
442 | */ | ||
443 | int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | ||
444 | char *name, unsigned sec_status, unsigned accept) | ||
445 | { | ||
446 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
447 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
448 | int result = 0; | ||
449 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
450 | d_fnstart(5, dev, "Request to create new WSS.\n"); | ||
451 | result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
452 | d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, " | ||
453 | "sec_status=%u, accepting enrollment=%u \n", | ||
454 | buf, name, sec_status, accept); | ||
455 | if (!mutex_trylock(&wss->mutex)) { | ||
456 | dev_err(dev, "WLP: WLP association session in progress.\n"); | ||
457 | return -EBUSY; | ||
458 | } | ||
459 | if (wss->state != WLP_WSS_STATE_NONE) { | ||
460 | dev_err(dev, "WLP: WSS already exists. Not creating new.\n"); | ||
461 | result = -EEXIST; | ||
462 | goto out; | ||
463 | } | ||
464 | if (wss->kobj.parent == NULL) { | ||
465 | dev_err(dev, "WLP: WSS parent not ready. Is network interface " | ||
466 | "up?\n"); | ||
467 | result = -ENXIO; | ||
468 | goto out; | ||
469 | } | ||
470 | if (sec_status == WLP_WSS_SECURE) { | ||
471 | dev_err(dev, "WLP: FIXME Creation of secure WSS not " | ||
472 | "supported yet.\n"); | ||
473 | result = -EINVAL; | ||
474 | goto out; | ||
475 | } | ||
476 | wss->wssid = *wssid; | ||
477 | memcpy(wss->name, name, sizeof(wss->name)); | ||
478 | wss->bcast = wlp_wss_sel_bcast_addr(wss); | ||
479 | wss->secure_status = sec_status; | ||
480 | wss->accept_enroll = accept; | ||
481 | /*wss->virtual_addr is initialized in call to wlp_wss_setup*/ | ||
482 | /* sysfs infrastructure */ | ||
483 | result = wlp_wss_sysfs_add(wss, buf); | ||
484 | if (result < 0) { | ||
485 | dev_err(dev, "Cannot set up sysfs for WSS kobject.\n"); | ||
486 | wlp_wss_reset(wss); | ||
487 | goto out; | ||
488 | } else | ||
489 | result = 0; | ||
490 | wss->state = WLP_WSS_STATE_ENROLLED; | ||
491 | result = wlp_wss_activate(wss); | ||
492 | if (result < 0) { | ||
493 | dev_err(dev, "WLP: Unable to activate WSS. Undoing " | ||
494 | "enrollment\n"); | ||
495 | wlp_wss_reset(wss); | ||
496 | goto out; | ||
497 | } | ||
498 | result = 0; | ||
499 | out: | ||
500 | mutex_unlock(&wss->mutex); | ||
501 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
502 | return result; | ||
503 | } | ||
504 | |||
505 | /** | ||
506 | * Determine if neighbor has WSS activated | ||
507 | * | ||
508 | * @returns: 1 if neighbor has WSS activated, zero otherwise | ||
509 | * | ||
510 | * This can be done in two ways: | ||
511 | * - send a C1 frame, parse C2/F0 response | ||
512 | * - examine the WLP IE sent by the neighbor | ||
513 | * | ||
514 | * The WLP IE is not fully supported in hardware so we use the C1/C2 frame | ||
515 | * exchange to determine if a WSS is activated. Using the WLP IE should be | ||
516 | * faster and should be used when it becomes possible. | ||
517 | */ | ||
518 | int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss, | ||
519 | struct uwb_dev_addr *dev_addr) | ||
520 | { | ||
521 | int result = 0; | ||
522 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
523 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
524 | DECLARE_COMPLETION_ONSTACK(completion); | ||
525 | struct wlp_session session; | ||
526 | struct sk_buff *skb; | ||
527 | struct wlp_frame_assoc *resp; | ||
528 | struct wlp_uuid wssid; | ||
529 | |||
530 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
531 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
532 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
533 | mutex_lock(&wlp->mutex); | ||
534 | /* Send C1 association frame */ | ||
535 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); | ||
536 | if (result < 0) { | ||
537 | dev_err(dev, "Unable to send C1 frame to neighbor " | ||
538 | "%02x:%02x (%d)\n", dev_addr->data[1], | ||
539 | dev_addr->data[0], result); | ||
540 | result = 0; | ||
541 | goto out; | ||
542 | } | ||
543 | /* Create session, wait for response */ | ||
544 | session.exp_message = WLP_ASSOC_C2; | ||
545 | session.cb = wlp_session_cb; | ||
546 | session.cb_priv = &completion; | ||
547 | session.neighbor_addr = *dev_addr; | ||
548 | BUG_ON(wlp->session != NULL); | ||
549 | wlp->session = &session; | ||
550 | /* Wait for C2/F0 frame */ | ||
551 | result = wait_for_completion_interruptible_timeout(&completion, | ||
552 | WLP_PER_MSG_TIMEOUT * HZ); | ||
553 | if (result == 0) { | ||
554 | dev_err(dev, "Timeout while sending C1 to neighbor " | ||
555 | "%02x:%02x.\n", dev_addr->data[1], | ||
556 | dev_addr->data[0]); | ||
557 | goto out; | ||
558 | } | ||
559 | if (result < 0) { | ||
560 | dev_err(dev, "Unable to send C1 to neighbor %02x:%02x.\n", | ||
561 | dev_addr->data[1], dev_addr->data[0]); | ||
562 | result = 0; | ||
563 | goto out; | ||
564 | } | ||
565 | /* Parse message in session->data: it will be either C2 or F0 */ | ||
566 | skb = session.data; | ||
567 | resp = (void *) skb->data; | ||
568 | d_printf(5, dev, "Received response to C1 frame. \n"); | ||
569 | d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
570 | if (resp->type == WLP_ASSOC_F0) { | ||
571 | result = wlp_parse_f0(wlp, skb); | ||
572 | if (result < 0) | ||
573 | dev_err(dev, "WLP: unable to parse incoming F0 " | ||
574 | "frame from neighbor %02x:%02x.\n", | ||
575 | dev_addr->data[1], dev_addr->data[0]); | ||
576 | result = 0; | ||
577 | goto error_resp_parse; | ||
578 | } | ||
579 | /* WLP version and message type fields have already been parsed */ | ||
580 | result = wlp_get_wssid(wlp, (void *)resp + sizeof(*resp), &wssid, | ||
581 | skb->len - sizeof(*resp)); | ||
582 | if (result < 0) { | ||
583 | dev_err(dev, "WLP: unable to obtain WSSID from C2 frame.\n"); | ||
584 | result = 0; | ||
585 | goto error_resp_parse; | ||
586 | } | ||
587 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) { | ||
588 | d_printf(5, dev, "WSSID in C2 frame matches local " | ||
589 | "active WSS.\n"); | ||
590 | result = 1; | ||
591 | } else { | ||
592 | dev_err(dev, "WLP: Received a C2 frame without matching " | ||
593 | "WSSID.\n"); | ||
594 | result = 0; | ||
595 | } | ||
596 | error_resp_parse: | ||
597 | kfree_skb(skb); | ||
598 | out: | ||
599 | wlp->session = NULL; | ||
600 | mutex_unlock(&wlp->mutex); | ||
601 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
602 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
603 | return result; | ||
604 | } | ||
605 | |||
606 | /** | ||
607 | * Activate connection with neighbor by updating EDA cache | ||
608 | * | ||
609 | * @wss: local WSS to which neighbor wants to connect | ||
610 | * @dev_addr: neighbor's address | ||
611 | * @wssid: neighbor's WSSID - must be same as our WSS's WSSID | ||
612 | * @tag: neighbor's WSS tag used to identify frames transmitted by it | ||
613 | * @virt_addr: neighbor's virtual EUI-48 | ||
614 | */ | ||
615 | static | ||
616 | int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss, | ||
617 | struct uwb_dev_addr *dev_addr, | ||
618 | struct wlp_uuid *wssid, u8 *tag, | ||
619 | struct uwb_mac_addr *virt_addr) | ||
620 | { | ||
621 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
622 | int result = 0; | ||
623 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
624 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
625 | d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " | ||
626 | "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag, | ||
627 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
628 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); | ||
629 | |||
630 | if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { | ||
631 | d_printf(5, dev, "WSSID from neighbor frame matches local " | ||
632 | "active WSS.\n"); | ||
633 | /* Update EDA cache */ | ||
634 | result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, | ||
635 | (void *) virt_addr->data, *tag, | ||
636 | WLP_WSS_CONNECTED); | ||
637 | if (result < 0) | ||
638 | dev_err(dev, "WLP: Unable to update EDA cache " | ||
639 | "with new connected neighbor information.\n"); | ||
640 | } else { | ||
641 | dev_err(dev, "WLP: Neighbor does not have matching " | ||
642 | "WSSID.\n"); | ||
643 | result = -EINVAL; | ||
644 | } | ||
645 | |||
646 | d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " | ||
647 | "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n", | ||
648 | wlp, wss, buf, *tag, | ||
649 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
650 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5], | ||
651 | result); | ||
652 | |||
653 | return result; | ||
654 | } | ||
655 | |||
656 | /** | ||
657 | * Connect to WSS neighbor | ||
658 | * | ||
659 | * Use C3/C4 exchange to determine if neighbor has WSS activated and | ||
660 | * retrieve the WSS tag and virtual EUI-48 of the neighbor. | ||
661 | */ | ||
662 | static | ||
663 | int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss, | ||
664 | struct uwb_dev_addr *dev_addr) | ||
665 | { | ||
666 | int result; | ||
667 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
668 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
669 | struct wlp_uuid wssid; | ||
670 | u8 tag; | ||
671 | struct uwb_mac_addr virt_addr; | ||
672 | DECLARE_COMPLETION_ONSTACK(completion); | ||
673 | struct wlp_session session; | ||
674 | struct wlp_frame_assoc *resp; | ||
675 | struct sk_buff *skb; | ||
676 | |||
677 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
678 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
679 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
680 | mutex_lock(&wlp->mutex); | ||
681 | /* Send C3 association frame */ | ||
682 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); | ||
683 | if (result < 0) { | ||
684 | dev_err(dev, "Unable to send C3 frame to neighbor " | ||
685 | "%02x:%02x (%d)\n", dev_addr->data[1], | ||
686 | dev_addr->data[0], result); | ||
687 | goto out; | ||
688 | } | ||
689 | /* Create session, wait for response */ | ||
690 | session.exp_message = WLP_ASSOC_C4; | ||
691 | session.cb = wlp_session_cb; | ||
692 | session.cb_priv = &completion; | ||
693 | session.neighbor_addr = *dev_addr; | ||
694 | BUG_ON(wlp->session != NULL); | ||
695 | wlp->session = &session; | ||
696 | /* Wait for C4/F0 frame */ | ||
697 | result = wait_for_completion_interruptible_timeout(&completion, | ||
698 | WLP_PER_MSG_TIMEOUT * HZ); | ||
699 | if (result == 0) { | ||
700 | dev_err(dev, "Timeout while sending C3 to neighbor " | ||
701 | "%02x:%02x.\n", dev_addr->data[1], | ||
702 | dev_addr->data[0]); | ||
703 | result = -ETIMEDOUT; | ||
704 | goto out; | ||
705 | } | ||
706 | if (result < 0) { | ||
707 | dev_err(dev, "Unable to send C3 to neighbor %02x:%02x.\n", | ||
708 | dev_addr->data[1], dev_addr->data[0]); | ||
709 | goto out; | ||
710 | } | ||
711 | /* Parse message in session->data: it will be either C4 or F0 */ | ||
712 | skb = session.data; | ||
713 | resp = (void *) skb->data; | ||
714 | d_printf(5, dev, "Received response to C3 frame. \n"); | ||
715 | d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
716 | if (resp->type == WLP_ASSOC_F0) { | ||
717 | result = wlp_parse_f0(wlp, skb); | ||
718 | if (result < 0) | ||
719 | dev_err(dev, "WLP: unable to parse incoming F0 " | ||
720 | "frame from neighbor %02x:%02x.\n", | ||
721 | dev_addr->data[1], dev_addr->data[0]); | ||
722 | result = -EINVAL; | ||
723 | goto error_resp_parse; | ||
724 | } | ||
725 | result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); | ||
726 | if (result < 0) { | ||
727 | dev_err(dev, "WLP: Unable to parse C4 frame from neighbor.\n"); | ||
728 | goto error_resp_parse; | ||
729 | } | ||
730 | result = wlp_wss_activate_connection(wlp, wss, dev_addr, &wssid, &tag, | ||
731 | &virt_addr); | ||
732 | if (result < 0) { | ||
733 | dev_err(dev, "WLP: Unable to activate connection to " | ||
734 | "neighbor %02x:%02x.\n", dev_addr->data[1], | ||
735 | dev_addr->data[0]); | ||
736 | goto error_resp_parse; | ||
737 | } | ||
738 | error_resp_parse: | ||
739 | kfree_skb(skb); | ||
740 | out: | ||
741 | /* Record that we unsuccessfully tried to connect to this neighbor */ | ||
742 | if (result < 0) | ||
743 | wlp_eda_update_node_state(&wlp->eda, dev_addr, | ||
744 | WLP_WSS_CONNECT_FAILED); | ||
745 | wlp->session = NULL; | ||
746 | mutex_unlock(&wlp->mutex); | ||
747 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
748 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
749 | return result; | ||
750 | } | ||
751 | |||
752 | /** | ||
753 | * Connect to neighbor with common WSS, send pending frame | ||
754 | * | ||
755 | * This function is scheduled when a frame is destined to a neighbor with | ||
756 | * which we do not have a connection. A copy of the EDA cache entry is | ||
757 | * provided - not the actual cache entry (because it is protected by a | ||
758 | * spinlock). | ||
759 | * | ||
760 | * First determine if neighbor has the same WSS activated, connect if it | ||
761 | * does. The C3/C4 exchange is dual purpose to determine if neighbor has | ||
762 | * WSS activated and proceed with the connection. | ||
763 | * | ||
764 | * The frame that triggered the connection setup is sent after connection | ||
765 | * setup. | ||
766 | * | ||
767 | * network queue is stopped - we need to restart when done | ||
768 | * | ||
769 | */ | ||
770 | static | ||
771 | void wlp_wss_connect_send(struct work_struct *ws) | ||
772 | { | ||
773 | struct wlp_assoc_conn_ctx *conn_ctx = container_of(ws, | ||
774 | struct wlp_assoc_conn_ctx, | ||
775 | ws); | ||
776 | struct wlp *wlp = conn_ctx->wlp; | ||
777 | struct sk_buff *skb = conn_ctx->skb; | ||
778 | struct wlp_eda_node *eda_entry = &conn_ctx->eda_entry; | ||
779 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; | ||
780 | struct wlp_wss *wss = &wlp->wss; | ||
781 | int result; | ||
782 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
783 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
784 | |||
785 | mutex_lock(&wss->mutex); | ||
786 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
787 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
788 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
789 | if (wss->state < WLP_WSS_STATE_ACTIVE) { | ||
790 | if (printk_ratelimit()) | ||
791 | dev_err(dev, "WLP: Attempting to connect with " | ||
792 | "WSS that is not active or connected.\n"); | ||
793 | dev_kfree_skb(skb); | ||
794 | goto out; | ||
795 | } | ||
796 | /* Establish connection - send C3 rcv C4 */ | ||
797 | result = wlp_wss_connect_neighbor(wlp, wss, dev_addr); | ||
798 | if (result < 0) { | ||
799 | if (printk_ratelimit()) | ||
800 | dev_err(dev, "WLP: Unable to establish connection " | ||
801 | "with neighbor %02x:%02x.\n", | ||
802 | dev_addr->data[1], dev_addr->data[0]); | ||
803 | dev_kfree_skb(skb); | ||
804 | goto out; | ||
805 | } | ||
806 | /* EDA entry changed, update the local copy being used */ | ||
807 | result = wlp_copy_eda_node(&wlp->eda, dev_addr, eda_entry); | ||
808 | if (result < 0) { | ||
809 | if (printk_ratelimit()) | ||
810 | dev_err(dev, "WLP: Cannot find EDA entry for " | ||
811 | "neighbor %02x:%02x \n", | ||
812 | dev_addr->data[1], dev_addr->data[0]); | ||
813 | } | ||
814 | result = wlp_wss_prep_hdr(wlp, eda_entry, skb); | ||
815 | if (result < 0) { | ||
816 | if (printk_ratelimit()) | ||
817 | dev_err(dev, "WLP: Unable to prepare frame header for " | ||
818 | "transmission (neighbor %02x:%02x). \n", | ||
819 | dev_addr->data[1], dev_addr->data[0]); | ||
820 | dev_kfree_skb(skb); | ||
821 | goto out; | ||
822 | } | ||
823 | BUG_ON(wlp->xmit_frame == NULL); | ||
824 | result = wlp->xmit_frame(wlp, skb, dev_addr); | ||
825 | if (result < 0) { | ||
826 | if (printk_ratelimit()) | ||
827 | dev_err(dev, "WLP: Unable to transmit frame: %d\n", | ||
828 | result); | ||
829 | if (result == -ENXIO) | ||
830 | dev_err(dev, "WLP: Is network interface up? \n"); | ||
831 | /* We could try again ... */ | ||
832 | dev_kfree_skb(skb);/*we need to free if tx fails */ | ||
833 | } | ||
834 | out: | ||
835 | kfree(conn_ctx); | ||
836 | BUG_ON(wlp->start_queue == NULL); | ||
837 | wlp->start_queue(wlp); | ||
838 | mutex_unlock(&wss->mutex); | ||
839 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf); | ||
840 | } | ||
841 | |||
842 | /** | ||
843 | * Add WLP header to outgoing skb | ||
844 | * | ||
845 | * @eda_entry: pointer to neighbor's entry in the EDA cache | ||
846 | * @_skb: skb containing data destined to the neighbor | ||
847 | */ | ||
848 | int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry, | ||
849 | void *_skb) | ||
850 | { | ||
851 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
852 | int result = 0; | ||
853 | unsigned char *eth_addr = eda_entry->eth_addr; | ||
854 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; | ||
855 | struct sk_buff *skb = _skb; | ||
856 | struct wlp_frame_std_abbrv_hdr *std_hdr; | ||
857 | |||
858 | d_fnstart(6, dev, "wlp %p \n", wlp); | ||
859 | if (eda_entry->state == WLP_WSS_CONNECTED) { | ||
860 | /* Add WLP header */ | ||
861 | BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); | ||
862 | std_hdr = (void *) __skb_push(skb, sizeof(*std_hdr)); | ||
863 | std_hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | ||
864 | std_hdr->hdr.type = WLP_FRAME_STANDARD; | ||
865 | std_hdr->tag = eda_entry->wss->tag; | ||
866 | } else { | ||
867 | if (printk_ratelimit()) | ||
868 | dev_err(dev, "WLP: Destination neighbor (Ethernet: " | ||
869 | "%02x:%02x:%02x:%02x:%02x:%02x, Dev: " | ||
870 | "%02x:%02x) is not connected. \n", eth_addr[0], | ||
871 | eth_addr[1], eth_addr[2], eth_addr[3], | ||
872 | eth_addr[4], eth_addr[5], dev_addr->data[1], | ||
873 | dev_addr->data[0]); | ||
874 | result = -EINVAL; | ||
875 | } | ||
876 | d_fnend(6, dev, "wlp %p \n", wlp); | ||
877 | return result; | ||
878 | } | ||
879 | |||
880 | |||
881 | /** | ||
882 | * Prepare skb for neighbor: connect if not already and prep WLP header | ||
883 | * | ||
884 | * This function is called in interrupt context, but it needs to sleep. We | ||
885 | * temporarily stop the net queue to establish the WLP connection. | ||
886 | * Setup of the WLP connection and restart of queue is scheduled | ||
887 | * on the default work queue. | ||
888 | * | ||
889 | * run with eda->lock held (spinlock) | ||
890 | */ | ||
891 | int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry, | ||
892 | void *_skb) | ||
893 | { | ||
894 | int result = 0; | ||
895 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
896 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; | ||
897 | unsigned char *eth_addr = eda_entry->eth_addr; | ||
898 | struct sk_buff *skb = _skb; | ||
899 | struct wlp_assoc_conn_ctx *conn_ctx; | ||
900 | |||
901 | d_fnstart(5, dev, "wlp %p\n", wlp); | ||
902 | d_printf(5, dev, "To neighbor %02x:%02x with eth " | ||
903 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1], | ||
904 | dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2], | ||
905 | eth_addr[3], eth_addr[4], eth_addr[5]); | ||
906 | if (eda_entry->state == WLP_WSS_UNCONNECTED) { | ||
907 | /* We don't want any more packets while we set up connection */ | ||
908 | BUG_ON(wlp->stop_queue == NULL); | ||
909 | wlp->stop_queue(wlp); | ||
910 | conn_ctx = kmalloc(sizeof(*conn_ctx), GFP_ATOMIC); | ||
911 | if (conn_ctx == NULL) { | ||
912 | if (printk_ratelimit()) | ||
913 | dev_err(dev, "WLP: Unable to allocate memory " | ||
914 | "for connection handling.\n"); | ||
915 | result = -ENOMEM; | ||
916 | goto out; | ||
917 | } | ||
918 | conn_ctx->wlp = wlp; | ||
919 | conn_ctx->skb = skb; | ||
920 | conn_ctx->eda_entry = *eda_entry; | ||
921 | INIT_WORK(&conn_ctx->ws, wlp_wss_connect_send); | ||
922 | schedule_work(&conn_ctx->ws); | ||
923 | result = 1; | ||
924 | } else if (eda_entry->state == WLP_WSS_CONNECT_FAILED) { | ||
925 | /* Previous connection attempts failed, don't retry - see | ||
926 | * conditions for connection in WLP 0.99 [7.6.2] */ | ||
927 | if (printk_ratelimit()) | ||
928 | dev_err(dev, "Could not connect to neighbor " | ||
929 | "previously. Not retrying. \n"); | ||
930 | result = -ENONET; | ||
931 | goto out; | ||
932 | } else { /* eda_entry->state == WLP_WSS_CONNECTED */ | ||
933 | d_printf(5, dev, "Neighbor is connected, preparing frame.\n"); | ||
934 | result = wlp_wss_prep_hdr(wlp, eda_entry, skb); | ||
935 | } | ||
936 | out: | ||
937 | d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result); | ||
938 | return result; | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * Emulate broadcast: copy skb, send copy to neighbor (connect if not already) | ||
943 | * | ||
944 | * We need to copy skbs in the case where we emulate broadcast through | ||
945 | * unicast. We copy instead of clone because we are modifying the data of | ||
946 | * the frame after copying ... clones share data so we cannot emulate | ||
947 | * broadcast using clones. | ||
948 | * | ||
949 | * run with eda->lock held (spinlock) | ||
950 | */ | ||
951 | int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry, | ||
952 | void *_skb) | ||
953 | { | ||
954 | int result = -ENOMEM; | ||
955 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
956 | struct sk_buff *skb = _skb; | ||
957 | struct sk_buff *copy; | ||
958 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; | ||
959 | |||
960 | d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n", | ||
961 | dev_addr->data[1], dev_addr->data[0], skb); | ||
962 | copy = skb_copy(skb, GFP_ATOMIC); | ||
963 | if (copy == NULL) { | ||
964 | if (printk_ratelimit()) | ||
965 | dev_err(dev, "WLP: Unable to copy skb for " | ||
966 | "transmission.\n"); | ||
967 | goto out; | ||
968 | } | ||
969 | result = wlp_wss_connect_prep(wlp, eda_entry, copy); | ||
970 | if (result < 0) { | ||
971 | if (printk_ratelimit()) | ||
972 | dev_err(dev, "WLP: Unable to connect/send skb " | ||
973 | "to neighbor.\n"); | ||
974 | dev_kfree_skb_irq(copy); | ||
975 | goto out; | ||
976 | } else if (result == 1) | ||
977 | /* Frame will be transmitted separately */ | ||
978 | goto out; | ||
979 | BUG_ON(wlp->xmit_frame == NULL); | ||
980 | result = wlp->xmit_frame(wlp, copy, dev_addr); | ||
981 | if (result < 0) { | ||
982 | if (printk_ratelimit()) | ||
983 | dev_err(dev, "WLP: Unable to transmit frame: %d\n", | ||
984 | result); | ||
985 | if ((result == -ENXIO) && printk_ratelimit()) | ||
986 | dev_err(dev, "WLP: Is network interface up? \n"); | ||
987 | /* We could try again ... */ | ||
988 | dev_kfree_skb_irq(copy);/*we need to free if tx fails */ | ||
989 | } | ||
990 | out: | ||
991 | d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1], | ||
992 | dev_addr->data[0]); | ||
993 | return result; | ||
994 | } | ||
995 | |||
996 | |||
997 | /** | ||
998 | * Setup WSS | ||
999 | * | ||
1000 | * Should be called by network driver after the interface has been given a | ||
1001 | * MAC address. | ||
1002 | */ | ||
1003 | int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss) | ||
1004 | { | ||
1005 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
1006 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1007 | int result = 0; | ||
1008 | d_fnstart(5, dev, "wss (%p) \n", wss); | ||
1009 | mutex_lock(&wss->mutex); | ||
1010 | wss->kobj.parent = &net_dev->dev.kobj; | ||
1011 | if (!is_valid_ether_addr(net_dev->dev_addr)) { | ||
1012 | dev_err(dev, "WLP: Invalid MAC address. Cannot use for" | ||
1013 | "virtual.\n"); | ||
1014 | result = -EINVAL; | ||
1015 | goto out; | ||
1016 | } | ||
1017 | memcpy(wss->virtual_addr.data, net_dev->dev_addr, | ||
1018 | sizeof(wss->virtual_addr.data)); | ||
1019 | out: | ||
1020 | mutex_unlock(&wss->mutex); | ||
1021 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
1022 | return result; | ||
1023 | } | ||
1024 | EXPORT_SYMBOL_GPL(wlp_wss_setup); | ||
1025 | |||
1026 | /** | ||
1027 | * Remove WSS | ||
1028 | * | ||
1029 | * Called by client that configured WSS through wlp_wss_setup(). This | ||
1030 | * function is called when client no longer needs WSS, eg. client shuts | ||
1031 | * down. | ||
1032 | * | ||
1033 | * We remove the WLP IE from the beacon before initiating local cleanup. | ||
1034 | */ | ||
1035 | void wlp_wss_remove(struct wlp_wss *wss) | ||
1036 | { | ||
1037 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
1038 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
1039 | d_fnstart(5, dev, "wss (%p) \n", wss); | ||
1040 | mutex_lock(&wss->mutex); | ||
1041 | if (wss->state == WLP_WSS_STATE_ACTIVE) | ||
1042 | uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); | ||
1043 | if (wss->state != WLP_WSS_STATE_NONE) { | ||
1044 | sysfs_remove_group(&wss->kobj, &wss_attr_group); | ||
1045 | kobject_put(&wss->kobj); | ||
1046 | } | ||
1047 | wss->kobj.parent = NULL; | ||
1048 | memset(&wss->virtual_addr, 0, sizeof(wss->virtual_addr)); | ||
1049 | /* Cleanup EDA cache */ | ||
1050 | wlp_eda_release(&wlp->eda); | ||
1051 | wlp_eda_init(&wlp->eda); | ||
1052 | mutex_unlock(&wss->mutex); | ||
1053 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
1054 | } | ||
1055 | EXPORT_SYMBOL_GPL(wlp_wss_remove); | ||
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c index c73b5e2919c6..ada8ad82d993 100644 --- a/drivers/watchdog/w83697ug_wdt.c +++ b/drivers/watchdog/w83697ug_wdt.c | |||
@@ -102,7 +102,7 @@ static void w83697ug_select_wd_register(void) | |||
102 | 102 | ||
103 | } else { | 103 | } else { |
104 | printk(KERN_ERR PFX "No W83697UG/UF could be found\n"); | 104 | printk(KERN_ERR PFX "No W83697UG/UF could be found\n"); |
105 | return -EIO; | 105 | return; |
106 | } | 106 | } |
107 | 107 | ||
108 | outb_p(0x07, WDT_EFER); /* point to logical device number reg */ | 108 | outb_p(0x07, WDT_EFER); /* point to logical device number reg */ |
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index 0d0c70151642..b7394d05ee8e 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c | |||
@@ -239,7 +239,7 @@ setrsvsz_out: | |||
239 | case EXT3_IOC_GROUP_EXTEND: { | 239 | case EXT3_IOC_GROUP_EXTEND: { |
240 | ext3_fsblk_t n_blocks_count; | 240 | ext3_fsblk_t n_blocks_count; |
241 | struct super_block *sb = inode->i_sb; | 241 | struct super_block *sb = inode->i_sb; |
242 | int err; | 242 | int err, err2; |
243 | 243 | ||
244 | if (!capable(CAP_SYS_RESOURCE)) | 244 | if (!capable(CAP_SYS_RESOURCE)) |
245 | return -EPERM; | 245 | return -EPERM; |
@@ -254,8 +254,10 @@ setrsvsz_out: | |||
254 | } | 254 | } |
255 | err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count); | 255 | err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count); |
256 | journal_lock_updates(EXT3_SB(sb)->s_journal); | 256 | journal_lock_updates(EXT3_SB(sb)->s_journal); |
257 | journal_flush(EXT3_SB(sb)->s_journal); | 257 | err2 = journal_flush(EXT3_SB(sb)->s_journal); |
258 | journal_unlock_updates(EXT3_SB(sb)->s_journal); | 258 | journal_unlock_updates(EXT3_SB(sb)->s_journal); |
259 | if (err == 0) | ||
260 | err = err2; | ||
259 | group_extend_out: | 261 | group_extend_out: |
260 | mnt_drop_write(filp->f_path.mnt); | 262 | mnt_drop_write(filp->f_path.mnt); |
261 | return err; | 263 | return err; |
@@ -263,7 +265,7 @@ group_extend_out: | |||
263 | case EXT3_IOC_GROUP_ADD: { | 265 | case EXT3_IOC_GROUP_ADD: { |
264 | struct ext3_new_group_data input; | 266 | struct ext3_new_group_data input; |
265 | struct super_block *sb = inode->i_sb; | 267 | struct super_block *sb = inode->i_sb; |
266 | int err; | 268 | int err, err2; |
267 | 269 | ||
268 | if (!capable(CAP_SYS_RESOURCE)) | 270 | if (!capable(CAP_SYS_RESOURCE)) |
269 | return -EPERM; | 271 | return -EPERM; |
@@ -280,8 +282,10 @@ group_extend_out: | |||
280 | 282 | ||
281 | err = ext3_group_add(sb, &input); | 283 | err = ext3_group_add(sb, &input); |
282 | journal_lock_updates(EXT3_SB(sb)->s_journal); | 284 | journal_lock_updates(EXT3_SB(sb)->s_journal); |
283 | journal_flush(EXT3_SB(sb)->s_journal); | 285 | err2 = journal_flush(EXT3_SB(sb)->s_journal); |
284 | journal_unlock_updates(EXT3_SB(sb)->s_journal); | 286 | journal_unlock_updates(EXT3_SB(sb)->s_journal); |
287 | if (err == 0) | ||
288 | err = err2; | ||
285 | group_add_out: | 289 | group_add_out: |
286 | mnt_drop_write(filp->f_path.mnt); | 290 | mnt_drop_write(filp->f_path.mnt); |
287 | return err; | 291 | return err; |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 3a260af5544d..cac29ee3b14a 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -393,7 +393,8 @@ static void ext3_put_super (struct super_block * sb) | |||
393 | int i; | 393 | int i; |
394 | 394 | ||
395 | ext3_xattr_put_super(sb); | 395 | ext3_xattr_put_super(sb); |
396 | journal_destroy(sbi->s_journal); | 396 | if (journal_destroy(sbi->s_journal) < 0) |
397 | ext3_abort(sb, __func__, "Couldn't clean up the journal"); | ||
397 | if (!(sb->s_flags & MS_RDONLY)) { | 398 | if (!(sb->s_flags & MS_RDONLY)) { |
398 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); | 399 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); |
399 | es->s_state = cpu_to_le16(sbi->s_mount_state); | 400 | es->s_state = cpu_to_le16(sbi->s_mount_state); |
@@ -2296,7 +2297,9 @@ static void ext3_mark_recovery_complete(struct super_block * sb, | |||
2296 | journal_t *journal = EXT3_SB(sb)->s_journal; | 2297 | journal_t *journal = EXT3_SB(sb)->s_journal; |
2297 | 2298 | ||
2298 | journal_lock_updates(journal); | 2299 | journal_lock_updates(journal); |
2299 | journal_flush(journal); | 2300 | if (journal_flush(journal) < 0) |
2301 | goto out; | ||
2302 | |||
2300 | lock_super(sb); | 2303 | lock_super(sb); |
2301 | if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && | 2304 | if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && |
2302 | sb->s_flags & MS_RDONLY) { | 2305 | sb->s_flags & MS_RDONLY) { |
@@ -2305,6 +2308,8 @@ static void ext3_mark_recovery_complete(struct super_block * sb, | |||
2305 | ext3_commit_super(sb, es, 1); | 2308 | ext3_commit_super(sb, es, 1); |
2306 | } | 2309 | } |
2307 | unlock_super(sb); | 2310 | unlock_super(sb); |
2311 | |||
2312 | out: | ||
2308 | journal_unlock_updates(journal); | 2313 | journal_unlock_updates(journal); |
2309 | } | 2314 | } |
2310 | 2315 | ||
@@ -2404,7 +2409,13 @@ static void ext3_write_super_lockfs(struct super_block *sb) | |||
2404 | 2409 | ||
2405 | /* Now we set up the journal barrier. */ | 2410 | /* Now we set up the journal barrier. */ |
2406 | journal_lock_updates(journal); | 2411 | journal_lock_updates(journal); |
2407 | journal_flush(journal); | 2412 | |
2413 | /* | ||
2414 | * We don't want to clear needs_recovery flag when we failed | ||
2415 | * to flush the journal. | ||
2416 | */ | ||
2417 | if (journal_flush(journal) < 0) | ||
2418 | return; | ||
2408 | 2419 | ||
2409 | /* Journal blocked and flushed, clear needs_recovery flag. */ | 2420 | /* Journal blocked and flushed, clear needs_recovery flag. */ |
2410 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); | 2421 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); |
@@ -2822,8 +2833,12 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id, | |||
2822 | * otherwise be livelocked... | 2833 | * otherwise be livelocked... |
2823 | */ | 2834 | */ |
2824 | journal_lock_updates(EXT3_SB(sb)->s_journal); | 2835 | journal_lock_updates(EXT3_SB(sb)->s_journal); |
2825 | journal_flush(EXT3_SB(sb)->s_journal); | 2836 | err = journal_flush(EXT3_SB(sb)->s_journal); |
2826 | journal_unlock_updates(EXT3_SB(sb)->s_journal); | 2837 | journal_unlock_updates(EXT3_SB(sb)->s_journal); |
2838 | if (err) { | ||
2839 | path_put(&nd.path); | ||
2840 | return err; | ||
2841 | } | ||
2827 | } | 2842 | } |
2828 | 2843 | ||
2829 | err = vfs_quota_on_path(sb, type, format_id, &nd.path); | 2844 | err = vfs_quota_on_path(sb, type, format_id, &nd.path); |
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index a5432bbbfb88..1bd8d4acc6f2 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c | |||
@@ -93,7 +93,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh) | |||
93 | int ret = 0; | 93 | int ret = 0; |
94 | struct buffer_head *bh = jh2bh(jh); | 94 | struct buffer_head *bh = jh2bh(jh); |
95 | 95 | ||
96 | if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) { | 96 | if (jh->b_jlist == BJ_None && !buffer_locked(bh) && |
97 | !buffer_dirty(bh) && !buffer_write_io_error(bh)) { | ||
97 | JBUFFER_TRACE(jh, "remove from checkpoint list"); | 98 | JBUFFER_TRACE(jh, "remove from checkpoint list"); |
98 | ret = __journal_remove_checkpoint(jh) + 1; | 99 | ret = __journal_remove_checkpoint(jh) + 1; |
99 | jbd_unlock_bh_state(bh); | 100 | jbd_unlock_bh_state(bh); |
@@ -126,14 +127,29 @@ void __log_wait_for_space(journal_t *journal) | |||
126 | 127 | ||
127 | /* | 128 | /* |
128 | * Test again, another process may have checkpointed while we | 129 | * Test again, another process may have checkpointed while we |
129 | * were waiting for the checkpoint lock | 130 | * were waiting for the checkpoint lock. If there are no |
131 | * outstanding transactions there is nothing to checkpoint and | ||
132 | * we can't make progress. Abort the journal in this case. | ||
130 | */ | 133 | */ |
131 | spin_lock(&journal->j_state_lock); | 134 | spin_lock(&journal->j_state_lock); |
135 | spin_lock(&journal->j_list_lock); | ||
132 | nblocks = jbd_space_needed(journal); | 136 | nblocks = jbd_space_needed(journal); |
133 | if (__log_space_left(journal) < nblocks) { | 137 | if (__log_space_left(journal) < nblocks) { |
138 | int chkpt = journal->j_checkpoint_transactions != NULL; | ||
139 | |||
140 | spin_unlock(&journal->j_list_lock); | ||
134 | spin_unlock(&journal->j_state_lock); | 141 | spin_unlock(&journal->j_state_lock); |
135 | log_do_checkpoint(journal); | 142 | if (chkpt) { |
143 | log_do_checkpoint(journal); | ||
144 | } else { | ||
145 | printk(KERN_ERR "%s: no transactions\n", | ||
146 | __func__); | ||
147 | journal_abort(journal, 0); | ||
148 | } | ||
149 | |||
136 | spin_lock(&journal->j_state_lock); | 150 | spin_lock(&journal->j_state_lock); |
151 | } else { | ||
152 | spin_unlock(&journal->j_list_lock); | ||
137 | } | 153 | } |
138 | mutex_unlock(&journal->j_checkpoint_mutex); | 154 | mutex_unlock(&journal->j_checkpoint_mutex); |
139 | } | 155 | } |
@@ -160,21 +176,25 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh) | |||
160 | * buffers. Note that we take the buffers in the opposite ordering | 176 | * buffers. Note that we take the buffers in the opposite ordering |
161 | * from the one in which they were submitted for IO. | 177 | * from the one in which they were submitted for IO. |
162 | * | 178 | * |
179 | * Return 0 on success, and return <0 if some buffers have failed | ||
180 | * to be written out. | ||
181 | * | ||
163 | * Called with j_list_lock held. | 182 | * Called with j_list_lock held. |
164 | */ | 183 | */ |
165 | static void __wait_cp_io(journal_t *journal, transaction_t *transaction) | 184 | static int __wait_cp_io(journal_t *journal, transaction_t *transaction) |
166 | { | 185 | { |
167 | struct journal_head *jh; | 186 | struct journal_head *jh; |
168 | struct buffer_head *bh; | 187 | struct buffer_head *bh; |
169 | tid_t this_tid; | 188 | tid_t this_tid; |
170 | int released = 0; | 189 | int released = 0; |
190 | int ret = 0; | ||
171 | 191 | ||
172 | this_tid = transaction->t_tid; | 192 | this_tid = transaction->t_tid; |
173 | restart: | 193 | restart: |
174 | /* Did somebody clean up the transaction in the meanwhile? */ | 194 | /* Did somebody clean up the transaction in the meanwhile? */ |
175 | if (journal->j_checkpoint_transactions != transaction || | 195 | if (journal->j_checkpoint_transactions != transaction || |
176 | transaction->t_tid != this_tid) | 196 | transaction->t_tid != this_tid) |
177 | return; | 197 | return ret; |
178 | while (!released && transaction->t_checkpoint_io_list) { | 198 | while (!released && transaction->t_checkpoint_io_list) { |
179 | jh = transaction->t_checkpoint_io_list; | 199 | jh = transaction->t_checkpoint_io_list; |
180 | bh = jh2bh(jh); | 200 | bh = jh2bh(jh); |
@@ -194,6 +214,9 @@ restart: | |||
194 | spin_lock(&journal->j_list_lock); | 214 | spin_lock(&journal->j_list_lock); |
195 | goto restart; | 215 | goto restart; |
196 | } | 216 | } |
217 | if (unlikely(buffer_write_io_error(bh))) | ||
218 | ret = -EIO; | ||
219 | |||
197 | /* | 220 | /* |
198 | * Now in whatever state the buffer currently is, we know that | 221 | * Now in whatever state the buffer currently is, we know that |
199 | * it has been written out and so we can drop it from the list | 222 | * it has been written out and so we can drop it from the list |
@@ -203,6 +226,8 @@ restart: | |||
203 | journal_remove_journal_head(bh); | 226 | journal_remove_journal_head(bh); |
204 | __brelse(bh); | 227 | __brelse(bh); |
205 | } | 228 | } |
229 | |||
230 | return ret; | ||
206 | } | 231 | } |
207 | 232 | ||
208 | #define NR_BATCH 64 | 233 | #define NR_BATCH 64 |
@@ -226,7 +251,8 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) | |||
226 | * Try to flush one buffer from the checkpoint list to disk. | 251 | * Try to flush one buffer from the checkpoint list to disk. |
227 | * | 252 | * |
228 | * Return 1 if something happened which requires us to abort the current | 253 | * Return 1 if something happened which requires us to abort the current |
229 | * scan of the checkpoint list. | 254 | * scan of the checkpoint list. Return <0 if the buffer has failed to |
255 | * be written out. | ||
230 | * | 256 | * |
231 | * Called with j_list_lock held and drops it if 1 is returned | 257 | * Called with j_list_lock held and drops it if 1 is returned |
232 | * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it | 258 | * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it |
@@ -256,6 +282,9 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, | |||
256 | log_wait_commit(journal, tid); | 282 | log_wait_commit(journal, tid); |
257 | ret = 1; | 283 | ret = 1; |
258 | } else if (!buffer_dirty(bh)) { | 284 | } else if (!buffer_dirty(bh)) { |
285 | ret = 1; | ||
286 | if (unlikely(buffer_write_io_error(bh))) | ||
287 | ret = -EIO; | ||
259 | J_ASSERT_JH(jh, !buffer_jbddirty(bh)); | 288 | J_ASSERT_JH(jh, !buffer_jbddirty(bh)); |
260 | BUFFER_TRACE(bh, "remove from checkpoint"); | 289 | BUFFER_TRACE(bh, "remove from checkpoint"); |
261 | __journal_remove_checkpoint(jh); | 290 | __journal_remove_checkpoint(jh); |
@@ -263,7 +292,6 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, | |||
263 | jbd_unlock_bh_state(bh); | 292 | jbd_unlock_bh_state(bh); |
264 | journal_remove_journal_head(bh); | 293 | journal_remove_journal_head(bh); |
265 | __brelse(bh); | 294 | __brelse(bh); |
266 | ret = 1; | ||
267 | } else { | 295 | } else { |
268 | /* | 296 | /* |
269 | * Important: we are about to write the buffer, and | 297 | * Important: we are about to write the buffer, and |
@@ -295,6 +323,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, | |||
295 | * to disk. We submit larger chunks of data at once. | 323 | * to disk. We submit larger chunks of data at once. |
296 | * | 324 | * |
297 | * The journal should be locked before calling this function. | 325 | * The journal should be locked before calling this function. |
326 | * Called with j_checkpoint_mutex held. | ||
298 | */ | 327 | */ |
299 | int log_do_checkpoint(journal_t *journal) | 328 | int log_do_checkpoint(journal_t *journal) |
300 | { | 329 | { |
@@ -318,6 +347,7 @@ int log_do_checkpoint(journal_t *journal) | |||
318 | * OK, we need to start writing disk blocks. Take one transaction | 347 | * OK, we need to start writing disk blocks. Take one transaction |
319 | * and write it. | 348 | * and write it. |
320 | */ | 349 | */ |
350 | result = 0; | ||
321 | spin_lock(&journal->j_list_lock); | 351 | spin_lock(&journal->j_list_lock); |
322 | if (!journal->j_checkpoint_transactions) | 352 | if (!journal->j_checkpoint_transactions) |
323 | goto out; | 353 | goto out; |
@@ -334,7 +364,7 @@ restart: | |||
334 | int batch_count = 0; | 364 | int batch_count = 0; |
335 | struct buffer_head *bhs[NR_BATCH]; | 365 | struct buffer_head *bhs[NR_BATCH]; |
336 | struct journal_head *jh; | 366 | struct journal_head *jh; |
337 | int retry = 0; | 367 | int retry = 0, err; |
338 | 368 | ||
339 | while (!retry && transaction->t_checkpoint_list) { | 369 | while (!retry && transaction->t_checkpoint_list) { |
340 | struct buffer_head *bh; | 370 | struct buffer_head *bh; |
@@ -347,6 +377,8 @@ restart: | |||
347 | break; | 377 | break; |
348 | } | 378 | } |
349 | retry = __process_buffer(journal, jh, bhs,&batch_count); | 379 | retry = __process_buffer(journal, jh, bhs,&batch_count); |
380 | if (retry < 0 && !result) | ||
381 | result = retry; | ||
350 | if (!retry && (need_resched() || | 382 | if (!retry && (need_resched() || |
351 | spin_needbreak(&journal->j_list_lock))) { | 383 | spin_needbreak(&journal->j_list_lock))) { |
352 | spin_unlock(&journal->j_list_lock); | 384 | spin_unlock(&journal->j_list_lock); |
@@ -371,14 +403,18 @@ restart: | |||
371 | * Now we have cleaned up the first transaction's checkpoint | 403 | * Now we have cleaned up the first transaction's checkpoint |
372 | * list. Let's clean up the second one | 404 | * list. Let's clean up the second one |
373 | */ | 405 | */ |
374 | __wait_cp_io(journal, transaction); | 406 | err = __wait_cp_io(journal, transaction); |
407 | if (!result) | ||
408 | result = err; | ||
375 | } | 409 | } |
376 | out: | 410 | out: |
377 | spin_unlock(&journal->j_list_lock); | 411 | spin_unlock(&journal->j_list_lock); |
378 | result = cleanup_journal_tail(journal); | ||
379 | if (result < 0) | 412 | if (result < 0) |
380 | return result; | 413 | journal_abort(journal, result); |
381 | return 0; | 414 | else |
415 | result = cleanup_journal_tail(journal); | ||
416 | |||
417 | return (result < 0) ? result : 0; | ||
382 | } | 418 | } |
383 | 419 | ||
384 | /* | 420 | /* |
@@ -394,8 +430,9 @@ out: | |||
394 | * This is the only part of the journaling code which really needs to be | 430 | * This is the only part of the journaling code which really needs to be |
395 | * aware of transaction aborts. Checkpointing involves writing to the | 431 | * aware of transaction aborts. Checkpointing involves writing to the |
396 | * main filesystem area rather than to the journal, so it can proceed | 432 | * main filesystem area rather than to the journal, so it can proceed |
397 | * even in abort state, but we must not update the journal superblock if | 433 | * even in abort state, but we must not update the super block if |
398 | * we have an abort error outstanding. | 434 | * checkpointing may have failed. Otherwise, we would lose some metadata |
435 | * buffers which should be written-back to the filesystem. | ||
399 | */ | 436 | */ |
400 | 437 | ||
401 | int cleanup_journal_tail(journal_t *journal) | 438 | int cleanup_journal_tail(journal_t *journal) |
@@ -404,6 +441,9 @@ int cleanup_journal_tail(journal_t *journal) | |||
404 | tid_t first_tid; | 441 | tid_t first_tid; |
405 | unsigned long blocknr, freed; | 442 | unsigned long blocknr, freed; |
406 | 443 | ||
444 | if (is_journal_aborted(journal)) | ||
445 | return 1; | ||
446 | |||
407 | /* OK, work out the oldest transaction remaining in the log, and | 447 | /* OK, work out the oldest transaction remaining in the log, and |
408 | * the log block it starts at. | 448 | * the log block it starts at. |
409 | * | 449 | * |
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index aa7143a8349b..9e4fa52d7dc8 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -1121,9 +1121,12 @@ recovery_error: | |||
1121 | * | 1121 | * |
1122 | * Release a journal_t structure once it is no longer in use by the | 1122 | * Release a journal_t structure once it is no longer in use by the |
1123 | * journaled object. | 1123 | * journaled object. |
1124 | * Return <0 if we couldn't clean up the journal. | ||
1124 | */ | 1125 | */ |
1125 | void journal_destroy(journal_t *journal) | 1126 | int journal_destroy(journal_t *journal) |
1126 | { | 1127 | { |
1128 | int err = 0; | ||
1129 | |||
1127 | /* Wait for the commit thread to wake up and die. */ | 1130 | /* Wait for the commit thread to wake up and die. */ |
1128 | journal_kill_thread(journal); | 1131 | journal_kill_thread(journal); |
1129 | 1132 | ||
@@ -1146,11 +1149,16 @@ void journal_destroy(journal_t *journal) | |||
1146 | J_ASSERT(journal->j_checkpoint_transactions == NULL); | 1149 | J_ASSERT(journal->j_checkpoint_transactions == NULL); |
1147 | spin_unlock(&journal->j_list_lock); | 1150 | spin_unlock(&journal->j_list_lock); |
1148 | 1151 | ||
1149 | /* We can now mark the journal as empty. */ | ||
1150 | journal->j_tail = 0; | ||
1151 | journal->j_tail_sequence = ++journal->j_transaction_sequence; | ||
1152 | if (journal->j_sb_buffer) { | 1152 | if (journal->j_sb_buffer) { |
1153 | journal_update_superblock(journal, 1); | 1153 | if (!is_journal_aborted(journal)) { |
1154 | /* We can now mark the journal as empty. */ | ||
1155 | journal->j_tail = 0; | ||
1156 | journal->j_tail_sequence = | ||
1157 | ++journal->j_transaction_sequence; | ||
1158 | journal_update_superblock(journal, 1); | ||
1159 | } else { | ||
1160 | err = -EIO; | ||
1161 | } | ||
1154 | brelse(journal->j_sb_buffer); | 1162 | brelse(journal->j_sb_buffer); |
1155 | } | 1163 | } |
1156 | 1164 | ||
@@ -1160,6 +1168,8 @@ void journal_destroy(journal_t *journal) | |||
1160 | journal_destroy_revoke(journal); | 1168 | journal_destroy_revoke(journal); |
1161 | kfree(journal->j_wbuf); | 1169 | kfree(journal->j_wbuf); |
1162 | kfree(journal); | 1170 | kfree(journal); |
1171 | |||
1172 | return err; | ||
1163 | } | 1173 | } |
1164 | 1174 | ||
1165 | 1175 | ||
@@ -1359,10 +1369,16 @@ int journal_flush(journal_t *journal) | |||
1359 | spin_lock(&journal->j_list_lock); | 1369 | spin_lock(&journal->j_list_lock); |
1360 | while (!err && journal->j_checkpoint_transactions != NULL) { | 1370 | while (!err && journal->j_checkpoint_transactions != NULL) { |
1361 | spin_unlock(&journal->j_list_lock); | 1371 | spin_unlock(&journal->j_list_lock); |
1372 | mutex_lock(&journal->j_checkpoint_mutex); | ||
1362 | err = log_do_checkpoint(journal); | 1373 | err = log_do_checkpoint(journal); |
1374 | mutex_unlock(&journal->j_checkpoint_mutex); | ||
1363 | spin_lock(&journal->j_list_lock); | 1375 | spin_lock(&journal->j_list_lock); |
1364 | } | 1376 | } |
1365 | spin_unlock(&journal->j_list_lock); | 1377 | spin_unlock(&journal->j_list_lock); |
1378 | |||
1379 | if (is_journal_aborted(journal)) | ||
1380 | return -EIO; | ||
1381 | |||
1366 | cleanup_journal_tail(journal); | 1382 | cleanup_journal_tail(journal); |
1367 | 1383 | ||
1368 | /* Finally, mark the journal as really needing no recovery. | 1384 | /* Finally, mark the journal as really needing no recovery. |
@@ -1384,7 +1400,7 @@ int journal_flush(journal_t *journal) | |||
1384 | J_ASSERT(journal->j_head == journal->j_tail); | 1400 | J_ASSERT(journal->j_head == journal->j_tail); |
1385 | J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence); | 1401 | J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence); |
1386 | spin_unlock(&journal->j_state_lock); | 1402 | spin_unlock(&journal->j_state_lock); |
1387 | return err; | 1403 | return 0; |
1388 | } | 1404 | } |
1389 | 1405 | ||
1390 | /** | 1406 | /** |
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c index 43bc5e5ed064..db5e982c5ddf 100644 --- a/fs/jbd/recovery.c +++ b/fs/jbd/recovery.c | |||
@@ -223,7 +223,7 @@ do { \ | |||
223 | */ | 223 | */ |
224 | int journal_recover(journal_t *journal) | 224 | int journal_recover(journal_t *journal) |
225 | { | 225 | { |
226 | int err; | 226 | int err, err2; |
227 | journal_superblock_t * sb; | 227 | journal_superblock_t * sb; |
228 | 228 | ||
229 | struct recovery_info info; | 229 | struct recovery_info info; |
@@ -261,7 +261,10 @@ int journal_recover(journal_t *journal) | |||
261 | journal->j_transaction_sequence = ++info.end_transaction; | 261 | journal->j_transaction_sequence = ++info.end_transaction; |
262 | 262 | ||
263 | journal_clear_revoke(journal); | 263 | journal_clear_revoke(journal); |
264 | sync_blockdev(journal->j_fs_dev); | 264 | err2 = sync_blockdev(journal->j_fs_dev); |
265 | if (!err) | ||
266 | err = err2; | ||
267 | |||
265 | return err; | 268 | return err; |
266 | } | 269 | } |
267 | 270 | ||
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h index ed108be6743f..f104af7cf437 100644 --- a/include/asm-generic/mutex-dec.h +++ b/include/asm-generic/mutex-dec.h | |||
@@ -22,8 +22,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) | |||
22 | { | 22 | { |
23 | if (unlikely(atomic_dec_return(count) < 0)) | 23 | if (unlikely(atomic_dec_return(count) < 0)) |
24 | fail_fn(count); | 24 | fail_fn(count); |
25 | else | ||
26 | smp_mb(); | ||
27 | } | 25 | } |
28 | 26 | ||
29 | /** | 27 | /** |
@@ -41,10 +39,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | |||
41 | { | 39 | { |
42 | if (unlikely(atomic_dec_return(count) < 0)) | 40 | if (unlikely(atomic_dec_return(count) < 0)) |
43 | return fail_fn(count); | 41 | return fail_fn(count); |
44 | else { | 42 | return 0; |
45 | smp_mb(); | ||
46 | return 0; | ||
47 | } | ||
48 | } | 43 | } |
49 | 44 | ||
50 | /** | 45 | /** |
@@ -63,7 +58,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | |||
63 | static inline void | 58 | static inline void |
64 | __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) | 59 | __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) |
65 | { | 60 | { |
66 | smp_mb(); | ||
67 | if (unlikely(atomic_inc_return(count) <= 0)) | 61 | if (unlikely(atomic_inc_return(count) <= 0)) |
68 | fail_fn(count); | 62 | fail_fn(count); |
69 | } | 63 | } |
@@ -88,25 +82,9 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) | |||
88 | static inline int | 82 | static inline int |
89 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | 83 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) |
90 | { | 84 | { |
91 | /* | 85 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) |
92 | * We have two variants here. The cmpxchg based one is the best one | ||
93 | * because it never induce a false contention state. It is included | ||
94 | * here because architectures using the inc/dec algorithms over the | ||
95 | * xchg ones are much more likely to support cmpxchg natively. | ||
96 | * | ||
97 | * If not we fall back to the spinlock based variant - that is | ||
98 | * just as efficient (and simpler) as a 'destructive' probing of | ||
99 | * the mutex state would be. | ||
100 | */ | ||
101 | #ifdef __HAVE_ARCH_CMPXCHG | ||
102 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) { | ||
103 | smp_mb(); | ||
104 | return 1; | 86 | return 1; |
105 | } | ||
106 | return 0; | 87 | return 0; |
107 | #else | ||
108 | return fail_fn(count); | ||
109 | #endif | ||
110 | } | 88 | } |
111 | 89 | ||
112 | #endif | 90 | #endif |
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h index 7b9cd2cbfebe..580a6d35c700 100644 --- a/include/asm-generic/mutex-xchg.h +++ b/include/asm-generic/mutex-xchg.h | |||
@@ -27,8 +27,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) | |||
27 | { | 27 | { |
28 | if (unlikely(atomic_xchg(count, 0) != 1)) | 28 | if (unlikely(atomic_xchg(count, 0) != 1)) |
29 | fail_fn(count); | 29 | fail_fn(count); |
30 | else | ||
31 | smp_mb(); | ||
32 | } | 30 | } |
33 | 31 | ||
34 | /** | 32 | /** |
@@ -46,10 +44,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | |||
46 | { | 44 | { |
47 | if (unlikely(atomic_xchg(count, 0) != 1)) | 45 | if (unlikely(atomic_xchg(count, 0) != 1)) |
48 | return fail_fn(count); | 46 | return fail_fn(count); |
49 | else { | 47 | return 0; |
50 | smp_mb(); | ||
51 | return 0; | ||
52 | } | ||
53 | } | 48 | } |
54 | 49 | ||
55 | /** | 50 | /** |
@@ -67,7 +62,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | |||
67 | static inline void | 62 | static inline void |
68 | __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) | 63 | __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) |
69 | { | 64 | { |
70 | smp_mb(); | ||
71 | if (unlikely(atomic_xchg(count, 1) != 0)) | 65 | if (unlikely(atomic_xchg(count, 1) != 0)) |
72 | fail_fn(count); | 66 | fail_fn(count); |
73 | } | 67 | } |
@@ -110,7 +104,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | |||
110 | if (prev < 0) | 104 | if (prev < 0) |
111 | prev = 0; | 105 | prev = 0; |
112 | } | 106 | } |
113 | smp_mb(); | ||
114 | 107 | ||
115 | return prev; | 108 | return prev; |
116 | } | 109 | } |
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h index 961e746da977..2daaffcda52f 100644 --- a/include/asm-x86/iommu.h +++ b/include/asm-x86/iommu.h | |||
@@ -7,9 +7,13 @@ extern struct dma_mapping_ops nommu_dma_ops; | |||
7 | extern int force_iommu, no_iommu; | 7 | extern int force_iommu, no_iommu; |
8 | extern int iommu_detected; | 8 | extern int iommu_detected; |
9 | extern int dmar_disabled; | 9 | extern int dmar_disabled; |
10 | extern int forbid_dac; | ||
10 | 11 | ||
11 | extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); | 12 | extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); |
12 | 13 | ||
14 | /* 10 seconds */ | ||
15 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) | ||
16 | |||
13 | #ifdef CONFIG_GART_IOMMU | 17 | #ifdef CONFIG_GART_IOMMU |
14 | extern int gart_iommu_aperture; | 18 | extern int gart_iommu_aperture; |
15 | extern int gart_iommu_aperture_allowed; | 19 | extern int gart_iommu_aperture_allowed; |
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h index 47c3616ea9ac..07b7299dab20 100644 --- a/include/asm-xtensa/io.h +++ b/include/asm-xtensa/io.h | |||
@@ -18,10 +18,12 @@ | |||
18 | 18 | ||
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | 20 | ||
21 | #define XCHAL_KIO_CACHED_VADDR 0xf0000000 | 21 | #define XCHAL_KIO_CACHED_VADDR 0xe0000000 |
22 | #define XCHAL_KIO_BYPASS_VADDR 0xf8000000 | 22 | #define XCHAL_KIO_BYPASS_VADDR 0xf0000000 |
23 | #define XCHAL_KIO_PADDR 0xf0000000 | 23 | #define XCHAL_KIO_PADDR 0xf0000000 |
24 | #define XCHAL_KIO_SIZE 0x08000000 | 24 | #define XCHAL_KIO_SIZE 0x10000000 |
25 | |||
26 | #define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x)) | ||
25 | 27 | ||
26 | /* | 28 | /* |
27 | * swap functions to change byte order from little-endian to big-endian and | 29 | * swap functions to change byte order from little-endian to big-endian and |
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h index 0aad3a587551..e39edf5c86f2 100644 --- a/include/asm-xtensa/rwsem.h +++ b/include/asm-xtensa/rwsem.h | |||
@@ -13,6 +13,10 @@ | |||
13 | #ifndef _XTENSA_RWSEM_H | 13 | #ifndef _XTENSA_RWSEM_H |
14 | #define _XTENSA_RWSEM_H | 14 | #define _XTENSA_RWSEM_H |
15 | 15 | ||
16 | #ifndef _LINUX_RWSEM_H | ||
17 | #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." | ||
18 | #endif | ||
19 | |||
16 | #include <linux/list.h> | 20 | #include <linux/list.h> |
17 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
18 | #include <asm/atomic.h> | 22 | #include <asm/atomic.h> |
diff --git a/include/asm-xtensa/variant-dc232b/core.h b/include/asm-xtensa/variant-dc232b/core.h new file mode 100644 index 000000000000..525bd3d90154 --- /dev/null +++ b/include/asm-xtensa/variant-dc232b/core.h | |||
@@ -0,0 +1,424 @@ | |||
1 | /* | ||
2 | * Xtensa processor core configuration information. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (c) 1999-2007 Tensilica Inc. | ||
9 | */ | ||
10 | |||
11 | #ifndef _XTENSA_CORE_CONFIGURATION_H | ||
12 | #define _XTENSA_CORE_CONFIGURATION_H | ||
13 | |||
14 | |||
15 | /**************************************************************************** | ||
16 | Parameters Useful for Any Code, USER or PRIVILEGED | ||
17 | ****************************************************************************/ | ||
18 | |||
19 | /* | ||
20 | * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is | ||
21 | * configured, and a value of 0 otherwise. These macros are always defined. | ||
22 | */ | ||
23 | |||
24 | |||
25 | /*---------------------------------------------------------------------- | ||
26 | ISA | ||
27 | ----------------------------------------------------------------------*/ | ||
28 | |||
29 | #define XCHAL_HAVE_BE 0 /* big-endian byte ordering */ | ||
30 | #define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */ | ||
31 | #define XCHAL_NUM_AREGS 32 /* num of physical addr regs */ | ||
32 | #define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */ | ||
33 | #define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */ | ||
34 | #define XCHAL_HAVE_DEBUG 1 /* debug option */ | ||
35 | #define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */ | ||
36 | #define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */ | ||
37 | #define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */ | ||
38 | #define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */ | ||
39 | #define XCHAL_HAVE_SEXT 1 /* SEXT instruction */ | ||
40 | #define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */ | ||
41 | #define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */ | ||
42 | #define XCHAL_HAVE_MUL32 1 /* MULL instruction */ | ||
43 | #define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */ | ||
44 | #define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */ | ||
45 | #define XCHAL_HAVE_L32R 1 /* L32R instruction */ | ||
46 | #define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */ | ||
47 | #define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */ | ||
48 | #define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */ | ||
49 | #define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */ | ||
50 | #define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */ | ||
51 | #define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */ | ||
52 | #define XCHAL_HAVE_ABS 1 /* ABS instruction */ | ||
53 | /*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */ | ||
54 | /*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */ | ||
55 | #define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */ | ||
56 | #define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */ | ||
57 | #define XCHAL_HAVE_SPECULATION 0 /* speculation */ | ||
58 | #define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */ | ||
59 | #define XCHAL_NUM_CONTEXTS 1 /* */ | ||
60 | #define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */ | ||
61 | #define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */ | ||
62 | #define XCHAL_HAVE_PRID 1 /* processor ID register */ | ||
63 | #define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */ | ||
64 | #define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */ | ||
65 | #define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */ | ||
66 | #define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */ | ||
67 | #define XCHAL_HAVE_MAC16 1 /* MAC16 package */ | ||
68 | #define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */ | ||
69 | #define XCHAL_HAVE_FP 0 /* floating point pkg */ | ||
70 | #define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */ | ||
71 | #define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */ | ||
72 | #define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */ | ||
73 | |||
74 | |||
75 | /*---------------------------------------------------------------------- | ||
76 | MISC | ||
77 | ----------------------------------------------------------------------*/ | ||
78 | |||
79 | #define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */ | ||
80 | #define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */ | ||
81 | #define XCHAL_DATA_WIDTH 4 /* data width in bytes */ | ||
82 | /* In T1050, applies to selected core load and store instructions (see ISA): */ | ||
83 | #define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */ | ||
84 | #define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/ | ||
85 | |||
86 | #define XCHAL_SW_VERSION 701001 /* sw version of this header */ | ||
87 | |||
88 | #define XCHAL_CORE_ID "dc232b" /* alphanum core name | ||
89 | (CoreID) set in the Xtensa | ||
90 | Processor Generator */ | ||
91 | |||
92 | #define XCHAL_CORE_DESCRIPTION "Diamond 232L Standard Core Rev.B (LE)" | ||
93 | #define XCHAL_BUILD_UNIQUE_ID 0x0000BEEF /* 22-bit sw build ID */ | ||
94 | |||
95 | /* | ||
96 | * These definitions describe the hardware targeted by this software. | ||
97 | */ | ||
98 | #define XCHAL_HW_CONFIGID0 0xC56307FE /* ConfigID hi 32 bits*/ | ||
99 | #define XCHAL_HW_CONFIGID1 0x0D40BEEF /* ConfigID lo 32 bits*/ | ||
100 | #define XCHAL_HW_VERSION_NAME "LX2.1.1" /* full version name */ | ||
101 | #define XCHAL_HW_VERSION_MAJOR 2210 /* major ver# of targeted hw */ | ||
102 | #define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */ | ||
103 | #define XCHAL_HW_VERSION 221001 /* major*100+minor */ | ||
104 | #define XCHAL_HW_REL_LX2 1 | ||
105 | #define XCHAL_HW_REL_LX2_1 1 | ||
106 | #define XCHAL_HW_REL_LX2_1_1 1 | ||
107 | #define XCHAL_HW_CONFIGID_RELIABLE 1 | ||
108 | /* If software targets a *range* of hardware versions, these are the bounds: */ | ||
109 | #define XCHAL_HW_MIN_VERSION_MAJOR 2210 /* major v of earliest tgt hw */ | ||
110 | #define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */ | ||
111 | #define XCHAL_HW_MIN_VERSION 221001 /* earliest targeted hw */ | ||
112 | #define XCHAL_HW_MAX_VERSION_MAJOR 2210 /* major v of latest tgt hw */ | ||
113 | #define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */ | ||
114 | #define XCHAL_HW_MAX_VERSION 221001 /* latest targeted hw */ | ||
115 | |||
116 | |||
117 | /*---------------------------------------------------------------------- | ||
118 | CACHE | ||
119 | ----------------------------------------------------------------------*/ | ||
120 | |||
121 | #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */ | ||
122 | #define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */ | ||
123 | #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */ | ||
124 | #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */ | ||
125 | |||
126 | #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */ | ||
127 | #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */ | ||
128 | |||
129 | #define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */ | ||
130 | |||
131 | |||
132 | |||
133 | |||
134 | /**************************************************************************** | ||
135 | Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code | ||
136 | ****************************************************************************/ | ||
137 | |||
138 | |||
139 | #ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY | ||
140 | |||
141 | /*---------------------------------------------------------------------- | ||
142 | CACHE | ||
143 | ----------------------------------------------------------------------*/ | ||
144 | |||
145 | #define XCHAL_HAVE_PIF 1 /* any outbound PIF present */ | ||
146 | |||
147 | /* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */ | ||
148 | |||
149 | /* Number of cache sets in log2(lines per way): */ | ||
150 | #define XCHAL_ICACHE_SETWIDTH 7 | ||
151 | #define XCHAL_DCACHE_SETWIDTH 7 | ||
152 | |||
153 | /* Cache set associativity (number of ways): */ | ||
154 | #define XCHAL_ICACHE_WAYS 4 | ||
155 | #define XCHAL_DCACHE_WAYS 4 | ||
156 | |||
157 | /* Cache features: */ | ||
158 | #define XCHAL_ICACHE_LINE_LOCKABLE 1 | ||
159 | #define XCHAL_DCACHE_LINE_LOCKABLE 1 | ||
160 | #define XCHAL_ICACHE_ECC_PARITY 0 | ||
161 | #define XCHAL_DCACHE_ECC_PARITY 0 | ||
162 | |||
163 | /* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */ | ||
164 | #define XCHAL_CA_BITS 4 | ||
165 | |||
166 | |||
167 | /*---------------------------------------------------------------------- | ||
168 | INTERNAL I/D RAM/ROMs and XLMI | ||
169 | ----------------------------------------------------------------------*/ | ||
170 | |||
171 | #define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */ | ||
172 | #define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */ | ||
173 | #define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */ | ||
174 | #define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */ | ||
175 | #define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/ | ||
176 | #define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */ | ||
177 | |||
178 | |||
179 | /*---------------------------------------------------------------------- | ||
180 | INTERRUPTS and TIMERS | ||
181 | ----------------------------------------------------------------------*/ | ||
182 | |||
183 | #define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */ | ||
184 | #define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */ | ||
185 | #define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */ | ||
186 | #define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */ | ||
187 | #define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */ | ||
188 | #define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */ | ||
189 | #define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */ | ||
190 | #define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */ | ||
191 | #define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels | ||
192 | (not including level zero) */ | ||
193 | #define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */ | ||
194 | /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */ | ||
195 | |||
196 | /* Masks of interrupts at each interrupt level: */ | ||
197 | #define XCHAL_INTLEVEL1_MASK 0x001F80FF | ||
198 | #define XCHAL_INTLEVEL2_MASK 0x00000100 | ||
199 | #define XCHAL_INTLEVEL3_MASK 0x00200E00 | ||
200 | #define XCHAL_INTLEVEL4_MASK 0x00001000 | ||
201 | #define XCHAL_INTLEVEL5_MASK 0x00002000 | ||
202 | #define XCHAL_INTLEVEL6_MASK 0x00000000 | ||
203 | #define XCHAL_INTLEVEL7_MASK 0x00004000 | ||
204 | |||
205 | /* Masks of interrupts at each range 1..n of interrupt levels: */ | ||
206 | #define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF | ||
207 | #define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF | ||
208 | #define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF | ||
209 | #define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF | ||
210 | #define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF | ||
211 | #define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF | ||
212 | #define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF | ||
213 | |||
214 | /* Level of each interrupt: */ | ||
215 | #define XCHAL_INT0_LEVEL 1 | ||
216 | #define XCHAL_INT1_LEVEL 1 | ||
217 | #define XCHAL_INT2_LEVEL 1 | ||
218 | #define XCHAL_INT3_LEVEL 1 | ||
219 | #define XCHAL_INT4_LEVEL 1 | ||
220 | #define XCHAL_INT5_LEVEL 1 | ||
221 | #define XCHAL_INT6_LEVEL 1 | ||
222 | #define XCHAL_INT7_LEVEL 1 | ||
223 | #define XCHAL_INT8_LEVEL 2 | ||
224 | #define XCHAL_INT9_LEVEL 3 | ||
225 | #define XCHAL_INT10_LEVEL 3 | ||
226 | #define XCHAL_INT11_LEVEL 3 | ||
227 | #define XCHAL_INT12_LEVEL 4 | ||
228 | #define XCHAL_INT13_LEVEL 5 | ||
229 | #define XCHAL_INT14_LEVEL 7 | ||
230 | #define XCHAL_INT15_LEVEL 1 | ||
231 | #define XCHAL_INT16_LEVEL 1 | ||
232 | #define XCHAL_INT17_LEVEL 1 | ||
233 | #define XCHAL_INT18_LEVEL 1 | ||
234 | #define XCHAL_INT19_LEVEL 1 | ||
235 | #define XCHAL_INT20_LEVEL 1 | ||
236 | #define XCHAL_INT21_LEVEL 3 | ||
237 | #define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */ | ||
238 | #define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */ | ||
239 | #define XCHAL_NMILEVEL 7 /* NMI "level" (for use with | ||
240 | EXCSAVE/EPS/EPC_n, RFI n) */ | ||
241 | |||
242 | /* Type of each interrupt: */ | ||
243 | #define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
244 | #define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
245 | #define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
246 | #define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
247 | #define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
248 | #define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
249 | #define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER | ||
250 | #define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE | ||
251 | #define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
252 | #define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
253 | #define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER | ||
254 | #define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE | ||
255 | #define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL | ||
256 | #define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER | ||
257 | #define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI | ||
258 | #define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE | ||
259 | #define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE | ||
260 | #define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE | ||
261 | #define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE | ||
262 | #define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE | ||
263 | #define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE | ||
264 | #define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE | ||
265 | |||
266 | /* Masks of interrupts for each type of interrupt: */ | ||
267 | #define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000 | ||
268 | #define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880 | ||
269 | #define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000 | ||
270 | #define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F | ||
271 | #define XCHAL_INTTYPE_MASK_TIMER 0x00002440 | ||
272 | #define XCHAL_INTTYPE_MASK_NMI 0x00004000 | ||
273 | #define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000 | ||
274 | |||
275 | /* Interrupt numbers assigned to specific interrupt sources: */ | ||
276 | #define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */ | ||
277 | #define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */ | ||
278 | #define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */ | ||
279 | #define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED | ||
280 | #define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */ | ||
281 | |||
282 | /* Interrupt numbers for levels at which only one interrupt is configured: */ | ||
283 | #define XCHAL_INTLEVEL2_NUM 8 | ||
284 | #define XCHAL_INTLEVEL4_NUM 12 | ||
285 | #define XCHAL_INTLEVEL5_NUM 13 | ||
286 | #define XCHAL_INTLEVEL7_NUM 14 | ||
287 | /* (There are many interrupts each at level(s) 1, 3.) */ | ||
288 | |||
289 | |||
290 | /* | ||
291 | * External interrupt vectors/levels. | ||
292 | * These macros describe how Xtensa processor interrupt numbers | ||
293 | * (as numbered internally, eg. in INTERRUPT and INTENABLE registers) | ||
294 | * map to external BInterrupt<n> pins, for those interrupts | ||
295 | * configured as external (level-triggered, edge-triggered, or NMI). | ||
296 | * See the Xtensa processor databook for more details. | ||
297 | */ | ||
298 | |||
299 | /* Core interrupt numbers mapped to each EXTERNAL interrupt number: */ | ||
300 | #define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */ | ||
301 | #define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */ | ||
302 | #define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */ | ||
303 | #define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */ | ||
304 | #define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */ | ||
305 | #define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */ | ||
306 | #define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */ | ||
307 | #define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */ | ||
308 | #define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */ | ||
309 | #define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */ | ||
310 | #define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */ | ||
311 | #define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */ | ||
312 | #define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */ | ||
313 | #define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */ | ||
314 | #define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */ | ||
315 | #define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */ | ||
316 | #define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */ | ||
317 | |||
318 | |||
319 | /*---------------------------------------------------------------------- | ||
320 | EXCEPTIONS and VECTORS | ||
321 | ----------------------------------------------------------------------*/ | ||
322 | |||
323 | #define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture | ||
324 | number: 1 == XEA1 (old) | ||
325 | 2 == XEA2 (new) | ||
326 | 0 == XEAX (extern) */ | ||
327 | #define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */ | ||
328 | #define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */ | ||
329 | #define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */ | ||
330 | #define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */ | ||
331 | #define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */ | ||
332 | #define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */ | ||
333 | #define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */ | ||
334 | #define XCHAL_VECBASE_RESET_VADDR 0xD0000000 /* VECBASE reset value */ | ||
335 | #define XCHAL_VECBASE_RESET_PADDR 0x00000000 | ||
336 | #define XCHAL_RESET_VECBASE_OVERLAP 0 | ||
337 | |||
338 | #define XCHAL_RESET_VECTOR0_VADDR 0xFE000000 | ||
339 | #define XCHAL_RESET_VECTOR0_PADDR 0xFE000000 | ||
340 | #define XCHAL_RESET_VECTOR1_VADDR 0xD8000500 | ||
341 | #define XCHAL_RESET_VECTOR1_PADDR 0x00000500 | ||
342 | #define XCHAL_RESET_VECTOR_VADDR 0xFE000000 | ||
343 | #define XCHAL_RESET_VECTOR_PADDR 0xFE000000 | ||
344 | #define XCHAL_USER_VECOFS 0x00000340 | ||
345 | #define XCHAL_USER_VECTOR_VADDR 0xD0000340 | ||
346 | #define XCHAL_USER_VECTOR_PADDR 0x00000340 | ||
347 | #define XCHAL_KERNEL_VECOFS 0x00000300 | ||
348 | #define XCHAL_KERNEL_VECTOR_VADDR 0xD0000300 | ||
349 | #define XCHAL_KERNEL_VECTOR_PADDR 0x00000300 | ||
350 | #define XCHAL_DOUBLEEXC_VECOFS 0x000003C0 | ||
351 | #define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD00003C0 | ||
352 | #define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000003C0 | ||
353 | #define XCHAL_WINDOW_OF4_VECOFS 0x00000000 | ||
354 | #define XCHAL_WINDOW_UF4_VECOFS 0x00000040 | ||
355 | #define XCHAL_WINDOW_OF8_VECOFS 0x00000080 | ||
356 | #define XCHAL_WINDOW_UF8_VECOFS 0x000000C0 | ||
357 | #define XCHAL_WINDOW_OF12_VECOFS 0x00000100 | ||
358 | #define XCHAL_WINDOW_UF12_VECOFS 0x00000140 | ||
359 | #define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000 | ||
360 | #define XCHAL_WINDOW_VECTORS_PADDR 0x00000000 | ||
361 | #define XCHAL_INTLEVEL2_VECOFS 0x00000180 | ||
362 | #define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000180 | ||
363 | #define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000180 | ||
364 | #define XCHAL_INTLEVEL3_VECOFS 0x000001C0 | ||
365 | #define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD00001C0 | ||
366 | #define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000001C0 | ||
367 | #define XCHAL_INTLEVEL4_VECOFS 0x00000200 | ||
368 | #define XCHAL_INTLEVEL4_VECTOR_VADDR 0xD0000200 | ||
369 | #define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00000200 | ||
370 | #define XCHAL_INTLEVEL5_VECOFS 0x00000240 | ||
371 | #define XCHAL_INTLEVEL5_VECTOR_VADDR 0xD0000240 | ||
372 | #define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00000240 | ||
373 | #define XCHAL_INTLEVEL6_VECOFS 0x00000280 | ||
374 | #define XCHAL_INTLEVEL6_VECTOR_VADDR 0xD0000280 | ||
375 | #define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00000280 | ||
376 | #define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS | ||
377 | #define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR | ||
378 | #define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR | ||
379 | #define XCHAL_NMI_VECOFS 0x000002C0 | ||
380 | #define XCHAL_NMI_VECTOR_VADDR 0xD00002C0 | ||
381 | #define XCHAL_NMI_VECTOR_PADDR 0x000002C0 | ||
382 | #define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS | ||
383 | #define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR | ||
384 | #define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR | ||
385 | |||
386 | |||
387 | /*---------------------------------------------------------------------- | ||
388 | DEBUG | ||
389 | ----------------------------------------------------------------------*/ | ||
390 | |||
391 | #define XCHAL_HAVE_OCD 1 /* OnChipDebug option */ | ||
392 | #define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */ | ||
393 | #define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */ | ||
394 | #define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */ | ||
395 | |||
396 | |||
397 | /*---------------------------------------------------------------------- | ||
398 | MMU | ||
399 | ----------------------------------------------------------------------*/ | ||
400 | |||
401 | /* See core-matmap.h header file for more details. */ | ||
402 | |||
403 | #define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */ | ||
404 | #define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */ | ||
405 | #define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */ | ||
406 | #define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */ | ||
407 | #define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */ | ||
408 | #define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */ | ||
409 | #define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table | ||
410 | [autorefill] and protection) | ||
411 | usable for an MMU-based OS */ | ||
412 | /* If none of the above last 4 are set, it's a custom TLB configuration. */ | ||
413 | #define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */ | ||
414 | #define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */ | ||
415 | |||
416 | #define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */ | ||
417 | #define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */ | ||
418 | #define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */ | ||
419 | |||
420 | #endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */ | ||
421 | |||
422 | |||
423 | #endif /* _XTENSA_CORE_CONFIGURATION_H */ | ||
424 | |||
diff --git a/include/asm-xtensa/variant-dc232b/tie-asm.h b/include/asm-xtensa/variant-dc232b/tie-asm.h new file mode 100644 index 000000000000..ed4f53f529db --- /dev/null +++ b/include/asm-xtensa/variant-dc232b/tie-asm.h | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * This header file contains assembly-language definitions (assembly | ||
3 | * macros, etc.) for this specific Xtensa processor's TIE extensions | ||
4 | * and options. It is customized to this Xtensa processor configuration. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | * | ||
10 | * Copyright (C) 1999-2007 Tensilica Inc. | ||
11 | */ | ||
12 | |||
13 | #ifndef _XTENSA_CORE_TIE_ASM_H | ||
14 | #define _XTENSA_CORE_TIE_ASM_H | ||
15 | |||
16 | /* Selection parameter values for save-area save/restore macros: */ | ||
17 | /* Option vs. TIE: */ | ||
18 | #define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */ | ||
19 | #define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */ | ||
20 | /* Whether used automatically by compiler: */ | ||
21 | #define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */ | ||
22 | #define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */ | ||
23 | /* ABI handling across function calls: */ | ||
24 | #define XTHAL_SAS_CALR 0x0010 /* caller-saved */ | ||
25 | #define XTHAL_SAS_CALE 0x0020 /* callee-saved */ | ||
26 | #define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */ | ||
27 | /* Misc */ | ||
28 | #define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */ | ||
29 | |||
30 | |||
31 | |||
32 | /* Macro to save all non-coprocessor (extra) custom TIE and optional state | ||
33 | * (not including zero-overhead loop registers). | ||
34 | * Save area ptr (clobbered): ptr (1 byte aligned) | ||
35 | * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed) | ||
36 | */ | ||
37 | .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL | ||
38 | xchal_sa_start \continue, \ofs | ||
39 | .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~\select | ||
40 | xchal_sa_align \ptr, 0, 1024-8, 4, 4 | ||
41 | rsr \at1, ACCLO // MAC16 accumulator | ||
42 | rsr \at2, ACCHI | ||
43 | s32i \at1, \ptr, .Lxchal_ofs_ + 0 | ||
44 | s32i \at2, \ptr, .Lxchal_ofs_ + 4 | ||
45 | .set .Lxchal_ofs_, .Lxchal_ofs_ + 8 | ||
46 | .endif | ||
47 | .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select | ||
48 | xchal_sa_align \ptr, 0, 1024-16, 4, 4 | ||
49 | rsr \at1, M0 // MAC16 registers | ||
50 | rsr \at2, M1 | ||
51 | s32i \at1, \ptr, .Lxchal_ofs_ + 0 | ||
52 | s32i \at2, \ptr, .Lxchal_ofs_ + 4 | ||
53 | rsr \at1, M2 | ||
54 | rsr \at2, M3 | ||
55 | s32i \at1, \ptr, .Lxchal_ofs_ + 8 | ||
56 | s32i \at2, \ptr, .Lxchal_ofs_ + 12 | ||
57 | .set .Lxchal_ofs_, .Lxchal_ofs_ + 16 | ||
58 | .endif | ||
59 | .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select | ||
60 | xchal_sa_align \ptr, 0, 1024-4, 4, 4 | ||
61 | rsr \at1, SCOMPARE1 // conditional store option | ||
62 | s32i \at1, \ptr, .Lxchal_ofs_ + 0 | ||
63 | .set .Lxchal_ofs_, .Lxchal_ofs_ + 4 | ||
64 | .endif | ||
65 | .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select | ||
66 | xchal_sa_align \ptr, 0, 1024-4, 4, 4 | ||
67 | rur \at1, THREADPTR // threadptr option | ||
68 | s32i \at1, \ptr, .Lxchal_ofs_ + 0 | ||
69 | .set .Lxchal_ofs_, .Lxchal_ofs_ + 4 | ||
70 | .endif | ||
71 | .endm // xchal_ncp_store | ||
72 | |||
73 | /* Macro to save all non-coprocessor (extra) custom TIE and optional state | ||
74 | * (not including zero-overhead loop registers). | ||
75 | * Save area ptr (clobbered): ptr (1 byte aligned) | ||
76 | * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed) | ||
77 | */ | ||
78 | .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL | ||
79 | xchal_sa_start \continue, \ofs | ||
80 | .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~\select | ||
81 | xchal_sa_align \ptr, 0, 1024-8, 4, 4 | ||
82 | l32i \at1, \ptr, .Lxchal_ofs_ + 0 | ||
83 | l32i \at2, \ptr, .Lxchal_ofs_ + 4 | ||
84 | wsr \at1, ACCLO // MAC16 accumulator | ||
85 | wsr \at2, ACCHI | ||
86 | .set .Lxchal_ofs_, .Lxchal_ofs_ + 8 | ||
87 | .endif | ||
88 | .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select | ||
89 | xchal_sa_align \ptr, 0, 1024-16, 4, 4 | ||
90 | l32i \at1, \ptr, .Lxchal_ofs_ + 0 | ||
91 | l32i \at2, \ptr, .Lxchal_ofs_ + 4 | ||
92 | wsr \at1, M0 // MAC16 registers | ||
93 | wsr \at2, M1 | ||
94 | l32i \at1, \ptr, .Lxchal_ofs_ + 8 | ||
95 | l32i \at2, \ptr, .Lxchal_ofs_ + 12 | ||
96 | wsr \at1, M2 | ||
97 | wsr \at2, M3 | ||
98 | .set .Lxchal_ofs_, .Lxchal_ofs_ + 16 | ||
99 | .endif | ||
100 | .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select | ||
101 | xchal_sa_align \ptr, 0, 1024-4, 4, 4 | ||
102 | l32i \at1, \ptr, .Lxchal_ofs_ + 0 | ||
103 | wsr \at1, SCOMPARE1 // conditional store option | ||
104 | .set .Lxchal_ofs_, .Lxchal_ofs_ + 4 | ||
105 | .endif | ||
106 | .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select | ||
107 | xchal_sa_align \ptr, 0, 1024-4, 4, 4 | ||
108 | l32i \at1, \ptr, .Lxchal_ofs_ + 0 | ||
109 | wur \at1, THREADPTR // threadptr option | ||
110 | .set .Lxchal_ofs_, .Lxchal_ofs_ + 4 | ||
111 | .endif | ||
112 | .endm // xchal_ncp_load | ||
113 | |||
114 | |||
115 | |||
116 | #define XCHAL_NCP_NUM_ATMPS 2 | ||
117 | |||
118 | |||
119 | #define XCHAL_SA_NUM_ATMPS 2 | ||
120 | |||
121 | #endif /*_XTENSA_CORE_TIE_ASM_H*/ | ||
122 | |||
diff --git a/include/asm-xtensa/variant-dc232b/tie.h b/include/asm-xtensa/variant-dc232b/tie.h new file mode 100644 index 000000000000..018e81af4393 --- /dev/null +++ b/include/asm-xtensa/variant-dc232b/tie.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * This header file describes this specific Xtensa processor's TIE extensions | ||
3 | * that extend basic Xtensa core functionality. It is customized to this | ||
4 | * Xtensa processor configuration. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | * | ||
10 | * Copyright (C) 1999-2007 Tensilica Inc. | ||
11 | */ | ||
12 | |||
13 | #ifndef _XTENSA_CORE_TIE_H | ||
14 | #define _XTENSA_CORE_TIE_H | ||
15 | |||
16 | #define XCHAL_CP_NUM 1 /* number of coprocessors */ | ||
17 | #define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */ | ||
18 | #define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */ | ||
19 | #define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */ | ||
20 | |||
21 | /* Basic parameters of each coprocessor: */ | ||
22 | #define XCHAL_CP7_NAME "XTIOP" | ||
23 | #define XCHAL_CP7_IDENT XTIOP | ||
24 | #define XCHAL_CP7_SA_SIZE 0 /* size of state save area */ | ||
25 | #define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */ | ||
26 | #define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */ | ||
27 | |||
28 | /* Filler info for unassigned coprocessors, to simplify arrays etc: */ | ||
29 | #define XCHAL_CP0_SA_SIZE 0 | ||
30 | #define XCHAL_CP0_SA_ALIGN 1 | ||
31 | #define XCHAL_CP1_SA_SIZE 0 | ||
32 | #define XCHAL_CP1_SA_ALIGN 1 | ||
33 | #define XCHAL_CP2_SA_SIZE 0 | ||
34 | #define XCHAL_CP2_SA_ALIGN 1 | ||
35 | #define XCHAL_CP3_SA_SIZE 0 | ||
36 | #define XCHAL_CP3_SA_ALIGN 1 | ||
37 | #define XCHAL_CP4_SA_SIZE 0 | ||
38 | #define XCHAL_CP4_SA_ALIGN 1 | ||
39 | #define XCHAL_CP5_SA_SIZE 0 | ||
40 | #define XCHAL_CP5_SA_ALIGN 1 | ||
41 | #define XCHAL_CP6_SA_SIZE 0 | ||
42 | #define XCHAL_CP6_SA_ALIGN 1 | ||
43 | |||
44 | /* Save area for non-coprocessor optional and custom (TIE) state: */ | ||
45 | #define XCHAL_NCP_SA_SIZE 32 | ||
46 | #define XCHAL_NCP_SA_ALIGN 4 | ||
47 | |||
48 | /* Total save area for optional and custom state (NCP + CPn): */ | ||
49 | #define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */ | ||
50 | #define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */ | ||
51 | |||
52 | /* | ||
53 | * Detailed contents of save areas. | ||
54 | * NOTE: caller must define the XCHAL_SA_REG macro (not defined here) | ||
55 | * before expanding the XCHAL_xxx_SA_LIST() macros. | ||
56 | * | ||
57 | * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize, | ||
58 | * dbnum,base,regnum,bitsz,gapsz,reset,x...) | ||
59 | * | ||
60 | * s = passed from XCHAL_*_LIST(s), eg. to select how to expand | ||
61 | * ccused = set if used by compiler without special options or code | ||
62 | * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global) | ||
63 | * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg) | ||
64 | * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg) | ||
65 | * name = lowercase reg name (no quotes) | ||
66 | * galign = group byte alignment (power of 2) (galign >= align) | ||
67 | * align = register byte alignment (power of 2) | ||
68 | * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz) | ||
69 | * (not including any pad bytes required to galign this or next reg) | ||
70 | * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>) | ||
71 | * base = reg shortname w/o index (or sr=special, ur=TIE user reg) | ||
72 | * regnum = reg index in regfile, or special/TIE-user reg number | ||
73 | * bitsz = number of significant bits (regfile width, or ur/sr mask bits) | ||
74 | * gapsz = intervening bits, if bitsz bits not stored contiguously | ||
75 | * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize) | ||
76 | * reset = register reset value (or 0 if undefined at reset) | ||
77 | * x = reserved for future use (0 until then) | ||
78 | * | ||
79 | * To filter out certain registers, e.g. to expand only the non-global | ||
80 | * registers used by the compiler, you can do something like this: | ||
81 | * | ||
82 | * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p) | ||
83 | * #define SELCC0(p...) | ||
84 | * #define SELCC1(abikind,p...) SELAK##abikind(p) | ||
85 | * #define SELAK0(p...) REG(p) | ||
86 | * #define SELAK1(p...) REG(p) | ||
87 | * #define SELAK2(p...) | ||
88 | * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \ | ||
89 | * ...what you want to expand... | ||
90 | */ | ||
91 | |||
92 | #define XCHAL_NCP_SA_NUM 8 | ||
93 | #define XCHAL_NCP_SA_LIST(s) \ | ||
94 | XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \ | ||
95 | XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \ | ||
96 | XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \ | ||
97 | XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \ | ||
98 | XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \ | ||
99 | XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0) \ | ||
100 | XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \ | ||
101 | XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) | ||
102 | |||
103 | #define XCHAL_CP0_SA_NUM 0 | ||
104 | #define XCHAL_CP0_SA_LIST(s) /* empty */ | ||
105 | |||
106 | #define XCHAL_CP1_SA_NUM 0 | ||
107 | #define XCHAL_CP1_SA_LIST(s) /* empty */ | ||
108 | |||
109 | #define XCHAL_CP2_SA_NUM 0 | ||
110 | #define XCHAL_CP2_SA_LIST(s) /* empty */ | ||
111 | |||
112 | #define XCHAL_CP3_SA_NUM 0 | ||
113 | #define XCHAL_CP3_SA_LIST(s) /* empty */ | ||
114 | |||
115 | #define XCHAL_CP4_SA_NUM 0 | ||
116 | #define XCHAL_CP4_SA_LIST(s) /* empty */ | ||
117 | |||
118 | #define XCHAL_CP5_SA_NUM 0 | ||
119 | #define XCHAL_CP5_SA_LIST(s) /* empty */ | ||
120 | |||
121 | #define XCHAL_CP6_SA_NUM 0 | ||
122 | #define XCHAL_CP6_SA_LIST(s) /* empty */ | ||
123 | |||
124 | #define XCHAL_CP7_SA_NUM 0 | ||
125 | #define XCHAL_CP7_SA_LIST(s) /* empty */ | ||
126 | |||
127 | /* Byte length of instruction from its first nibble (op0 field), per FLIX. */ | ||
128 | #define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3 | ||
129 | |||
130 | #endif /*_XTENSA_CORE_TIE_H*/ | ||
131 | |||
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 1abfe664c444..a08c33a26ca9 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h | |||
@@ -129,6 +129,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, | |||
129 | extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); | 129 | extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); |
130 | extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); | 130 | extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); |
131 | extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); | 131 | extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); |
132 | extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); | ||
132 | 133 | ||
133 | #define BITMAP_LAST_WORD_MASK(nbits) \ | 134 | #define BITMAP_LAST_WORD_MASK(nbits) \ |
134 | ( \ | 135 | ( \ |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 08d783592b73..dfb30db475ed 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -354,6 +354,9 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); | |||
354 | */ | 354 | */ |
355 | #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) | 355 | #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) |
356 | 356 | ||
357 | #define dm_array_too_big(fixed, obj, num) \ | ||
358 | ((num) > (UINT_MAX - (fixed)) / (obj)) | ||
359 | |||
357 | static inline sector_t to_sector(unsigned long n) | 360 | static inline sector_t to_sector(unsigned long n) |
358 | { | 361 | { |
359 | return (n >> SECTOR_SHIFT); | 362 | return (n >> SECTOR_SHIFT); |
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h new file mode 100644 index 000000000000..a9e652a41373 --- /dev/null +++ b/include/linux/dm-region-hash.h | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 Sistina Software Limited. | ||
3 | * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. | ||
4 | * | ||
5 | * Device-Mapper dirty region hash interface. | ||
6 | * | ||
7 | * This file is released under the GPL. | ||
8 | */ | ||
9 | |||
10 | #ifndef DM_REGION_HASH_H | ||
11 | #define DM_REGION_HASH_H | ||
12 | |||
13 | #include <linux/dm-dirty-log.h> | ||
14 | |||
15 | /*----------------------------------------------------------------- | ||
16 | * Region hash | ||
17 | *----------------------------------------------------------------*/ | ||
18 | struct dm_region_hash; | ||
19 | struct dm_region; | ||
20 | |||
21 | /* | ||
22 | * States a region can have. | ||
23 | */ | ||
24 | enum dm_rh_region_states { | ||
25 | DM_RH_CLEAN = 0x01, /* No writes in flight. */ | ||
26 | DM_RH_DIRTY = 0x02, /* Writes in flight. */ | ||
27 | DM_RH_NOSYNC = 0x04, /* Out of sync. */ | ||
28 | DM_RH_RECOVERING = 0x08, /* Under resynchronization. */ | ||
29 | }; | ||
30 | |||
31 | /* | ||
32 | * Region hash create/destroy. | ||
33 | */ | ||
34 | struct bio_list; | ||
35 | struct dm_region_hash *dm_region_hash_create( | ||
36 | void *context, void (*dispatch_bios)(void *context, | ||
37 | struct bio_list *bios), | ||
38 | void (*wakeup_workers)(void *context), | ||
39 | void (*wakeup_all_recovery_waiters)(void *context), | ||
40 | sector_t target_begin, unsigned max_recovery, | ||
41 | struct dm_dirty_log *log, uint32_t region_size, | ||
42 | region_t nr_regions); | ||
43 | void dm_region_hash_destroy(struct dm_region_hash *rh); | ||
44 | |||
45 | struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh); | ||
46 | |||
47 | /* | ||
48 | * Conversion functions. | ||
49 | */ | ||
50 | region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio); | ||
51 | sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region); | ||
52 | void *dm_rh_region_context(struct dm_region *reg); | ||
53 | |||
54 | /* | ||
55 | * Get region size and key (ie. number of the region). | ||
56 | */ | ||
57 | sector_t dm_rh_get_region_size(struct dm_region_hash *rh); | ||
58 | region_t dm_rh_get_region_key(struct dm_region *reg); | ||
59 | |||
60 | /* | ||
61 | * Get/set/update region state (and dirty log). | ||
62 | * | ||
63 | */ | ||
64 | int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block); | ||
65 | void dm_rh_set_state(struct dm_region_hash *rh, region_t region, | ||
66 | enum dm_rh_region_states state, int may_block); | ||
67 | |||
68 | /* Non-zero errors_handled leaves the state of the region NOSYNC */ | ||
69 | void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled); | ||
70 | |||
71 | /* Flush the region hash and dirty log. */ | ||
72 | int dm_rh_flush(struct dm_region_hash *rh); | ||
73 | |||
74 | /* Inc/dec pending count on regions. */ | ||
75 | void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios); | ||
76 | void dm_rh_dec(struct dm_region_hash *rh, region_t region); | ||
77 | |||
78 | /* Delay bios on regions. */ | ||
79 | void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); | ||
80 | |||
81 | void dm_rh_mark_nosync(struct dm_region_hash *rh, | ||
82 | struct bio *bio, unsigned done, int error); | ||
83 | |||
84 | /* | ||
85 | * Region recovery control. | ||
86 | */ | ||
87 | |||
88 | /* Prepare some regions for recovery by starting to quiesce them. */ | ||
89 | void dm_rh_recovery_prepare(struct dm_region_hash *rh); | ||
90 | |||
91 | /* Try fetching a quiesced region for recovery. */ | ||
92 | struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh); | ||
93 | |||
94 | /* Report recovery end on a region. */ | ||
95 | void dm_rh_recovery_end(struct dm_region *reg, int error); | ||
96 | |||
97 | /* Returns number of regions with recovery work outstanding. */ | ||
98 | int dm_rh_recovery_in_flight(struct dm_region_hash *rh); | ||
99 | |||
100 | /* Start/stop recovery. */ | ||
101 | void dm_rh_start_recovery(struct dm_region_hash *rh); | ||
102 | void dm_rh_stop_recovery(struct dm_region_hash *rh); | ||
103 | |||
104 | #endif /* DM_REGION_HASH_H */ | ||
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index bff5c65f81dc..952df39c989d 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h | |||
@@ -2,15 +2,14 @@ | |||
2 | #define _DMA_REMAPPING_H | 2 | #define _DMA_REMAPPING_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * We need a fixed PAGE_SIZE of 4K irrespective of | 5 | * VT-d hardware uses 4KiB page size regardless of host page size. |
6 | * arch PAGE_SIZE for IOMMU page tables. | ||
7 | */ | 6 | */ |
8 | #define PAGE_SHIFT_4K (12) | 7 | #define VTD_PAGE_SHIFT (12) |
9 | #define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) | 8 | #define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) |
10 | #define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) | 9 | #define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) |
11 | #define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) | 10 | #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) |
12 | 11 | ||
13 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) | 12 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) |
14 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) | 13 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) |
15 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) | 14 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) |
16 | 15 | ||
@@ -25,7 +24,7 @@ struct root_entry { | |||
25 | u64 val; | 24 | u64 val; |
26 | u64 rsvd1; | 25 | u64 rsvd1; |
27 | }; | 26 | }; |
28 | #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) | 27 | #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) |
29 | static inline bool root_present(struct root_entry *root) | 28 | static inline bool root_present(struct root_entry *root) |
30 | { | 29 | { |
31 | return (root->val & 1); | 30 | return (root->val & 1); |
@@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root) | |||
36 | } | 35 | } |
37 | static inline void set_root_value(struct root_entry *root, unsigned long value) | 36 | static inline void set_root_value(struct root_entry *root, unsigned long value) |
38 | { | 37 | { |
39 | root->val |= value & PAGE_MASK_4K; | 38 | root->val |= value & VTD_PAGE_MASK; |
40 | } | 39 | } |
41 | 40 | ||
42 | struct context_entry; | 41 | struct context_entry; |
@@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root) | |||
45 | { | 44 | { |
46 | return (struct context_entry *) | 45 | return (struct context_entry *) |
47 | (root_present(root)?phys_to_virt( | 46 | (root_present(root)?phys_to_virt( |
48 | root->val & PAGE_MASK_4K): | 47 | root->val & VTD_PAGE_MASK) : |
49 | NULL); | 48 | NULL); |
50 | } | 49 | } |
51 | 50 | ||
@@ -67,7 +66,7 @@ struct context_entry { | |||
67 | #define context_present(c) ((c).lo & 1) | 66 | #define context_present(c) ((c).lo & 1) |
68 | #define context_fault_disable(c) (((c).lo >> 1) & 1) | 67 | #define context_fault_disable(c) (((c).lo >> 1) & 1) |
69 | #define context_translation_type(c) (((c).lo >> 2) & 3) | 68 | #define context_translation_type(c) (((c).lo >> 2) & 3) |
70 | #define context_address_root(c) ((c).lo & PAGE_MASK_4K) | 69 | #define context_address_root(c) ((c).lo & VTD_PAGE_MASK) |
71 | #define context_address_width(c) ((c).hi & 7) | 70 | #define context_address_width(c) ((c).hi & 7) |
72 | #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) | 71 | #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) |
73 | 72 | ||
@@ -81,7 +80,7 @@ struct context_entry { | |||
81 | } while (0) | 80 | } while (0) |
82 | #define CONTEXT_TT_MULTI_LEVEL 0 | 81 | #define CONTEXT_TT_MULTI_LEVEL 0 |
83 | #define context_set_address_root(c, val) \ | 82 | #define context_set_address_root(c, val) \ |
84 | do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) | 83 | do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0) |
85 | #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) | 84 | #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) |
86 | #define context_set_domain_id(c, val) \ | 85 | #define context_set_domain_id(c, val) \ |
87 | do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) | 86 | do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) |
@@ -107,9 +106,9 @@ struct dma_pte { | |||
107 | #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) | 106 | #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) |
108 | #define dma_set_pte_prot(p, prot) \ | 107 | #define dma_set_pte_prot(p, prot) \ |
109 | do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) | 108 | do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) |
110 | #define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) | 109 | #define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK) |
111 | #define dma_set_pte_addr(p, addr) do {\ | 110 | #define dma_set_pte_addr(p, addr) do {\ |
112 | (p).val |= ((addr) & PAGE_MASK_4K); } while (0) | 111 | (p).val |= ((addr) & VTD_PAGE_MASK); } while (0) |
113 | #define dma_pte_present(p) (((p).val & 3) != 0) | 112 | #define dma_pte_present(p) (((p).val & 3) != 0) |
114 | 113 | ||
115 | struct intel_iommu; | 114 | struct intel_iommu; |
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index cdb453162a97..fb604dcd38f1 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h | |||
@@ -228,6 +228,12 @@ struct twl4030_gpio_platform_data { | |||
228 | int gpio_base; | 228 | int gpio_base; |
229 | unsigned irq_base, irq_end; | 229 | unsigned irq_base, irq_end; |
230 | 230 | ||
231 | /* package the two LED signals as output-only GPIOs? */ | ||
232 | bool use_leds; | ||
233 | |||
234 | /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */ | ||
235 | u8 mmc_cd; | ||
236 | |||
231 | /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup | 237 | /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup |
232 | * should be enabled. Else, if that bit is set in "pulldowns", | 238 | * should be enabled. Else, if that bit is set in "pulldowns", |
233 | * that pulldown is enabled. Don't waste power by letting any | 239 | * that pulldown is enabled. Don't waste power by letting any |
@@ -277,6 +283,8 @@ struct twl4030_platform_data { | |||
277 | 283 | ||
278 | /*----------------------------------------------------------------------*/ | 284 | /*----------------------------------------------------------------------*/ |
279 | 285 | ||
286 | int twl4030_sih_setup(int module); | ||
287 | |||
280 | /* | 288 | /* |
281 | * FIXME completely stop using TWL4030_IRQ_BASE ... instead, pass the | 289 | * FIXME completely stop using TWL4030_IRQ_BASE ... instead, pass the |
282 | * IRQ data to subsidiary devices using platform device resources. | 290 | * IRQ data to subsidiary devices using platform device resources. |
@@ -291,16 +299,16 @@ struct twl4030_platform_data { | |||
291 | #define TWL4030_MODIRQ_BCI (TWL4030_IRQ_BASE + 2) | 299 | #define TWL4030_MODIRQ_BCI (TWL4030_IRQ_BASE + 2) |
292 | #define TWL4030_MODIRQ_MADC (TWL4030_IRQ_BASE + 3) | 300 | #define TWL4030_MODIRQ_MADC (TWL4030_IRQ_BASE + 3) |
293 | /* #define TWL4030_MODIRQ_USB (TWL4030_IRQ_BASE + 4) */ | 301 | /* #define TWL4030_MODIRQ_USB (TWL4030_IRQ_BASE + 4) */ |
294 | #define TWL4030_MODIRQ_PWR (TWL4030_IRQ_BASE + 5) | 302 | /* #define TWL4030_MODIRQ_PWR (TWL4030_IRQ_BASE + 5) */ |
295 | 303 | ||
296 | #define TWL4030_PWRIRQ_PWRBTN (TWL4030_PWR_IRQ_BASE + 0) | 304 | #define TWL4030_PWRIRQ_PWRBTN (TWL4030_PWR_IRQ_BASE + 0) |
297 | #define TWL4030_PWRIRQ_CHG_PRES (TWL4030_PWR_IRQ_BASE + 1) | 305 | /* #define TWL4030_PWRIRQ_CHG_PRES (TWL4030_PWR_IRQ_BASE + 1) */ |
298 | #define TWL4030_PWRIRQ_USB_PRES (TWL4030_PWR_IRQ_BASE + 2) | 306 | /* #define TWL4030_PWRIRQ_USB_PRES (TWL4030_PWR_IRQ_BASE + 2) */ |
299 | #define TWL4030_PWRIRQ_RTC (TWL4030_PWR_IRQ_BASE + 3) | 307 | /* #define TWL4030_PWRIRQ_RTC (TWL4030_PWR_IRQ_BASE + 3) */ |
300 | #define TWL4030_PWRIRQ_HOT_DIE (TWL4030_PWR_IRQ_BASE + 4) | 308 | /* #define TWL4030_PWRIRQ_HOT_DIE (TWL4030_PWR_IRQ_BASE + 4) */ |
301 | #define TWL4030_PWRIRQ_PWROK_TIMEOUT (TWL4030_PWR_IRQ_BASE + 5) | 309 | /* #define TWL4030_PWRIRQ_PWROK_TIMEOUT (TWL4030_PWR_IRQ_BASE + 5) */ |
302 | #define TWL4030_PWRIRQ_MBCHG (TWL4030_PWR_IRQ_BASE + 6) | 310 | /* #define TWL4030_PWRIRQ_MBCHG (TWL4030_PWR_IRQ_BASE + 6) */ |
303 | #define TWL4030_PWRIRQ_SC_DETECT (TWL4030_PWR_IRQ_BASE + 7) | 311 | /* #define TWL4030_PWRIRQ_SC_DETECT (TWL4030_PWR_IRQ_BASE + 7) */ |
304 | 312 | ||
305 | /* Rest are unsued currently*/ | 313 | /* Rest are unsued currently*/ |
306 | 314 | ||
@@ -317,17 +325,13 @@ struct twl4030_platform_data { | |||
317 | /* TWL4030 GPIO interrupt definitions */ | 325 | /* TWL4030 GPIO interrupt definitions */ |
318 | 326 | ||
319 | #define TWL4030_GPIO_IRQ_NO(n) (TWL4030_GPIO_IRQ_BASE + (n)) | 327 | #define TWL4030_GPIO_IRQ_NO(n) (TWL4030_GPIO_IRQ_BASE + (n)) |
320 | #define TWL4030_GPIO_IS_ENABLE 1 | ||
321 | 328 | ||
322 | /* | 329 | /* |
323 | * Exported TWL4030 GPIO APIs | 330 | * Exported TWL4030 GPIO APIs |
324 | * | 331 | * |
325 | * WARNING -- use standard GPIO and IRQ calls instead; these will vanish. | 332 | * WARNING -- use standard GPIO and IRQ calls instead; these will vanish. |
326 | */ | 333 | */ |
327 | int twl4030_get_gpio_datain(int gpio); | ||
328 | int twl4030_request_gpio(int gpio); | ||
329 | int twl4030_set_gpio_debounce(int gpio, int enable); | 334 | int twl4030_set_gpio_debounce(int gpio, int enable); |
330 | int twl4030_free_gpio(int gpio); | ||
331 | 335 | ||
332 | #if defined(CONFIG_TWL4030_BCI_BATTERY) || \ | 336 | #if defined(CONFIG_TWL4030_BCI_BATTERY) || \ |
333 | defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) | 337 | defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2e117f30a76c..3d017cfd245b 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | #include <linux/dma_remapping.h> | 30 | #include <linux/dma_remapping.h> |
31 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
32 | #include <asm/iommu.h> | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Intel IOMMU register specification per version 1.0 public spec. | 35 | * Intel IOMMU register specification per version 1.0 public spec. |
@@ -127,6 +128,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
127 | 128 | ||
128 | 129 | ||
129 | /* IOTLB_REG */ | 130 | /* IOTLB_REG */ |
131 | #define DMA_TLB_FLUSH_GRANU_OFFSET 60 | ||
130 | #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) | 132 | #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) |
131 | #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) | 133 | #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) |
132 | #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) | 134 | #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) |
@@ -140,6 +142,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
140 | #define DMA_TLB_MAX_SIZE (0x3f) | 142 | #define DMA_TLB_MAX_SIZE (0x3f) |
141 | 143 | ||
142 | /* INVALID_DESC */ | 144 | /* INVALID_DESC */ |
145 | #define DMA_CCMD_INVL_GRANU_OFFSET 61 | ||
143 | #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) | 146 | #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) |
144 | #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) | 147 | #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) |
145 | #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) | 148 | #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) |
@@ -200,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
200 | #define dma_frcd_type(d) ((d >> 30) & 1) | 203 | #define dma_frcd_type(d) ((d >> 30) & 1) |
201 | #define dma_frcd_fault_reason(c) (c & 0xff) | 204 | #define dma_frcd_fault_reason(c) (c & 0xff) |
202 | #define dma_frcd_source_id(c) (c & 0xffff) | 205 | #define dma_frcd_source_id(c) (c & 0xffff) |
203 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ | 206 | /* low 64 bit */ |
204 | 207 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) | |
205 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ | 208 | |
206 | 209 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ | |
207 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ | 210 | do { \ |
208 | {\ | 211 | cycles_t start_time = get_cycles(); \ |
209 | cycles_t start_time = get_cycles();\ | 212 | while (1) { \ |
210 | while (1) {\ | 213 | sts = op(iommu->reg + offset); \ |
211 | sts = op (iommu->reg + offset);\ | 214 | if (cond) \ |
212 | if (cond)\ | 215 | break; \ |
213 | break;\ | ||
214 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ | 216 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ |
215 | panic("DMAR hardware is malfunctioning\n");\ | 217 | panic("DMAR hardware is malfunctioning\n"); \ |
216 | cpu_relax();\ | 218 | cpu_relax(); \ |
217 | }\ | 219 | } \ |
218 | } | 220 | } while (0) |
219 | 221 | ||
220 | #define QI_LENGTH 256 /* queue length */ | 222 | #define QI_LENGTH 256 /* queue length */ |
221 | 223 | ||
@@ -238,6 +240,19 @@ enum { | |||
238 | #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) | 240 | #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) |
239 | #define QI_IWD_STATUS_WRITE (((u64)1) << 5) | 241 | #define QI_IWD_STATUS_WRITE (((u64)1) << 5) |
240 | 242 | ||
243 | #define QI_IOTLB_DID(did) (((u64)did) << 16) | ||
244 | #define QI_IOTLB_DR(dr) (((u64)dr) << 7) | ||
245 | #define QI_IOTLB_DW(dw) (((u64)dw) << 6) | ||
246 | #define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) | ||
247 | #define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) | ||
248 | #define QI_IOTLB_IH(ih) (((u64)ih) << 6) | ||
249 | #define QI_IOTLB_AM(am) (((u8)am)) | ||
250 | |||
251 | #define QI_CC_FM(fm) (((u64)fm) << 48) | ||
252 | #define QI_CC_SID(sid) (((u64)sid) << 32) | ||
253 | #define QI_CC_DID(did) (((u64)did) << 16) | ||
254 | #define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4)) | ||
255 | |||
241 | struct qi_desc { | 256 | struct qi_desc { |
242 | u64 low, high; | 257 | u64 low, high; |
243 | }; | 258 | }; |
@@ -263,6 +278,13 @@ struct ir_table { | |||
263 | }; | 278 | }; |
264 | #endif | 279 | #endif |
265 | 280 | ||
281 | struct iommu_flush { | ||
282 | int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, | ||
283 | u64 type, int non_present_entry_flush); | ||
284 | int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, | ||
285 | unsigned int size_order, u64 type, int non_present_entry_flush); | ||
286 | }; | ||
287 | |||
266 | struct intel_iommu { | 288 | struct intel_iommu { |
267 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ | 289 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ |
268 | u64 cap; | 290 | u64 cap; |
@@ -282,6 +304,7 @@ struct intel_iommu { | |||
282 | unsigned char name[7]; /* Device Name */ | 304 | unsigned char name[7]; /* Device Name */ |
283 | struct msi_msg saved_msg; | 305 | struct msi_msg saved_msg; |
284 | struct sys_device sysdev; | 306 | struct sys_device sysdev; |
307 | struct iommu_flush flush; | ||
285 | #endif | 308 | #endif |
286 | struct q_inval *qi; /* Queued invalidation info */ | 309 | struct q_inval *qi; /* Queued invalidation info */ |
287 | #ifdef CONFIG_INTR_REMAP | 310 | #ifdef CONFIG_INTR_REMAP |
@@ -303,6 +326,12 @@ extern void free_iommu(struct intel_iommu *iommu); | |||
303 | extern int dmar_enable_qi(struct intel_iommu *iommu); | 326 | extern int dmar_enable_qi(struct intel_iommu *iommu); |
304 | extern void qi_global_iec(struct intel_iommu *iommu); | 327 | extern void qi_global_iec(struct intel_iommu *iommu); |
305 | 328 | ||
329 | extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, | ||
330 | u8 fm, u64 type, int non_present_entry_flush); | ||
331 | extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | ||
332 | unsigned int size_order, u64 type, | ||
333 | int non_present_entry_flush); | ||
334 | |||
306 | extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); | 335 | extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); |
307 | 336 | ||
308 | void intel_iommu_domain_exit(struct dmar_domain *domain); | 337 | void intel_iommu_domain_exit(struct dmar_domain *domain); |
@@ -324,4 +353,11 @@ static inline int intel_iommu_found(void) | |||
324 | } | 353 | } |
325 | #endif /* CONFIG_DMAR */ | 354 | #endif /* CONFIG_DMAR */ |
326 | 355 | ||
356 | extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); | ||
357 | extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); | ||
358 | extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int); | ||
359 | extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int); | ||
360 | extern int intel_map_sg(struct device *, struct scatterlist *, int, int); | ||
361 | extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int); | ||
362 | |||
327 | #endif | 363 | #endif |
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 3171ddc3b39d..452c280c8115 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h | |||
@@ -13,9 +13,9 @@ extern int nr_irqs; | |||
13 | # define for_each_irq_desc(irq, desc) \ | 13 | # define for_each_irq_desc(irq, desc) \ |
14 | for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) | 14 | for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) |
15 | 15 | ||
16 | # define for_each_irq_desc_reverse(irq, desc) \ | 16 | # define for_each_irq_desc_reverse(irq, desc) \ |
17 | for (irq = nr_irqs -1, desc = irq_desc + (nr_irqs -1 ); \ | 17 | for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ |
18 | irq > 0; irq--, desc--) | 18 | irq >= 0; irq--, desc--) |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #define for_each_irq_nr(irq) \ | 21 | #define for_each_irq_nr(irq) \ |
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 35d4f6342fac..346e2b80be7d 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
@@ -911,7 +911,7 @@ extern int journal_set_features | |||
911 | (journal_t *, unsigned long, unsigned long, unsigned long); | 911 | (journal_t *, unsigned long, unsigned long, unsigned long); |
912 | extern int journal_create (journal_t *); | 912 | extern int journal_create (journal_t *); |
913 | extern int journal_load (journal_t *journal); | 913 | extern int journal_load (journal_t *journal); |
914 | extern void journal_destroy (journal_t *); | 914 | extern int journal_destroy (journal_t *); |
915 | extern int journal_recover (journal_t *journal); | 915 | extern int journal_recover (journal_t *journal); |
916 | extern int journal_wipe (journal_t *, int); | 916 | extern int journal_wipe (journal_t *, int); |
917 | extern int journal_skip_recovery (journal_t *); | 917 | extern int journal_skip_recovery (journal_t *); |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 947cf84e555d..c261aa0584b1 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -340,6 +340,9 @@ enum { | |||
340 | 340 | ||
341 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, | 341 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, |
342 | 342 | ||
343 | /* mask of flags to transfer *to* the slave link */ | ||
344 | ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, | ||
345 | |||
343 | /* max tries if error condition is still set after ->error_handler */ | 346 | /* max tries if error condition is still set after ->error_handler */ |
344 | ATA_EH_MAX_TRIES = 5, | 347 | ATA_EH_MAX_TRIES = 5, |
345 | 348 | ||
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 77323a72dd3c..cf9c679ab38b 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
@@ -132,6 +132,15 @@ enum { | |||
132 | MLX4_MAILBOX_SIZE = 4096 | 132 | MLX4_MAILBOX_SIZE = 4096 |
133 | }; | 133 | }; |
134 | 134 | ||
135 | enum { | ||
136 | /* set port opcode modifiers */ | ||
137 | MLX4_SET_PORT_GENERAL = 0x0, | ||
138 | MLX4_SET_PORT_RQP_CALC = 0x1, | ||
139 | MLX4_SET_PORT_MAC_TABLE = 0x2, | ||
140 | MLX4_SET_PORT_VLAN_TABLE = 0x3, | ||
141 | MLX4_SET_PORT_PRIO_MAP = 0x4, | ||
142 | }; | ||
143 | |||
135 | struct mlx4_dev; | 144 | struct mlx4_dev; |
136 | 145 | ||
137 | struct mlx4_cmd_mailbox { | 146 | struct mlx4_cmd_mailbox { |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index b2f944468313..bd9977b89490 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -60,6 +60,7 @@ enum { | |||
60 | MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7, | 60 | MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7, |
61 | MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, | 61 | MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, |
62 | MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, | 62 | MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, |
63 | MLX4_DEV_CAP_FLAG_DPDP = 1 << 12, | ||
63 | MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, | 64 | MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, |
64 | MLX4_DEV_CAP_FLAG_APM = 1 << 17, | 65 | MLX4_DEV_CAP_FLAG_APM = 1 << 17, |
65 | MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, | 66 | MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, |
@@ -145,6 +146,29 @@ enum { | |||
145 | MLX4_MTT_FLAG_PRESENT = 1 | 146 | MLX4_MTT_FLAG_PRESENT = 1 |
146 | }; | 147 | }; |
147 | 148 | ||
149 | enum mlx4_qp_region { | ||
150 | MLX4_QP_REGION_FW = 0, | ||
151 | MLX4_QP_REGION_ETH_ADDR, | ||
152 | MLX4_QP_REGION_FC_ADDR, | ||
153 | MLX4_QP_REGION_FC_EXCH, | ||
154 | MLX4_NUM_QP_REGION | ||
155 | }; | ||
156 | |||
157 | enum mlx4_port_type { | ||
158 | MLX4_PORT_TYPE_IB = 1 << 0, | ||
159 | MLX4_PORT_TYPE_ETH = 1 << 1, | ||
160 | }; | ||
161 | |||
162 | enum mlx4_special_vlan_idx { | ||
163 | MLX4_NO_VLAN_IDX = 0, | ||
164 | MLX4_VLAN_MISS_IDX, | ||
165 | MLX4_VLAN_REGULAR | ||
166 | }; | ||
167 | |||
168 | enum { | ||
169 | MLX4_NUM_FEXCH = 64 * 1024, | ||
170 | }; | ||
171 | |||
148 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) | 172 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) |
149 | { | 173 | { |
150 | return (major << 32) | (minor << 16) | subminor; | 174 | return (major << 32) | (minor << 16) | subminor; |
@@ -154,7 +178,9 @@ struct mlx4_caps { | |||
154 | u64 fw_ver; | 178 | u64 fw_ver; |
155 | int num_ports; | 179 | int num_ports; |
156 | int vl_cap[MLX4_MAX_PORTS + 1]; | 180 | int vl_cap[MLX4_MAX_PORTS + 1]; |
157 | int mtu_cap[MLX4_MAX_PORTS + 1]; | 181 | int ib_mtu_cap[MLX4_MAX_PORTS + 1]; |
182 | u64 def_mac[MLX4_MAX_PORTS + 1]; | ||
183 | int eth_mtu_cap[MLX4_MAX_PORTS + 1]; | ||
158 | int gid_table_len[MLX4_MAX_PORTS + 1]; | 184 | int gid_table_len[MLX4_MAX_PORTS + 1]; |
159 | int pkey_table_len[MLX4_MAX_PORTS + 1]; | 185 | int pkey_table_len[MLX4_MAX_PORTS + 1]; |
160 | int local_ca_ack_delay; | 186 | int local_ca_ack_delay; |
@@ -169,7 +195,6 @@ struct mlx4_caps { | |||
169 | int max_rq_desc_sz; | 195 | int max_rq_desc_sz; |
170 | int max_qp_init_rdma; | 196 | int max_qp_init_rdma; |
171 | int max_qp_dest_rdma; | 197 | int max_qp_dest_rdma; |
172 | int reserved_qps; | ||
173 | int sqp_start; | 198 | int sqp_start; |
174 | int num_srqs; | 199 | int num_srqs; |
175 | int max_srq_wqes; | 200 | int max_srq_wqes; |
@@ -201,6 +226,15 @@ struct mlx4_caps { | |||
201 | u16 stat_rate_support; | 226 | u16 stat_rate_support; |
202 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; | 227 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; |
203 | int max_gso_sz; | 228 | int max_gso_sz; |
229 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; | ||
230 | int reserved_qps; | ||
231 | int reserved_qps_base[MLX4_NUM_QP_REGION]; | ||
232 | int log_num_macs; | ||
233 | int log_num_vlans; | ||
234 | int log_num_prios; | ||
235 | enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; | ||
236 | u8 supported_type[MLX4_MAX_PORTS + 1]; | ||
237 | u32 port_mask; | ||
204 | }; | 238 | }; |
205 | 239 | ||
206 | struct mlx4_buf_list { | 240 | struct mlx4_buf_list { |
@@ -355,6 +389,11 @@ struct mlx4_init_port_param { | |||
355 | u64 si_guid; | 389 | u64 si_guid; |
356 | }; | 390 | }; |
357 | 391 | ||
392 | #define mlx4_foreach_port(port, dev, type) \ | ||
393 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ | ||
394 | if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \ | ||
395 | ~(dev)->caps.port_mask) & 1 << ((port) - 1)) | ||
396 | |||
358 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | 397 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, |
359 | struct mlx4_buf *buf); | 398 | struct mlx4_buf *buf); |
360 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); | 399 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); |
@@ -400,7 +439,10 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | |||
400 | int collapsed); | 439 | int collapsed); |
401 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | 440 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); |
402 | 441 | ||
403 | int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp); | 442 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); |
443 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | ||
444 | |||
445 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); | ||
404 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); | 446 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); |
405 | 447 | ||
406 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, | 448 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, |
@@ -416,6 +458,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
416 | int block_mcast_loopback); | 458 | int block_mcast_loopback); |
417 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); | 459 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); |
418 | 460 | ||
461 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index); | ||
462 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index); | ||
463 | |||
464 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); | ||
465 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); | ||
466 | |||
419 | int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, | 467 | int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, |
420 | int npages, u64 iova, u32 *lkey, u32 *rkey); | 468 | int npages, u64 iova, u32 *lkey, u32 *rkey); |
421 | int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, | 469 | int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, |
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index bcb8f725427c..5231861f357d 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -86,13 +86,6 @@ int oprofile_arch_init(struct oprofile_operations * ops); | |||
86 | void oprofile_arch_exit(void); | 86 | void oprofile_arch_exit(void); |
87 | 87 | ||
88 | /** | 88 | /** |
89 | * Add data to the event buffer. | ||
90 | * The data passed is free-form, but typically consists of | ||
91 | * file offsets, dcookies, context information, and ESCAPE codes. | ||
92 | */ | ||
93 | void add_event_entry(unsigned long data); | ||
94 | |||
95 | /** | ||
96 | * Add a sample. This may be called from any context. Pass | 89 | * Add a sample. This may be called from any context. Pass |
97 | * smp_processor_id() as cpu. | 90 | * smp_processor_id() as cpu. |
98 | */ | 91 | */ |
@@ -162,5 +155,14 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, siz | |||
162 | 155 | ||
163 | /** lock for read/write safety */ | 156 | /** lock for read/write safety */ |
164 | extern spinlock_t oprofilefs_lock; | 157 | extern spinlock_t oprofilefs_lock; |
158 | |||
159 | /** | ||
160 | * Add the contents of a circular buffer to the event buffer. | ||
161 | */ | ||
162 | void oprofile_put_buff(unsigned long *buf, unsigned int start, | ||
163 | unsigned int stop, unsigned int max); | ||
164 | |||
165 | unsigned long oprofile_get_cpu_buffer_size(void); | ||
166 | void oprofile_cpu_buffer_inc_smpl_lost(void); | ||
165 | 167 | ||
166 | #endif /* OPROFILE_H */ | 168 | #endif /* OPROFILE_H */ |
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 0fd39f2231ec..f546ad6fc028 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -99,5 +99,10 @@ static inline struct page_cgroup *lookup_page_cgroup(struct page *page) | |||
99 | { | 99 | { |
100 | return NULL; | 100 | return NULL; |
101 | } | 101 | } |
102 | |||
103 | static inline void page_cgroup_init(void) | ||
104 | { | ||
105 | } | ||
106 | |||
102 | #endif | 107 | #endif |
103 | #endif | 108 | #endif |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index e5d344bfcb7e..369f44286353 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1944,6 +1944,14 @@ | |||
1944 | 1944 | ||
1945 | #define PCI_VENDOR_ID_OXSEMI 0x1415 | 1945 | #define PCI_VENDOR_ID_OXSEMI 0x1415 |
1946 | #define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 | 1946 | #define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 |
1947 | #define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000 | ||
1948 | #define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004 | ||
1949 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100 | ||
1950 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104 | ||
1951 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110 | ||
1952 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114 | ||
1953 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 | ||
1954 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C | ||
1947 | #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 | 1955 | #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 |
1948 | #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 | 1956 | #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 |
1949 | #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 | 1957 | #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 |
diff --git a/include/linux/profile.h b/include/linux/profile.h index 570045053ce9..a0fc32279fc0 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h | |||
@@ -19,10 +19,16 @@ struct notifier_block; | |||
19 | 19 | ||
20 | #if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) | 20 | #if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) |
21 | void create_prof_cpu_mask(struct proc_dir_entry *de); | 21 | void create_prof_cpu_mask(struct proc_dir_entry *de); |
22 | int create_proc_profile(void); | ||
22 | #else | 23 | #else |
23 | static inline void create_prof_cpu_mask(struct proc_dir_entry *de) | 24 | static inline void create_prof_cpu_mask(struct proc_dir_entry *de) |
24 | { | 25 | { |
25 | } | 26 | } |
27 | |||
28 | static inline int create_proc_profile(void) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
26 | #endif | 32 | #endif |
27 | 33 | ||
28 | enum profile_type { | 34 | enum profile_type { |
@@ -37,7 +43,6 @@ extern int prof_on __read_mostly; | |||
37 | /* init basic kernel profiler */ | 43 | /* init basic kernel profiler */ |
38 | int profile_init(void); | 44 | int profile_init(void); |
39 | int profile_setup(char *str); | 45 | int profile_setup(char *str); |
40 | int create_proc_profile(void); | ||
41 | void profile_tick(int type); | 46 | void profile_tick(int type); |
42 | 47 | ||
43 | /* | 48 | /* |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5c38db536e07..10bff55b0824 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -287,7 +287,6 @@ extern void trap_init(void); | |||
287 | extern void account_process_tick(struct task_struct *task, int user); | 287 | extern void account_process_tick(struct task_struct *task, int user); |
288 | extern void update_process_times(int user); | 288 | extern void update_process_times(int user); |
289 | extern void scheduler_tick(void); | 289 | extern void scheduler_tick(void); |
290 | extern void hrtick_resched(void); | ||
291 | 290 | ||
292 | extern void sched_show_task(struct task_struct *p); | 291 | extern void sched_show_task(struct task_struct *p); |
293 | 292 | ||
@@ -1665,6 +1664,7 @@ extern unsigned int sysctl_sched_features; | |||
1665 | extern unsigned int sysctl_sched_migration_cost; | 1664 | extern unsigned int sysctl_sched_migration_cost; |
1666 | extern unsigned int sysctl_sched_nr_migrate; | 1665 | extern unsigned int sysctl_sched_nr_migrate; |
1667 | extern unsigned int sysctl_sched_shares_ratelimit; | 1666 | extern unsigned int sysctl_sched_shares_ratelimit; |
1667 | extern unsigned int sysctl_sched_shares_thresh; | ||
1668 | 1668 | ||
1669 | int sched_nr_latency_handler(struct ctl_table *table, int write, | 1669 | int sched_nr_latency_handler(struct ctl_table *table, int write, |
1670 | struct file *file, void __user *buffer, size_t *length, | 1670 | struct file *file, void __user *buffer, size_t *length, |
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h new file mode 100644 index 000000000000..a102561e7026 --- /dev/null +++ b/include/linux/usb/wusb-wa.h | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * Wireless USB Wire Adapter constants and structures. | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Intel Corporation. | ||
5 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * FIXME: docs | ||
23 | * FIXME: organize properly, group logically | ||
24 | * | ||
25 | * All the event structures are defined in uwb/spec.h, as they are | ||
26 | * common to the WHCI and WUSB radio control interfaces. | ||
27 | * | ||
28 | * References: | ||
29 | * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8 | ||
30 | */ | ||
31 | #ifndef __LINUX_USB_WUSB_WA_H | ||
32 | #define __LINUX_USB_WUSB_WA_H | ||
33 | |||
34 | /** | ||
35 | * Radio Command Request for the Radio Control Interface | ||
36 | * | ||
37 | * Radio Control Interface command and event codes are the same as | ||
38 | * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_* | ||
39 | */ | ||
40 | enum { | ||
41 | WA_EXEC_RC_CMD = 40, /* Radio Control command Request */ | ||
42 | }; | ||
43 | |||
44 | /* Wireless Adapter Requests ([WUSB] table 8-51) */ | ||
45 | enum { | ||
46 | WUSB_REQ_ADD_MMC_IE = 20, | ||
47 | WUSB_REQ_REMOVE_MMC_IE = 21, | ||
48 | WUSB_REQ_SET_NUM_DNTS = 22, | ||
49 | WUSB_REQ_SET_CLUSTER_ID = 23, | ||
50 | WUSB_REQ_SET_DEV_INFO = 24, | ||
51 | WUSB_REQ_GET_TIME = 25, | ||
52 | WUSB_REQ_SET_STREAM_IDX = 26, | ||
53 | WUSB_REQ_SET_WUSB_MAS = 27, | ||
54 | }; | ||
55 | |||
56 | |||
57 | /* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */ | ||
58 | enum { | ||
59 | WUSB_TIME_ADJ = 0, | ||
60 | WUSB_TIME_BPST = 1, | ||
61 | WUSB_TIME_WUSB = 2, | ||
62 | }; | ||
63 | |||
64 | enum { | ||
65 | WA_ENABLE = 0x01, | ||
66 | WA_RESET = 0x02, | ||
67 | RPIPE_PAUSE = 0x1, | ||
68 | }; | ||
69 | |||
70 | /* Responses from Get Status request ([WUSB] section 8.3.1.6) */ | ||
71 | enum { | ||
72 | WA_STATUS_ENABLED = 0x01, | ||
73 | WA_STATUS_RESETTING = 0x02 | ||
74 | }; | ||
75 | |||
76 | enum rpipe_crs { | ||
77 | RPIPE_CRS_CTL = 0x01, | ||
78 | RPIPE_CRS_ISO = 0x02, | ||
79 | RPIPE_CRS_BULK = 0x04, | ||
80 | RPIPE_CRS_INTR = 0x08 | ||
81 | }; | ||
82 | |||
83 | /** | ||
84 | * RPipe descriptor ([WUSB] section 8.5.2.11) | ||
85 | * | ||
86 | * FIXME: explain rpipes | ||
87 | */ | ||
88 | struct usb_rpipe_descriptor { | ||
89 | u8 bLength; | ||
90 | u8 bDescriptorType; | ||
91 | __le16 wRPipeIndex; | ||
92 | __le16 wRequests; | ||
93 | __le16 wBlocks; /* rw if 0 */ | ||
94 | __le16 wMaxPacketSize; /* rw? */ | ||
95 | u8 bHSHubAddress; /* reserved: 0 */ | ||
96 | u8 bHSHubPort; /* ??? FIXME ??? */ | ||
97 | u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */ | ||
98 | u8 bDeviceAddress; /* rw: Target device address */ | ||
99 | u8 bEndpointAddress; /* rw: Target EP address */ | ||
100 | u8 bDataSequence; /* ro: Current Data sequence */ | ||
101 | __le32 dwCurrentWindow; /* ro */ | ||
102 | u8 bMaxDataSequence; /* ro?: max supported seq */ | ||
103 | u8 bInterval; /* rw: */ | ||
104 | u8 bOverTheAirInterval; /* rw: */ | ||
105 | u8 bmAttribute; /* ro? */ | ||
106 | u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */ | ||
107 | u8 bmRetryOptions; /* rw? */ | ||
108 | __le16 wNumTransactionErrors; /* rw */ | ||
109 | } __attribute__ ((packed)); | ||
110 | |||
111 | /** | ||
112 | * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4) | ||
113 | * | ||
114 | * These are the notifications coming on the notification endpoint of | ||
115 | * an HWA and a DWA. | ||
116 | */ | ||
117 | enum wa_notif_type { | ||
118 | DWA_NOTIF_RWAKE = 0x91, | ||
119 | DWA_NOTIF_PORTSTATUS = 0x92, | ||
120 | WA_NOTIF_TRANSFER = 0x93, | ||
121 | HWA_NOTIF_BPST_ADJ = 0x94, | ||
122 | HWA_NOTIF_DN = 0x95, | ||
123 | }; | ||
124 | |||
125 | /** | ||
126 | * Wire Adapter notification header | ||
127 | * | ||
128 | * Notifications coming from a wire adapter use a common header | ||
129 | * defined in [WUSB] sections 8.4.5 & 8.5.4. | ||
130 | */ | ||
131 | struct wa_notif_hdr { | ||
132 | u8 bLength; | ||
133 | u8 bNotifyType; /* enum wa_notif_type */ | ||
134 | } __attribute__((packed)); | ||
135 | |||
136 | /** | ||
137 | * HWA DN Received notification [(WUSB] section 8.5.4.2) | ||
138 | * | ||
139 | * The DNData is specified in WUSB1.0[7.6]. For each device | ||
140 | * notification we received, we just need to dispatch it. | ||
141 | * | ||
142 | * @dndata: this is really an array of notifications, but all start | ||
143 | * with the same header. | ||
144 | */ | ||
145 | struct hwa_notif_dn { | ||
146 | struct wa_notif_hdr hdr; | ||
147 | u8 bSourceDeviceAddr; /* from errata 2005/07 */ | ||
148 | u8 bmAttributes; | ||
149 | struct wusb_dn_hdr dndata[]; | ||
150 | } __attribute__((packed)); | ||
151 | |||
152 | /* [WUSB] section 8.3.3 */ | ||
153 | enum wa_xfer_type { | ||
154 | WA_XFER_TYPE_CTL = 0x80, | ||
155 | WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */ | ||
156 | WA_XFER_TYPE_ISO = 0x82, | ||
157 | WA_XFER_RESULT = 0x83, | ||
158 | WA_XFER_ABORT = 0x84, | ||
159 | }; | ||
160 | |||
161 | /* [WUSB] section 8.3.3 */ | ||
162 | struct wa_xfer_hdr { | ||
163 | u8 bLength; /* 0x18 */ | ||
164 | u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */ | ||
165 | __le16 wRPipe; /* RPipe index */ | ||
166 | __le32 dwTransferID; /* Host-assigned ID */ | ||
167 | __le32 dwTransferLength; /* Length of data to xfer */ | ||
168 | u8 bTransferSegment; | ||
169 | } __attribute__((packed)); | ||
170 | |||
171 | struct wa_xfer_ctl { | ||
172 | struct wa_xfer_hdr hdr; | ||
173 | u8 bmAttribute; | ||
174 | __le16 wReserved; | ||
175 | struct usb_ctrlrequest baSetupData; | ||
176 | } __attribute__((packed)); | ||
177 | |||
178 | struct wa_xfer_bi { | ||
179 | struct wa_xfer_hdr hdr; | ||
180 | u8 bReserved; | ||
181 | __le16 wReserved; | ||
182 | } __attribute__((packed)); | ||
183 | |||
184 | struct wa_xfer_hwaiso { | ||
185 | struct wa_xfer_hdr hdr; | ||
186 | u8 bReserved; | ||
187 | __le16 wPresentationTime; | ||
188 | __le32 dwNumOfPackets; | ||
189 | /* FIXME: u8 pktdata[]? */ | ||
190 | } __attribute__((packed)); | ||
191 | |||
192 | /* [WUSB] section 8.3.3.5 */ | ||
193 | struct wa_xfer_abort { | ||
194 | u8 bLength; | ||
195 | u8 bRequestType; | ||
196 | __le16 wRPipe; /* RPipe index */ | ||
197 | __le32 dwTransferID; /* Host-assigned ID */ | ||
198 | } __attribute__((packed)); | ||
199 | |||
200 | /** | ||
201 | * WA Transfer Complete notification ([WUSB] section 8.3.3.3) | ||
202 | * | ||
203 | */ | ||
204 | struct wa_notif_xfer { | ||
205 | struct wa_notif_hdr hdr; | ||
206 | u8 bEndpoint; | ||
207 | u8 Reserved; | ||
208 | } __attribute__((packed)); | ||
209 | |||
210 | /** Transfer result basic codes [WUSB] table 8-15 */ | ||
211 | enum { | ||
212 | WA_XFER_STATUS_SUCCESS, | ||
213 | WA_XFER_STATUS_HALTED, | ||
214 | WA_XFER_STATUS_DATA_BUFFER_ERROR, | ||
215 | WA_XFER_STATUS_BABBLE, | ||
216 | WA_XFER_RESERVED, | ||
217 | WA_XFER_STATUS_NOT_FOUND, | ||
218 | WA_XFER_STATUS_INSUFFICIENT_RESOURCE, | ||
219 | WA_XFER_STATUS_TRANSACTION_ERROR, | ||
220 | WA_XFER_STATUS_ABORTED, | ||
221 | WA_XFER_STATUS_RPIPE_NOT_READY, | ||
222 | WA_XFER_INVALID_FORMAT, | ||
223 | WA_XFER_UNEXPECTED_SEGMENT_NUMBER, | ||
224 | WA_XFER_STATUS_RPIPE_TYPE_MISMATCH, | ||
225 | }; | ||
226 | |||
227 | /** [WUSB] section 8.3.3.4 */ | ||
228 | struct wa_xfer_result { | ||
229 | struct wa_notif_hdr hdr; | ||
230 | __le32 dwTransferID; | ||
231 | __le32 dwTransferLength; | ||
232 | u8 bTransferSegment; | ||
233 | u8 bTransferStatus; | ||
234 | __le32 dwNumOfPackets; | ||
235 | } __attribute__((packed)); | ||
236 | |||
237 | /** | ||
238 | * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7). | ||
239 | * | ||
240 | * NOTE: u16 fields are read Little Endian from the hardware. | ||
241 | * | ||
242 | * @bNumPorts is the original max number of devices that the host can | ||
243 | * connect; we might chop this so the stack can handle | ||
244 | * it. In case you need to access it, use wusbhc->ports_max | ||
245 | * if it is a Wireless USB WA. | ||
246 | */ | ||
247 | struct usb_wa_descriptor { | ||
248 | u8 bLength; | ||
249 | u8 bDescriptorType; | ||
250 | u16 bcdWAVersion; | ||
251 | u8 bNumPorts; /* don't use!! */ | ||
252 | u8 bmAttributes; /* Reserved == 0 */ | ||
253 | u16 wNumRPipes; | ||
254 | u16 wRPipeMaxBlock; | ||
255 | u8 bRPipeBlockSize; | ||
256 | u8 bPwrOn2PwrGood; | ||
257 | u8 bNumMMCIEs; | ||
258 | u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */ | ||
259 | } __attribute__((packed)); | ||
260 | |||
261 | /** | ||
262 | * HWA Device Information Buffer (WUSB1.0[T8.54]) | ||
263 | */ | ||
264 | struct hwa_dev_info { | ||
265 | u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */ | ||
266 | u8 bDeviceAddress; | ||
267 | __le16 wPHYRates; | ||
268 | u8 bmDeviceAttribute; | ||
269 | } __attribute__((packed)); | ||
270 | |||
271 | #endif /* #ifndef __LINUX_USB_WUSB_WA_H */ | ||
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h new file mode 100644 index 000000000000..5f401b644ed5 --- /dev/null +++ b/include/linux/usb/wusb.h | |||
@@ -0,0 +1,376 @@ | |||
1 | /* | ||
2 | * Wireless USB Standard Definitions | ||
3 | * Event Size Tables | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: docs | ||
24 | * FIXME: organize properly, group logically | ||
25 | * | ||
26 | * All the event structures are defined in uwb/spec.h, as they are | ||
27 | * common to the WHCI and WUSB radio control interfaces. | ||
28 | */ | ||
29 | |||
30 | #ifndef __WUSB_H__ | ||
31 | #define __WUSB_H__ | ||
32 | |||
33 | #include <linux/types.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/uwb/spec.h> | ||
36 | #include <linux/usb/ch9.h> | ||
37 | #include <linux/param.h> | ||
38 | |||
39 | /** | ||
40 | * WUSB Information Element header | ||
41 | * | ||
42 | * I don't know why, they decided to make it different to the MBOA MAC | ||
43 | * IE Header; beats me. | ||
44 | */ | ||
45 | struct wuie_hdr { | ||
46 | u8 bLength; | ||
47 | u8 bIEIdentifier; | ||
48 | } __attribute__((packed)); | ||
49 | |||
50 | enum { | ||
51 | WUIE_ID_WCTA = 0x80, | ||
52 | WUIE_ID_CONNECTACK, | ||
53 | WUIE_ID_HOST_INFO, | ||
54 | WUIE_ID_CHANGE_ANNOUNCE, | ||
55 | WUIE_ID_DEVICE_DISCONNECT, | ||
56 | WUIE_ID_HOST_DISCONNECT, | ||
57 | WUIE_ID_KEEP_ALIVE = 0x89, | ||
58 | WUIE_ID_ISOCH_DISCARD, | ||
59 | WUIE_ID_RESET_DEVICE, | ||
60 | }; | ||
61 | |||
62 | /** | ||
63 | * Maximum number of array elements in a WUSB IE. | ||
64 | * | ||
65 | * WUSB1.0[7.5 before table 7-38] says that in WUSB IEs that | ||
66 | * are "arrays" have to limited to 4 elements. So we define it | ||
67 | * like that to ease up and submit only the neeed size. | ||
68 | */ | ||
69 | #define WUIE_ELT_MAX 4 | ||
70 | |||
71 | /** | ||
72 | * Wrapper for the data that defines a CHID, a CDID or a CK | ||
73 | * | ||
74 | * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of | ||
75 | * data. In order to avoid confusion and enforce types, we wrap it. | ||
76 | * | ||
77 | * Make it packed, as we use it in some hw defintions. | ||
78 | */ | ||
79 | struct wusb_ckhdid { | ||
80 | u8 data[16]; | ||
81 | } __attribute__((packed)); | ||
82 | |||
83 | const static | ||
84 | struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; | ||
85 | |||
86 | #define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) | ||
87 | |||
88 | /** | ||
89 | * WUSB IE: Host Information (WUSB1.0[7.5.2]) | ||
90 | * | ||
91 | * Used to provide information about the host to the Wireless USB | ||
92 | * devices in range (CHID can be used as an ASCII string). | ||
93 | */ | ||
94 | struct wuie_host_info { | ||
95 | struct wuie_hdr hdr; | ||
96 | __le16 attributes; | ||
97 | struct wusb_ckhdid CHID; | ||
98 | } __attribute__((packed)); | ||
99 | |||
100 | /** | ||
101 | * WUSB IE: Connect Ack (WUSB1.0[7.5.1]) | ||
102 | * | ||
103 | * Used to acknowledge device connect requests. See note for | ||
104 | * WUIE_ELT_MAX. | ||
105 | */ | ||
106 | struct wuie_connect_ack { | ||
107 | struct wuie_hdr hdr; | ||
108 | struct { | ||
109 | struct wusb_ckhdid CDID; | ||
110 | u8 bDeviceAddress; /* 0 means unused */ | ||
111 | u8 bReserved; | ||
112 | } blk[WUIE_ELT_MAX]; | ||
113 | } __attribute__((packed)); | ||
114 | |||
115 | /** | ||
116 | * WUSB IE Host Information Element, Connect Availability | ||
117 | * | ||
118 | * WUSB1.0[7.5.2], bmAttributes description | ||
119 | */ | ||
120 | enum { | ||
121 | WUIE_HI_CAP_RECONNECT = 0, | ||
122 | WUIE_HI_CAP_LIMITED, | ||
123 | WUIE_HI_CAP_RESERVED, | ||
124 | WUIE_HI_CAP_ALL, | ||
125 | }; | ||
126 | |||
127 | /** | ||
128 | * WUSB IE: Channel Stop (WUSB1.0[7.5.8]) | ||
129 | * | ||
130 | * Tells devices the host is going to stop sending MMCs and will dissapear. | ||
131 | */ | ||
132 | struct wuie_channel_stop { | ||
133 | struct wuie_hdr hdr; | ||
134 | u8 attributes; | ||
135 | u8 timestamp[3]; | ||
136 | } __attribute__((packed)); | ||
137 | |||
138 | /** | ||
139 | * WUSB IE: Keepalive (WUSB1.0[7.5.9]) | ||
140 | * | ||
141 | * Ask device(s) to send keepalives. | ||
142 | */ | ||
143 | struct wuie_keep_alive { | ||
144 | struct wuie_hdr hdr; | ||
145 | u8 bDeviceAddress[WUIE_ELT_MAX]; | ||
146 | } __attribute__((packed)); | ||
147 | |||
148 | /** | ||
149 | * WUSB IE: Reset device (WUSB1.0[7.5.11]) | ||
150 | * | ||
151 | * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only | ||
152 | * use it for one at the time... | ||
153 | * | ||
154 | * In any case, this request is a wee bit silly: why don't they target | ||
155 | * by address?? | ||
156 | */ | ||
157 | struct wuie_reset { | ||
158 | struct wuie_hdr hdr; | ||
159 | struct wusb_ckhdid CDID; | ||
160 | } __attribute__((packed)); | ||
161 | |||
162 | /** | ||
163 | * WUSB IE: Disconnect device (WUSB1.0[7.5.11]) | ||
164 | * | ||
165 | * Tell device to disconnect; we can fit 4 addresses, but we only use | ||
166 | * it for one at the time... | ||
167 | */ | ||
168 | struct wuie_disconnect { | ||
169 | struct wuie_hdr hdr; | ||
170 | u8 bDeviceAddress; | ||
171 | u8 padding; | ||
172 | } __attribute__((packed)); | ||
173 | |||
174 | /** | ||
175 | * WUSB IE: Host disconnect ([WUSB] section 7.5.5) | ||
176 | * | ||
177 | * Tells all connected devices to disconnect. | ||
178 | */ | ||
179 | struct wuie_host_disconnect { | ||
180 | struct wuie_hdr hdr; | ||
181 | } __attribute__((packed)); | ||
182 | |||
183 | /** | ||
184 | * WUSB Device Notification header (WUSB1.0[7.6]) | ||
185 | */ | ||
186 | struct wusb_dn_hdr { | ||
187 | u8 bType; | ||
188 | u8 notifdata[]; | ||
189 | } __attribute__((packed)); | ||
190 | |||
191 | /** Device Notification codes (WUSB1.0[Table 7-54]) */ | ||
192 | enum WUSB_DN { | ||
193 | WUSB_DN_CONNECT = 0x01, | ||
194 | WUSB_DN_DISCONNECT = 0x02, | ||
195 | WUSB_DN_EPRDY = 0x03, | ||
196 | WUSB_DN_MASAVAILCHANGED = 0x04, | ||
197 | WUSB_DN_RWAKE = 0x05, | ||
198 | WUSB_DN_SLEEP = 0x06, | ||
199 | WUSB_DN_ALIVE = 0x07, | ||
200 | }; | ||
201 | |||
202 | /** WUSB Device Notification Connect */ | ||
203 | struct wusb_dn_connect { | ||
204 | struct wusb_dn_hdr hdr; | ||
205 | __le16 attributes; | ||
206 | struct wusb_ckhdid CDID; | ||
207 | } __attribute__((packed)); | ||
208 | |||
209 | static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn) | ||
210 | { | ||
211 | return le16_to_cpu(dn->attributes) & 0xff; | ||
212 | } | ||
213 | |||
214 | static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn) | ||
215 | { | ||
216 | return (le16_to_cpu(dn->attributes) >> 8) & 0x1; | ||
217 | } | ||
218 | |||
219 | static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn) | ||
220 | { | ||
221 | return (le16_to_cpu(dn->attributes) >> 9) & 0x03; | ||
222 | } | ||
223 | |||
224 | /** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */ | ||
225 | struct wusb_dn_alive { | ||
226 | struct wusb_dn_hdr hdr; | ||
227 | } __attribute__((packed)); | ||
228 | |||
229 | /** Device is disconnecting (WUSB1.0[7.6.2]) */ | ||
230 | struct wusb_dn_disconnect { | ||
231 | struct wusb_dn_hdr hdr; | ||
232 | } __attribute__((packed)); | ||
233 | |||
234 | /* General constants */ | ||
235 | enum { | ||
236 | WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ | ||
237 | }; | ||
238 | |||
239 | static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size, | ||
240 | const struct wusb_ckhdid *ckhdid) | ||
241 | { | ||
242 | return scnprintf(pr_ckhdid, size, | ||
243 | "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx " | ||
244 | "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx", | ||
245 | ckhdid->data[0], ckhdid->data[1], | ||
246 | ckhdid->data[2], ckhdid->data[3], | ||
247 | ckhdid->data[4], ckhdid->data[5], | ||
248 | ckhdid->data[6], ckhdid->data[7], | ||
249 | ckhdid->data[8], ckhdid->data[9], | ||
250 | ckhdid->data[10], ckhdid->data[11], | ||
251 | ckhdid->data[12], ckhdid->data[13], | ||
252 | ckhdid->data[14], ckhdid->data[15]); | ||
253 | } | ||
254 | |||
255 | /* | ||
256 | * WUSB Crypto stuff (WUSB1.0[6]) | ||
257 | */ | ||
258 | |||
259 | extern const char *wusb_et_name(u8); | ||
260 | |||
261 | /** | ||
262 | * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for | ||
263 | * the host or the device. | ||
264 | */ | ||
265 | static inline u8 wusb_key_index(int index, int type, int originator) | ||
266 | { | ||
267 | return (originator << 6) | (type << 4) | index; | ||
268 | } | ||
269 | |||
270 | #define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */ | ||
271 | #define WUSB_KEY_INDEX_TYPE_ASSOC 1 | ||
272 | #define WUSB_KEY_INDEX_TYPE_GTK 2 | ||
273 | #define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 | ||
274 | #define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 | ||
275 | |||
276 | /* A CCM Nonce, defined in WUSB1.0[6.4.1] */ | ||
277 | struct aes_ccm_nonce { | ||
278 | u8 sfn[6]; /* Little Endian */ | ||
279 | u8 tkid[3]; /* LE */ | ||
280 | struct uwb_dev_addr dest_addr; | ||
281 | struct uwb_dev_addr src_addr; | ||
282 | } __attribute__((packed)); | ||
283 | |||
284 | /* A CCM operation label, defined on WUSB1.0[6.5.x] */ | ||
285 | struct aes_ccm_label { | ||
286 | u8 data[14]; | ||
287 | } __attribute__((packed)); | ||
288 | |||
289 | /* | ||
290 | * Input to the key derivation sequence defined in | ||
291 | * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the | ||
292 | * PRF function. | ||
293 | */ | ||
294 | struct wusb_keydvt_in { | ||
295 | u8 hnonce[16]; | ||
296 | u8 dnonce[16]; | ||
297 | } __attribute__((packed)); | ||
298 | |||
299 | /* | ||
300 | * Output from the key derivation sequence defined in | ||
301 | * WUSB1.0[6.5.1]. | ||
302 | */ | ||
303 | struct wusb_keydvt_out { | ||
304 | u8 kck[16]; | ||
305 | u8 ptk[16]; | ||
306 | } __attribute__((packed)); | ||
307 | |||
308 | /* Pseudo Random Function WUSB1.0[6.5] */ | ||
309 | extern int wusb_crypto_init(void); | ||
310 | extern void wusb_crypto_exit(void); | ||
311 | extern ssize_t wusb_prf(void *out, size_t out_size, | ||
312 | const u8 key[16], const struct aes_ccm_nonce *_n, | ||
313 | const struct aes_ccm_label *a, | ||
314 | const void *b, size_t blen, size_t len); | ||
315 | |||
316 | static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16], | ||
317 | const struct aes_ccm_nonce *n, | ||
318 | const struct aes_ccm_label *a, | ||
319 | const void *b, size_t blen) | ||
320 | { | ||
321 | return wusb_prf(out, out_size, key, n, a, b, blen, 64); | ||
322 | } | ||
323 | |||
324 | static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16], | ||
325 | const struct aes_ccm_nonce *n, | ||
326 | const struct aes_ccm_label *a, | ||
327 | const void *b, size_t blen) | ||
328 | { | ||
329 | return wusb_prf(out, out_size, key, n, a, b, blen, 128); | ||
330 | } | ||
331 | |||
332 | static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16], | ||
333 | const struct aes_ccm_nonce *n, | ||
334 | const struct aes_ccm_label *a, | ||
335 | const void *b, size_t blen) | ||
336 | { | ||
337 | return wusb_prf(out, out_size, key, n, a, b, blen, 256); | ||
338 | } | ||
339 | |||
340 | /* Key derivation WUSB1.0[6.5.1] */ | ||
341 | static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out, | ||
342 | const u8 key[16], | ||
343 | const struct aes_ccm_nonce *n, | ||
344 | const struct wusb_keydvt_in *keydvt_in) | ||
345 | { | ||
346 | const struct aes_ccm_label a = { .data = "Pair-wise keys" }; | ||
347 | return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a, | ||
348 | keydvt_in, sizeof(*keydvt_in)); | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Out-of-band MIC Generation WUSB1.0[6.5.2] | ||
353 | * | ||
354 | * Compute the MIC over @key, @n and @hs and place it in @mic_out. | ||
355 | * | ||
356 | * @mic_out: Where to place the 8 byte MIC tag | ||
357 | * @key: KCK from the derivation process | ||
358 | * @n: CCM nonce, n->sfn == 0, TKID as established in the | ||
359 | * process. | ||
360 | * @hs: Handshake struct for phase 2 of the 4-way. | ||
361 | * hs->bStatus and hs->bReserved are zero. | ||
362 | * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2] | ||
363 | * hs->dest_addr is the device's USB address padded with 0 | ||
364 | * hs->src_addr is the hosts's UWB device address | ||
365 | * hs->mic is ignored (as we compute that value). | ||
366 | */ | ||
367 | static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16], | ||
368 | const struct aes_ccm_nonce *n, | ||
369 | const struct usb_handshake *hs) | ||
370 | { | ||
371 | const struct aes_ccm_label a = { .data = "out-of-bandMIC" }; | ||
372 | return wusb_prf_64(mic_out, 8, key, n, &a, | ||
373 | hs, sizeof(*hs) - sizeof(hs->MIC)); | ||
374 | } | ||
375 | |||
376 | #endif /* #ifndef __WUSB_H__ */ | ||
diff --git a/include/linux/uwb.h b/include/linux/uwb.h new file mode 100644 index 000000000000..f9ccbd9a2ced --- /dev/null +++ b/include/linux/uwb.h | |||
@@ -0,0 +1,765 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * UWB API | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: doc: overview of the API, different parts and pointers | ||
24 | */ | ||
25 | |||
26 | #ifndef __LINUX__UWB_H__ | ||
27 | #define __LINUX__UWB_H__ | ||
28 | |||
29 | #include <linux/limits.h> | ||
30 | #include <linux/device.h> | ||
31 | #include <linux/mutex.h> | ||
32 | #include <linux/timer.h> | ||
33 | #include <linux/workqueue.h> | ||
34 | #include <linux/uwb/spec.h> | ||
35 | |||
36 | struct uwb_dev; | ||
37 | struct uwb_beca_e; | ||
38 | struct uwb_rc; | ||
39 | struct uwb_rsv; | ||
40 | struct uwb_dbg; | ||
41 | |||
42 | /** | ||
43 | * struct uwb_dev - a UWB Device | ||
44 | * @rc: UWB Radio Controller that discovered the device (kind of its | ||
45 | * parent). | ||
46 | * @bce: a beacon cache entry for this device; or NULL if the device | ||
47 | * is a local radio controller. | ||
48 | * @mac_addr: the EUI-48 address of this device. | ||
49 | * @dev_addr: the current DevAddr used by this device. | ||
50 | * @beacon_slot: the slot number the beacon is using. | ||
51 | * @streams: bitmap of streams allocated to reservations targeted at | ||
52 | * this device. For an RC, this is the streams allocated for | ||
53 | * reservations targeted at DevAddrs. | ||
54 | * | ||
 * A UWB device may either be a neighbor or part of a local radio
56 | * controller. | ||
57 | */ | ||
struct uwb_dev {
	struct mutex mutex;	/* NOTE(review): presumably serializes updates to this device's state -- confirm */
	struct list_head list_node;	/* node in the RC's list of known devices */
	struct device dev;	/* driver-model device; see to_uwb_dev() */
	struct uwb_rc *rc;		/* radio controller */
	struct uwb_beca_e *bce;		/* Beacon Cache Entry */

	struct uwb_mac_addr mac_addr;	/* EUI-48 address of this device */
	struct uwb_dev_addr dev_addr;	/* current DevAddr used by this device */
	int beacon_slot;	/* slot number the beacon is using */
	DECLARE_BITMAP(streams, UWB_NUM_STREAMS); /* streams allocated to reservations targeted at this device */
};
/* Map the embedded driver-model device back to its enclosing uwb_dev. */
#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
71 | |||
72 | /** | ||
73 | * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs | ||
74 | * | ||
75 | * RC[CE]Bs have a 'context ID' field that matches the command with | ||
76 | * the event received to confirm it. | ||
77 | * | ||
78 | * Maximum number of context IDs | ||
79 | */ | ||
enum { UWB_RC_CTX_MAX = 256 };	/* context IDs fit in one byte: 0..255 */
81 | |||
82 | |||
/** Notification chain head for UWB generated events to listeners */
struct uwb_notifs_chain {
	struct list_head list;	/* registered struct uwb_notifs_handler entries */
	struct mutex mutex;	/* NOTE(review): presumably protects @list -- confirm */
};
88 | |||
89 | /** | ||
90 | * struct uwb_mas_bm - a bitmap of all MAS in a superframe | ||
91 | * @bm: a bitmap of length #UWB_NUM_MAS | ||
92 | */ | ||
93 | struct uwb_mas_bm { | ||
94 | DECLARE_BITMAP(bm, UWB_NUM_MAS); | ||
95 | }; | ||
96 | |||
97 | /** | ||
98 | * uwb_rsv_state - UWB Reservation state. | ||
99 | * | ||
100 | * NONE - reservation is not active (no DRP IE being transmitted). | ||
101 | * | ||
102 | * Owner reservation states: | ||
103 | * | ||
104 | * INITIATED - owner has sent an initial DRP request. | ||
105 | * PENDING - target responded with pending Reason Code. | ||
106 | * MODIFIED - reservation manager is modifying an established | ||
107 | * reservation with a different MAS allocation. | ||
108 | * ESTABLISHED - the reservation has been successfully negotiated. | ||
109 | * | ||
110 | * Target reservation states: | ||
111 | * | ||
112 | * DENIED - request is denied. | ||
113 | * ACCEPTED - request is accepted. | ||
114 | * PENDING - PAL has yet to make a decision to whether to accept or | ||
115 | * deny. | ||
116 | * | ||
117 | * FIXME: further target states TBD. | ||
118 | */ | ||
enum uwb_rsv_state {
	UWB_RSV_STATE_NONE,		/* no DRP IE being transmitted */
	UWB_RSV_STATE_O_INITIATED,	/* owner: initial DRP request sent */
	UWB_RSV_STATE_O_PENDING,	/* owner: target answered "pending" */
	UWB_RSV_STATE_O_MODIFIED,	/* owner: renegotiating the MAS allocation */
	UWB_RSV_STATE_O_ESTABLISHED,	/* owner: negotiation succeeded */
	UWB_RSV_STATE_T_ACCEPTED,	/* target: request accepted */
	UWB_RSV_STATE_T_DENIED,		/* target: request denied */
	UWB_RSV_STATE_T_PENDING,	/* target: PAL decision outstanding */

	UWB_RSV_STATE_LAST,	/* number of states; not a real state */
};
131 | |||
/* How a reservation target is identified (see struct uwb_rsv_target). */
enum uwb_rsv_target_type {
	UWB_RSV_TARGET_DEV,	/* a single, known UWB device */
	UWB_RSV_TARGET_DEVADDR,	/* a specific (possibly private) DevAddr */
};
136 | |||
137 | /** | ||
138 | * struct uwb_rsv_target - the target of a reservation. | ||
139 | * | ||
140 | * Reservations unicast and targeted at a single device | ||
141 | * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a | ||
142 | * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR). | ||
143 | */ | ||
struct uwb_rsv_target {
	enum uwb_rsv_target_type type;	/* selects which union member is valid */
	union {
		struct uwb_dev *dev;	/* valid if type == UWB_RSV_TARGET_DEV */
		struct uwb_dev_addr devaddr;	/* valid if type == UWB_RSV_TARGET_DEVADDR */
	};
};
151 | |||
152 | /* | ||
153 | * Number of streams reserved for reservations targeted at DevAddrs. | ||
154 | */ | ||
155 | #define UWB_NUM_GLOBAL_STREAMS 1 | ||
156 | |||
157 | typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); | ||
158 | |||
159 | /** | ||
160 | * struct uwb_rsv - a DRP reservation | ||
161 | * | ||
162 | * Data structure management: | ||
163 | * | ||
164 | * @rc: the radio controller this reservation is for | ||
165 | * (as target or owner) | ||
166 | * @rc_node: a list node for the RC | ||
167 | * @pal_node: a list node for the PAL | ||
168 | * | ||
169 | * Owner and target parameters: | ||
170 | * | ||
171 | * @owner: the UWB device owning this reservation | ||
172 | * @target: the target UWB device | ||
173 | * @type: reservation type | ||
174 | * | ||
175 | * Owner parameters: | ||
176 | * | ||
 * @max_mas: maximum number of MAS
178 | * @min_mas: minimum number of MAS | ||
179 | * @sparsity: owner selected sparsity | ||
180 | * @is_multicast: true iff multicast | ||
181 | * | ||
182 | * @callback: callback function when the reservation completes | ||
183 | * @pal_priv: private data for the PAL making the reservation | ||
184 | * | ||
185 | * Reservation status: | ||
186 | * | ||
 * @state: negotiation state
188 | * @stream: stream index allocated for this reservation | ||
189 | * @mas: reserved MAS | ||
190 | * @drp_ie: the DRP IE | ||
191 | * @ie_valid: true iff the DRP IE matches the reservation parameters | ||
192 | * | ||
193 | * DRP reservations are uniquely identified by the owner, target and | ||
194 | * stream index. However, when using a DevAddr as a target (e.g., for | ||
195 | * a WUSB cluster reservation) the responses may be received from | ||
196 | * devices with different DevAddrs. In this case, reservations are | ||
197 | * uniquely identified by just the stream index. A number of stream | ||
198 | * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this. | ||
199 | */ | ||
struct uwb_rsv {
	struct uwb_rc *rc;	/* RC this reservation is for (as target or owner) */
	struct list_head rc_node;	/* node in rc->reservations */
	struct list_head pal_node;	/* list node for the PAL */

	struct uwb_dev *owner;	/* UWB device owning this reservation */
	struct uwb_rsv_target target;	/* the target UWB device/DevAddr */
	enum uwb_drp_type type;	/* reservation type */
	int max_mas;	/* maximum number of MAS */
	int min_mas;	/* minimum number of MAS */
	int sparsity;	/* owner selected sparsity */
	bool is_multicast;	/* true iff multicast */

	uwb_rsv_cb_f callback;	/* called when the reservation completes */
	void *pal_priv;	/* opaque data handed back to the PAL */

	enum uwb_rsv_state state;	/* negotiation state */
	u8 stream;	/* stream index allocated for this reservation */
	struct uwb_mas_bm mas;	/* reserved MAS */
	struct uwb_ie_drp *drp_ie;	/* the DRP IE */
	bool ie_valid;	/* true iff @drp_ie matches the reservation parameters */
	struct timer_list timer;	/* NOTE(review): presumably times out negotiation steps -- confirm */
	bool expired;	/* NOTE(review): presumably set when @timer fires -- confirm */
};
224 | |||
/* An all-zero MAS bitmap (no MAS set). */
static const
struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };

/* Copy the MAS bitmap @mas into @dst in little-endian bit order. */
static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
{
	bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
}
232 | |||
233 | /** | ||
234 | * struct uwb_drp_avail - a radio controller's view of MAS usage | ||
 * @global: MAS unused by neighbors (excluding reservations targeted
 *	or owned by the local radio controller) or the beacon period
237 | * @local: MAS unused by local established reservations | ||
238 | * @pending: MAS unused by local pending reservations | ||
239 | * @ie: DRP Availability IE to be included in the beacon | ||
 * @ie_valid: true iff @ie is valid and does not need to be regenerated from
241 | * @global and @local | ||
242 | * | ||
243 | * Each radio controller maintains a view of MAS usage or | ||
244 | * availability. MAS available for a new reservation are determined | ||
245 | * from the intersection of @global, @local, and @pending. | ||
246 | * | ||
247 | * The radio controller must transmit a DRP Availability IE that's the | ||
248 | * intersection of @global and @local. | ||
249 | * | ||
250 | * A set bit indicates the MAS is unused and available. | ||
251 | * | ||
252 | * rc->rsvs_mutex should be held before accessing this data structure. | ||
253 | * | ||
254 | * [ECMA-368] section 17.4.3. | ||
255 | */ | ||
struct uwb_drp_avail {
	DECLARE_BITMAP(global, UWB_NUM_MAS);	/* MAS unused by neighbors */
	DECLARE_BITMAP(local, UWB_NUM_MAS);	/* MAS unused by local established reservations */
	DECLARE_BITMAP(pending, UWB_NUM_MAS);	/* MAS unused by local pending reservations */
	struct uwb_ie_drp_avail ie;	/* DRP Availability IE for the beacon */
	bool ie_valid;	/* false if @ie must be regenerated from @global and @local */
};
263 | |||
264 | |||
265 | const char *uwb_rsv_state_str(enum uwb_rsv_state state); | ||
266 | const char *uwb_rsv_type_str(enum uwb_drp_type type); | ||
267 | |||
268 | struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, | ||
269 | void *pal_priv); | ||
270 | void uwb_rsv_destroy(struct uwb_rsv *rsv); | ||
271 | |||
272 | int uwb_rsv_establish(struct uwb_rsv *rsv); | ||
273 | int uwb_rsv_modify(struct uwb_rsv *rsv, | ||
274 | int max_mas, int min_mas, int sparsity); | ||
275 | void uwb_rsv_terminate(struct uwb_rsv *rsv); | ||
276 | |||
277 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); | ||
278 | |||
279 | /** | ||
280 | * Radio Control Interface instance | ||
281 | * | ||
282 | * | ||
283 | * Life cycle rules: those of the UWB Device. | ||
284 | * | ||
285 | * @index: an index number for this radio controller, as used in the | ||
286 | * device name. | ||
287 | * @version: version of protocol supported by this device | ||
288 | * @priv: Backend implementation; rw with uwb_dev.dev.sem taken. | ||
289 | * @cmd: Backend implementation to execute commands; rw and call | ||
290 | * only with uwb_dev.dev.sem taken. | ||
291 | * @reset: Hardware reset of radio controller and any PAL controllers. | ||
292 | * @filter: Backend implementation to manipulate data to and from device | ||
293 | * to be compliant to specification assumed by driver (WHCI | ||
294 | * 0.95). | ||
295 | * | ||
296 | * uwb_dev.dev.mutex is used to execute commands and update | ||
297 | * the corresponding structures; can't use a spinlock | ||
298 | * because rc->cmd() can sleep. | ||
 * @ies:         This is a dynamically allocated array caching the
300 | * IEs (settable by the host) that the beacon of this | ||
301 | * radio controller is currently sending. | ||
302 | * | ||
303 | * In reality, we store here the full command we set to | ||
304 | * the radio controller (which is basically a command | ||
305 | * prefix followed by all the IEs the beacon currently | ||
306 | * contains). This way we don't have to realloc and | ||
307 | * memcpy when setting it. | ||
308 | * | ||
309 | * We set this up in uwb_rc_ie_setup(), where we alloc | ||
310 | * this struct, call get_ie() [so we know which IEs are | ||
311 | * currently being sent, if any]. | ||
312 | * | ||
313 | * @ies_capacity:Amount of space (in bytes) allocated in @ies. The | ||
314 | * amount used is given by sizeof(*ies) plus ies->wIELength | ||
315 | * (which is a little endian quantity all the time). | ||
316 | * @ies_mutex: protect the IE cache | ||
317 | * @dbg: information for the debug interface | ||
318 | */ | ||
struct uwb_rc {
	struct uwb_dev uwb_dev;	/* the RC is itself a UWB device */
	int index;	/* index used in the device name */
	u16 version;	/* protocol version supported by this device */

	struct module *owner;	/* module providing the backend implementation */
	void *priv;	/* backend implementation data (see kernel-doc above) */
	int (*start)(struct uwb_rc *rc);	/* start the radio controller */
	void (*stop)(struct uwb_rc *rc);	/* stop the radio controller */
	int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);	/* execute a command (may sleep) */
	int (*reset)(struct uwb_rc *rc);	/* hardware reset of RC and PAL controllers */
	int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);	/* adapt outgoing commands to the device */
	int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
			    size_t *, size_t *);	/* adapt incoming events from the device */

	spinlock_t neh_lock;		/* protects neh_* and ctx_* */
	struct list_head neh_list;	/* Open NE handles */
	unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];	/* bitmap of context IDs in use */
	u8 ctx_roll;	/* NOTE(review): presumably the rolling next context ID -- confirm */

	int beaconing;			/* Beaconing state [channel number] */
	int scanning;	/* NOTE(review): presumably the channel being scanned -- confirm */
	enum uwb_scan_type scan_type:3;	/* current scan type */
	unsigned ready:1;	/* NOTE(review): presumably set once setup completes -- confirm */
	struct uwb_notifs_chain notifs_chain;	/* registered event listeners */

	struct uwb_drp_avail drp_avail;	/* this RC's view of MAS availability */
	struct list_head reservations;	/* all struct uwb_rsv for this RC */
	struct mutex rsvs_mutex;	/* protects reservations and drp_avail */
	struct workqueue_struct *rsv_workq;	/* workqueue for reservation updates */
	struct work_struct rsv_update_work;	/* work item for reservation updates */

	struct mutex ies_mutex;	/* protects the IE cache */
	struct uwb_rc_cmd_set_ie *ies;	/* cached SET-IE command (see kernel-doc above) */
	size_t ies_capacity;	/* bytes allocated in @ies */

	spinlock_t pal_lock;	/* protects @pals */
	struct list_head pals;	/* registered struct uwb_pal */

	struct uwb_dbg *dbg;	/* debug interface state */
};
360 | |||
361 | |||
362 | /** | ||
363 | * struct uwb_pal - a UWB PAL | ||
364 | * @name: descriptive name for this PAL (wushc, wlp, etc.). | ||
365 | * @device: a device for the PAL. Used to link the PAL and the radio | ||
366 | * controller in sysfs. | ||
367 | * @new_rsv: called when a peer requests a reservation (may be NULL if | ||
368 | * the PAL cannot accept reservation requests). | ||
369 | * | ||
370 | * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB | ||
371 | * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). | ||
372 | * | ||
373 | * The PALs using a radio controller must register themselves to | ||
374 | * permit the UWB stack to coordinate usage of the radio between the | ||
375 | * various PALs or to allow PALs to response to certain requests from | ||
376 | * peers. | ||
377 | * | ||
378 | * A struct uwb_pal should be embedded in a containing structure | ||
379 | * belonging to the PAL and initialized with uwb_pal_init()). Fields | ||
380 | * should be set appropriately by the PAL before registering the PAL | ||
381 | * with uwb_pal_register(). | ||
382 | */ | ||
struct uwb_pal {
	struct list_head node;	/* node in the RC's @pals list */
	const char *name;	/* descriptive name (wushc, wlp, etc.) */
	struct device *device;	/* links the PAL and the RC in sysfs */
	void (*new_rsv)(struct uwb_rsv *rsv);	/* peer requested a reservation; may be NULL */
};
389 | |||
390 | void uwb_pal_init(struct uwb_pal *pal); | ||
391 | int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal); | ||
392 | void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal); | ||
393 | |||
394 | /* | ||
395 | * General public API | ||
396 | * | ||
397 | * This API can be used by UWB device drivers or by those implementing | ||
398 | * UWB Radio Controllers | ||
399 | */ | ||
400 | struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, | ||
401 | const struct uwb_dev_addr *devaddr); | ||
402 | struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *); | ||
/* Take a reference on @uwb_dev (driver-model refcount). */
static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
{
	get_device(&uwb_dev->dev);
}
/* Drop a reference taken with uwb_dev_get(). */
static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
{
	put_device(&uwb_dev->dev);
}
411 | struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev); | ||
412 | |||
413 | /** | ||
414 | * Callback function for 'uwb_{dev,rc}_foreach()'. | ||
415 | * | ||
416 | * @dev: Linux device instance | ||
417 | * 'uwb_dev = container_of(dev, struct uwb_dev, dev)' | ||
418 | * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'. | ||
419 | * | ||
420 | * @returns: 0 to continue the iterations, any other val to stop | ||
421 | * iterating and return the value to the caller of | ||
422 | * _foreach(). | ||
423 | */ | ||
424 | typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv); | ||
425 | int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv); | ||
426 | |||
427 | struct uwb_rc *uwb_rc_alloc(void); | ||
428 | struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *); | ||
429 | struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *); | ||
430 | void uwb_rc_put(struct uwb_rc *rc); | ||
431 | |||
432 | typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg, | ||
433 | struct uwb_rceb *reply, ssize_t reply_size); | ||
434 | |||
435 | int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, | ||
436 | struct uwb_rccb *cmd, size_t cmd_size, | ||
437 | u8 expected_type, u16 expected_event, | ||
438 | uwb_rc_cmd_cb_f cb, void *arg); | ||
439 | ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, | ||
440 | struct uwb_rccb *cmd, size_t cmd_size, | ||
441 | struct uwb_rceb *reply, size_t reply_size); | ||
442 | ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, | ||
443 | struct uwb_rccb *cmd, size_t cmd_size, | ||
444 | u8 expected_type, u16 expected_event, | ||
445 | struct uwb_rceb **preply); | ||
446 | ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **); | ||
447 | int uwb_bg_joined(struct uwb_rc *rc); | ||
448 | |||
449 | size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); | ||
450 | |||
451 | int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *); | ||
452 | int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *); | ||
453 | int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *); | ||
454 | int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *); | ||
455 | int __uwb_mac_addr_assigned_check(struct device *, void *); | ||
456 | int __uwb_dev_addr_assigned_check(struct device *, void *); | ||
457 | |||
/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
					const struct uwb_dev_addr *addr)
{
	/* last argument selects DevAddr (0) vs MAC address (1) formatting */
	return __uwb_addr_print(buf, buf_size, addr->data, 0);
}

/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
					const struct uwb_mac_addr *addr)
{
	return __uwb_addr_print(buf, buf_size, addr->data, 1);
}
471 | |||
/* @returns 0 if device addresses @addr2 and @addr1 are equal */
static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
				   const struct uwb_dev_addr *addr2)
{
	/* memcmp semantics: <0, 0 or >0, so also usable for ordering */
	return memcmp(addr1, addr2, sizeof(*addr1));
}

/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
				   const struct uwb_mac_addr *addr2)
{
	return memcmp(addr1, addr2, sizeof(*addr1));
}
485 | |||
486 | /* @returns !0 if a MAC @addr is a broadcast address */ | ||
487 | static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr) | ||
488 | { | ||
489 | struct uwb_mac_addr bcast = { | ||
490 | .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } | ||
491 | }; | ||
492 | return !uwb_mac_addr_cmp(addr, &bcast); | ||
493 | } | ||
494 | |||
495 | /* @returns !0 if a MAC @addr is all zeroes*/ | ||
496 | static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr) | ||
497 | { | ||
498 | struct uwb_mac_addr unset = { | ||
499 | .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } | ||
500 | }; | ||
501 | return !uwb_mac_addr_cmp(addr, &unset); | ||
502 | } | ||
503 | |||
/* @returns !0 if the address is in use. */
static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
					       struct uwb_dev_addr *addr)
{
	/* walks every device known to @rc; the check callback returns
	 * non-zero (stopping the iteration) when it finds @addr in use */
	return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
}
510 | |||
511 | /* | ||
512 | * UWB Radio Controller API | ||
513 | * | ||
514 | * This API is used (in addition to the general API) to implement UWB | ||
515 | * Radio Controllers. | ||
516 | */ | ||
517 | void uwb_rc_init(struct uwb_rc *); | ||
518 | int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv); | ||
519 | void uwb_rc_rm(struct uwb_rc *); | ||
520 | void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); | ||
521 | void uwb_rc_neh_error(struct uwb_rc *, int); | ||
522 | void uwb_rc_reset_all(struct uwb_rc *rc); | ||
523 | |||
/**
 * uwb_rsv_is_owner - is the owner of this reservation the RC?
 * @rsv: the reservation
 */
static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
{
	/* the RC owns the reservation iff @owner is the RC's own uwb_dev */
	return rsv->owner == &rsv->rc->uwb_dev;
}
532 | |||
533 | /** | ||
534 | * Events generated by UWB that can be passed to any listeners | ||
535 | * | ||
536 | * Higher layers can register callback functions with the radio | ||
537 | * controller using uwb_notifs_register(). The radio controller | ||
538 | * maintains a list of all registered handlers and will notify all | ||
539 | * nodes when an event occurs. | ||
540 | */ | ||
enum uwb_notifs {
	UWB_NOTIF_BG_JOIN = 0,	/* radio controller joined a beacon group */
	UWB_NOTIF_BG_LEAVE = 1,	/* radio controller left a beacon group */
	UWB_NOTIF_ONAIR,	/* NOTE(review): presumably a device came on the air -- confirm */
	UWB_NOTIF_OFFAIR,	/* NOTE(review): presumably a device went off the air -- confirm */
};

/* Callback function registered with UWB */
struct uwb_notifs_handler {
	struct list_head list_node;	/* node in uwb_notifs_chain.list */
	void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);	/* invoked with @data, the device and the event */
	void *data;	/* opaque pointer handed back to @cb */
};
554 | |||
555 | int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *); | ||
556 | int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *); | ||
557 | |||
558 | |||
559 | /** | ||
560 | * UWB radio controller Event Size Entry (for creating entry tables) | ||
561 | * | ||
562 | * WUSB and WHCI define events and notifications, and they might have | ||
563 | * fixed or variable size. | ||
564 | * | ||
565 | * Each event/notification has a size which is not necessarily known | ||
566 | * in advance based on the event code. As well, vendor specific | ||
567 | * events/notifications will have a size impossible to determine | ||
568 | * unless we know about the device's specific details. | ||
569 | * | ||
570 | * It was way too smart of the spec writers not to think that it would | ||
571 | * be impossible for a generic driver to skip over vendor specific | ||
572 | * events/notifications if there are no LENGTH fields in the HEADER of | ||
573 | * each message...the transaction size cannot be counted on as the | ||
574 | * spec does not forbid to pack more than one event in a single | ||
575 | * transaction. | ||
576 | * | ||
577 | * Thus, we guess sizes with tables (or for events, when you know the | ||
578 | * size ahead of time you can use uwb_rc_neh_extra_size*()). We | ||
579 | * register tables with the known events and their sizes, and then we | ||
580 | * traverse those tables. For those with variable length, we provide a | ||
581 | * way to lookup the size inside the event/notification's | ||
582 | * payload. This allows device-specific event size tables to be | ||
583 | * registered. | ||
584 | * | ||
585 | * @size: Size of the payload | ||
586 | * | ||
587 | * @offset: if != 0, at offset @offset-1 starts a field with a length | ||
588 | * that has to be added to @size. The format of the field is | ||
589 | * given by @type. | ||
590 | * | ||
591 | * @type: Type and length of the offset field. Most common is LE 16 | ||
592 | * bits (that's why that is zero); others are there mostly to | ||
593 | * cover for bugs and weirdos. | ||
594 | */ | ||
struct uwb_est_entry {
	size_t size;	/* fixed part of the payload size */
	unsigned offset;	/* if != 0, a length field starts at @offset-1 and is added to @size */
	enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;	/* width/format of the length field */
};
600 | |||
601 | int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product, | ||
602 | const struct uwb_est_entry *, size_t entries); | ||
603 | int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product, | ||
604 | const struct uwb_est_entry *, size_t entries); | ||
605 | ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, | ||
606 | size_t len); | ||
607 | |||
608 | /* -- Misc */ | ||
609 | |||
enum {
	EDC_MAX_ERRORS = 10,	/* suggested error cap for edc_inc() callers */
	EDC_ERROR_TIMEFRAME = HZ,	/* suggested timeframe: one second */
};

/* error density counter */
struct edc {
	unsigned long timestart;	/* jiffies when the current window began */
	u16 errorcount;	/* errors seen in the current window */
};
620 | |||
621 | static inline | ||
622 | void edc_init(struct edc *edc) | ||
623 | { | ||
624 | edc->timestart = jiffies; | ||
625 | } | ||
626 | |||
/* Called when an error occurred.
 * This is a way to determine if the number of acceptable errors per time
 * period has been exceeded. It is not accurate as there are cases in which
 * this scheme will not work, for example if there are periodic occurrences
 * of errors that straddle updates to the start time. This scheme is
 * sufficient for our usage.
 *
 * @returns 1 if maximum acceptable errors per timeframe has been exceeded.
 */
static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
{
	unsigned long now;

	now = jiffies;
	if (now - err_hist->timestart > timeframe) {
		/* window expired: start a new one, counting this error */
		err_hist->errorcount = 1;
		err_hist->timestart = now;
	} else if (++err_hist->errorcount > max_err) {
		/* too many errors in this window: report and reset */
		err_hist->errorcount = 0;
		err_hist->timestart = now;
		return 1;
	}
	return 0;
}
651 | |||
652 | |||
653 | /* Information Element handling */ | ||
654 | |||
/* For representing the state of writing to a buffer when iterating */
struct uwb_buf_ctx {
	char *buf;	/* destination buffer */
	size_t bytes, size;	/* bytes written so far / total capacity */
};
660 | |||
661 | typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *, | ||
662 | size_t, void *); | ||
663 | struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); | ||
664 | ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, | ||
665 | const void *buf, size_t size); | ||
666 | int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *, | ||
667 | size_t, void *); | ||
668 | int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); | ||
669 | struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); | ||
670 | |||
671 | |||
672 | /* | ||
673 | * Transmission statistics | ||
674 | * | ||
675 | * UWB uses LQI and RSSI (one byte values) for reporting radio signal | ||
676 | * strength and line quality indication. We do quick and dirty | ||
677 | * averages of those. They are signed values, btw. | ||
678 | * | ||
679 | * For 8 bit quantities, we keep the min, the max, an accumulator | ||
680 | * (@sigma) and a # of samples. When @samples gets to 255, we compute | ||
681 | * the average (@sigma / @samples), place it in @sigma and reset | ||
682 | * @samples to 1 (so we use it as the first sample). | ||
683 | * | ||
684 | * Now, statistically speaking, probably I am kicking the kidneys of | ||
685 | * some books I have in my shelves collecting dust, but I just want to | ||
686 | * get an approx, not the Nobel. | ||
687 | * | ||
688 | * LOCKING: there is no locking per se, but we try to keep a lockless | ||
689 | * schema. Only _add_samples() modifies the values--as long as you | ||
690 | * have other locking on top that makes sure that no two calls of | ||
691 | * _add_sample() happen at the same time, then we are fine. Now, for | ||
692 | * resetting the values we just set @samples to 0 and that makes the | ||
693 | * next _add_sample() to start with defaults. Reading the values in | ||
694 | * _show() currently can race, so you need to make sure the calls are | ||
695 | * under the same lock that protects calls to _add_sample(). FIXME: | ||
696 | * currently unlocked (It is not ultraprecise but does the trick. Bite | ||
697 | * me). | ||
698 | */ | ||
struct stats {
	s8 min, max;	/* smallest/largest sample seen */
	s16 sigma;	/* running sum (collapsed to the average on wrap) */
	atomic_t samples;	/* number of samples accumulated */
};
704 | |||
/* Reset @stats; the next stats_add_sample() reinitializes min/max/sigma. */
static inline
void stats_init(struct stats *stats)
{
	atomic_set(&stats->samples, 0);
	wmb();	/* make the reset visible before subsequent samples */
}
711 | |||
712 | static inline | ||
713 | void stats_add_sample(struct stats *stats, s8 sample) | ||
714 | { | ||
715 | s8 min, max; | ||
716 | s16 sigma; | ||
717 | unsigned samples = atomic_read(&stats->samples); | ||
718 | if (samples == 0) { /* it was zero before, so we initialize */ | ||
719 | min = 127; | ||
720 | max = -128; | ||
721 | sigma = 0; | ||
722 | } else { | ||
723 | min = stats->min; | ||
724 | max = stats->max; | ||
725 | sigma = stats->sigma; | ||
726 | } | ||
727 | |||
728 | if (sample < min) /* compute new values */ | ||
729 | min = sample; | ||
730 | else if (sample > max) | ||
731 | max = sample; | ||
732 | sigma += sample; | ||
733 | |||
734 | stats->min = min; /* commit */ | ||
735 | stats->max = max; | ||
736 | stats->sigma = sigma; | ||
737 | if (atomic_add_return(1, &stats->samples) > 255) { | ||
738 | /* wrapped around! reset */ | ||
739 | stats->sigma = sigma / 256; | ||
740 | atomic_set(&stats->samples, 1); | ||
741 | } | ||
742 | } | ||
743 | |||
/* Format "min max avg\n" into @buf; returns the number of characters written. */
static inline ssize_t stats_show(struct stats *stats, char *buf)
{
	int min, max, avg;
	int samples = atomic_read(&stats->samples);
	if (samples == 0)
		min = max = avg = 0;	/* no data yet */
	else {
		min = stats->min;
		max = stats->max;
		avg = stats->sigma / samples;
	}
	return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
}
757 | |||
/* Reset @stats on any write; the input is ignored and @size consumed. */
static inline ssize_t stats_store(struct stats *stats, const char *buf,
				  size_t size)
{
	stats_init(stats);
	return size;
}
764 | |||
765 | #endif /* #ifndef __LINUX__UWB_H__ */ | ||
diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h new file mode 100644 index 000000000000..1141f41bab5c --- /dev/null +++ b/include/linux/uwb/debug-cmd.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Debug interface commands | ||
4 | * | ||
5 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #ifndef __LINUX__UWB__DEBUG_CMD_H__ | ||
20 | #define __LINUX__UWB__DEBUG_CMD_H__ | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | |||
24 | /* | ||
25 | * Debug interface commands | ||
26 | * | ||
27 | * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation. | ||
28 | * | ||
29 | * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation. | ||
30 | */ | ||
31 | |||
32 | enum uwb_dbg_cmd_type { | ||
33 | UWB_DBG_CMD_RSV_ESTABLISH = 1, | ||
34 | UWB_DBG_CMD_RSV_TERMINATE = 2, | ||
35 | }; | ||
36 | |||
37 | struct uwb_dbg_cmd_rsv_establish { | ||
38 | __u8 target[6]; | ||
39 | __u8 type; | ||
40 | __u16 max_mas; | ||
41 | __u16 min_mas; | ||
42 | __u8 sparsity; | ||
43 | }; | ||
44 | |||
45 | struct uwb_dbg_cmd_rsv_terminate { | ||
46 | int index; | ||
47 | }; | ||
48 | |||
49 | struct uwb_dbg_cmd { | ||
50 | __u32 type; | ||
51 | union { | ||
52 | struct uwb_dbg_cmd_rsv_establish rsv_establish; | ||
53 | struct uwb_dbg_cmd_rsv_terminate rsv_terminate; | ||
54 | }; | ||
55 | }; | ||
56 | |||
57 | #endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */ | ||
diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h new file mode 100644 index 000000000000..a86a73fe303f --- /dev/null +++ b/include/linux/uwb/debug.h | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Debug Support | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: doc | ||
24 | * Invoke like: | ||
25 | * | ||
26 | * #define D_LOCAL 4 | ||
27 | * #include <linux/uwb/debug.h> | ||
28 | * | ||
29 | * At the end of your include files. | ||
30 | */ | ||
31 | #include <linux/types.h> | ||
32 | |||
33 | struct device; | ||
34 | extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize); | ||
35 | |||
36 | /* Master debug switch; !0 enables, 0 disables */ | ||
37 | #define D_MASTER (!0) | ||
38 | |||
39 | /* Local (per-file) debug switch; #define before #including */ | ||
40 | #ifndef D_LOCAL | ||
41 | #define D_LOCAL 0 | ||
42 | #endif | ||
43 | |||
44 | #undef __d_printf | ||
45 | #undef d_fnstart | ||
46 | #undef d_fnend | ||
47 | #undef d_printf | ||
48 | #undef d_dump | ||
49 | |||
50 | #define __d_printf(l, _tag, _dev, f, a...) \ | ||
51 | do { \ | ||
52 | struct device *__dev = (_dev); \ | ||
53 | if (D_MASTER && D_LOCAL >= (l)) { \ | ||
54 | char __head[64] = ""; \ | ||
55 | if (_dev != NULL) { \ | ||
56 | if ((unsigned long)__dev < 4096) \ | ||
57 | printk(KERN_ERR "E: Corrupt dev %p\n", \ | ||
58 | __dev); \ | ||
59 | else \ | ||
60 | snprintf(__head, sizeof(__head), \ | ||
61 | "%s %s: ", \ | ||
62 | dev_driver_string(__dev), \ | ||
63 | __dev->bus_id); \ | ||
64 | } \ | ||
65 | printk(KERN_ERR "%s%s" _tag ": " f, __head, \ | ||
66 | __func__, ## a); \ | ||
67 | } \ | ||
68 | } while (0 && _dev) | ||
69 | |||
70 | #define d_fnstart(l, _dev, f, a...) \ | ||
71 | __d_printf(l, " FNSTART", _dev, f, ## a) | ||
72 | #define d_fnend(l, _dev, f, a...) \ | ||
73 | __d_printf(l, " FNEND", _dev, f, ## a) | ||
74 | #define d_printf(l, _dev, f, a...) \ | ||
75 | __d_printf(l, "", _dev, f, ## a) | ||
76 | #define d_dump(l, _dev, ptr, size) \ | ||
77 | do { \ | ||
78 | struct device *__dev = _dev; \ | ||
79 | if (D_MASTER && D_LOCAL >= (l)) \ | ||
80 | dump_bytes(__dev, ptr, size); \ | ||
81 | } while (0 && _dev) | ||
82 | #define d_test(l) (D_MASTER && D_LOCAL >= (l)) | ||
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h new file mode 100644 index 000000000000..198c15f8e251 --- /dev/null +++ b/include/linux/uwb/spec.h | |||
@@ -0,0 +1,727 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * UWB Standard definitions | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * All these definitions are based on the ECMA-368 standard. | ||
24 | * | ||
25 | * Note all definitions are Little Endian in the wire, and we will | ||
26 | * convert them to host order before operating on the bitfields (that | ||
27 | * yes, we use extensively). | ||
28 | */ | ||
29 | |||
30 | #ifndef __LINUX__UWB_SPEC_H__ | ||
31 | #define __LINUX__UWB_SPEC_H__ | ||
32 | |||
33 | #include <linux/types.h> | ||
34 | #include <linux/bitmap.h> | ||
35 | |||
36 | #define i1480_FW 0x00000303 | ||
37 | /* #define i1480_FW 0x00000302 */ | ||
38 | |||
39 | /** | ||
40 | * Number of Medium Access Slots in a superframe. | ||
41 | * | ||
42 | * UWB divides time in SuperFrames, each one divided in 256 pieces, or | ||
43 | * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the | ||
44 | * basic bandwidth allocation unit in UWB. | ||
45 | */ | ||
46 | enum { UWB_NUM_MAS = 256 }; | ||
47 | |||
48 | /** | ||
49 | * Number of Zones in superframe. | ||
50 | * | ||
51 | * UWB divides the superframe into zones with numbering starting from BPST. | ||
52 | * See MBOA MAC[16.8.6] | ||
53 | */ | ||
54 | enum { UWB_NUM_ZONES = 16 }; | ||
55 | |||
56 | /* | ||
57 | * Number of MAS in a zone. | ||
58 | */ | ||
59 | #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) | ||
60 | |||
61 | /* | ||
62 | * Number of streams per DRP reservation between a pair of devices. | ||
63 | * | ||
64 | * [ECMA-368] section 16.8.6. | ||
65 | */ | ||
66 | enum { UWB_NUM_STREAMS = 8 }; | ||
67 | |||
68 | /* | ||
69 | * mMasLength | ||
70 | * | ||
71 | * The length of a MAS in microseconds. | ||
72 | * | ||
73 | * [ECMA-368] section 17.16. | ||
74 | */ | ||
75 | enum { UWB_MAS_LENGTH_US = 256 }; | ||
76 | |||
77 | /* | ||
78 | * mBeaconSlotLength | ||
79 | * | ||
80 | * The length of the beacon slot in microseconds. | ||
81 | * | ||
82 | * [ECMA-368] section 17.16 | ||
83 | */ | ||
84 | enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; | ||
85 | |||
86 | /* | ||
87 | * mMaxLostBeacons | ||
88 | * | ||
89 | * The number beacons missing in consecutive superframes before a | ||
90 | * device can be considered as unreachable. | ||
91 | * | ||
92 | * [ECMA-368] section 17.16 | ||
93 | */ | ||
94 | enum { UWB_MAX_LOST_BEACONS = 3 }; | ||
95 | |||
96 | /* | ||
97 | * Length of a superframe in microseconds. | ||
98 | */ | ||
99 | #define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) | ||
100 | |||
101 | /** | ||
102 | * UWB MAC address | ||
103 | * | ||
104 | * It is *imperative* that this struct is exactly 6 packed bytes (as | ||
105 | * it is also used to define headers sent down and up the wire/radio). | ||
106 | */ | ||
107 | struct uwb_mac_addr { | ||
108 | u8 data[6]; | ||
109 | } __attribute__((packed)); | ||
110 | |||
111 | |||
112 | /** | ||
113 | * UWB device address | ||
114 | * | ||
115 | * It is *imperative* that this struct is exactly 6 packed bytes (as | ||
116 | * it is also used to define headers sent down and up the wire/radio). | ||
117 | */ | ||
118 | struct uwb_dev_addr { | ||
119 | u8 data[2]; | ||
120 | } __attribute__((packed)); | ||
121 | |||
122 | |||
123 | /** | ||
124 | * Types of UWB addresses | ||
125 | * | ||
126 | * Order matters (by size). | ||
127 | */ | ||
128 | enum uwb_addr_type { | ||
129 | UWB_ADDR_DEV = 0, | ||
130 | UWB_ADDR_MAC = 1, | ||
131 | }; | ||
132 | |||
133 | |||
134 | /** Size of a char buffer for printing a MAC/device address */ | ||
135 | enum { UWB_ADDR_STRSIZE = 32 }; | ||
136 | |||
137 | |||
138 | /** UWB WiMedia protocol IDs. */ | ||
139 | enum uwb_prid { | ||
140 | UWB_PRID_WLP_RESERVED = 0x0000, | ||
141 | UWB_PRID_WLP = 0x0001, | ||
142 | UWB_PRID_WUSB_BOT = 0x0010, | ||
143 | UWB_PRID_WUSB = 0x0010, | ||
144 | UWB_PRID_WUSB_TOP = 0x001F, | ||
145 | }; | ||
146 | |||
147 | |||
148 | /** PHY Rate (MBOA MAC[7.8.12, Table 61]) */ | ||
149 | enum uwb_phy_rate { | ||
150 | UWB_PHY_RATE_53 = 0, | ||
151 | UWB_PHY_RATE_80, | ||
152 | UWB_PHY_RATE_106, | ||
153 | UWB_PHY_RATE_160, | ||
154 | UWB_PHY_RATE_200, | ||
155 | UWB_PHY_RATE_320, | ||
156 | UWB_PHY_RATE_400, | ||
157 | UWB_PHY_RATE_480, | ||
158 | UWB_PHY_RATE_INVALID | ||
159 | }; | ||
160 | |||
161 | |||
162 | /** | ||
163 | * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78]) | ||
164 | */ | ||
165 | enum uwb_scan_type { | ||
166 | UWB_SCAN_ONLY = 0, | ||
167 | UWB_SCAN_OUTSIDE_BP, | ||
168 | UWB_SCAN_WHILE_INACTIVE, | ||
169 | UWB_SCAN_DISABLED, | ||
170 | UWB_SCAN_ONLY_STARTTIME, | ||
171 | UWB_SCAN_TOP | ||
172 | }; | ||
173 | |||
174 | |||
175 | /** ACK Policy types (MBOA MAC[7.2.1.3]) */ | ||
176 | enum uwb_ack_pol { | ||
177 | UWB_ACK_NO = 0, | ||
178 | UWB_ACK_INM = 1, | ||
179 | UWB_ACK_B = 2, | ||
180 | UWB_ACK_B_REQ = 3, | ||
181 | }; | ||
182 | |||
183 | |||
184 | /** DRP reservation types ([ECMA-368] table 106) */ | ||
185 | enum uwb_drp_type { | ||
186 | UWB_DRP_TYPE_ALIEN_BP = 0, | ||
187 | UWB_DRP_TYPE_HARD, | ||
188 | UWB_DRP_TYPE_SOFT, | ||
189 | UWB_DRP_TYPE_PRIVATE, | ||
190 | UWB_DRP_TYPE_PCA, | ||
191 | }; | ||
192 | |||
193 | |||
194 | /** DRP Reason Codes ([ECMA-368] table 107) */ | ||
195 | enum uwb_drp_reason { | ||
196 | UWB_DRP_REASON_ACCEPTED = 0, | ||
197 | UWB_DRP_REASON_CONFLICT, | ||
198 | UWB_DRP_REASON_PENDING, | ||
199 | UWB_DRP_REASON_DENIED, | ||
200 | UWB_DRP_REASON_MODIFIED, | ||
201 | }; | ||
202 | |||
203 | /** | ||
204 | * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) | ||
205 | */ | ||
206 | enum uwb_drp_notif_reason { | ||
207 | UWB_DRP_NOTIF_DRP_IE_RCVD = 0, | ||
208 | UWB_DRP_NOTIF_CONFLICT, | ||
209 | UWB_DRP_NOTIF_TERMINATE, | ||
210 | }; | ||
211 | |||
212 | |||
213 | /** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */ | ||
214 | struct uwb_drp_alloc { | ||
215 | __le16 zone_bm; | ||
216 | __le16 mas_bm; | ||
217 | } __attribute__((packed)); | ||
218 | |||
219 | |||
220 | /** General MAC Header format (ECMA-368[16.2]) */ | ||
221 | struct uwb_mac_frame_hdr { | ||
222 | __le16 Frame_Control; | ||
223 | struct uwb_dev_addr DestAddr; | ||
224 | struct uwb_dev_addr SrcAddr; | ||
225 | __le16 Sequence_Control; | ||
226 | __le16 Access_Information; | ||
227 | } __attribute__((packed)); | ||
228 | |||
229 | |||
230 | /** | ||
231 | * uwb_beacon_frame - a beacon frame including MAC headers | ||
232 | * | ||
233 | * [ECMA] section 16.3. | ||
234 | */ | ||
235 | struct uwb_beacon_frame { | ||
236 | struct uwb_mac_frame_hdr hdr; | ||
237 | struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */ | ||
238 | u8 Beacon_Slot_Number; | ||
239 | u8 Device_Control; | ||
240 | u8 IEData[]; | ||
241 | } __attribute__((packed)); | ||
242 | |||
243 | |||
244 | /** Information Element codes (MBOA MAC[T54]) */ | ||
245 | enum uwb_ie { | ||
246 | UWB_PCA_AVAILABILITY = 2, | ||
247 | UWB_IE_DRP_AVAILABILITY = 8, | ||
248 | UWB_IE_DRP = 9, | ||
249 | UWB_BP_SWITCH_IE = 11, | ||
250 | UWB_MAC_CAPABILITIES_IE = 12, | ||
251 | UWB_PHY_CAPABILITIES_IE = 13, | ||
252 | UWB_APP_SPEC_PROBE_IE = 15, | ||
253 | UWB_IDENTIFICATION_IE = 19, | ||
254 | UWB_MASTER_KEY_ID_IE = 20, | ||
255 | UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ | ||
256 | UWB_APP_SPEC_IE = 255, | ||
257 | }; | ||
258 | |||
259 | |||
260 | /** | ||
261 | * Header common to all Information Elements (IEs) | ||
262 | */ | ||
263 | struct uwb_ie_hdr { | ||
264 | u8 element_id; /* enum uwb_ie */ | ||
265 | u8 length; | ||
266 | } __attribute__((packed)); | ||
267 | |||
268 | |||
269 | /** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */ | ||
270 | struct uwb_ie_drp { | ||
271 | struct uwb_ie_hdr hdr; | ||
272 | __le16 drp_control; | ||
273 | struct uwb_dev_addr dev_addr; | ||
274 | struct uwb_drp_alloc allocs[]; | ||
275 | } __attribute__((packed)); | ||
276 | |||
277 | static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie) | ||
278 | { | ||
279 | return (le16_to_cpu(ie->drp_control) >> 0) & 0x7; | ||
280 | } | ||
281 | |||
282 | static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie) | ||
283 | { | ||
284 | return (le16_to_cpu(ie->drp_control) >> 3) & 0x7; | ||
285 | } | ||
286 | |||
287 | static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie) | ||
288 | { | ||
289 | return (le16_to_cpu(ie->drp_control) >> 6) & 0x7; | ||
290 | } | ||
291 | |||
292 | static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie) | ||
293 | { | ||
294 | return (le16_to_cpu(ie->drp_control) >> 9) & 0x1; | ||
295 | } | ||
296 | |||
297 | static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie) | ||
298 | { | ||
299 | return (le16_to_cpu(ie->drp_control) >> 10) & 0x1; | ||
300 | } | ||
301 | |||
302 | static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie) | ||
303 | { | ||
304 | return (le16_to_cpu(ie->drp_control) >> 11) & 0x1; | ||
305 | } | ||
306 | |||
307 | static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie) | ||
308 | { | ||
309 | return (le16_to_cpu(ie->drp_control) >> 12) & 0x1; | ||
310 | } | ||
311 | |||
312 | static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type) | ||
313 | { | ||
314 | u16 drp_control = le16_to_cpu(ie->drp_control); | ||
315 | drp_control = (drp_control & ~(0x7 << 0)) | (type << 0); | ||
316 | ie->drp_control = cpu_to_le16(drp_control); | ||
317 | } | ||
318 | |||
319 | static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index) | ||
320 | { | ||
321 | u16 drp_control = le16_to_cpu(ie->drp_control); | ||
322 | drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3); | ||
323 | ie->drp_control = cpu_to_le16(drp_control); | ||
324 | } | ||
325 | |||
326 | static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie, | ||
327 | enum uwb_drp_reason reason_code) | ||
328 | { | ||
329 | u16 drp_control = le16_to_cpu(ie->drp_control); | ||
330 | drp_control = (drp_control & ~(0x7 << 6)) | (reason_code << 6); | ||
331 | ie->drp_control = cpu_to_le16(drp_control); | ||
332 | } | ||
333 | |||
334 | static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status) | ||
335 | { | ||
336 | u16 drp_control = le16_to_cpu(ie->drp_control); | ||
337 | drp_control = (drp_control & ~(0x1 << 9)) | (status << 9); | ||
338 | ie->drp_control = cpu_to_le16(drp_control); | ||
339 | } | ||
340 | |||
341 | static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner) | ||
342 | { | ||
343 | u16 drp_control = le16_to_cpu(ie->drp_control); | ||
344 | drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10); | ||
345 | ie->drp_control = cpu_to_le16(drp_control); | ||
346 | } | ||
347 | |||
348 | static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker) | ||
349 | { | ||
350 | u16 drp_control = le16_to_cpu(ie->drp_control); | ||
351 | drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11); | ||
352 | ie->drp_control = cpu_to_le16(drp_control); | ||
353 | } | ||
354 | |||
355 | static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe) | ||
356 | { | ||
357 | u16 drp_control = le16_to_cpu(ie->drp_control); | ||
358 | drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12); | ||
359 | ie->drp_control = cpu_to_le16(drp_control); | ||
360 | } | ||
361 | |||
362 | /** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */ | ||
363 | struct uwb_ie_drp_avail { | ||
364 | struct uwb_ie_hdr hdr; | ||
365 | DECLARE_BITMAP(bmp, UWB_NUM_MAS); | ||
366 | } __attribute__((packed)); | ||
367 | |||
368 | /** | ||
369 | * The Vendor ID is set to an OUI that indicates the vendor of the device. | ||
370 | * ECMA-368 [16.8.10] | ||
371 | */ | ||
372 | struct uwb_vendor_id { | ||
373 | u8 data[3]; | ||
374 | } __attribute__((packed)); | ||
375 | |||
376 | /** | ||
377 | * The device type ID | ||
378 | * FIXME: clarify what this means | ||
379 | * ECMA-368 [16.8.10] | ||
380 | */ | ||
381 | struct uwb_device_type_id { | ||
382 | u8 data[3]; | ||
383 | } __attribute__((packed)); | ||
384 | |||
385 | |||
386 | /** | ||
387 | * UWB device information types | ||
388 | * ECMA-368 [16.8.10] | ||
389 | */ | ||
390 | enum uwb_dev_info_type { | ||
391 | UWB_DEV_INFO_VENDOR_ID = 0, | ||
392 | UWB_DEV_INFO_VENDOR_TYPE, | ||
393 | UWB_DEV_INFO_NAME, | ||
394 | }; | ||
395 | |||
396 | /** | ||
397 | * UWB device information found in Identification IE | ||
398 | * ECMA-368 [16.8.10] | ||
399 | */ | ||
400 | struct uwb_dev_info { | ||
401 | u8 type; /* enum uwb_dev_info_type */ | ||
402 | u8 length; | ||
403 | u8 data[]; | ||
404 | } __attribute__((packed)); | ||
405 | |||
406 | /** | ||
407 | * UWB Identification IE | ||
408 | * ECMA-368 [16.8.10] | ||
409 | */ | ||
410 | struct uwb_identification_ie { | ||
411 | struct uwb_ie_hdr hdr; | ||
412 | struct uwb_dev_info info[]; | ||
413 | } __attribute__((packed)); | ||
414 | |||
415 | /* | ||
416 | * UWB Radio Controller | ||
417 | * | ||
418 | * These definitions are common to the Radio Control layers as | ||
419 | * exported by the WUSB1.0 HWA and WHCI interfaces. | ||
420 | */ | ||
421 | |||
422 | /** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */ | ||
423 | struct uwb_rccb { | ||
424 | u8 bCommandType; /* enum hwa_cet */ | ||
425 | __le16 wCommand; /* Command code */ | ||
426 | u8 bCommandContext; /* Context ID */ | ||
427 | } __attribute__((packed)); | ||
428 | |||
429 | |||
430 | /** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */ | ||
431 | struct uwb_rceb { | ||
432 | u8 bEventType; /* enum hwa_cet */ | ||
433 | __le16 wEvent; /* Event code */ | ||
434 | u8 bEventContext; /* Context ID */ | ||
435 | } __attribute__((packed)); | ||
436 | |||
437 | |||
438 | enum { | ||
439 | UWB_RC_CET_GENERAL = 0, /* General Command/Event type */ | ||
440 | UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */ | ||
441 | }; | ||
442 | |||
443 | /* Commands to the radio controller */ | ||
444 | enum uwb_rc_cmd { | ||
445 | UWB_RC_CMD_CHANNEL_CHANGE = 16, | ||
446 | UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */ | ||
447 | UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */ | ||
448 | UWB_RC_CMD_RESET = 19, | ||
449 | UWB_RC_CMD_SCAN = 20, /* Scan management */ | ||
450 | UWB_RC_CMD_SET_BEACON_FILTER = 21, | ||
451 | UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */ | ||
452 | UWB_RC_CMD_SET_IE = 23, /* Information Element management */ | ||
453 | UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24, | ||
454 | UWB_RC_CMD_SET_TX_POWER = 25, | ||
455 | UWB_RC_CMD_SLEEP = 26, | ||
456 | UWB_RC_CMD_START_BEACON = 27, | ||
457 | UWB_RC_CMD_STOP_BEACON = 28, | ||
458 | UWB_RC_CMD_BP_MERGE = 29, | ||
459 | UWB_RC_CMD_SEND_COMMAND_FRAME = 30, | ||
460 | UWB_RC_CMD_SET_ASIE_NOTIF = 31, | ||
461 | }; | ||
462 | |||
463 | /* Notifications from the radio controller */ | ||
464 | enum uwb_rc_evt { | ||
465 | UWB_RC_EVT_IE_RCV = 0, | ||
466 | UWB_RC_EVT_BEACON = 1, | ||
467 | UWB_RC_EVT_BEACON_SIZE = 2, | ||
468 | UWB_RC_EVT_BPOIE_CHANGE = 3, | ||
469 | UWB_RC_EVT_BP_SLOT_CHANGE = 4, | ||
470 | UWB_RC_EVT_BP_SWITCH_IE_RCV = 5, | ||
471 | UWB_RC_EVT_DEV_ADDR_CONFLICT = 6, | ||
472 | UWB_RC_EVT_DRP_AVAIL = 7, | ||
473 | UWB_RC_EVT_DRP = 8, | ||
474 | UWB_RC_EVT_BP_SWITCH_STATUS = 9, | ||
475 | UWB_RC_EVT_CMD_FRAME_RCV = 10, | ||
476 | UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11, | ||
477 | /* Events (command responses) use the same code as the command */ | ||
478 | UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535, | ||
479 | }; | ||
480 | |||
481 | enum uwb_rc_extended_type_1_cmd { | ||
482 | UWB_RC_SET_DAA_ENERGY_MASK = 32, | ||
483 | UWB_RC_SET_NOTIFICATION_FILTER_EX = 33, | ||
484 | }; | ||
485 | |||
486 | enum uwb_rc_extended_type_1_evt { | ||
487 | UWB_RC_DAA_ENERGY_DETECTED = 0, | ||
488 | }; | ||
489 | |||
490 | /* Radio Control Result Code. [WHCI] table 3-3. */ | ||
491 | enum { | ||
492 | UWB_RC_RES_SUCCESS = 0, | ||
493 | UWB_RC_RES_FAIL, | ||
494 | UWB_RC_RES_FAIL_HARDWARE, | ||
495 | UWB_RC_RES_FAIL_NO_SLOTS, | ||
496 | UWB_RC_RES_FAIL_BEACON_TOO_LARGE, | ||
497 | UWB_RC_RES_FAIL_INVALID_PARAMETER, | ||
498 | UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL, | ||
499 | UWB_RC_RES_FAIL_INVALID_IE_DATA, | ||
500 | UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED, | ||
501 | UWB_RC_RES_FAIL_CANCELLED, | ||
502 | UWB_RC_RES_FAIL_INVALID_STATE, | ||
503 | UWB_RC_RES_FAIL_INVALID_SIZE, | ||
504 | UWB_RC_RES_FAIL_ACK_NOT_RECEIVED, | ||
505 | UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF, | ||
506 | UWB_RC_RES_FAIL_TIME_OUT = 255, | ||
507 | }; | ||
508 | |||
509 | /* Confirm event. [WHCI] section 3.1.3.1 etc. */ | ||
510 | struct uwb_rc_evt_confirm { | ||
511 | struct uwb_rceb rceb; | ||
512 | u8 bResultCode; | ||
513 | } __attribute__((packed)); | ||
514 | |||
515 | /* Device Address Management event. [WHCI] section 3.1.3.2. */ | ||
516 | struct uwb_rc_evt_dev_addr_mgmt { | ||
517 | struct uwb_rceb rceb; | ||
518 | u8 baAddr[6]; | ||
519 | u8 bResultCode; | ||
520 | } __attribute__((packed)); | ||
521 | |||
522 | |||
523 | /* Get IE Event. [WHCI] section 3.1.3.3. */ | ||
524 | struct uwb_rc_evt_get_ie { | ||
525 | struct uwb_rceb rceb; | ||
526 | __le16 wIELength; | ||
527 | u8 IEData[]; | ||
528 | } __attribute__((packed)); | ||
529 | |||
530 | /* Set DRP IE Event. [WHCI] section 3.1.3.7. */ | ||
531 | struct uwb_rc_evt_set_drp_ie { | ||
532 | struct uwb_rceb rceb; | ||
533 | __le16 wRemainingSpace; | ||
534 | u8 bResultCode; | ||
535 | } __attribute__((packed)); | ||
536 | |||
537 | /* Set IE Event. [WHCI] section 3.1.3.8. */ | ||
538 | struct uwb_rc_evt_set_ie { | ||
539 | struct uwb_rceb rceb; | ||
540 | __le16 RemainingSpace; | ||
541 | u8 bResultCode; | ||
542 | } __attribute__((packed)); | ||
543 | |||
544 | /* Scan command. [WHCI] 3.1.3.5. */ | ||
545 | struct uwb_rc_cmd_scan { | ||
546 | struct uwb_rccb rccb; | ||
547 | u8 bChannelNumber; | ||
548 | u8 bScanState; | ||
549 | __le16 wStartTime; | ||
550 | } __attribute__((packed)); | ||
551 | |||
552 | /* Set DRP IE command. [WHCI] section 3.1.3.7. */ | ||
553 | struct uwb_rc_cmd_set_drp_ie { | ||
554 | struct uwb_rccb rccb; | ||
555 | __le16 wIELength; | ||
556 | struct uwb_ie_drp IEData[]; | ||
557 | } __attribute__((packed)); | ||
558 | |||
559 | /* Set IE command. [WHCI] section 3.1.3.8. */ | ||
560 | struct uwb_rc_cmd_set_ie { | ||
561 | struct uwb_rccb rccb; | ||
562 | __le16 wIELength; | ||
563 | u8 IEData[]; | ||
564 | } __attribute__((packed)); | ||
565 | |||
566 | /* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */ | ||
567 | struct uwb_rc_evt_set_daa_energy_mask { | ||
568 | struct uwb_rceb rceb; | ||
569 | __le16 wLength; | ||
570 | u8 result; | ||
571 | } __attribute__((packed)); | ||
572 | |||
573 | /* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */ | ||
574 | struct uwb_rc_evt_set_notification_filter_ex { | ||
575 | struct uwb_rceb rceb; | ||
576 | __le16 wLength; | ||
577 | u8 result; | ||
578 | } __attribute__((packed)); | ||
579 | |||
580 | /* IE Received notification. [WHCI] section 3.1.4.1. */ | ||
581 | struct uwb_rc_evt_ie_rcv { | ||
582 | struct uwb_rceb rceb; | ||
583 | struct uwb_dev_addr SrcAddr; | ||
584 | __le16 wIELength; | ||
585 | u8 IEData[]; | ||
586 | } __attribute__((packed)); | ||
587 | |||
588 | /* Type of the received beacon. [WHCI] section 3.1.4.2. */ | ||
589 | enum uwb_rc_beacon_type { | ||
590 | UWB_RC_BEACON_TYPE_SCAN = 0, | ||
591 | UWB_RC_BEACON_TYPE_NEIGHBOR, | ||
592 | UWB_RC_BEACON_TYPE_OL_ALIEN, | ||
593 | UWB_RC_BEACON_TYPE_NOL_ALIEN, | ||
594 | }; | ||
595 | |||
596 | /* Beacon received notification. [WHCI] 3.1.4.2. */ | ||
597 | struct uwb_rc_evt_beacon { | ||
598 | struct uwb_rceb rceb; | ||
599 | u8 bChannelNumber; | ||
600 | u8 bBeaconType; | ||
601 | __le16 wBPSTOffset; | ||
602 | u8 bLQI; | ||
603 | u8 bRSSI; | ||
604 | __le16 wBeaconInfoLength; | ||
605 | u8 BeaconInfo[]; | ||
606 | } __attribute__((packed)); | ||
607 | |||
608 | |||
609 | /* Beacon Size Change notification. [WHCI] section 3.1.4.3 */ | ||
610 | struct uwb_rc_evt_beacon_size { | ||
611 | struct uwb_rceb rceb; | ||
612 | __le16 wNewBeaconSize; | ||
613 | } __attribute__((packed)); | ||
614 | |||
615 | |||
616 | /* BPOIE Change notification. [WHCI] section 3.1.4.4. */ | ||
617 | struct uwb_rc_evt_bpoie_change { | ||
618 | struct uwb_rceb rceb; | ||
619 | __le16 wBPOIELength; | ||
620 | u8 BPOIE[]; | ||
621 | } __attribute__((packed)); | ||
622 | |||
623 | |||
624 | /* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */ | ||
625 | struct uwb_rc_evt_bp_slot_change { | ||
626 | struct uwb_rceb rceb; | ||
627 | u8 slot_info; | ||
628 | } __attribute__((packed)); | ||
629 | |||
630 | static inline int uwb_rc_evt_bp_slot_change_slot_num( | ||
631 | const struct uwb_rc_evt_bp_slot_change *evt) | ||
632 | { | ||
633 | return evt->slot_info & 0x7f; | ||
634 | } | ||
635 | |||
636 | static inline int uwb_rc_evt_bp_slot_change_no_slot( | ||
637 | const struct uwb_rc_evt_bp_slot_change *evt) | ||
638 | { | ||
639 | return (evt->slot_info & 0x80) >> 7; | ||
640 | } | ||
641 | |||
642 | /* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */ | ||
643 | struct uwb_rc_evt_bp_switch_ie_rcv { | ||
644 | struct uwb_rceb rceb; | ||
645 | struct uwb_dev_addr wSrcAddr; | ||
646 | __le16 wIELength; | ||
647 | u8 IEData[]; | ||
648 | } __attribute__((packed)); | ||
649 | |||
650 | /* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */ | ||
651 | struct uwb_rc_evt_dev_addr_conflict { | ||
652 | struct uwb_rceb rceb; | ||
653 | } __attribute__((packed)); | ||
654 | |||
655 | /* DRP notification. [WHCI] section 3.1.4.9. */ | ||
656 | struct uwb_rc_evt_drp { | ||
657 | struct uwb_rceb rceb; | ||
658 | struct uwb_dev_addr src_addr; | ||
659 | u8 reason; | ||
660 | u8 beacon_slot_number; | ||
661 | __le16 ie_length; | ||
662 | u8 ie_data[]; | ||
663 | } __attribute__((packed)); | ||
664 | |||
665 | static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt) | ||
666 | { | ||
667 | return evt->reason & 0x0f; | ||
668 | } | ||
669 | |||
670 | |||
671 | /* DRP Availability Change notification. [WHCI] section 3.1.4.8. */ | ||
672 | struct uwb_rc_evt_drp_avail { | ||
673 | struct uwb_rceb rceb; | ||
674 | DECLARE_BITMAP(bmp, UWB_NUM_MAS); | ||
675 | } __attribute__((packed)); | ||
676 | |||
677 | /* BP switch status notification. [WHCI] section 3.1.4.10. */ | ||
678 | struct uwb_rc_evt_bp_switch_status { | ||
679 | struct uwb_rceb rceb; | ||
680 | u8 status; | ||
681 | u8 slot_offset; | ||
682 | __le16 bpst_offset; | ||
683 | u8 move_countdown; | ||
684 | } __attribute__((packed)); | ||
685 | |||
686 | /* Command Frame Received notification. [WHCI] section 3.1.4.11. */ | ||
687 | struct uwb_rc_evt_cmd_frame_rcv { | ||
688 | struct uwb_rceb rceb; | ||
689 | __le16 receive_time; | ||
690 | struct uwb_dev_addr wSrcAddr; | ||
691 | struct uwb_dev_addr wDstAddr; | ||
692 | __le16 control; | ||
693 | __le16 reserved; | ||
694 | __le16 dataLength; | ||
695 | u8 data[]; | ||
696 | } __attribute__((packed)); | ||
697 | |||
698 | /* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */ | ||
699 | struct uwb_rc_evt_channel_change_ie_rcv { | ||
700 | struct uwb_rceb rceb; | ||
701 | struct uwb_dev_addr wSrcAddr; | ||
702 | __le16 wIELength; | ||
703 | u8 IEData[]; | ||
704 | } __attribute__((packed)); | ||
705 | |||
706 | /* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */ | ||
707 | struct uwb_rc_evt_daa_energy_detected { | ||
708 | struct uwb_rceb rceb; | ||
709 | __le16 wLength; | ||
710 | u8 bandID; | ||
711 | u8 reserved; | ||
712 | u8 toneBmp[16]; | ||
713 | } __attribute__((packed)); | ||
714 | |||
715 | |||
716 | /** | ||
717 | * Radio Control Interface Class Descriptor | ||
718 | * | ||
719 | * WUSB 1.0 [8.6.1.2] | ||
720 | */ | ||
721 | struct uwb_rc_control_intf_class_desc { | ||
722 | u8 bLength; | ||
723 | u8 bDescriptorType; | ||
724 | __le16 bcdRCIVersion; | ||
725 | } __attribute__((packed)); | ||
726 | |||
727 | #endif /* #ifndef __LINUX__UWB_SPEC_H__ */ | ||
diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h new file mode 100644 index 000000000000..36a39e34f8d7 --- /dev/null +++ b/include/linux/uwb/umc.h | |||
@@ -0,0 +1,194 @@ | |||
1 | /* | ||
2 | * UWB Multi-interface Controller support. | ||
3 | * | ||
4 | * Copyright (C) 2007 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This file is released under the GPLv2 | ||
7 | * | ||
8 | * UMC (UWB Multi-interface Controller) capabilities (e.g., radio | ||
9 | * controller, host controller) are presented as devices on the "umc" | ||
10 | * bus. | ||
11 | * | ||
12 | * The radio controller is not strictly a UMC capability but it's | ||
13 | * useful to present it as such. | ||
14 | * | ||
15 | * References: | ||
16 | * | ||
17 | * [WHCI] Wireless Host Controller Interface Specification for | ||
18 | * Certified Wireless Universal Serial Bus, revision 0.95. | ||
19 | * | ||
20 | * How this works is kind of convoluted but simple. The whci.ko driver | ||
21 | * loads when WHCI devices are detected. These WHCI devices expose | ||
22 | * many devices in the same PCI function (they couldn't have reused | ||
23 | * functions, no), so for each PCI function that exposes these many | ||
24 | * devices, whci creates a umc_dev [whci_probe() -> whci_add_cap()] | ||
25 | * with umc_device_create() and adds it to the bus with | ||
26 | * umc_device_register(). | ||
27 | * | ||
28 | * umc_device_register() calls device_register() which will push the | ||
29 | * bus management code to load your UMC driver's somehting_probe() | ||
30 | * that you have registered for that capability code. | ||
31 | * | ||
32 | * Now when the WHCI device is removed, whci_remove() will go over | ||
33 | * each umc_dev assigned to each of the PCI function's capabilities | ||
34 | * and through whci_del_cap() call umc_device_unregister() each | ||
35 | * created umc_dev. Of course, if you are bound to the device, your | ||
36 | * driver's something_remove() will be called. | ||
37 | */ | ||
38 | |||
39 | #ifndef _LINUX_UWB_UMC_H_ | ||
40 | #define _LINUX_UWB_UMC_H_ | ||
41 | |||
42 | #include <linux/device.h> | ||
43 | #include <linux/pci.h> | ||
44 | |||
45 | /* | ||
46 | * UMC capability IDs. | ||
47 | * | ||
48 | * 0x00 is reserved so use it for the radio controller device. | ||
49 | * | ||
50 | * [WHCI] table 2-8 | ||
51 | */ | ||
52 | #define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */ | ||
53 | #define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */ | ||
54 | |||
55 | /** | ||
56 | * struct umc_dev - UMC capability device | ||
57 | * | ||
58 | * @version: version of the specification this capability conforms to. | ||
59 | * @cap_id: capability ID. | ||
60 | * @bar: PCI Bar (64 bit) where the resource lies | ||
61 | * @resource: register space resource. | ||
62 | * @irq: interrupt line. | ||
63 | */ | ||
64 | struct umc_dev { | ||
65 | u16 version; | ||
66 | u8 cap_id; | ||
67 | u8 bar; | ||
68 | struct resource resource; | ||
69 | unsigned irq; | ||
70 | struct device dev; | ||
71 | }; | ||
72 | |||
73 | #define to_umc_dev(d) container_of(d, struct umc_dev, dev) | ||
74 | |||
75 | /** | ||
76 | * struct umc_driver - UMC capability driver | ||
77 | * @cap_id: supported capability ID. | ||
78 | * @match: driver specific capability matching function. | ||
79 | * @match_data: driver specific data for match() (e.g., a | ||
80 | * table of pci_device_id's if umc_match_pci_id() is used). | ||
81 | */ | ||
82 | struct umc_driver { | ||
83 | char *name; | ||
84 | u8 cap_id; | ||
85 | int (*match)(struct umc_driver *, struct umc_dev *); | ||
86 | const void *match_data; | ||
87 | |||
88 | int (*probe)(struct umc_dev *); | ||
89 | void (*remove)(struct umc_dev *); | ||
90 | int (*suspend)(struct umc_dev *, pm_message_t state); | ||
91 | int (*resume)(struct umc_dev *); | ||
92 | |||
93 | struct device_driver driver; | ||
94 | }; | ||
95 | |||
96 | #define to_umc_driver(d) container_of(d, struct umc_driver, driver) | ||
97 | |||
98 | extern struct bus_type umc_bus_type; | ||
99 | |||
100 | struct umc_dev *umc_device_create(struct device *parent, int n); | ||
101 | int __must_check umc_device_register(struct umc_dev *umc); | ||
102 | void umc_device_unregister(struct umc_dev *umc); | ||
103 | |||
104 | int __must_check __umc_driver_register(struct umc_driver *umc_drv, | ||
105 | struct module *mod, | ||
106 | const char *mod_name); | ||
107 | |||
108 | /** | ||
109 | * umc_driver_register - register a UMC capability driver. | ||
110 | * @umc_drv: pointer to the driver. | ||
111 | */ | ||
112 | static inline int __must_check umc_driver_register(struct umc_driver *umc_drv) | ||
113 | { | ||
114 | return __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME); | ||
115 | } | ||
116 | void umc_driver_unregister(struct umc_driver *umc_drv); | ||
117 | |||
118 | /* | ||
119 | * Utility function you can use to match (umc_driver->match) against a | ||
120 | * null-terminated array of 'struct pci_device_id' in | ||
121 | * umc_driver->match_data. | ||
122 | */ | ||
123 | int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc); | ||
124 | |||
125 | /** | ||
126 | * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none | ||
127 | * @umc_dev: UMC device whose parent PCI device we are looking for | ||
128 | * | ||
129 | * DIRTY!!! DON'T RELY ON THIS | ||
130 | * | ||
131 | * FIXME: This is as dirty as it gets, but we need some way to check | ||
132 | * the correct type of umc_dev->parent (so that for example, we can | ||
133 | * cast to pci_dev). Casting to pci_dev is necessary because at some | ||
134 | * point we need to request resources from the device. Mapping is | ||
135 | * easily overcome (ioremap and stuff are bus agnostic), but hooking | ||
136 | * up to some error handlers (such as pci error handlers) might need | ||
137 | * this. | ||
138 | * | ||
139 | * THIS might (probably will) be removed in the future, so don't count | ||
140 | * on it. | ||
141 | */ | ||
142 | static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev) | ||
143 | { | ||
144 | struct pci_dev *pci_dev = NULL; | ||
145 | if (umc_dev->dev.parent->bus == &pci_bus_type) | ||
146 | pci_dev = to_pci_dev(umc_dev->dev.parent); | ||
147 | return pci_dev; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * umc_dev_get() - reference a UMC device. | ||
152 | * @umc_dev: Pointer to UMC device. | ||
153 | * | ||
154 | * NOTE: we are assuming in this whole scheme that the parent device | ||
155 | * is referenced at _probe() time and unreferenced at _remove() | ||
156 | * time by the parent's subsystem. | ||
157 | */ | ||
158 | static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev) | ||
159 | { | ||
160 | get_device(&umc_dev->dev); | ||
161 | return umc_dev; | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * umc_dev_put() - unreference a UMC device. | ||
166 | * @umc_dev: Pointer to UMC device. | ||
167 | */ | ||
168 | static inline void umc_dev_put(struct umc_dev *umc_dev) | ||
169 | { | ||
170 | put_device(&umc_dev->dev); | ||
171 | } | ||
172 | |||
173 | /** | ||
174 | * umc_set_drvdata - set UMC device's driver data. | ||
175 | * @umc_dev: Pointer to UMC device. | ||
176 | * @data: Data to set. | ||
177 | */ | ||
178 | static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data) | ||
179 | { | ||
180 | dev_set_drvdata(&umc_dev->dev, data); | ||
181 | } | ||
182 | |||
183 | /** | ||
184 | * umc_get_drvdata - recover UMC device's driver data. | ||
185 | * @umc_dev: Pointer to UMC device. | ||
186 | */ | ||
187 | static inline void *umc_get_drvdata(struct umc_dev *umc_dev) | ||
188 | { | ||
189 | return dev_get_drvdata(&umc_dev->dev); | ||
190 | } | ||
191 | |||
192 | int umc_controller_reset(struct umc_dev *umc); | ||
193 | |||
194 | #endif /* #ifndef _LINUX_UWB_UMC_H_ */ | ||
diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h new file mode 100644 index 000000000000..915ec23042d4 --- /dev/null +++ b/include/linux/uwb/whci.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Intel Corporation | ||
5 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * | ||
23 | * References: | ||
24 | * [WHCI] Wireless Host Controller Interface Specification for | ||
25 | * Certified Wireless Universal Serial Bus, revision 0.95. | ||
26 | */ | ||
27 | #ifndef _LINUX_UWB_WHCI_H_ | ||
28 | #define _LINUX_UWB_WHCI_H_ | ||
29 | |||
30 | #include <linux/pci.h> | ||
31 | |||
32 | /* | ||
33 | * UWB interface capability registers (offsets from UWBBASE) | ||
34 | * | ||
35 | * [WHCI] section 2.2 | ||
36 | */ | ||
37 | #define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */ | ||
38 | # define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull) | ||
39 | #define UWBCAPDATA(n) (8*(n)) | ||
40 | # define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull) | ||
41 | # define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull) | ||
42 | # define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull) | ||
43 | # define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32)) | ||
44 | # define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull) | ||
45 | |||
46 | /* Size of the WHCI capability data (including the RC capability) for | ||
47 | a device with n capabilities. */ | ||
48 | #define UWBCAPDATA_SIZE(n) (8 + 8*(n)) | ||
49 | |||
50 | |||
51 | /* | ||
52 | * URC registers (offsets from URCBASE) | ||
53 | * | ||
54 | * [WHCI] section 2.3 | ||
55 | */ | ||
56 | #define URCCMD 0x00 | ||
57 | # define URCCMD_RESET (1 << 31) /* UMC Hardware reset */ | ||
58 | # define URCCMD_RS (1 << 30) /* Run/Stop */ | ||
59 | # define URCCMD_EARV (1 << 29) /* Event Address Register Valid */ | ||
60 | # define URCCMD_ACTIVE (1 << 15) /* Command is active */ | ||
61 | # define URCCMD_IWR (1 << 14) /* Interrupt When Ready */ | ||
62 | # define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */ | ||
63 | #define URCSTS 0x04 | ||
64 | # define URCSTS_EPS (1 << 17) /* Event Processing Status */ | ||
65 | # define URCSTS_HALTED (1 << 16) /* RC halted */ | ||
66 | # define URCSTS_HSE (1 << 10) /* Host System Error...fried */ | ||
67 | # define URCSTS_ER (1 << 9) /* Event Ready */ | ||
68 | # define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */ | ||
69 | # define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */ | ||
70 | # define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */ | ||
71 | #define URCINTR 0x08 | ||
72 | # define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */ | ||
73 | #define URCCMDADDR 0x10 | ||
74 | #define URCEVTADDR 0x18 | ||
75 | # define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */ | ||
76 | |||
77 | |||
78 | /** Write 32 bit @value to little endian register at @addr */ | ||
79 | static inline | ||
80 | void le_writel(u32 value, void __iomem *addr) | ||
81 | { | ||
82 | iowrite32(value, addr); | ||
83 | } | ||
84 | |||
85 | |||
86 | /** Read from 32 bit little endian register at @addr */ | ||
87 | static inline | ||
88 | u32 le_readl(void __iomem *addr) | ||
89 | { | ||
90 | return ioread32(addr); | ||
91 | } | ||
92 | |||
93 | |||
94 | /** Write 64 bit @value to little endian register at @addr */ | ||
95 | static inline | ||
96 | void le_writeq(u64 value, void __iomem *addr) | ||
97 | { | ||
98 | iowrite32(value, addr); | ||
99 | iowrite32(value >> 32, addr + 4); | ||
100 | } | ||
101 | |||
102 | |||
103 | /** Read from 64 bit little endian register at @addr */ | ||
104 | static inline | ||
105 | u64 le_readq(void __iomem *addr) | ||
106 | { | ||
107 | u64 value; | ||
108 | value = ioread32(addr); | ||
109 | value |= (u64)ioread32(addr + 4) << 32; | ||
110 | return value; | ||
111 | } | ||
112 | |||
113 | extern int whci_wait_for(struct device *dev, u32 __iomem *reg, | ||
114 | u32 mask, u32 result, | ||
115 | unsigned long max_ms, const char *tag); | ||
116 | |||
117 | #endif /* #ifndef _LINUX_UWB_WHCI_H_ */ | ||
diff --git a/include/linux/wlp.h b/include/linux/wlp.h new file mode 100644 index 000000000000..033545e145c7 --- /dev/null +++ b/include/linux/wlp.h | |||
@@ -0,0 +1,735 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Intel Corporation | ||
5 | * Reinette Chatre <reinette.chatre@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * FIXME: docs | ||
23 | * | ||
24 | * - Does not (yet) include support for WLP control frames | ||
25 | * WLP Draft 0.99 [6.5]. | ||
26 | * | ||
27 | * A visual representation of the data structures. | ||
28 | * | ||
29 | * wssidB wssidB | ||
30 | * ^ ^ | ||
31 | * | | | ||
32 | * wssidA wssidA | ||
33 | * wlp interface { ^ ^ | ||
34 | * ... | | | ||
35 | * ... ... wssid wssid ... | ||
36 | * wlp --- ... | | | ||
37 | * }; neighbors --> neighbA --> neighbB | ||
38 | * ... | ||
39 | * wss | ||
40 | * ... | ||
41 | * eda cache --> neighborA --> neighborB --> neighborC ... | ||
42 | */ | ||
43 | |||
44 | #ifndef __LINUX__WLP_H_ | ||
45 | #define __LINUX__WLP_H_ | ||
46 | |||
47 | #include <linux/netdevice.h> | ||
48 | #include <linux/skbuff.h> | ||
49 | #include <linux/list.h> | ||
50 | #include <linux/uwb.h> | ||
51 | |||
52 | /** | ||
53 | * WLP Protocol ID | ||
54 | * WLP Draft 0.99 [6.2] | ||
55 | * | ||
56 | * The MUX header for all WLP frames | ||
57 | */ | ||
58 | #define WLP_PROTOCOL_ID 0x0100 | ||
59 | |||
60 | /** | ||
61 | * WLP Version | ||
62 | * WLP version placed in the association frames (WLP 0.99 [6.6]) | ||
63 | */ | ||
64 | #define WLP_VERSION 0x10 | ||
65 | |||
66 | /** | ||
67 | * Bytes needed to print UUID as string | ||
68 | */ | ||
69 | #define WLP_WSS_UUID_STRSIZE 48 | ||
70 | |||
71 | /** | ||
72 | * Bytes needed to print nonce as string | ||
73 | */ | ||
74 | #define WLP_WSS_NONCE_STRSIZE 48 | ||
75 | |||
76 | |||
77 | /** | ||
78 | * Size used for WLP name size | ||
79 | * | ||
80 | * The WSS name is set to 65 bytes, 1 byte larger than the maximum | ||
81 | * allowed by the WLP spec. This is to have a null terminated string | ||
82 | * for display to the user. A maximum of 64 bytes will still be used | ||
83 | * when placing the WSS name field in association frames. | ||
84 | */ | ||
85 | #define WLP_WSS_NAME_SIZE 65 | ||
86 | |||
87 | /** | ||
88 | * Number of bytes added by WLP to data frame | ||
89 | * | ||
90 | * A data frame transmitted from a host will be placed in a Standard or | ||
91 | * Abbreviated WLP frame. These have an extra 4 bytes of header (struct | ||
92 | * wlp_frame_std_abbrv_hdr). | ||
93 | * When the stack sends this data frame for transmission it needs to ensure | ||
94 | * there is enough headroom for this header. | ||
95 | */ | ||
96 | #define WLP_DATA_HLEN 4 | ||
97 | |||
98 | /** | ||
99 | * State of device regarding WLP Service Set | ||
100 | * | ||
101 | * WLP_WSS_STATE_NONE: the host does not participate in any WSS | ||
102 | * WLP_WSS_STATE_PART_ENROLLED: used as part of the enrollment sequence | ||
103 | * ("Partial Enroll"). This state is used to | ||
104 | * indicate the first part of enrollment that is | ||
105 | * unsecure. If the WSS is unsecure then the | ||
106 | * state will promptly go to WLP_WSS_STATE_ENROLLED, | ||
107 | * if the WSS is not secure then the enrollment | ||
108 | * procedure is a few more steps before we are | ||
109 | * enrolled. | ||
110 | * WLP_WSS_STATE_ENROLLED: the host is enrolled in a WSS | ||
111 | * WLP_WSS_STATE_ACTIVE: WSS is activated | ||
112 | * WLP_WSS_STATE_CONNECTED: host is connected to neighbor in WSS | ||
113 | * | ||
114 | */ | ||
115 | enum wlp_wss_state { | ||
116 | WLP_WSS_STATE_NONE = 0, | ||
117 | WLP_WSS_STATE_PART_ENROLLED, | ||
118 | WLP_WSS_STATE_ENROLLED, | ||
119 | WLP_WSS_STATE_ACTIVE, | ||
120 | WLP_WSS_STATE_CONNECTED, | ||
121 | }; | ||
122 | |||
123 | /** | ||
124 | * WSS Secure status | ||
125 | * WLP 0.99 Table 6 | ||
126 | * | ||
127 | * Set to one if the WSS is secure, zero if it is not secure | ||
128 | */ | ||
129 | enum wlp_wss_sec_status { | ||
130 | WLP_WSS_UNSECURE = 0, | ||
131 | WLP_WSS_SECURE, | ||
132 | }; | ||
133 | |||
134 | /** | ||
135 | * WLP frame type | ||
136 | * WLP Draft 0.99 [6.2 Table 1] | ||
137 | */ | ||
138 | enum wlp_frame_type { | ||
139 | WLP_FRAME_STANDARD = 0, | ||
140 | WLP_FRAME_ABBREVIATED, | ||
141 | WLP_FRAME_CONTROL, | ||
142 | WLP_FRAME_ASSOCIATION, | ||
143 | }; | ||
144 | |||
145 | /** | ||
146 | * WLP Association Message Type | ||
147 | * WLP Draft 0.99 [6.6.1.2 Table 8] | ||
148 | */ | ||
149 | enum wlp_assoc_type { | ||
150 | WLP_ASSOC_D1 = 2, | ||
151 | WLP_ASSOC_D2 = 3, | ||
152 | WLP_ASSOC_M1 = 4, | ||
153 | WLP_ASSOC_M2 = 5, | ||
154 | WLP_ASSOC_M3 = 7, | ||
155 | WLP_ASSOC_M4 = 8, | ||
156 | WLP_ASSOC_M5 = 9, | ||
157 | WLP_ASSOC_M6 = 10, | ||
158 | WLP_ASSOC_M7 = 11, | ||
159 | WLP_ASSOC_M8 = 12, | ||
160 | WLP_ASSOC_F0 = 14, | ||
161 | WLP_ASSOC_E1 = 32, | ||
162 | WLP_ASSOC_E2 = 33, | ||
163 | WLP_ASSOC_C1 = 34, | ||
164 | WLP_ASSOC_C2 = 35, | ||
165 | WLP_ASSOC_C3 = 36, | ||
166 | WLP_ASSOC_C4 = 37, | ||
167 | }; | ||
168 | |||
169 | /** | ||
170 | * WLP Attribute Type | ||
171 | * WLP Draft 0.99 [6.6.1 Table 6] | ||
172 | */ | ||
173 | enum wlp_attr_type { | ||
174 | WLP_ATTR_AUTH = 0x1005, /* Authenticator */ | ||
175 | WLP_ATTR_DEV_NAME = 0x1011, /* Device Name */ | ||
176 | WLP_ATTR_DEV_PWD_ID = 0x1012, /* Device Password ID */ | ||
177 | WLP_ATTR_E_HASH1 = 0x1014, /* E-Hash1 */ | ||
178 | WLP_ATTR_E_HASH2 = 0x1015, /* E-Hash2 */ | ||
179 | WLP_ATTR_E_SNONCE1 = 0x1016, /* E-SNonce1 */ | ||
180 | WLP_ATTR_E_SNONCE2 = 0x1017, /* E-SNonce2 */ | ||
181 | WLP_ATTR_ENCR_SET = 0x1018, /* Encrypted Settings */ | ||
182 | WLP_ATTR_ENRL_NONCE = 0x101A, /* Enrollee Nonce */ | ||
183 | WLP_ATTR_KEYWRAP_AUTH = 0x101E, /* Key Wrap Authenticator */ | ||
184 | WLP_ATTR_MANUF = 0x1021, /* Manufacturer */ | ||
185 | WLP_ATTR_MSG_TYPE = 0x1022, /* Message Type */ | ||
186 | WLP_ATTR_MODEL_NAME = 0x1023, /* Model Name */ | ||
187 | WLP_ATTR_MODEL_NR = 0x1024, /* Model Number */ | ||
188 | WLP_ATTR_PUB_KEY = 0x1032, /* Public Key */ | ||
189 | WLP_ATTR_REG_NONCE = 0x1039, /* Registrar Nonce */ | ||
190 | WLP_ATTR_R_HASH1 = 0x103D, /* R-Hash1 */ | ||
191 | WLP_ATTR_R_HASH2 = 0x103E, /* R-Hash2 */ | ||
192 | WLP_ATTR_R_SNONCE1 = 0x103F, /* R-SNonce1 */ | ||
193 | WLP_ATTR_R_SNONCE2 = 0x1040, /* R-SNonce2 */ | ||
194 | WLP_ATTR_SERIAL = 0x1042, /* Serial number */ | ||
195 | WLP_ATTR_UUID_E = 0x1047, /* UUID-E */ | ||
196 | WLP_ATTR_UUID_R = 0x1048, /* UUID-R */ | ||
197 | WLP_ATTR_PRI_DEV_TYPE = 0x1054, /* Primary Device Type */ | ||
198 | WLP_ATTR_SEC_DEV_TYPE = 0x1055, /* Secondary Device Type */ | ||
199 | WLP_ATTR_PORT_DEV = 0x1056, /* Portable Device */ | ||
200 | WLP_ATTR_APP_EXT = 0x1058, /* Application Extension */ | ||
201 | WLP_ATTR_WLP_VER = 0x2000, /* WLP Version */ | ||
202 | WLP_ATTR_WSSID = 0x2001, /* WSSID */ | ||
203 | WLP_ATTR_WSS_NAME = 0x2002, /* WSS Name */ | ||
204 | WLP_ATTR_WSS_SEC_STAT = 0x2003, /* WSS Secure Status */ | ||
205 | WLP_ATTR_WSS_BCAST = 0x2004, /* WSS Broadcast Address */ | ||
206 | WLP_ATTR_WSS_M_KEY = 0x2005, /* WSS Master Key */ | ||
207 | WLP_ATTR_ACC_ENRL = 0x2006, /* Accepting Enrollment */ | ||
208 | WLP_ATTR_WSS_INFO = 0x2007, /* WSS Information */ | ||
209 | WLP_ATTR_WSS_SEL_MTHD = 0x2008, /* WSS Selection Method */ | ||
210 | WLP_ATTR_ASSC_MTHD_LIST = 0x2009, /* Association Methods List */ | ||
211 | WLP_ATTR_SEL_ASSC_MTHD = 0x200A, /* Selected Association Method */ | ||
212 | WLP_ATTR_ENRL_HASH_COMM = 0x200B, /* Enrollee Hash Commitment */ | ||
213 | WLP_ATTR_WSS_TAG = 0x200C, /* WSS Tag */ | ||
214 | WLP_ATTR_WSS_VIRT = 0x200D, /* WSS Virtual EUI-48 */ | ||
215 | WLP_ATTR_WLP_ASSC_ERR = 0x200E, /* WLP Association Error */ | ||
216 | WLP_ATTR_VNDR_EXT = 0x200F, /* Vendor Extension */ | ||
217 | }; | ||
218 | |||
219 | /** | ||
220 | * WLP Category ID of primary/secondary device | ||
221 | * WLP Draft 0.99 [6.6.1.8 Table 12] | ||
222 | */ | ||
223 | enum wlp_dev_category_id { | ||
224 | WLP_DEV_CAT_COMPUTER = 1, | ||
225 | WLP_DEV_CAT_INPUT, | ||
226 | WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER, | ||
227 | WLP_DEV_CAT_CAMERA, | ||
228 | WLP_DEV_CAT_STORAGE, | ||
229 | WLP_DEV_CAT_INFRASTRUCTURE, | ||
230 | WLP_DEV_CAT_DISPLAY, | ||
231 | WLP_DEV_CAT_MULTIM, | ||
232 | WLP_DEV_CAT_GAMING, | ||
233 | WLP_DEV_CAT_TELEPHONE, | ||
234 | WLP_DEV_CAT_OTHER = 65535, | ||
235 | }; | ||
236 | |||
237 | /** | ||
238 | * WLP WSS selection method | ||
239 | * WLP Draft 0.99 [6.6.1.6 Table 10] | ||
240 | */ | ||
241 | enum wlp_wss_sel_mthd { | ||
242 | WLP_WSS_ENRL_SELECT = 1, /* Enrollee selects */ | ||
243 | WLP_WSS_REG_SELECT, /* Registrar selects */ | ||
244 | }; | ||
245 | |||
246 | /** | ||
247 | * WLP association error values | ||
248 | * WLP Draft 0.99 [6.6.1.5 Table 9] | ||
249 | */ | ||
250 | enum wlp_assc_error { | ||
251 | WLP_ASSOC_ERROR_NONE, | ||
252 | WLP_ASSOC_ERROR_AUTH, /* Authenticator Failure */ | ||
253 | WLP_ASSOC_ERROR_ROGUE, /* Rogue activity suspected */ | ||
254 | WLP_ASSOC_ERROR_BUSY, /* Device busy */ | ||
255 | WLP_ASSOC_ERROR_LOCK, /* Setup Locked */ | ||
256 | WLP_ASSOC_ERROR_NOT_READY, /* Registrar not ready */ | ||
257 | WLP_ASSOC_ERROR_INV, /* Invalid WSS selection */ | ||
258 | WLP_ASSOC_ERROR_MSG_TIME, /* Message timeout */ | ||
259 | WLP_ASSOC_ERROR_ENR_TIME, /* Enrollment session timeout */ | ||
260 | WLP_ASSOC_ERROR_PW, /* Device password invalid */ | ||
261 | WLP_ASSOC_ERROR_VER, /* Unsupported version */ | ||
262 | WLP_ASSOC_ERROR_INT, /* Internal error */ | ||
263 | WLP_ASSOC_ERROR_UNDEF, /* Undefined error */ | ||
264 | WLP_ASSOC_ERROR_NUM, /* Numeric comparison failure */ | ||
265 | WLP_ASSOC_ERROR_WAIT, /* Waiting for user input */ | ||
266 | }; | ||
267 | |||
268 | /** | ||
269 | * WLP Parameters | ||
270 | * WLP 0.99 [7.7] | ||
271 | */ | ||
272 | enum wlp_parameters { | ||
273 | WLP_PER_MSG_TIMEOUT = 15, /* Seconds to wait for response to | ||
274 | association message. */ | ||
275 | }; | ||
276 | |||
277 | /** | ||
278 | * WLP IE | ||
279 | * | ||
280 | * The WLP IE should be included in beacons by all devices. | ||
281 | * | ||
282 | * The driver can set only a few of the fields in this information element, | ||
283 | * most fields are managed by the device itself. When the driver needs to set | ||
284 | * a field it will only provide values for the fields of interest, the rest | ||
285 | * will be filled with zeroes. The fields of interest are: | ||
286 | * | ||
287 | * Element ID | ||
288 | * Length | ||
289 | * Capabilities (only to include WSSID Hash list length) | ||
290 | * WSSID Hash List fields | ||
291 | * | ||
292 | * WLP 0.99 [6.7] | ||
293 | * | ||
294 | * Only the fields that will be used are detailed in this structure, rest | ||
295 | * are not detailed or marked as "notused". | ||
296 | */ | ||
297 | struct wlp_ie { | ||
298 | struct uwb_ie_hdr hdr; | ||
299 | __le16 capabilities; | ||
300 | __le16 cycle_param; | ||
301 | __le16 acw_anchor_addr; | ||
302 | u8 wssid_hash_list[]; | ||
303 | } __attribute__((packed)); | ||
304 | |||
305 | static inline int wlp_ie_hash_length(struct wlp_ie *ie) | ||
306 | { | ||
307 | return (le16_to_cpu(ie->capabilities) >> 12) & 0xf; | ||
308 | } | ||
309 | |||
310 | static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length) | ||
311 | { | ||
312 | u16 caps = le16_to_cpu(ie->capabilities); | ||
313 | caps = (caps & ~(0xf << 12)) | (hash_length << 12); | ||
314 | ie->capabilities = cpu_to_le16(caps); | ||
315 | } | ||
316 | |||
317 | /** | ||
318 | * WLP nonce | ||
319 | * WLP Draft 0.99 [6.6.1 Table 6] | ||
320 | * | ||
321 | * A 128-bit random number often used (E-SNonce1, E-SNonce2, Enrollee | ||
322 | * Nonce, Registrar Nonce, R-SNonce1, R-SNonce2). It is passed to HW so | ||
323 | * it is packed. | ||
324 | */ | ||
325 | struct wlp_nonce { | ||
326 | u8 data[16]; | ||
327 | } __attribute__((packed)); | ||
328 | |||
329 | /** | ||
330 | * WLP UUID | ||
331 | * WLP Draft 0.99 [6.6.1 Table 6] | ||
332 | * | ||
333 | * Universally Unique Identifier (UUID) encoded as an octet string in the | ||
334 | * order the octets are shown in string representation in RFC4122. A UUID | ||
335 | * is often used (UUID-E, UUID-R, WSSID). It is passed to HW so it is packed. | ||
336 | */ | ||
337 | struct wlp_uuid { | ||
338 | u8 data[16]; | ||
339 | } __attribute__((packed)); | ||
340 | |||
341 | |||
342 | /** | ||
343 | * Primary and secondary device type attributes | ||
344 | * WLP Draft 0.99 [6.6.1.8] | ||
345 | */ | ||
346 | struct wlp_dev_type { | ||
347 | enum wlp_dev_category_id category:16; | ||
348 | u8 OUI[3]; | ||
349 | u8 OUIsubdiv; | ||
350 | __le16 subID; | ||
351 | } __attribute__((packed)); | ||
352 | |||
353 | /** | ||
354 | * WLP frame header | ||
355 | * WLP Draft 0.99 [6.2] | ||
356 | */ | ||
357 | struct wlp_frame_hdr { | ||
358 | __le16 mux_hdr; /* WLP_PROTOCOL_ID */ | ||
359 | enum wlp_frame_type type:8; | ||
360 | } __attribute__((packed)); | ||
361 | |||
362 | /** | ||
363 | * WLP attribute field header | ||
364 | * WLP Draft 0.99 [6.6.1] | ||
365 | * | ||
366 | * Header of each attribute found in an association frame | ||
367 | */ | ||
368 | struct wlp_attr_hdr { | ||
369 | __le16 type; | ||
370 | __le16 length; | ||
371 | } __attribute__((packed)); | ||
372 | |||
373 | /** | ||
374 | * Device information commonly used together | ||
375 | * | ||
376 | * Each of these device information elements has a specified range in which it | ||
377 | * should fit (WLP 0.99 [Table 6]). This range provided in the spec does not | ||
378 | * include the termination null '\0' character (when used in the | ||
379 | * association protocol the attribute fields are accompanied | ||
380 | * with a "length" field so the full range from the spec can be used for | ||
381 | * the value). We thus allocate an extra byte to be able to store a string | ||
382 | * of max length with a terminating '\0'. | ||
383 | */ | ||
384 | struct wlp_device_info { | ||
385 | char name[33]; | ||
386 | char model_name[33]; | ||
387 | char manufacturer[65]; | ||
388 | char model_nr[33]; | ||
389 | char serial[33]; | ||
390 | struct wlp_dev_type prim_dev_type; | ||
391 | }; | ||
392 | |||
393 | /** | ||
394 | * Macros for the WLP attributes | ||
395 | * | ||
396 | * There are quite a few attributes (total is 43). The attribute layout can be | ||
397 | * in one of three categories: one value, an array, an enum forced to 8 bits. | ||
398 | * These macros help with their definitions. | ||
399 | */ | ||
400 | #define wlp_attr(type, name) \ | ||
401 | struct wlp_attr_##name { \ | ||
402 | struct wlp_attr_hdr hdr; \ | ||
403 | type name; \ | ||
404 | } __attribute__((packed)); | ||
405 | |||
406 | #define wlp_attr_array(type, name) \ | ||
407 | struct wlp_attr_##name { \ | ||
408 | struct wlp_attr_hdr hdr; \ | ||
409 | type name[]; \ | ||
410 | } __attribute__((packed)); | ||
411 | |||
412 | /** | ||
413 | * WLP association attribute fields | ||
414 | * WLP Draft 0.99 [6.6.1 Table 6] | ||
415 | * | ||
416 | * Attributes appear in same order as the Table in the spec | ||
417 | * FIXME Does not define all attributes yet | ||
418 | */ | ||
419 | |||
420 | /* Device name: Friendly name of sending device */ | ||
421 | wlp_attr_array(u8, dev_name) | ||
422 | |||
423 | /* Enrollee Nonce: Random number generated by enrollee for an enrollment | ||
424 | * session */ | ||
425 | wlp_attr(struct wlp_nonce, enonce) | ||
426 | |||
427 | /* Manufacturer name: Name of manufacturer of the sending device */ | ||
428 | wlp_attr_array(u8, manufacturer) | ||
429 | |||
430 | /* WLP Message Type */ | ||
431 | wlp_attr(u8, msg_type) | ||
432 | |||
433 | /* WLP Model name: Model name of sending device */ | ||
434 | wlp_attr_array(u8, model_name) | ||
435 | |||
436 | /* WLP Model number: Model number of sending device */ | ||
437 | wlp_attr_array(u8, model_nr) | ||
438 | |||
439 | /* Registrar Nonce: Random number generated by registrar for an enrollment | ||
440 | * session */ | ||
441 | wlp_attr(struct wlp_nonce, rnonce) | ||
442 | |||
443 | /* Serial number of device */ | ||
444 | wlp_attr_array(u8, serial) | ||
445 | |||
446 | /* UUID of enrollee */ | ||
447 | wlp_attr(struct wlp_uuid, uuid_e) | ||
448 | |||
449 | /* UUID of registrar */ | ||
450 | wlp_attr(struct wlp_uuid, uuid_r) | ||
451 | |||
452 | /* WLP Primary device type */ | ||
453 | wlp_attr(struct wlp_dev_type, prim_dev_type) | ||
454 | |||
455 | /* WLP Secondary device type */ | ||
456 | wlp_attr(struct wlp_dev_type, sec_dev_type) | ||
457 | |||
458 | /* WLP protocol version */ | ||
459 | wlp_attr(u8, version) | ||
460 | |||
461 | /* WLP service set identifier */ | ||
462 | wlp_attr(struct wlp_uuid, wssid) | ||
463 | |||
464 | /* WLP WSS name */ | ||
465 | wlp_attr_array(u8, wss_name) | ||
466 | |||
467 | /* WLP WSS Secure Status */ | ||
468 | wlp_attr(u8, wss_sec_status) | ||
469 | |||
470 | /* WSS Broadcast Address */ | ||
471 | wlp_attr(struct uwb_mac_addr, wss_bcast) | ||
472 | |||
473 | /* WLP Accepting Enrollment */ | ||
474 | wlp_attr(u8, accept_enrl) | ||
475 | |||
476 | /** | ||
477 | * WSS information attributes | ||
478 | * WLP Draft 0.99 [6.6.3 Table 15] | ||
479 | */ | ||
480 | struct wlp_wss_info { | ||
481 | struct wlp_attr_wssid wssid; | ||
482 | struct wlp_attr_wss_name name; | ||
483 | struct wlp_attr_accept_enrl accept; | ||
484 | struct wlp_attr_wss_sec_status sec_stat; | ||
485 | struct wlp_attr_wss_bcast bcast; | ||
486 | } __attribute__((packed)); | ||
487 | |||
488 | /* WLP WSS Information */ | ||
489 | wlp_attr_array(struct wlp_wss_info, wss_info) | ||
490 | |||
491 | /* WLP WSS Selection method */ | ||
492 | wlp_attr(u8, wss_sel_mthd) | ||
493 | |||
494 | /* WLP WSS tag */ | ||
495 | wlp_attr(u8, wss_tag) | ||
496 | |||
497 | /* WSS Virtual Address */ | ||
498 | wlp_attr(struct uwb_mac_addr, wss_virt) | ||
499 | |||
500 | /* WLP association error */ | ||
501 | wlp_attr(u8, wlp_assc_err) | ||
502 | |||
503 | /** | ||
504 | * WLP standard and abbreviated frames | ||
505 | * | ||
506 | * WLP Draft 0.99 [6.3] and [6.4] | ||
507 | * | ||
508 | * The difference between the WLP standard frame and the WLP | ||
509 | * abbreviated frame is that the standard frame includes the src | ||
510 | * and dest addresses from the Ethernet header, the abbreviated frame does | ||
511 | * not. | ||
512 | * The src/dest (as well as the type/length and client data) are already | ||
513 | * defined as part of the Ethernet header, we do not do this here. | ||
514 | * From this perspective the standard and abbreviated frames appear the | ||
515 | * same - they will be treated differently though. | ||
516 | * | ||
517 | * The size of this header is also captured in WLP_DATA_HLEN to enable | ||
518 | * interfaces to prepare their headroom. | ||
519 | */ | ||
520 | struct wlp_frame_std_abbrv_hdr { | ||
521 | struct wlp_frame_hdr hdr; | ||
522 | u8 tag; | ||
523 | } __attribute__((packed)); | ||
524 | |||
525 | /** | ||
526 | * WLP association frames | ||
527 | * | ||
528 | * WLP Draft 0.99 [6.6] | ||
529 | */ | ||
530 | struct wlp_frame_assoc { | ||
531 | struct wlp_frame_hdr hdr; | ||
532 | enum wlp_assoc_type type:8; | ||
533 | struct wlp_attr_version version; | ||
534 | struct wlp_attr_msg_type msg_type; | ||
535 | u8 attr[]; | ||
536 | } __attribute__((packed)); | ||
537 | |||
538 | /* Ethernet to dev address mapping */ | ||
539 | struct wlp_eda { | ||
540 | spinlock_t lock; | ||
541 | struct list_head cache; /* Eth<->Dev Addr cache */ | ||
542 | }; | ||
543 | |||
544 | /** | ||
545 | * WSS information temporary storage | ||
546 | * | ||
547 | * This information is only stored temporarily during discovery. It should | ||
548 | * not be stored unless the device is enrolled in the advertised WSS. This | ||
549 | * is done mainly because we follow the letter of the spec in this regard. | ||
550 | * See WLP 0.99 [7.2.3]. | ||
551 | * When the device does become enrolled in a WSS the WSS information will | ||
552 | * be stored as part of the more comprehensive struct wlp_wss. | ||
553 | */ | ||
554 | struct wlp_wss_tmp_info { | ||
555 | char name[WLP_WSS_NAME_SIZE]; | ||
556 | u8 accept_enroll; | ||
557 | u8 sec_status; | ||
558 | struct uwb_mac_addr bcast; | ||
559 | }; | ||
560 | |||
561 | struct wlp_wssid_e { | ||
562 | struct list_head node; | ||
563 | struct wlp_uuid wssid; | ||
564 | struct wlp_wss_tmp_info *info; | ||
565 | }; | ||
566 | |||
567 | /** | ||
568 | * A cache entry of WLP neighborhood | ||
569 | * | ||
570 | * @node: head of list is wlp->neighbors | ||
571 | * @wssid: list of wssids of this neighbor, element is wlp_wssid_e | ||
572 | * @info: temporary storage for information learned during discovery. This | ||
573 | * storage is used together with the wssid_e temporary storage | ||
574 | * during discovery. | ||
575 | */ | ||
576 | struct wlp_neighbor_e { | ||
577 | struct list_head node; | ||
578 | struct wlp_uuid uuid; | ||
579 | struct uwb_dev *uwb_dev; | ||
580 | struct list_head wssid; /* Elements are wlp_wssid_e */ | ||
581 | struct wlp_device_info *info; | ||
582 | }; | ||
583 | |||
584 | struct wlp; | ||
585 | /** | ||
586 | * Information for an association session in progress. | ||
587 | * | ||
588 | * @exp_message: The type of the expected message. Both this message and a | ||
589 | * F0 message (which can be sent in response to any | ||
590 | * association frame) will be accepted as a valid message for | ||
591 | * this session. | ||
592 | * @cb: The function that will be called upon receipt of this | ||
593 | * message. | ||
594 | * @cb_priv: Private data of callback | ||
595 | * @data: Data used in association process (always a sk_buff?) | ||
596 | * @neighbor: Address of neighbor with which association session is in | ||
597 | * progress. | ||
598 | */ | ||
599 | struct wlp_session { | ||
600 | enum wlp_assoc_type exp_message; | ||
601 | void (*cb)(struct wlp *); | ||
602 | void *cb_priv; | ||
603 | void *data; | ||
604 | struct uwb_dev_addr neighbor_addr; | ||
605 | }; | ||
606 | |||
607 | /** | ||
608 | * WLP Service Set | ||
609 | * | ||
610 | * @mutex: used to protect entire WSS structure. | ||
611 | * | ||
612 | * @name: The WSS name is set to 65 bytes, 1 byte larger than the maximum | ||
613 | * allowed by the WLP spec. This is to have a null terminated string | ||
614 | * for display to the user. A maximum of 64 bytes will still be used | ||
615 | * when placing the WSS name field in association frames. | ||
616 | * | ||
617 | * @accept_enroll: Accepting enrollment: Set to one if registrar is | ||
618 | * accepting enrollment in WSS, or zero otherwise. | ||
619 | * | ||
620 | * Global and local information for each WSS in which we are enrolled. | ||
621 | * WLP 0.99 Section 7.2.1 and Section 7.2.2 | ||
622 | */ | ||
623 | struct wlp_wss { | ||
624 | struct mutex mutex; | ||
625 | struct kobject kobj; | ||
626 | /* Global properties. */ | ||
627 | struct wlp_uuid wssid; | ||
628 | u8 hash; | ||
629 | char name[WLP_WSS_NAME_SIZE]; | ||
630 | struct uwb_mac_addr bcast; | ||
631 | u8 secure_status:1; | ||
632 | u8 master_key[16]; | ||
633 | /* Local properties. */ | ||
634 | u8 tag; | ||
635 | struct uwb_mac_addr virtual_addr; | ||
636 | /* Extra */ | ||
637 | u8 accept_enroll:1; | ||
638 | enum wlp_wss_state state; | ||
639 | }; | ||
640 | |||
641 | /** | ||
642 | * WLP main structure | ||
643 | * @mutex: protect changes to WLP structure. We only allow changes to the | ||
644 | * uuid, so currently this mutex only protects this field. | ||
645 | */ | ||
646 | struct wlp { | ||
647 | struct mutex mutex; | ||
648 | struct uwb_rc *rc; /* UWB radio controller */ | ||
649 | struct uwb_pal pal; | ||
650 | struct wlp_eda eda; | ||
651 | struct wlp_uuid uuid; | ||
652 | struct wlp_session *session; | ||
653 | struct wlp_wss wss; | ||
654 | struct mutex nbmutex; /* Neighbor mutex protects neighbors list */ | ||
655 | struct list_head neighbors; /* Elements are wlp_neighbor_e */ | ||
656 | struct uwb_notifs_handler uwb_notifs_handler; | ||
657 | struct wlp_device_info *dev_info; | ||
658 | void (*fill_device_info)(struct wlp *wlp, struct wlp_device_info *info); | ||
659 | int (*xmit_frame)(struct wlp *, struct sk_buff *, | ||
660 | struct uwb_dev_addr *); | ||
661 | void (*stop_queue)(struct wlp *); | ||
662 | void (*start_queue)(struct wlp *); | ||
663 | }; | ||
664 | |||
665 | /* sysfs */ | ||
666 | |||
667 | |||
668 | struct wlp_wss_attribute { | ||
669 | struct attribute attr; | ||
670 | ssize_t (*show)(struct wlp_wss *wss, char *buf); | ||
671 | ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count); | ||
672 | }; | ||
673 | |||
674 | #define WSS_ATTR(_name, _mode, _show, _store) \ | ||
675 | static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ | ||
676 | _show, _store) | ||
677 | |||
678 | extern int wlp_setup(struct wlp *, struct uwb_rc *); | ||
679 | extern void wlp_remove(struct wlp *); | ||
680 | extern ssize_t wlp_neighborhood_show(struct wlp *, char *); | ||
681 | extern int wlp_wss_setup(struct net_device *, struct wlp_wss *); | ||
682 | extern void wlp_wss_remove(struct wlp_wss *); | ||
683 | extern ssize_t wlp_wss_activate_show(struct wlp_wss *, char *); | ||
684 | extern ssize_t wlp_wss_activate_store(struct wlp_wss *, const char *, size_t); | ||
685 | extern ssize_t wlp_eda_show(struct wlp *, char *); | ||
686 | extern ssize_t wlp_eda_store(struct wlp *, const char *, size_t); | ||
687 | extern ssize_t wlp_uuid_show(struct wlp *, char *); | ||
688 | extern ssize_t wlp_uuid_store(struct wlp *, const char *, size_t); | ||
689 | extern ssize_t wlp_dev_name_show(struct wlp *, char *); | ||
690 | extern ssize_t wlp_dev_name_store(struct wlp *, const char *, size_t); | ||
691 | extern ssize_t wlp_dev_manufacturer_show(struct wlp *, char *); | ||
692 | extern ssize_t wlp_dev_manufacturer_store(struct wlp *, const char *, size_t); | ||
693 | extern ssize_t wlp_dev_model_name_show(struct wlp *, char *); | ||
694 | extern ssize_t wlp_dev_model_name_store(struct wlp *, const char *, size_t); | ||
695 | extern ssize_t wlp_dev_model_nr_show(struct wlp *, char *); | ||
696 | extern ssize_t wlp_dev_model_nr_store(struct wlp *, const char *, size_t); | ||
697 | extern ssize_t wlp_dev_serial_show(struct wlp *, char *); | ||
698 | extern ssize_t wlp_dev_serial_store(struct wlp *, const char *, size_t); | ||
699 | extern ssize_t wlp_dev_prim_category_show(struct wlp *, char *); | ||
700 | extern ssize_t wlp_dev_prim_category_store(struct wlp *, const char *, | ||
701 | size_t); | ||
702 | extern ssize_t wlp_dev_prim_OUI_show(struct wlp *, char *); | ||
703 | extern ssize_t wlp_dev_prim_OUI_store(struct wlp *, const char *, size_t); | ||
704 | extern ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *, char *); | ||
705 | extern ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *, const char *, | ||
706 | size_t); | ||
707 | extern ssize_t wlp_dev_prim_subcat_show(struct wlp *, char *); | ||
708 | extern ssize_t wlp_dev_prim_subcat_store(struct wlp *, const char *, | ||
709 | size_t); | ||
710 | extern int wlp_receive_frame(struct device *, struct wlp *, struct sk_buff *, | ||
711 | struct uwb_dev_addr *); | ||
712 | extern int wlp_prepare_tx_frame(struct device *, struct wlp *, | ||
713 | struct sk_buff *, struct uwb_dev_addr *); | ||
714 | void wlp_reset_all(struct wlp *wlp); | ||
715 | |||
716 | /** | ||
717 | * Initialize WSS | ||
718 | */ | ||
719 | static inline | ||
720 | void wlp_wss_init(struct wlp_wss *wss) | ||
721 | { | ||
722 | mutex_init(&wss->mutex); | ||
723 | } | ||
724 | |||
725 | static inline | ||
726 | void wlp_init(struct wlp *wlp) | ||
727 | { | ||
728 | INIT_LIST_HEAD(&wlp->neighbors); | ||
729 | mutex_init(&wlp->mutex); | ||
730 | mutex_init(&wlp->nbmutex); | ||
731 | wlp_wss_init(&wlp->wss); | ||
732 | } | ||
733 | |||
734 | |||
735 | #endif /* #ifndef __LINUX__WLP_H_ */ | ||
diff --git a/init/main.c b/init/main.c index 3e17a3bafe60..672ae75b2059 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/key.h> | 52 | #include <linux/key.h> |
53 | #include <linux/unwind.h> | 53 | #include <linux/unwind.h> |
54 | #include <linux/buffer_head.h> | 54 | #include <linux/buffer_head.h> |
55 | #include <linux/page_cgroup.h> | ||
55 | #include <linux/debug_locks.h> | 56 | #include <linux/debug_locks.h> |
56 | #include <linux/debugobjects.h> | 57 | #include <linux/debugobjects.h> |
57 | #include <linux/lockdep.h> | 58 | #include <linux/lockdep.h> |
@@ -647,6 +648,7 @@ asmlinkage void __init start_kernel(void) | |||
647 | vmalloc_init(); | 648 | vmalloc_init(); |
648 | vfs_caches_init_early(); | 649 | vfs_caches_init_early(); |
649 | cpuset_init_early(); | 650 | cpuset_init_early(); |
651 | page_cgroup_init(); | ||
650 | mem_init(); | 652 | mem_init(); |
651 | enable_debug_pagealloc(); | 653 | enable_debug_pagealloc(); |
652 | cpu_hotplug_init(); | 654 | cpu_hotplug_init(); |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 4895fde4eb93..10b5092e9bfe 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -76,6 +76,7 @@ void dynamic_irq_cleanup(unsigned int irq) | |||
76 | desc->chip_data = NULL; | 76 | desc->chip_data = NULL; |
77 | desc->handle_irq = handle_bad_irq; | 77 | desc->handle_irq = handle_bad_irq; |
78 | desc->chip = &no_irq_chip; | 78 | desc->chip = &no_irq_chip; |
79 | desc->name = NULL; | ||
79 | spin_unlock_irqrestore(&desc->lock, flags); | 80 | spin_unlock_irqrestore(&desc->lock, flags); |
80 | } | 81 | } |
81 | 82 | ||
@@ -127,7 +128,7 @@ int set_irq_type(unsigned int irq, unsigned int type) | |||
127 | return 0; | 128 | return 0; |
128 | 129 | ||
129 | spin_lock_irqsave(&desc->lock, flags); | 130 | spin_lock_irqsave(&desc->lock, flags); |
130 | ret = __irq_set_trigger(desc, irq, flags); | 131 | ret = __irq_set_trigger(desc, irq, type); |
131 | spin_unlock_irqrestore(&desc->lock, flags); | 132 | spin_unlock_irqrestore(&desc->lock, flags); |
132 | return ret; | 133 | return ret; |
133 | } | 134 | } |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 467d5940f624..ad63af8b2521 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -119,18 +119,19 @@ static void _rcu_barrier(enum rcu_barrier type) | |||
119 | /* Take cpucontrol mutex to protect against CPU hotplug */ | 119 | /* Take cpucontrol mutex to protect against CPU hotplug */ |
120 | mutex_lock(&rcu_barrier_mutex); | 120 | mutex_lock(&rcu_barrier_mutex); |
121 | init_completion(&rcu_barrier_completion); | 121 | init_completion(&rcu_barrier_completion); |
122 | atomic_set(&rcu_barrier_cpu_count, 0); | ||
123 | /* | 122 | /* |
124 | * The queueing of callbacks in all CPUs must be atomic with | 123 | * Initialize rcu_barrier_cpu_count to 1, then invoke |
125 | * respect to RCU, otherwise one CPU may queue a callback, | 124 | * rcu_barrier_func() on each CPU, so that each CPU also has |
126 | * wait for a grace period, decrement barrier count and call | 125 | * incremented rcu_barrier_cpu_count. Only then is it safe to |
127 | * complete(), while other CPUs have not yet queued anything. | 126 | * decrement rcu_barrier_cpu_count -- otherwise the first CPU |
128 | * So, we need to make sure that grace periods cannot complete | 127 | * might complete its grace period before all of the other CPUs |
129 | * until all the callbacks are queued. | 128 | * did their increment, causing this function to return too |
129 | * early. | ||
130 | */ | 130 | */ |
131 | rcu_read_lock(); | 131 | atomic_set(&rcu_barrier_cpu_count, 1); |
132 | on_each_cpu(rcu_barrier_func, (void *)type, 1); | 132 | on_each_cpu(rcu_barrier_func, (void *)type, 1); |
133 | rcu_read_unlock(); | 133 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) |
134 | complete(&rcu_barrier_completion); | ||
134 | wait_for_completion(&rcu_barrier_completion); | 135 | wait_for_completion(&rcu_barrier_completion); |
135 | mutex_unlock(&rcu_barrier_mutex); | 136 | mutex_unlock(&rcu_barrier_mutex); |
136 | } | 137 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index d906f72b42d2..945a97b9600d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -819,6 +819,13 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; | |||
819 | unsigned int sysctl_sched_shares_ratelimit = 250000; | 819 | unsigned int sysctl_sched_shares_ratelimit = 250000; |
820 | 820 | ||
821 | /* | 821 | /* |
822 | * Inject some fuzzyness into changing the per-cpu group shares | ||
823 | * this avoids remote rq-locks at the expense of fairness. | ||
824 | * default: 4 | ||
825 | */ | ||
826 | unsigned int sysctl_sched_shares_thresh = 4; | ||
827 | |||
828 | /* | ||
822 | * period over which we measure -rt task cpu usage in us. | 829 | * period over which we measure -rt task cpu usage in us. |
823 | * default: 1s | 830 | * default: 1s |
824 | */ | 831 | */ |
@@ -1454,8 +1461,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares); | |||
1454 | * Calculate and set the cpu's group shares. | 1461 | * Calculate and set the cpu's group shares. |
1455 | */ | 1462 | */ |
1456 | static void | 1463 | static void |
1457 | __update_group_shares_cpu(struct task_group *tg, int cpu, | 1464 | update_group_shares_cpu(struct task_group *tg, int cpu, |
1458 | unsigned long sd_shares, unsigned long sd_rq_weight) | 1465 | unsigned long sd_shares, unsigned long sd_rq_weight) |
1459 | { | 1466 | { |
1460 | int boost = 0; | 1467 | int boost = 0; |
1461 | unsigned long shares; | 1468 | unsigned long shares; |
@@ -1486,19 +1493,23 @@ __update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1486 | * | 1493 | * |
1487 | */ | 1494 | */ |
1488 | shares = (sd_shares * rq_weight) / (sd_rq_weight + 1); | 1495 | shares = (sd_shares * rq_weight) / (sd_rq_weight + 1); |
1496 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); | ||
1489 | 1497 | ||
1490 | /* | 1498 | if (abs(shares - tg->se[cpu]->load.weight) > |
1491 | * record the actual number of shares, not the boosted amount. | 1499 | sysctl_sched_shares_thresh) { |
1492 | */ | 1500 | struct rq *rq = cpu_rq(cpu); |
1493 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | 1501 | unsigned long flags; |
1494 | tg->cfs_rq[cpu]->rq_weight = rq_weight; | ||
1495 | 1502 | ||
1496 | if (shares < MIN_SHARES) | 1503 | spin_lock_irqsave(&rq->lock, flags); |
1497 | shares = MIN_SHARES; | 1504 | /* |
1498 | else if (shares > MAX_SHARES) | 1505 | * record the actual number of shares, not the boosted amount. |
1499 | shares = MAX_SHARES; | 1506 | */ |
1507 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | ||
1508 | tg->cfs_rq[cpu]->rq_weight = rq_weight; | ||
1500 | 1509 | ||
1501 | __set_se_shares(tg->se[cpu], shares); | 1510 | __set_se_shares(tg->se[cpu], shares); |
1511 | spin_unlock_irqrestore(&rq->lock, flags); | ||
1512 | } | ||
1502 | } | 1513 | } |
1503 | 1514 | ||
1504 | /* | 1515 | /* |
@@ -1527,14 +1538,8 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1527 | if (!rq_weight) | 1538 | if (!rq_weight) |
1528 | rq_weight = cpus_weight(sd->span) * NICE_0_LOAD; | 1539 | rq_weight = cpus_weight(sd->span) * NICE_0_LOAD; |
1529 | 1540 | ||
1530 | for_each_cpu_mask(i, sd->span) { | 1541 | for_each_cpu_mask(i, sd->span) |
1531 | struct rq *rq = cpu_rq(i); | 1542 | update_group_shares_cpu(tg, i, shares, rq_weight); |
1532 | unsigned long flags; | ||
1533 | |||
1534 | spin_lock_irqsave(&rq->lock, flags); | ||
1535 | __update_group_shares_cpu(tg, i, shares, rq_weight); | ||
1536 | spin_unlock_irqrestore(&rq->lock, flags); | ||
1537 | } | ||
1538 | 1543 | ||
1539 | return 0; | 1544 | return 0; |
1540 | } | 1545 | } |
@@ -4443,12 +4448,8 @@ need_resched_nonpreemptible: | |||
4443 | if (sched_feat(HRTICK)) | 4448 | if (sched_feat(HRTICK)) |
4444 | hrtick_clear(rq); | 4449 | hrtick_clear(rq); |
4445 | 4450 | ||
4446 | /* | 4451 | spin_lock_irq(&rq->lock); |
4447 | * Do the rq-clock update outside the rq lock: | ||
4448 | */ | ||
4449 | local_irq_disable(); | ||
4450 | update_rq_clock(rq); | 4452 | update_rq_clock(rq); |
4451 | spin_lock(&rq->lock); | ||
4452 | clear_tsk_need_resched(prev); | 4453 | clear_tsk_need_resched(prev); |
4453 | 4454 | ||
4454 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { | 4455 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f604dae71316..9573c33688b8 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -73,6 +73,8 @@ unsigned int sysctl_sched_wakeup_granularity = 5000000UL; | |||
73 | 73 | ||
74 | const_debug unsigned int sysctl_sched_migration_cost = 500000UL; | 74 | const_debug unsigned int sysctl_sched_migration_cost = 500000UL; |
75 | 75 | ||
76 | static const struct sched_class fair_sched_class; | ||
77 | |||
76 | /************************************************************** | 78 | /************************************************************** |
77 | * CFS operations on generic schedulable entities: | 79 | * CFS operations on generic schedulable entities: |
78 | */ | 80 | */ |
@@ -334,7 +336,7 @@ int sched_nr_latency_handler(struct ctl_table *table, int write, | |||
334 | #endif | 336 | #endif |
335 | 337 | ||
336 | /* | 338 | /* |
337 | * delta *= w / rw | 339 | * delta *= P[w / rw] |
338 | */ | 340 | */ |
339 | static inline unsigned long | 341 | static inline unsigned long |
340 | calc_delta_weight(unsigned long delta, struct sched_entity *se) | 342 | calc_delta_weight(unsigned long delta, struct sched_entity *se) |
@@ -348,15 +350,13 @@ calc_delta_weight(unsigned long delta, struct sched_entity *se) | |||
348 | } | 350 | } |
349 | 351 | ||
350 | /* | 352 | /* |
351 | * delta *= rw / w | 353 | * delta /= w |
352 | */ | 354 | */ |
353 | static inline unsigned long | 355 | static inline unsigned long |
354 | calc_delta_fair(unsigned long delta, struct sched_entity *se) | 356 | calc_delta_fair(unsigned long delta, struct sched_entity *se) |
355 | { | 357 | { |
356 | for_each_sched_entity(se) { | 358 | if (unlikely(se->load.weight != NICE_0_LOAD)) |
357 | delta = calc_delta_mine(delta, | 359 | delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); |
358 | cfs_rq_of(se)->load.weight, &se->load); | ||
359 | } | ||
360 | 360 | ||
361 | return delta; | 361 | return delta; |
362 | } | 362 | } |
@@ -386,26 +386,26 @@ static u64 __sched_period(unsigned long nr_running) | |||
386 | * We calculate the wall-time slice from the period by taking a part | 386 | * We calculate the wall-time slice from the period by taking a part |
387 | * proportional to the weight. | 387 | * proportional to the weight. |
388 | * | 388 | * |
389 | * s = p*w/rw | 389 | * s = p*P[w/rw] |
390 | */ | 390 | */ |
391 | static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) | 391 | static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) |
392 | { | 392 | { |
393 | return calc_delta_weight(__sched_period(cfs_rq->nr_running), se); | 393 | unsigned long nr_running = cfs_rq->nr_running; |
394 | |||
395 | if (unlikely(!se->on_rq)) | ||
396 | nr_running++; | ||
397 | |||
398 | return calc_delta_weight(__sched_period(nr_running), se); | ||
394 | } | 399 | } |
395 | 400 | ||
396 | /* | 401 | /* |
397 | * We calculate the vruntime slice of a to be inserted task | 402 | * We calculate the vruntime slice of a to be inserted task |
398 | * | 403 | * |
399 | * vs = s*rw/w = p | 404 | * vs = s/w |
400 | */ | 405 | */ |
401 | static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) | 406 | static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) |
402 | { | 407 | { |
403 | unsigned long nr_running = cfs_rq->nr_running; | 408 | return calc_delta_fair(sched_slice(cfs_rq, se), se); |
404 | |||
405 | if (!se->on_rq) | ||
406 | nr_running++; | ||
407 | |||
408 | return __sched_period(nr_running); | ||
409 | } | 409 | } |
410 | 410 | ||
411 | /* | 411 | /* |
@@ -628,7 +628,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) | |||
628 | * stays open at the end. | 628 | * stays open at the end. |
629 | */ | 629 | */ |
630 | if (initial && sched_feat(START_DEBIT)) | 630 | if (initial && sched_feat(START_DEBIT)) |
631 | vruntime += sched_vslice_add(cfs_rq, se); | 631 | vruntime += sched_vslice(cfs_rq, se); |
632 | 632 | ||
633 | if (!initial) { | 633 | if (!initial) { |
634 | /* sleeps upto a single latency don't count. */ | 634 | /* sleeps upto a single latency don't count. */ |
@@ -748,7 +748,7 @@ pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
748 | struct rq *rq = rq_of(cfs_rq); | 748 | struct rq *rq = rq_of(cfs_rq); |
749 | u64 pair_slice = rq->clock - cfs_rq->pair_start; | 749 | u64 pair_slice = rq->clock - cfs_rq->pair_start; |
750 | 750 | ||
751 | if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) { | 751 | if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) { |
752 | cfs_rq->pair_start = rq->clock; | 752 | cfs_rq->pair_start = rq->clock; |
753 | return se; | 753 | return se; |
754 | } | 754 | } |
@@ -849,11 +849,31 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | |||
849 | hrtick_start(rq, delta); | 849 | hrtick_start(rq, delta); |
850 | } | 850 | } |
851 | } | 851 | } |
852 | |||
853 | /* | ||
854 | * called from enqueue/dequeue and updates the hrtick when the | ||
855 | * current task is from our class and nr_running is low enough | ||
856 | * to matter. | ||
857 | */ | ||
858 | static void hrtick_update(struct rq *rq) | ||
859 | { | ||
860 | struct task_struct *curr = rq->curr; | ||
861 | |||
862 | if (curr->sched_class != &fair_sched_class) | ||
863 | return; | ||
864 | |||
865 | if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) | ||
866 | hrtick_start_fair(rq, curr); | ||
867 | } | ||
852 | #else /* !CONFIG_SCHED_HRTICK */ | 868 | #else /* !CONFIG_SCHED_HRTICK */ |
853 | static inline void | 869 | static inline void |
854 | hrtick_start_fair(struct rq *rq, struct task_struct *p) | 870 | hrtick_start_fair(struct rq *rq, struct task_struct *p) |
855 | { | 871 | { |
856 | } | 872 | } |
873 | |||
874 | static inline void hrtick_update(struct rq *rq) | ||
875 | { | ||
876 | } | ||
857 | #endif | 877 | #endif |
858 | 878 | ||
859 | /* | 879 | /* |
@@ -874,7 +894,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) | |||
874 | wakeup = 1; | 894 | wakeup = 1; |
875 | } | 895 | } |
876 | 896 | ||
877 | hrtick_start_fair(rq, rq->curr); | 897 | hrtick_update(rq); |
878 | } | 898 | } |
879 | 899 | ||
880 | /* | 900 | /* |
@@ -896,7 +916,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) | |||
896 | sleep = 1; | 916 | sleep = 1; |
897 | } | 917 | } |
898 | 918 | ||
899 | hrtick_start_fair(rq, rq->curr); | 919 | hrtick_update(rq); |
900 | } | 920 | } |
901 | 921 | ||
902 | /* | 922 | /* |
@@ -1002,8 +1022,6 @@ static inline int wake_idle(int cpu, struct task_struct *p) | |||
1002 | 1022 | ||
1003 | #ifdef CONFIG_SMP | 1023 | #ifdef CONFIG_SMP |
1004 | 1024 | ||
1005 | static const struct sched_class fair_sched_class; | ||
1006 | |||
1007 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1025 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1008 | /* | 1026 | /* |
1009 | * effective_load() calculates the load change as seen from the root_task_group | 1027 | * effective_load() calculates the load change as seen from the root_task_group |
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 7c9e8f4a049f..fda016218296 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
@@ -5,7 +5,7 @@ SCHED_FEAT(START_DEBIT, 1) | |||
5 | SCHED_FEAT(AFFINE_WAKEUPS, 1) | 5 | SCHED_FEAT(AFFINE_WAKEUPS, 1) |
6 | SCHED_FEAT(CACHE_HOT_BUDDY, 1) | 6 | SCHED_FEAT(CACHE_HOT_BUDDY, 1) |
7 | SCHED_FEAT(SYNC_WAKEUPS, 1) | 7 | SCHED_FEAT(SYNC_WAKEUPS, 1) |
8 | SCHED_FEAT(HRTICK, 1) | 8 | SCHED_FEAT(HRTICK, 0) |
9 | SCHED_FEAT(DOUBLE_TICK, 0) | 9 | SCHED_FEAT(DOUBLE_TICK, 0) |
10 | SCHED_FEAT(ASYM_GRAN, 1) | 10 | SCHED_FEAT(ASYM_GRAN, 1) |
11 | SCHED_FEAT(LB_BIAS, 1) | 11 | SCHED_FEAT(LB_BIAS, 1) |
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index b8c156979cf2..2df9d297d292 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
@@ -9,7 +9,7 @@ | |||
9 | static int show_schedstat(struct seq_file *seq, void *v) | 9 | static int show_schedstat(struct seq_file *seq, void *v) |
10 | { | 10 | { |
11 | int cpu; | 11 | int cpu; |
12 | int mask_len = NR_CPUS/32 * 9; | 12 | int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9; |
13 | char *mask_str = kmalloc(mask_len, GFP_KERNEL); | 13 | char *mask_str = kmalloc(mask_len, GFP_KERNEL); |
14 | 14 | ||
15 | if (mask_str == NULL) | 15 | if (mask_str == NULL) |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b3cc73931d1f..a13bd4dfaeb1 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -276,6 +276,16 @@ static struct ctl_table kern_table[] = { | |||
276 | }, | 276 | }, |
277 | { | 277 | { |
278 | .ctl_name = CTL_UNNUMBERED, | 278 | .ctl_name = CTL_UNNUMBERED, |
279 | .procname = "sched_shares_thresh", | ||
280 | .data = &sysctl_sched_shares_thresh, | ||
281 | .maxlen = sizeof(unsigned int), | ||
282 | .mode = 0644, | ||
283 | .proc_handler = &proc_dointvec_minmax, | ||
284 | .strategy = &sysctl_intvec, | ||
285 | .extra1 = &zero, | ||
286 | }, | ||
287 | { | ||
288 | .ctl_name = CTL_UNNUMBERED, | ||
279 | .procname = "sched_child_runs_first", | 289 | .procname = "sched_child_runs_first", |
280 | .data = &sysctl_sched_child_runs_first, | 290 | .data = &sysctl_sched_child_runs_first, |
281 | .maxlen = sizeof(unsigned int), | 291 | .maxlen = sizeof(unsigned int), |
diff --git a/lib/bitmap.c b/lib/bitmap.c index 482df94ea21e..1338469ac849 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
@@ -996,3 +996,25 @@ int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) | |||
996 | return 0; | 996 | return 0; |
997 | } | 997 | } |
998 | EXPORT_SYMBOL(bitmap_allocate_region); | 998 | EXPORT_SYMBOL(bitmap_allocate_region); |
999 | |||
1000 | /** | ||
1001 | * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. | ||
1002 | * @dst: destination buffer | ||
1003 | * @src: bitmap to copy | ||
1004 | * @nbits: number of bits in the bitmap | ||
1005 | * | ||
1006 | * Require nbits % BITS_PER_LONG == 0. | ||
1007 | */ | ||
1008 | void bitmap_copy_le(void *dst, const unsigned long *src, int nbits) | ||
1009 | { | ||
1010 | unsigned long *d = dst; | ||
1011 | int i; | ||
1012 | |||
1013 | for (i = 0; i < nbits/BITS_PER_LONG; i++) { | ||
1014 | if (BITS_PER_LONG == 64) | ||
1015 | d[i] = cpu_to_le64(src[i]); | ||
1016 | else | ||
1017 | d[i] = cpu_to_le32(src[i]); | ||
1018 | } | ||
1019 | } | ||
1020 | EXPORT_SYMBOL(bitmap_copy_le); | ||
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d4a92b63e98e..866dcc7eeb0c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1088,7 +1088,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | |||
1088 | int node; | 1088 | int node; |
1089 | 1089 | ||
1090 | if (unlikely((cont->parent) == NULL)) { | 1090 | if (unlikely((cont->parent) == NULL)) { |
1091 | page_cgroup_init(); | ||
1092 | mem = &init_mem_cgroup; | 1091 | mem = &init_mem_cgroup; |
1093 | } else { | 1092 | } else { |
1094 | mem = mem_cgroup_alloc(); | 1093 | mem = mem_cgroup_alloc(); |
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 5d86550701f2..f59d797dc5a9 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c | |||
@@ -4,7 +4,10 @@ | |||
4 | #include <linux/bit_spinlock.h> | 4 | #include <linux/bit_spinlock.h> |
5 | #include <linux/page_cgroup.h> | 5 | #include <linux/page_cgroup.h> |
6 | #include <linux/hash.h> | 6 | #include <linux/hash.h> |
7 | #include <linux/slab.h> | ||
7 | #include <linux/memory.h> | 8 | #include <linux/memory.h> |
9 | #include <linux/vmalloc.h> | ||
10 | #include <linux/cgroup.h> | ||
8 | 11 | ||
9 | static void __meminit | 12 | static void __meminit |
10 | __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn) | 13 | __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn) |
@@ -66,6 +69,9 @@ void __init page_cgroup_init(void) | |||
66 | 69 | ||
67 | int nid, fail; | 70 | int nid, fail; |
68 | 71 | ||
72 | if (mem_cgroup_subsys.disabled) | ||
73 | return; | ||
74 | |||
69 | for_each_online_node(nid) { | 75 | for_each_online_node(nid) { |
70 | fail = alloc_node_page_cgroup(nid); | 76 | fail = alloc_node_page_cgroup(nid); |
71 | if (fail) | 77 | if (fail) |
@@ -106,9 +112,14 @@ int __meminit init_section_page_cgroup(unsigned long pfn) | |||
106 | nid = page_to_nid(pfn_to_page(pfn)); | 112 | nid = page_to_nid(pfn_to_page(pfn)); |
107 | 113 | ||
108 | table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; | 114 | table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; |
109 | base = kmalloc_node(table_size, GFP_KERNEL, nid); | 115 | if (slab_is_available()) { |
110 | if (!base) | 116 | base = kmalloc_node(table_size, GFP_KERNEL, nid); |
111 | base = vmalloc_node(table_size, nid); | 117 | if (!base) |
118 | base = vmalloc_node(table_size, nid); | ||
119 | } else { | ||
120 | base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size, | ||
121 | PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | ||
122 | } | ||
112 | 123 | ||
113 | if (!base) { | 124 | if (!base) { |
114 | printk(KERN_ERR "page cgroup allocation failure\n"); | 125 | printk(KERN_ERR "page cgroup allocation failure\n"); |
@@ -135,11 +146,16 @@ void __free_page_cgroup(unsigned long pfn) | |||
135 | if (!ms || !ms->page_cgroup) | 146 | if (!ms || !ms->page_cgroup) |
136 | return; | 147 | return; |
137 | base = ms->page_cgroup + pfn; | 148 | base = ms->page_cgroup + pfn; |
138 | ms->page_cgroup = NULL; | 149 | if (is_vmalloc_addr(base)) { |
139 | if (is_vmalloc_addr(base)) | ||
140 | vfree(base); | 150 | vfree(base); |
141 | else | 151 | ms->page_cgroup = NULL; |
142 | kfree(base); | 152 | } else { |
153 | struct page *page = virt_to_page(base); | ||
154 | if (!PageReserved(page)) { /* Is bootmem ? */ | ||
155 | kfree(base); | ||
156 | ms->page_cgroup = NULL; | ||
157 | } | ||
158 | } | ||
143 | } | 159 | } |
144 | 160 | ||
145 | int online_page_cgroup(unsigned long start_pfn, | 161 | int online_page_cgroup(unsigned long start_pfn, |
@@ -213,6 +229,9 @@ void __init page_cgroup_init(void) | |||
213 | unsigned long pfn; | 229 | unsigned long pfn; |
214 | int fail = 0; | 230 | int fail = 0; |
215 | 231 | ||
232 | if (mem_cgroup_subsys.disabled) | ||
233 | return; | ||
234 | |||
216 | for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { | 235 | for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { |
217 | if (!pfn_present(pfn)) | 236 | if (!pfn_present(pfn)) |
218 | continue; | 237 | continue; |