-rw-r--r--  drivers/staging/Kconfig                                 |    2
-rw-r--r--  drivers/staging/Makefile                                |    1
-rw-r--r--  drivers/staging/brcm80211/brcmsmac/wlc_main.c           |    9
-rw-r--r--  drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c      |   78
-rw-r--r--  drivers/staging/hv/channel.c                            |    8
-rw-r--r--  drivers/staging/hv/connection.c                         |    4
-rw-r--r--  drivers/staging/hv/hv_mouse.c                           |    5
-rw-r--r--  drivers/staging/hv/netvsc_drv.c                         |   24
-rw-r--r--  drivers/staging/hv/tools/hv_kvp_daemon.c                |   90
-rw-r--r--  drivers/staging/hv/vmbus_drv.c                          |    2
-rw-r--r--  drivers/staging/hv/vmbus_private.h                      |    1
-rw-r--r--  drivers/staging/iio/imu/adis16400.h                     |    3
-rw-r--r--  drivers/staging/iio/imu/adis16400_core.c                |   20
-rw-r--r--  drivers/staging/iio/imu/adis16400_ring.c                |   12
-rw-r--r--  drivers/staging/memrar/Kconfig                          |   15
-rw-r--r--  drivers/staging/memrar/Makefile                         |    2
-rw-r--r--  drivers/staging/memrar/TODO                             |   43
-rw-r--r--  drivers/staging/memrar/memrar-abi                       |   89
-rw-r--r--  drivers/staging/memrar/memrar.h                         |  174
-rw-r--r--  drivers/staging/memrar/memrar_allocator.c               |  432
-rw-r--r--  drivers/staging/memrar/memrar_allocator.h               |  149
-rw-r--r--  drivers/staging/memrar/memrar_handler.c                 | 1007
-rw-r--r--  drivers/staging/olpc_dcon/Kconfig                       |    2
-rw-r--r--  drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c            |    1
-rw-r--r--  drivers/staging/rtl8187se/Kconfig                       |    1
-rw-r--r--  drivers/staging/rtl8192e/Kconfig                        |    1
-rw-r--r--  drivers/staging/rtl8192u/Kconfig                        |    1
-rw-r--r--  drivers/staging/rts_pstor/rtsx.c                        |   14
-rw-r--r--  drivers/staging/rts_pstor/rtsx_chip.c                   |   11
-rw-r--r--  drivers/staging/sep/sep_driver.c                        |   15
-rw-r--r--  drivers/staging/sm7xx/smtcfb.c                          |    6
-rw-r--r--  drivers/staging/usbip/stub_dev.c                        |    6
-rw-r--r--  drivers/staging/usbip/stub_rx.c                         |   40
-rw-r--r--  drivers/staging/usbip/stub_tx.c                         |   74
-rw-r--r--  drivers/staging/usbip/usbip_common.c                    |   64
-rw-r--r--  drivers/staging/usbip/usbip_common.h                    |    2
-rw-r--r--  drivers/staging/usbip/vhci_rx.c                         |    3
-rw-r--r--  drivers/staging/vt6655/Kconfig                          |    2
-rw-r--r--  drivers/staging/vt6656/Kconfig                          |    2
-rw-r--r--  drivers/staging/westbridge/astoria/gadget/cyasgadget.c  |    1
40 files changed, 324 insertions(+), 2092 deletions(-)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 18b43fcb417..dca4a0bb6ca 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -117,8 +117,6 @@ source "drivers/staging/hv/Kconfig"
 
 source "drivers/staging/vme/Kconfig"
 
-source "drivers/staging/memrar/Kconfig"
-
 source "drivers/staging/sep/Kconfig"
 
 source "drivers/staging/iio/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index cfd13cd55ef..eb93012b6f5 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_VT6655) += vt6655/
 obj-$(CONFIG_VT6656)		+= vt6656/
 obj-$(CONFIG_HYPERV)		+= hv/
 obj-$(CONFIG_VME_BUS)		+= vme/
-obj-$(CONFIG_MRST_RAR_HANDLER)	+= memrar/
 obj-$(CONFIG_DX_SEP)		+= sep/
 obj-$(CONFIG_IIO)		+= iio/
 obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio/
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_main.c b/drivers/staging/brcm80211/brcmsmac/wlc_main.c
index 717fced4580..ab7ab850e19 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_main.c
+++ b/drivers/staging/brcm80211/brcmsmac/wlc_main.c
@@ -6283,7 +6283,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
		    ((preamble_type[1] == WLC_MM_PREAMBLE) ==
		     (txh->MModeFbrLen != 0)));
 
-	ac = wme_fifo2ac[queue];
+	ac = skb_get_queue_mapping(p);
 	if (SCB_WME(scb) && qos && wlc->edcf_txop[ac]) {
 		uint frag_dur, dur, dur_fallback;
 
@@ -6919,8 +6919,7 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
 	preamble = 0;
 	if (IS_CCK(rspec)) {
 		if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
-			WL_ERROR("Short CCK\n");
-		rx_status->flag |= RX_FLAG_SHORTPRE;
+			rx_status->flag |= RX_FLAG_SHORTPRE;
 	} else if (IS_OFDM(rspec)) {
 		rx_status->flag |= RX_FLAG_SHORTPRE;
 	} else {
@@ -7079,10 +7078,8 @@ void BCMFASTPATH wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
 	if (ieee80211_is_probe_req(h->frame_control))
 		goto toss;
 
-	if (is_amsdu) {
-		WL_ERROR("%s: is_amsdu causing toss\n", __func__);
+	if (is_amsdu)
 		goto toss;
-	}
 
 	wlc_recvctl(wlc, rxh, p);
 	return;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
index b0729fc3c89..fb375ea26dd 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
@@ -95,47 +95,47 @@ void put_request_value(struct net_device *dev, long lvalue);
 USHORT hdr_checksum(PPSEUDO_HDR pHdr);
 
 typedef struct _DSP_FILE_HDR {
-	long build_date;
-	long dsp_coff_date;
-	long loader_code_address;
-	long loader_code_size;
-	long loader_code_end;
-	long dsp_code_address;
-	long dsp_code_size;
-	long dsp_code_end;
-	long reserved[8];
+	u32  build_date;
+	u32  dsp_coff_date;
+	u32  loader_code_address;
+	u32  loader_code_size;
+	u32  loader_code_end;
+	u32  dsp_code_address;
+	u32  dsp_code_size;
+	u32  dsp_code_end;
+	u32  reserved[8];
 } __attribute__ ((packed)) DSP_FILE_HDR, *PDSP_FILE_HDR;
 
 typedef struct _DSP_FILE_HDR_5 {
-	long version_id;          // Version ID of this image format.
-	long package_id;          // Package ID of code release.
-	long build_date;          // Date/time stamp when file was built.
-	long commands_offset;     // Offset to attached commands in Pseudo Hdr format.
-	long loader_offset;       // Offset to bootloader code.
-	long loader_code_address; // Start address of bootloader.
-	long loader_code_end;     // Where bootloader code ends.
-	long loader_code_size;
-	long version_data_offset; // Offset were scrambled version data begins.
-	long version_data_size;   // Size, in words, of scrambled version data.
-	long nDspImages;          // Number of DSP images in file.
+	u32  version_id;          // Version ID of this image format.
+	u32  package_id;          // Package ID of code release.
+	u32  build_date;          // Date/time stamp when file was built.
+	u32  commands_offset;     // Offset to attached commands in Pseudo Hdr format.
+	u32  loader_offset;       // Offset to bootloader code.
+	u32  loader_code_address; // Start address of bootloader.
+	u32  loader_code_end;     // Where bootloader code ends.
+	u32  loader_code_size;
+	u32  version_data_offset; // Offset were scrambled version data begins.
+	u32  version_data_size;   // Size, in words, of scrambled version data.
+	u32  nDspImages;          // Number of DSP images in file.
 } __attribute__ ((packed)) DSP_FILE_HDR_5, *PDSP_FILE_HDR_5;
 
 typedef struct _DSP_IMAGE_INFO {
-	long coff_date;           // Date/time when DSP Coff image was built.
-	long begin_offset;        // Offset in file where image begins.
-	long end_offset;          // Offset in file where image begins.
-	long run_address;         // On chip Start address of DSP code.
-	long image_size;          // Size of image.
-	long version;             // Embedded version # of DSP code.
+	u32  coff_date;           // Date/time when DSP Coff image was built.
+	u32  begin_offset;        // Offset in file where image begins.
+	u32  end_offset;          // Offset in file where image begins.
+	u32  run_address;         // On chip Start address of DSP code.
+	u32  image_size;          // Size of image.
+	u32  version;             // Embedded version # of DSP code.
 } __attribute__ ((packed)) DSP_IMAGE_INFO, *PDSP_IMAGE_INFO;
 
 typedef struct _DSP_IMAGE_INFO_V6 {
-	long coff_date;           // Date/time when DSP Coff image was built.
-	long begin_offset;        // Offset in file where image begins.
-	long end_offset;          // Offset in file where image begins.
-	long run_address;         // On chip Start address of DSP code.
-	long image_size;          // Size of image.
-	long version;             // Embedded version # of DSP code.
+	u32  coff_date;           // Date/time when DSP Coff image was built.
+	u32  begin_offset;        // Offset in file where image begins.
+	u32  end_offset;          // Offset in file where image begins.
+	u32  run_address;         // On chip Start address of DSP code.
+	u32  image_size;          // Size of image.
+	u32  version;             // Embedded version # of DSP code.
 	unsigned short checksum;  // Dsp File checksum
 	unsigned short pad1;
 } __attribute__ ((packed)) DSP_IMAGE_INFO_V6, *PDSP_IMAGE_INFO_V6;
@@ -846,8 +846,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
 			break;
 
 		case STATE_DONE_DWNLD:
-			if (((UINT) (pUcFile) - (UINT) pFileStart) >=
-			    (UINT) FileLength) {
+			if (((unsigned long) (pUcFile) - (unsigned long) pFileStart) >=
+			    (unsigned long) FileLength) {
 				uiState = STATE_DONE_FILE;
 				break;
 			}
@@ -901,11 +901,11 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
 							 &info->prov_list);
 					// Move to next entry if available
 					pUcFile =
-					    (UCHAR *) ((UINT) pUcFile +
-					    (UINT) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(PSEUDO_HDR));
-					if ((UINT) (pUcFile) -
-					    (UINT) (pFileStart) >=
-					    (UINT) FileLength) {
+					    (UCHAR *) ((unsigned long) pUcFile +
+					    (unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(PSEUDO_HDR));
+					if ((unsigned long) (pUcFile) -
+					    (unsigned long) (pFileStart) >=
+					    (unsigned long) FileLength) {
 						uiState =
 						    STATE_DONE_FILE;
 					}
diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
index 775a52a9122..f7ce7d2494b 100644
--- a/drivers/staging/hv/channel.c
+++ b/drivers/staging/hv/channel.c
@@ -81,14 +81,14 @@ static void vmbus_setevent(struct vmbus_channel *channel)
 
 	if (channel->offermsg.monitor_allocated) {
 		/* Each u32 represents 32 channels */
-		set_bit(channel->offermsg.child_relid & 31,
+		sync_set_bit(channel->offermsg.child_relid & 31,
 			(unsigned long *) vmbus_connection.send_int_page +
 			(channel->offermsg.child_relid >> 5));
 
 		monitorpage = vmbus_connection.monitor_pages;
 		monitorpage++; /* Get the child to parent monitor page */
 
-		set_bit(channel->monitor_bit,
+		sync_set_bit(channel->monitor_bit,
 			(unsigned long *)&monitorpage->trigger_group
 				[channel->monitor_grp].pending);
 
@@ -104,7 +104,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel)
 
 	if (Channel->offermsg.monitor_allocated) {
 		/* Each u32 represents 32 channels */
-		clear_bit(Channel->offermsg.child_relid & 31,
+		sync_clear_bit(Channel->offermsg.child_relid & 31,
 			  (unsigned long *)vmbus_connection.send_int_page +
 			  (Channel->offermsg.child_relid >> 5));
 
@@ -112,7 +112,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel)
 			vmbus_connection.monitor_pages;
 		monitorPage++; /* Get the child to parent monitor page */
 
-		clear_bit(Channel->monitor_bit,
+		sync_clear_bit(Channel->monitor_bit,
 			  (unsigned long *)&monitorPage->trigger_group
 				[Channel->monitor_grp].Pending);
 	}
diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c
index 44b203b95a2..afc8116e7aa 100644
--- a/drivers/staging/hv/connection.c
+++ b/drivers/staging/hv/connection.c
@@ -296,7 +296,7 @@ void vmbus_on_event(unsigned long data)
 		for (dword = 0; dword < maxdword; dword++) {
 			if (recv_int_page[dword]) {
 				for (bit = 0; bit < 32; bit++) {
-					if (test_and_clear_bit(bit,
+					if (sync_test_and_clear_bit(bit,
 						(unsigned long *)
 						&recv_int_page[dword])) {
 						relid = (dword << 5) + bit;
@@ -338,7 +338,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
 int vmbus_set_event(u32 child_relid)
 {
 	/* Each u32 represents 32 channels */
-	set_bit(child_relid & 31,
+	sync_set_bit(child_relid & 31,
 		(unsigned long *)vmbus_connection.send_int_page +
 		(child_relid >> 5));
 
diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c
index 9c6d4d24f88..118c7be2256 100644
--- a/drivers/staging/hv/hv_mouse.c
+++ b/drivers/staging/hv/hv_mouse.c
@@ -14,6 +14,7 @@
  */
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
@@ -375,7 +376,7 @@ static void MousevscOnReceiveDeviceInfo(struct mousevsc_dev *InputDevice, struct
 			desc->desc[0].wDescriptorLength);
 
 	/* Send the ack */
-	memset(&ack, sizeof(struct mousevsc_prt_msg), 0);
+	memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
 
 	ack.type = PipeMessageData;
 	ack.size = sizeof(struct synthhid_device_info_ack);
@@ -596,7 +597,7 @@ static int MousevscConnectToVsp(struct hv_device *Device)
 	/*
 	 * Now, initiate the vsc/vsp initialization protocol on the open channel
 	 */
-	memset(request, sizeof(struct mousevsc_prt_msg), 0);
+	memset(request, 0, sizeof(struct mousevsc_prt_msg));
 
 	request->type = PipeMessageData;
 	request->size = sizeof(struct synthhid_protocol_request);
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 2d40f5f86b2..33973568214 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -46,6 +46,7 @@ struct net_device_context {
 	/* point back to our device context */
 	struct hv_device *device_ctx;
 	unsigned long avail;
+	struct work_struct work;
 };
 
 
@@ -219,6 +220,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
 					    unsigned int status)
 {
 	struct net_device *net = dev_get_drvdata(&device_obj->device);
+	struct net_device_context *ndev_ctx;
 
 	if (!net) {
 		DPRINT_ERR(NETVSC_DRV, "got link status but net device "
@@ -230,6 +232,8 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
 		netif_carrier_on(net);
 		netif_wake_queue(net);
 		netif_notify_peers(net);
+		ndev_ctx = netdev_priv(net);
+		schedule_work(&ndev_ctx->work);
 	} else {
 		netif_carrier_off(net);
 		netif_stop_queue(net);
@@ -328,6 +332,25 @@ static const struct net_device_ops device_ops = {
 	.ndo_set_mac_address =		eth_mac_addr,
 };
 
+/*
+ * Send GARP packet to network peers after migrations.
+ * After Quick Migration, the network is not immediately operational in the
+ * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
+ * another netif_notify_peers() into a scheduled work, otherwise GARP packet
+ * will not be sent after quick migration, and cause network disconnection.
+ */
+static void netvsc_send_garp(struct work_struct *w)
+{
+	struct net_device_context *ndev_ctx;
+	struct net_device *net;
+
+	msleep(20);
+	ndev_ctx = container_of(w, struct net_device_context, work);
+	net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
+	netif_notify_peers(net);
+}
+
+
 static int netvsc_probe(struct device *device)
 {
 	struct hv_driver *drv =
@@ -353,6 +376,7 @@ static int netvsc_probe(struct device *device)
 	net_device_ctx->device_ctx = device_obj;
 	net_device_ctx->avail = ring_size;
 	dev_set_drvdata(device, net);
+	INIT_WORK(&net_device_ctx->work, netvsc_send_garp);
 
 	/* Notify the netvsc driver of the new device */
 	ret = net_drv_obj->base.dev_add(device_obj, &device_info);
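
For context: the deferral added above follows the standard kernel work_struct pattern that the comment in the hunk describes: embed a struct work_struct in the per-device context, bind a handler with INIT_WORK(), schedule it with schedule_work(), and recover the containing context in the handler via container_of(). A minimal, self-contained sketch of that idiom (the demo_* names are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
	int id;
	struct work_struct work;	/* embedded work item */
};

static void demo_work_fn(struct work_struct *w)
{
	/* Recover the surrounding context from the embedded member. */
	struct demo_ctx *ctx = container_of(w, struct demo_ctx, work);

	pr_info("deferred work for ctx %d\n", ctx->id);
}

static struct demo_ctx *demo_ctx_create(int id)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->id = id;
	INIT_WORK(&ctx->work, demo_work_fn);	/* bind handler once */
	return ctx;
}

/* Later, from a context that should not block: */
static void demo_kick(struct demo_ctx *ctx)
{
	schedule_work(&ctx->work);	/* demo_work_fn() runs in process context */
}
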
diff --git a/drivers/staging/hv/tools/hv_kvp_daemon.c b/drivers/staging/hv/tools/hv_kvp_daemon.c
index aa77a971aac..33f0f1c8ad7 100644
--- a/drivers/staging/hv/tools/hv_kvp_daemon.c
+++ b/drivers/staging/hv/tools/hv_kvp_daemon.c
@@ -102,22 +102,22 @@ static char kvp_send_buffer[4096];
 static char kvp_recv_buffer[4096];
 static struct sockaddr_nl addr;
 
-static char os_name[100];
-static char os_major[50];
-static char os_minor[50];
-static char processor_arch[50];
-static char os_build[100];
+static char *os_name = "";
+static char *os_major = "";
+static char *os_minor = "";
+static char *processor_arch;
+static char *os_build;
 static char *lic_version;
+static struct utsname uts_buf;
 
 void kvp_get_os_info(void)
 {
 	FILE	*file;
-	char	*eol;
-	struct utsname buf;
+	char	*p, buf[512];
 
-	uname(&buf);
-	strcpy(os_build, buf.release);
-	strcpy(processor_arch, buf.machine);
+	uname(&uts_buf);
+	os_build = uts_buf.release;
+	processor_arch= uts_buf.machine;
 
 	file = fopen("/etc/SuSE-release", "r");
 	if (file != NULL)
@@ -132,21 +132,46 @@ void kvp_get_os_info(void)
 	/*
 	 * We don't have information about the os.
 	 */
-	strcpy(os_name, "Linux");
-	strcpy(os_major, "0");
-	strcpy(os_minor, "0");
+	os_name = uts_buf.sysname;
 	return;
 
 kvp_osinfo_found:
-	fgets(os_name, 99, file);
-	eol = index(os_name, '\n');
-	*eol = '\0';
-	fgets(os_major, 49, file);
-	eol = index(os_major, '\n');
-	*eol = '\0';
-	fgets(os_minor, 49, file);
-	eol = index(os_minor, '\n');
-	*eol = '\0';
+	/* up to three lines */
+	p = fgets(buf, sizeof(buf), file);
+	if (p) {
+		p = strchr(buf, '\n');
+		if (p)
+			*p = '\0';
+		p = strdup(buf);
+		if (!p)
+			goto done;
+		os_name = p;
+
+		/* second line */
+		p = fgets(buf, sizeof(buf), file);
+		if (p) {
+			p = strchr(buf, '\n');
+			if (p)
+				*p = '\0';
+			p = strdup(buf);
+			if (!p)
+				goto done;
+			os_major = p;
+
+			/* third line */
+			p = fgets(buf, sizeof(buf), file);
+			if (p) {
+				p = strchr(buf, '\n');
+				if (p)
+					*p = '\0';
+				p = strdup(buf);
+				if (p)
+					os_minor = p;
+			}
+		}
+	}
+
+done:
 	fclose(file);
 	return;
 }
@@ -293,7 +318,7 @@ netlink_send(int fd, struct cn_msg *msg)
 	return sendmsg(fd, &message, 0);
 }
 
-main(void)
+int main(void)
 {
 	int fd, len, sock_opt;
 	int error;
@@ -301,9 +326,10 @@ main(void)
 	struct pollfd pfd;
 	struct nlmsghdr *incoming_msg;
 	struct cn_msg	*incoming_cn_msg;
+	struct hv_ku_msg *hv_msg;
+	char	*p;
 	char	*key_value;
 	char	*key_name;
-	int	key_index;
 
 	daemon(1, 0);
 	openlog("KVP", 0, LOG_USER);
@@ -373,9 +399,10 @@ main(void)
 			 * Driver is registering with us; stash away the version
 			 * information.
 			 */
-			lic_version = malloc(strlen(incoming_cn_msg->data) + 1);
+			p = (char *)incoming_cn_msg->data;
+			lic_version = malloc(strlen(p) + 1);
 			if (lic_version) {
-				strcpy(lic_version, incoming_cn_msg->data);
+				strcpy(lic_version, p);
 				syslog(LOG_INFO, "KVP LIC Version: %s",
 					lic_version);
 			} else {
@@ -389,14 +416,11 @@ main(void)
 			continue;
 		}
 
-		key_index =
-			((struct hv_ku_msg *)incoming_cn_msg->data)->kvp_index;
-		key_name =
-			((struct hv_ku_msg *)incoming_cn_msg->data)->kvp_key;
-		key_value =
-			((struct hv_ku_msg *)incoming_cn_msg->data)->kvp_value;
+		hv_msg = (struct hv_ku_msg *)incoming_cn_msg->data;
+		key_name = (char *)hv_msg->kvp_key;
+		key_value = (char *)hv_msg->kvp_value;
 
-		switch (key_index) {
+		switch (hv_msg->kvp_index) {
 		case FullyQualifiedDomainName:
 			kvp_get_domain_name(key_value,
 				HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
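
For context: the hv_kvp_daemon.c change above drops the fixed-size buffers and unchecked fgets()/index() calls in favor of a bounded fgets() into a stack buffer followed by strdup(), checking every return value. A small stand-alone illustration of that read-one-line-and-duplicate idiom in plain C (the file name here is only an example, not taken from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read the first line of a file and return a heap copy, or NULL. */
static char *read_first_line(const char *path)
{
	FILE *file = fopen(path, "r");
	char buf[512];
	char *p, *line = NULL;

	if (file == NULL)
		return NULL;

	p = fgets(buf, sizeof(buf), file);
	if (p) {
		p = strchr(buf, '\n');	/* strip the trailing newline, if any */
		if (p)
			*p = '\0';
		line = strdup(buf);	/* may still be NULL on ENOMEM */
	}
	fclose(file);
	return line;
}

int main(void)
{
	char *name = read_first_line("/etc/os-release");

	if (name)
		printf("%s\n", name);
	free(name);
	return 0;
}
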
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
index b473f468dd8..79089f85d90 100644
--- a/drivers/staging/hv/vmbus_drv.c
+++ b/drivers/staging/hv/vmbus_drv.c
@@ -254,7 +254,7 @@ static int vmbus_on_isr(void)
 	event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
 
 	/* Since we are a child, we only need to check bit 0 */
-	if (test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+	if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
 		DPRINT_DBG(VMBUS, "received event %d", event->flags32[0]);
 		ret |= 0x2;
 	}
diff --git a/drivers/staging/hv/vmbus_private.h b/drivers/staging/hv/vmbus_private.h
index ca050a499b9..6f0d8df5e17 100644
--- a/drivers/staging/hv/vmbus_private.h
+++ b/drivers/staging/hv/vmbus_private.h
@@ -31,6 +31,7 @@
 #include "channel_mgmt.h"
 #include "ring_buffer.h"
 #include <linux/list.h>
+#include <asm/sync_bitops.h>
 
 
 /*
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index 7a127029e09..e328bcc5922 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -17,7 +17,8 @@
 #ifndef SPI_ADIS16400_H_
 #define SPI_ADIS16400_H_
 
-#define ADIS16400_STARTUP_DELAY	220 /* ms */
+#define ADIS16400_STARTUP_DELAY	290 /* ms */
+#define ADIS16400_MTEST_DELAY 90 /* ms */
 
 #define ADIS16400_READ_REG(a)    a
 #define ADIS16400_WRITE_REG(a) ((a) | 0x80)
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index cfb108a1545..540bde69cc3 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -6,6 +6,7 @@
  *
  * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
  * Copyright (c) 2007 Jonathan Cameron <jic23@cam.ac.uk>
+ * Copyright (c) 2011 Analog Devices Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -93,7 +94,6 @@ static int adis16400_spi_write_reg_16(struct device *dev,
 			.tx_buf = st->tx + 2,
 			.bits_per_word = 8,
 			.len = 2,
-			.cs_change = 1,
 		},
 	};
 
@@ -137,7 +137,6 @@ static int adis16400_spi_read_reg_16(struct device *dev,
 			.rx_buf = st->rx,
 			.bits_per_word = 8,
 			.len = 2,
-			.cs_change = 1,
 		},
 	};
 
@@ -375,7 +374,7 @@ static int adis16400_self_test(struct device *dev)
 		dev_err(dev, "problem starting self test");
 		goto err_ret;
 	}
-
+	msleep(ADIS16400_MTEST_DELAY);
 	adis16400_check_status(dev);
 
 err_ret:
@@ -471,10 +470,11 @@ static int adis16400_initial_setup(struct adis16400_state *st)
 	if (ret)
 		goto err_ret;
 
-	if (prod_id != ADIS16400_PRODUCT_ID_DEFAULT)
+	if ((prod_id & 0xF000) != ADIS16400_PRODUCT_ID_DEFAULT)
 		dev_warn(dev, "unknown product id");
 
-	printk(KERN_INFO DRIVER_NAME ": prod_id 0x%04x at CS%d (irq %d)\n",
+
+	dev_info(dev, ": prod_id 0x%04x at CS%d (irq %d)\n",
 		prod_id, st->us->chip_select, st->us->irq);
 
 	/* use high spi speed if possible */
@@ -497,12 +497,12 @@ err_ret:
 			  _reg)
 
 static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_X, ADIS16400_XGYRO_OFF);
-static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_XGYRO_OFF);
-static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_XGYRO_OFF);
+static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_YGYRO_OFF);
+static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_ZGYRO_OFF);
 
 static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_X, ADIS16400_XACCL_OFF);
-static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_XACCL_OFF);
-static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_XACCL_OFF);
+static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_YACCL_OFF);
+static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_ZACCL_OFF);
 
 
 static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16400_read_14bit_signed,
@@ -647,7 +647,7 @@ static int __devinit adis16400_probe(struct spi_device *spi)
 
 	ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
 	if (ret) {
-		printk(KERN_ERR "failed to initialize the ring\n");
+		dev_err(&spi->dev, "failed to initialize the ring\n");
 		goto error_unreg_ring_funcs;
 	}
 
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index 33293fba9bc..da28cb4288a 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -122,12 +122,10 @@ static int adis16400_spi_read_burst(struct device *dev, u8 *rx)
 			.tx_buf = st->tx,
 			.bits_per_word = 8,
 			.len = 2,
-			.cs_change = 0,
 		}, {
 			.rx_buf = rx,
 			.bits_per_word = 8,
 			.len = 24,
-			.cs_change = 1,
 		},
 	};
 
@@ -162,9 +160,10 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s)
 						  work_trigger_to_ring);
 	struct iio_ring_buffer *ring = st->indio_dev->ring;
 
-	int i = 0;
+	int i = 0, j;
 	s16 *data;
 	size_t datasize = ring->access.get_bytes_per_datum(ring);
+	unsigned long mask = ring->scan_mask;
 
 	data = kmalloc(datasize , GFP_KERNEL);
 	if (data == NULL) {
@@ -174,9 +173,12 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s)
 
 	if (ring->scan_count)
 		if (adis16400_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
-			for (; i < ring->scan_count; i++)
+			for (; i < ring->scan_count; i++) {
+				j = __ffs(mask);
+				mask &= ~(1 << j);
 				data[i] = be16_to_cpup(
-					(__be16 *)&(st->rx[i*2]));
+					(__be16 *)&(st->rx[j*2]));
+			}
 
 	/* Guaranteed to be aligned with 8 byte boundary */
 	if (ring->scan_timestamp)
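
For context: the adis16400_ring.c change above stops assuming that enabled scan channels are contiguous. For each output slot it pulls the index of the lowest set bit out of ring->scan_mask with __ffs() and clears that bit, so only enabled channels are copied out of the burst buffer. The same walk-the-set-bits idiom, sketched in ordinary C with __builtin_ctzl() standing in for the kernel's __ffs() (illustrative only):

#include <stdio.h>

int main(void)
{
	/* Channels 0, 2, 5 and 7 enabled. */
	unsigned long mask = (1UL << 0) | (1UL << 2) | (1UL << 5) | (1UL << 7);
	int i = 0;

	while (mask) {
		int j = __builtin_ctzl(mask);	/* lowest set bit, like __ffs() */

		mask &= ~(1UL << j);
		printf("slot %d <- channel %d\n", i, j);
		i++;
	}
	return 0;
}
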
diff --git a/drivers/staging/memrar/Kconfig b/drivers/staging/memrar/Kconfig
deleted file mode 100644
index cbeebc55090..00000000000
--- a/drivers/staging/memrar/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
1config MRST_RAR_HANDLER
2 tristate "RAR handler driver for Intel Moorestown platform"
3 depends on RAR_REGISTER
4 ---help---
5 This driver provides a memory management interface to
6 restricted access regions (RAR) available on the Intel
7 Moorestown platform.
8
9 Once locked down, restricted access regions are only
10 accessible by specific hardware on the platform. The x86
11 CPU is typically not one of those platforms. As such this
12 driver does not access RAR, and only provides a buffer
13 allocation/bookkeeping mechanism.
14
15 If unsure, say N.
diff --git a/drivers/staging/memrar/Makefile b/drivers/staging/memrar/Makefile
deleted file mode 100644
index a3336c00cc5..00000000000
--- a/drivers/staging/memrar/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1obj-$(CONFIG_MRST_RAR_HANDLER) += memrar.o
2memrar-y := memrar_allocator.o memrar_handler.o
diff --git a/drivers/staging/memrar/TODO b/drivers/staging/memrar/TODO
deleted file mode 100644
index 435e09ba44c..00000000000
--- a/drivers/staging/memrar/TODO
+++ /dev/null
@@ -1,43 +0,0 @@
1RAR Handler (memrar) Driver TODO Items
2======================================
3
4Maintainer: Eugene Epshteyn <eugene.epshteyn@intel.com>
5
6memrar.h
7--------
81. This header exposes the driver's user space and kernel space
9 interfaces. It should be moved to <linux/rar/memrar.h>, or
10 something along those lines, when this memrar driver is moved out
11 of `staging'.
12 a. It would be ideal if staging/rar_register/rar_register.h was
13 moved to the same directory.
14
15memrar_allocator.[ch]
16---------------------
171. Address potential fragmentation issues with the memrar_allocator.
18
192. Hide struct memrar_allocator details/fields. They need not be
20 exposed to the user.
21 a. Forward declare struct memrar_allocator.
22 b. Move all three struct definitions to `memrar_allocator.c'
23 source file.
24 c. Add a memrar_allocator_largest_free_area() function, or
25 something like that to get access to the value of the struct
26 memrar_allocator "largest_free_area" field. This allows the
27 struct memrar_allocator fields to be completely hidden from
28 the user. The memrar_handler code really only needs this for
29 statistic gathering on-demand.
30 d. Do the same for the "capacity" field as the
31 "largest_free_area" field.
32
333. Move memrar_allocator.* to kernel `lib' directory since it is HW
34 neutral.
35 a. Alternatively, use lib/genalloc.c instead.
36 b. A kernel port of Doug Lea's malloc() implementation may also
37 be an option.
38
39memrar_handler.c
40----------------
411. Split user space interface (ioctl code) from core/kernel code,
42 e.g.:
43 memrar_handler.c -> memrar_core.c, memrar_user.c
diff --git a/drivers/staging/memrar/memrar-abi b/drivers/staging/memrar/memrar-abi
deleted file mode 100644
index c23fc996a43..00000000000
--- a/drivers/staging/memrar/memrar-abi
+++ /dev/null
@@ -1,89 +0,0 @@
1What: /dev/memrar
2Date: March 2010
3KernelVersion: 2.6.34
4Contact: Eugene Epshteyn <eugene.epshteyn@intel.com>
5Description: The Intel Moorestown Restricted Access Region (RAR)
6 Handler driver exposes an ioctl() based interface that
7 allows a user to reserve and release blocks of RAR
8 memory.
9
10 Note: A sysfs based one was not appropriate for the
11 RAR handler's usage model.
12
13 =========================================================
14 ioctl() Requests
15 =========================================================
16 RAR_HANDLER_RESERVE
17 -------------------
18 Description: Reserve RAR block.
19 Type: struct RAR_block_info
20 Direction: in/out
21 Errors: EINVAL (invalid RAR type or size)
22 ENOMEM (not enough RAR memory)
23
24 RAR_HANDLER_STAT
25 ----------------
26 Description: Get RAR statistics.
27 Type: struct RAR_stat
28 Direction: in/out
29 Errors: EINVAL (invalid RAR type)
30
31 RAR_HANDLER_RELEASE
32 -------------------
33 Description: Release previously reserved RAR block.
34 Type: 32 bit unsigned integer
35 (e.g. uint32_t), i.e the RAR "handle".
36 Direction: in
37 Errors: EINVAL (invalid RAR handle)
38
39
40 =========================================================
41 ioctl() Request Parameter Types
42 =========================================================
43 The structures referred to above are defined as
44 follows:
45
46 /**
47 * struct RAR_block_info - user space struct that
48 * describes RAR buffer
49 * @type: Type of RAR memory (e.g.,
50 * RAR_TYPE_VIDEO or RAR_TYPE_AUDIO) [in]
51 * @size: Requested size of a block in bytes to
52 * be reserved in RAR. [in]
53 * @handle: Handle that can be used to refer to
54 * reserved block. [out]
55 *
56 * This is the basic structure exposed to the user
57 * space that describes a given RAR buffer. It used
58 * as the parameter for the RAR_HANDLER_RESERVE ioctl.
59 * The buffer's underlying bus address is not exposed
60 * to the user. User space code refers to the buffer
61 * entirely by "handle".
62 */
63 struct RAR_block_info {
64 __u32 type;
65 __u32 size;
66 __u32 handle;
67 };
68
69 /**
70 * struct RAR_stat - RAR statistics structure
71 * @type: Type of RAR memory (e.g.,
72 * RAR_TYPE_VIDEO or
73 * RAR_TYPE_AUDIO) [in]
74 * @capacity: Total size of RAR memory
75 * region. [out]
76 * @largest_block_size: Size of the largest reservable
77 * block. [out]
78 *
79 * This structure is used for RAR_HANDLER_STAT ioctl.
80 */
81 struct RAR_stat {
82 __u32 type;
83 __u32 capacity;
84 __u32 largest_block_size;
85 };
86
87 Lastly, the RAR_HANDLER_RELEASE ioctl expects a
88 "handle" to the RAR block of memory. It is a 32 bit
89 unsigned integer.
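
For context: the ABI document above (removed together with the driver) describes an ioctl()-based interface on /dev/memrar. Purely to illustrate how such an interface is exercised from user space, here is a hedged sketch using only the request codes and struct layout quoted in the document; the RAR type value is a placeholder, since the numeric RAR_TYPE_* constants live in the rar_register headers and are not reproduced here:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* Definitions quoted from the (removed) memrar.h / memrar-abi text. */
struct RAR_block_info {
	uint32_t type;
	uint32_t size;
	uint32_t handle;
};

#define RAR_IOCTL_BASE		0xE0
#define RAR_HANDLER_RESERVE	_IOWR(RAR_IOCTL_BASE, 0x00, struct RAR_block_info)
#define RAR_HANDLER_RELEASE	_IOW(RAR_IOCTL_BASE, 0x01, uint32_t)

int main(void)
{
	struct RAR_block_info info = {
		.type = 0,	/* placeholder for a RAR_TYPE_* constant */
		.size = 4096,
	};
	int fd = open("/dev/memrar", O_RDWR);

	if (fd < 0)
		return 1;

	if (ioctl(fd, RAR_HANDLER_RESERVE, &info) == 0) {
		printf("reserved handle %u\n", info.handle);
		ioctl(fd, RAR_HANDLER_RELEASE, &info.handle);
	}
	close(fd);
	return 0;
}
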
diff --git a/drivers/staging/memrar/memrar.h b/drivers/staging/memrar/memrar.h
deleted file mode 100644
index 0feb73b94c9..00000000000
--- a/drivers/staging/memrar/memrar.h
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * RAR Handler (/dev/memrar) internal driver API.
3 * Copyright (C) 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General
7 * Public License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be
10 * useful, but WITHOUT ANY WARRANTY; without even the implied
11 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
12 * PURPOSE. See the GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the Free
15 * Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 * The full GNU General Public License is included in this
18 * distribution in the file called COPYING.
19 */
20
21
22#ifndef _MEMRAR_H
23#define _MEMRAR_H
24
25#include <linux/ioctl.h>
26#include <linux/types.h>
27
28
29/**
30 * struct RAR_stat - RAR statistics structure
31 * @type: Type of RAR memory (e.g., audio vs. video)
32 * @capacity: Total size of RAR memory region.
33 * @largest_block_size: Size of the largest reservable block.
34 *
35 * This structure is used for RAR_HANDLER_STAT ioctl and for the
36 * RAR_get_stat() user space wrapper function.
37 */
38struct RAR_stat {
39 __u32 type;
40 __u32 capacity;
41 __u32 largest_block_size;
42};
43
44
45/**
46 * struct RAR_block_info - user space struct that describes RAR buffer
47 * @type: Type of RAR memory (e.g., audio vs. video)
48 * @size: Requested size of a block to be reserved in RAR.
49 * @handle: Handle that can be used to refer to reserved block.
50 *
51 * This is the basic structure exposed to the user space that
52 * describes a given RAR buffer. The buffer's underlying bus address
53 * is not exposed to the user. User space code refers to the buffer
54 * entirely by "handle".
55 */
56struct RAR_block_info {
57 __u32 type;
58 __u32 size;
59 __u32 handle;
60};
61
62
63#define RAR_IOCTL_BASE 0xE0
64
65/* Reserve RAR block. */
66#define RAR_HANDLER_RESERVE _IOWR(RAR_IOCTL_BASE, 0x00, struct RAR_block_info)
67
68/* Release previously reserved RAR block. */
69#define RAR_HANDLER_RELEASE _IOW(RAR_IOCTL_BASE, 0x01, __u32)
70
71/* Get RAR stats. */
72#define RAR_HANDLER_STAT _IOWR(RAR_IOCTL_BASE, 0x02, struct RAR_stat)
73
74
75#ifdef __KERNEL__
76
77/* -------------------------------------------------------------- */
78/* Kernel Side RAR Handler Interface */
79/* -------------------------------------------------------------- */
80
81/**
82 * struct RAR_buffer - kernel space struct that describes RAR buffer
83 * @info: structure containing base RAR buffer information
84 * @bus_address: buffer bus address
85 *
86 * Structure that contains all information related to a given block of
87 * memory in RAR. It is generally only used when retrieving RAR
88 * related bus addresses.
89 *
90 * Note: This structure is used only by RAR-enabled drivers, and is
91 * not intended to be exposed to the user space.
92 */
93struct RAR_buffer {
94 struct RAR_block_info info;
95 dma_addr_t bus_address;
96};
97
98#if defined(CONFIG_MRST_RAR_HANDLER)
99/**
100 * rar_reserve() - reserve RAR buffers
101 * @buffers: array of RAR_buffers where type and size of buffers to
102 * reserve are passed in, handle and bus address are
103 * passed out
104 * @count: number of RAR_buffers in the "buffers" array
105 *
106 * This function will reserve buffers in the restricted access regions
107 * of given types.
108 *
109 * It returns the number of successfully reserved buffers. Successful
110 * buffer reservations will have the corresponding bus_address field
111 * set to a non-zero value in the given buffers vector.
112 */
113extern size_t rar_reserve(struct RAR_buffer *buffers,
114 size_t count);
115
116/**
117 * rar_release() - release RAR buffers
118 * @buffers: array of RAR_buffers where handles to buffers to be
119 * released are passed in
120 * @count: number of RAR_buffers in the "buffers" array
121 *
122 * This function will release RAR buffers that were retrieved through
123 * a call to rar_reserve() or rar_handle_to_bus() by decrementing the
124 * reference count. The RAR buffer will be reclaimed when the
125 * reference count drops to zero.
126 *
127 * It returns the number of successfully released buffers. Successful
128 * releases will have their handle field set to zero in the given
129 * buffers vector.
130 */
131extern size_t rar_release(struct RAR_buffer *buffers,
132 size_t count);
133
134/**
135 * rar_handle_to_bus() - convert a vector of RAR handles to bus addresses
136 * @buffers: array of RAR_buffers containing handles to be
137 * converted to bus_addresses
138 * @count: number of RAR_buffers in the "buffers" array
139
140 * This function will retrieve the RAR buffer bus addresses, type and
141 * size corresponding to the RAR handles provided in the buffers
142 * vector.
143 *
144 * It returns the number of successfully converted buffers. The bus
145 * address will be set to 0 for unrecognized handles.
146 *
147 * The reference count for each corresponding buffer in RAR will be
148 * incremented. Call rar_release() when done with the buffers.
149 */
150extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
151 size_t count);
152
153#else
154
155extern inline size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
156{
157 return 0;
158}
159
160extern inline size_t rar_release(struct RAR_buffer *buffers, size_t count)
161{
162 return 0;
163}
164
165extern inline size_t rar_handle_to_bus(struct RAR_buffer *buffers,
166 size_t count)
167{
168 return 0;
169}
170
171#endif /* MRST_RAR_HANDLER */
172#endif /* __KERNEL__ */
173
174#endif /* _MEMRAR_H */
diff --git a/drivers/staging/memrar/memrar_allocator.c b/drivers/staging/memrar/memrar_allocator.c
deleted file mode 100644
index a4f8c5846a0..00000000000
--- a/drivers/staging/memrar/memrar_allocator.c
+++ /dev/null
@@ -1,432 +0,0 @@
1/*
2 * memrar_allocator 1.0: An allocator for Intel RAR.
3 *
4 * Copyright (C) 2010 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General
8 * Public License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be
11 * useful, but WITHOUT ANY WARRANTY; without even the implied
12 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the Free
16 * Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
18 * The full GNU General Public License is included in this
19 * distribution in the file called COPYING.
20 *
21 *
22 * ------------------------------------------------------------------
23 *
24 * This simple allocator implementation provides a
25 * malloc()/free()-like interface for reserving space within a
26 * previously reserved block of memory. It is not specific to
27 * any hardware, nor is it coupled with the lower level paging
28 * mechanism.
29 *
30 * The primary goal of this implementation is to provide a means
31 * to partition an arbitrary block of memory without actually
32 * accessing the memory or incurring any hardware side-effects
33 * (e.g. paging). It is, in effect, a bookkeeping mechanism for
34 * buffers.
35 */
36
37
38#include "memrar_allocator.h"
39#include <linux/slab.h>
40#include <linux/bug.h>
41#include <linux/kernel.h>
42
43
44struct memrar_allocator *memrar_create_allocator(unsigned long base,
45 size_t capacity,
46 size_t block_size)
47{
48 struct memrar_allocator *allocator = NULL;
49 struct memrar_address_ranges *first_node = NULL;
50
51 /*
52 * Make sure the base address is aligned on a block_size
53 * boundary.
54 *
55 * @todo Is this necessary?
56 */
57 /* base = ALIGN(base, block_size); */
58
59 /* Validate parameters.
60 *
61 * Make sure we can allocate the entire memory space. Zero
62 * capacity or block size are obviously invalid.
63 */
64 if (base == 0
65 || capacity == 0
66 || block_size == 0
67 || ULONG_MAX - capacity < base
68 || capacity < block_size)
69 return allocator;
70
71 /*
72 * There isn't much point in creating a memory allocator that
73 * is only capable of holding one block but we'll allow it,
74 * and issue a diagnostic.
75 */
76 WARN(capacity < block_size * 2,
77 "memrar: Only one block available to allocator.\n");
78
79 allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);
80
81 if (allocator == NULL)
82 return allocator;
83
84 mutex_init(&allocator->lock);
85 allocator->base = base;
86
87 /* Round the capacity down to a multiple of block_size. */
88 allocator->capacity = (capacity / block_size) * block_size;
89
90 allocator->block_size = block_size;
91
92 allocator->largest_free_area = allocator->capacity;
93
94 /* Initialize the handle and free lists. */
95 INIT_LIST_HEAD(&allocator->allocated_list.list);
96 INIT_LIST_HEAD(&allocator->free_list.list);
97
98 first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
99 if (first_node == NULL) {
100 kfree(allocator);
101 allocator = NULL;
102 } else {
103 /* Full range of blocks is available. */
104 first_node->range.begin = base;
105 first_node->range.end = base + allocator->capacity;
106 list_add(&first_node->list,
107 &allocator->free_list.list);
108 }
109
110 return allocator;
111}
112
113void memrar_destroy_allocator(struct memrar_allocator *allocator)
114{
115 /*
116 * Assume that the memory allocator lock isn't held at this
117 * point in time. Caller must ensure that.
118 */
119
120 struct memrar_address_ranges *pos = NULL;
121 struct memrar_address_ranges *n = NULL;
122
123 if (allocator == NULL)
124 return;
125
126 mutex_lock(&allocator->lock);
127
128 /* Reclaim free list resources. */
129 list_for_each_entry_safe(pos,
130 n,
131 &allocator->free_list.list,
132 list) {
133 list_del(&pos->list);
134 kfree(pos);
135 }
136
137 mutex_unlock(&allocator->lock);
138
139 kfree(allocator);
140}
141
142unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
143 size_t size)
144{
145 struct memrar_address_ranges *pos = NULL;
146
147 size_t num_blocks;
148 unsigned long reserved_bytes;
149
150 /*
151 * Address of allocated buffer. We assume that zero is not a
152 * valid address.
153 */
154 unsigned long addr = 0;
155
156 if (allocator == NULL || size == 0)
157 return addr;
158
159 /* Reserve enough blocks to hold the amount of bytes requested. */
160 num_blocks = DIV_ROUND_UP(size, allocator->block_size);
161
162 reserved_bytes = num_blocks * allocator->block_size;
163
164 mutex_lock(&allocator->lock);
165
166 if (reserved_bytes > allocator->largest_free_area) {
167 mutex_unlock(&allocator->lock);
168 return addr;
169 }
170
171 /*
172 * Iterate through the free list to find a suitably sized
173 * range of free contiguous memory blocks.
174 *
175 * We also take the opportunity to reset the size of the
176 * largest free area size statistic.
177 */
178 list_for_each_entry(pos, &allocator->free_list.list, list) {
179 struct memrar_address_range * const fr = &pos->range;
180 size_t const curr_size = fr->end - fr->begin;
181
182 if (curr_size >= reserved_bytes && addr == 0) {
183 struct memrar_address_range *range = NULL;
184 struct memrar_address_ranges * const new_node =
185 kmalloc(sizeof(*new_node), GFP_KERNEL);
186
187 if (new_node == NULL)
188 break;
189
190 list_add(&new_node->list,
191 &allocator->allocated_list.list);
192
193 /*
194 * Carve out area of memory from end of free
195 * range.
196 */
197 range = &new_node->range;
198 range->end = fr->end;
199 fr->end -= reserved_bytes;
200 range->begin = fr->end;
201 addr = range->begin;
202
203 /*
204 * Check if largest area has decreased in
205 * size. We'll need to continue scanning for
206 * the next largest area if it has.
207 */
208 if (curr_size == allocator->largest_free_area)
209 allocator->largest_free_area -=
210 reserved_bytes;
211 else
212 break;
213 }
214
215 /*
216 * Reset largest free area size statistic as needed,
217 * but only if we've actually allocated memory.
218 */
219 if (addr != 0
220 && curr_size > allocator->largest_free_area) {
221 allocator->largest_free_area = curr_size;
222 break;
223 }
224 }
225
226 mutex_unlock(&allocator->lock);
227
228 return addr;
229}
230
231long memrar_allocator_free(struct memrar_allocator *allocator,
232 unsigned long addr)
233{
234 struct list_head *pos = NULL;
235 struct list_head *tmp = NULL;
236 struct list_head *dst = NULL;
237
238 struct memrar_address_ranges *allocated = NULL;
239 struct memrar_address_range const *handle = NULL;
240
241 unsigned long old_end = 0;
242 unsigned long new_chunk_size = 0;
243
244 if (allocator == NULL)
245 return -EINVAL;
246
247 if (addr == 0)
248 return 0; /* Ignore "free(0)". */
249
250 mutex_lock(&allocator->lock);
251
252 /* Find the corresponding handle. */
253 list_for_each_entry(allocated,
254 &allocator->allocated_list.list,
255 list) {
256 if (allocated->range.begin == addr) {
257 handle = &allocated->range;
258 break;
259 }
260 }
261
262 /* No such buffer created by this allocator. */
263 if (handle == NULL) {
264 mutex_unlock(&allocator->lock);
265 return -EFAULT;
266 }
267
268 /*
269 * Coalesce adjacent chunks of memory if possible.
270 *
271 * @note This isn't full blown coalescing since we're only
272 * coalescing at most three chunks of memory.
273 */
274 list_for_each_safe(pos, tmp, &allocator->free_list.list) {
275 /* @todo O(n) performance. Optimize. */
276
277 struct memrar_address_range * const chunk =
278 &list_entry(pos,
279 struct memrar_address_ranges,
280 list)->range;
281
282 /* Extend size of existing free adjacent chunk. */
283 if (chunk->end == handle->begin) {
284 /*
285 * Chunk "less than" than the one we're
286 * freeing is adjacent.
287 *
288 * Before:
289 *
290 * +-----+------+
291 * |chunk|handle|
292 * +-----+------+
293 *
294 * After:
295 *
296 * +------------+
297 * | chunk |
298 * +------------+
299 */
300
301 struct memrar_address_ranges const * const next =
302 list_entry(pos->next,
303 struct memrar_address_ranges,
304 list);
305
306 chunk->end = handle->end;
307
308 /*
309 * Now check if next free chunk is adjacent to
310 * the current extended free chunk.
311 *
312 * Before:
313 *
314 * +------------+----+
315 * | chunk |next|
316 * +------------+----+
317 *
318 * After:
319 *
320 * +-----------------+
321 * | chunk |
322 * +-----------------+
323 */
324 if (!list_is_singular(pos)
325 && chunk->end == next->range.begin) {
326 chunk->end = next->range.end;
327 list_del(pos->next);
328 kfree(next);
329 }
330
331 list_del(&allocated->list);
332
333 new_chunk_size = chunk->end - chunk->begin;
334
335 goto exit_memrar_free;
336
337 } else if (handle->end == chunk->begin) {
338 /*
339 * Chunk "greater than" than the one we're
340 * freeing is adjacent.
341 *
342 * +------+-----+
343 * |handle|chunk|
344 * +------+-----+
345 *
346 * After:
347 *
348 * +------------+
349 * | chunk |
350 * +------------+
351 */
352
353 struct memrar_address_ranges const * const prev =
354 list_entry(pos->prev,
355 struct memrar_address_ranges,
356 list);
357
358 chunk->begin = handle->begin;
359
360 /*
361 * Now check if previous free chunk is
362 * adjacent to the current extended free
363 * chunk.
364 *
365 *
366 * Before:
367 *
368 * +----+------------+
369 * |prev| chunk |
370 * +----+------------+
371 *
372 * After:
373 *
374 * +-----------------+
375 * | chunk |
376 * +-----------------+
377 */
378 if (!list_is_singular(pos)
379 && prev->range.end == chunk->begin) {
380 chunk->begin = prev->range.begin;
381 list_del(pos->prev);
382 kfree(prev);
383 }
384
385 list_del(&allocated->list);
386
387 new_chunk_size = chunk->end - chunk->begin;
388
389 goto exit_memrar_free;
390
391 } else if (chunk->end < handle->begin
392 && chunk->end > old_end) {
393 /* Keep track of where the entry could be
394 * potentially moved from the "allocated" list
395 * to the "free" list if coalescing doesn't
396 * occur, making sure the "free" list remains
397 * sorted.
398 */
399 old_end = chunk->end;
400 dst = pos;
401 }
402 }
403
404 /*
405 * Nothing to coalesce.
406 *
407 * Move the entry from the "allocated" list to the "free"
408 * list.
409 */
410 list_move(&allocated->list, dst);
411 new_chunk_size = handle->end - handle->begin;
412 allocated = NULL;
413
414exit_memrar_free:
415
416 if (new_chunk_size > allocator->largest_free_area)
417 allocator->largest_free_area = new_chunk_size;
418
419 mutex_unlock(&allocator->lock);
420
421 kfree(allocated);
422
423 return 0;
424}
425
426
427
428/*
429 Local Variables:
430 c-file-style: "linux"
431 End:
432*/
diff --git a/drivers/staging/memrar/memrar_allocator.h b/drivers/staging/memrar/memrar_allocator.h
deleted file mode 100644
index 0b80dead710..00000000000
--- a/drivers/staging/memrar/memrar_allocator.h
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * Copyright (C) 2010 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General
6 * Public License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be
9 * useful, but WITHOUT ANY WARRANTY; without even the implied
10 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
11 * PURPOSE. See the GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public
13 * License along with this program; if not, write to the Free
14 * Software Foundation, Inc., 59 Temple Place - Suite 330,
15 * Boston, MA 02111-1307, USA.
16 * The full GNU General Public License is included in this
17 * distribution in the file called COPYING.
18 */
19
20#ifndef MEMRAR_ALLOCATOR_H
21#define MEMRAR_ALLOCATOR_H
22
23
24#include <linux/mutex.h>
25#include <linux/list.h>
26#include <linux/types.h>
27#include <linux/kernel.h>
28
29
30/**
31 * struct memrar_address_range - struct that describes a memory range
32 * @begin: Beginning of available address range.
33 * @end: End of available address range, one past the end,
34 * i.e. [begin, end).
35 */
36struct memrar_address_range {
37/* private: internal use only */
38 unsigned long begin;
39 unsigned long end;
40};
41
42/**
43 * struct memrar_address_ranges - list of areas of memory.
44 * @list: Linked list of address ranges.
45 * @range: Memory address range corresponding to given list node.
46 */
47struct memrar_address_ranges {
48/* private: internal use only */
49 struct list_head list;
50 struct memrar_address_range range;
51};
52
53/**
54 * struct memrar_allocator - encapsulation of the memory allocator state
55 * @lock: Lock used to synchronize access to the memory
56 * allocator state.
57 * @base: Base (start) address of the allocator memory
58 * space.
59 * @capacity: Size of the allocator memory space in bytes.
60 * @block_size: The size in bytes of individual blocks within
61 * the allocator memory space.
62 * @largest_free_area: Largest free area of memory in the allocator
63 * in bytes.
64 * @allocated_list: List of allocated memory block address
65 * ranges.
66 * @free_list: List of free address ranges.
67 *
68 * This structure contains all memory allocator state, including the
69 * base address, capacity, free list, lock, etc.
70 */
71struct memrar_allocator {
72/* private: internal use only */
73 struct mutex lock;
74 unsigned long base;
75 size_t capacity;
76 size_t block_size;
77 size_t largest_free_area;
78 struct memrar_address_ranges allocated_list;
79 struct memrar_address_ranges free_list;
80};
81
82/**
83 * memrar_create_allocator() - create a memory allocator
84 * @base: Address at which the memory allocator begins.
85 * @capacity: Desired size of the memory allocator. This value must
86 * be larger than the block_size, ideally more than twice
87 * as large since there wouldn't be much point in using a
88 * memory allocator otherwise.
89 * @block_size: The size of individual blocks within the memory
90 * allocator. This value must be smaller than the
91 * capacity.
92 *
93 * Create a memory allocator with the given capacity and block size.
94 * The capacity will be reduced to be a multiple of the block size, if
95 * necessary.
96 *
97 * Returns an instance of the memory allocator, if creation succeeds,
98 * otherwise zero if creation fails. Failure may occur if not enough
99 * kernel memory exists to create the memrar_allocator instance
100 * itself, or if the capacity and block_size arguments are not
101 * compatible or do not make sense.
102 */
103struct memrar_allocator *memrar_create_allocator(unsigned long base,
104 size_t capacity,
105 size_t block_size);
106
107/**
108 * memrar_destroy_allocator() - destroy allocator
109 * @allocator: The allocator being destroyed.
110 *
111 * Reclaim resources held by the memory allocator. The caller must
112 * explicitly free all memory reserved by memrar_allocator_alloc()
113 * prior to calling this function. Otherwise leaks will occur.
114 */
115void memrar_destroy_allocator(struct memrar_allocator *allocator);
116
117/**
118 * memrar_allocator_alloc() - reserve an area of memory of given size
119 * @allocator: The allocator instance being used to reserve buffer.
120 * @size: The size in bytes of the buffer to allocate.
121 *
122 * This function reserves an area of memory managed by the given
123 * allocator. It returns zero if allocation was not possible.
124 * Failure may occur if the allocator no longer has space available.
125 */
126unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
127 size_t size);
128
129/**
130 * memrar_allocator_free() - release buffer starting at given address
131 * @allocator: The allocator instance being used to release the buffer.
132 * @address: The address of the buffer being released.
133 *
134 * Release an area of memory starting at the given address. Failure
135 * could occur if the given address is not in the address space
136 * managed by the allocator. Returns zero on success or an errno
137 * (negative value) on failure.
138 */
139long memrar_allocator_free(struct memrar_allocator *allocator,
140 unsigned long address);
141
142#endif /* MEMRAR_ALLOCATOR_H */
143
144
145/*
146 Local Variables:
147 c-file-style: "linux"
148 End:
149*/
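Taken together, the declarations above give the allocator a simple create / alloc / free / destroy lifecycle. A minimal in-kernel usage sketch; the base address and sizes are made-up example values, not anything the driver actually uses:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>		/* PAGE_SIZE */

#include "memrar_allocator.h"

/* Sketch only: exercise the API declared above over a pretend region. */
static int memrar_allocator_demo(void)
{
	struct memrar_allocator *a;
	unsigned long handle;

	/* Manage 4 MiB starting at an example base, in page-sized blocks. */
	a = memrar_create_allocator(0x10000000, 4 * 1024 * 1024, PAGE_SIZE);
	if (a == NULL)
		return -ENOMEM;

	handle = memrar_allocator_alloc(a, 64 * 1024);
	if (handle == 0) {
		memrar_destroy_allocator(a);
		return -ENOMEM;
	}

	/* ... hand "handle" to whatever consumes the region ... */

	memrar_allocator_free(a, handle);

	/* Every allocation must be freed before the allocator goes away. */
	memrar_destroy_allocator(a);
	return 0;
}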
diff --git a/drivers/staging/memrar/memrar_handler.c b/drivers/staging/memrar/memrar_handler.c
deleted file mode 100644
index 1d9399d6f10..00000000000
--- a/drivers/staging/memrar/memrar_handler.c
+++ /dev/null
@@ -1,1007 +0,0 @@
1/*
2 * memrar_handler 1.0: An Intel restricted access region handler device
3 *
4 * Copyright (C) 2010 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General
8 * Public License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be
11 * useful, but WITHOUT ANY WARRANTY; without even the implied
12 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the Free
16 * Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
18 * The full GNU General Public License is included in this
19 * distribution in the file called COPYING.
20 *
21 * -------------------------------------------------------------------
22 *
23 * Moorestown restricted access regions (RAR) provide isolated
24 * areas of main memory that are only accessible by authorized
25 * devices.
26 *
27 * The Intel Moorestown RAR handler module exposes a kernel space
28 * RAR memory management mechanism. It is essentially a
29 * RAR-specific allocator.
30 *
31 * Besides providing RAR buffer management, the RAR handler also
32 * behaves in many ways like an OS virtual memory manager. For
33 * example, the RAR "handles" created by the RAR handler are
34 * analogous to user space virtual addresses.
35 *
36 * RAR memory itself is never accessed directly by the RAR
37 * handler.
38 */
39
40#include <linux/miscdevice.h>
41#include <linux/fs.h>
42#include <linux/slab.h>
43#include <linux/kref.h>
44#include <linux/mutex.h>
45#include <linux/kernel.h>
46#include <linux/uaccess.h>
47#include <linux/mm.h>
48#include <linux/ioport.h>
49#include <linux/io.h>
50#include <linux/rar_register.h>
51
52#include "memrar.h"
53#include "memrar_allocator.h"
54
55
56#define MEMRAR_VER "1.0"
57
58/*
59 * Moorestown supports three restricted access regions.
60 *
61 * We only care about the first two, video and audio. The third,
62 * reserved for Chaabi and the P-unit, will be handled by their
63 * respective drivers.
64 */
65#define MRST_NUM_RAR 2
66
67/* ---------------- -------------------- ------------------- */
68
69/**
70 * struct memrar_buffer_info - struct that keeps track of all RAR buffers
71 * @list: Linked list of memrar_buffer_info objects.
72 * @buffer: Core RAR buffer information.
73 * @refcount: Reference count.
74 * @owner: File handle corresponding to process that reserved the
75 * block of memory in RAR. This will be zero for buffers
76 * allocated by other drivers instead of by a user space
77 * process.
78 *
79 * This structure encapsulates a linked list of RAR buffers, as well as
80 * other characteristics specific to a given list node, such as the
81 * reference count on the corresponding RAR buffer.
82 */
83struct memrar_buffer_info {
84 struct list_head list;
85 struct RAR_buffer buffer;
86 struct kref refcount;
87 struct file *owner;
88};
89
90/**
91 * struct memrar_rar_info - characteristics of a given RAR
92 * @base: Base bus address of the RAR.
93 * @length: Length of the RAR.
94 * @iobase: Virtual address of RAR mapped into kernel.
95 * @allocator: Allocator associated with the RAR. Note the allocator
96 * "capacity" may be smaller than the RAR length if the
97 * length is not a multiple of the configured allocator
98 * block size.
99 * @buffers: Table that keeps track of all reserved RAR buffers.
100 * @lock: Lock used to synchronize access to RAR-specific data
101 * structures.
102 *
103 * Each RAR has an associated memrar_rar_info structure that describes
104 * where in memory the RAR is located, how large it is, and a list of
105 * reserved RAR buffers inside that RAR. Each RAR also has a mutex
106 * associated with it to reduce lock contention when operations on
107 * multiple RARs are performed in parallel.
108 */
109struct memrar_rar_info {
110 dma_addr_t base;
111 unsigned long length;
112 void __iomem *iobase;
113 struct memrar_allocator *allocator;
114 struct memrar_buffer_info buffers;
115 struct mutex lock;
116 int allocated; /* True if we own this RAR */
117};
118
119/*
120 * Array of RAR characteristics.
121 */
122static struct memrar_rar_info memrars[MRST_NUM_RAR];
123
124/* ---------------- -------------------- ------------------- */
125
126/* Validate RAR type. */
127static inline int memrar_is_valid_rar_type(u32 type)
128{
129 return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
130}
131
132/* Check if an address/handle falls within the given RAR memory range. */
133static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
134 u32 vaddr)
135{
136 unsigned long const iobase = (unsigned long) (rar->iobase);
137 return (vaddr >= iobase && vaddr < iobase + rar->length);
138}
139
140/* Retrieve RAR information associated with the given handle. */
141static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
142{
143 int i;
144 for (i = 0; i < MRST_NUM_RAR; ++i) {
145 struct memrar_rar_info * const rar = &memrars[i];
146 if (memrar_handle_in_range(rar, vaddr))
147 return rar;
148 }
149
150 return NULL;
151}
152
153/**
154 * memrar_get_bus_address - handle to bus address
155 *
156 * Retrieve bus address from given handle.
157 *
158 * Returns address corresponding to given handle. Zero if handle is
159 * invalid.
160 */
161static dma_addr_t memrar_get_bus_address(
162 struct memrar_rar_info *rar,
163 u32 vaddr)
164{
165 unsigned long const iobase = (unsigned long) (rar->iobase);
166
167 if (!memrar_handle_in_range(rar, vaddr))
168 return 0;
169
170 /*
171 * An assumption is made that the virtual address offset is
172 * the same as the bus address offset, at least based on the
173 * way this driver is implemented. For example, vaddr + 2 ==
174 * baddr + 2.
175 *
176 * @todo Is that a valid assumption?
177 */
178 return rar->base + (vaddr - iobase);
179}
180
181/**
182 * memrar_get_physical_address - handle to physical address
183 *
184 * Retrieve physical address from given handle.
185 *
186 * Returns address corresponding to given handle. Zero if handle is
187 * invalid.
188 */
189static dma_addr_t memrar_get_physical_address(
190 struct memrar_rar_info *rar,
191 u32 vaddr)
192{
193 /*
194 * @todo This assumes that the bus address and physical
195 * address are the same. That is true for Moorestown
196 * but not necessarily on other platforms. This
197 * deficiency should be addressed at some point.
198 */
199 return memrar_get_bus_address(rar, vaddr);
200}
201
202/**
203 * memrar_release_block_i - release a block to the pool
204 * @kref: kref of block
205 *
206 * Core block release code. A node has hit zero references so can
207 * be released and the lists must be updated.
208 *
209 * Note: This code removes the node from a list. Make sure any list
210 * iteration is performed using list_for_each_safe().
211 */
212static void memrar_release_block_i(struct kref *ref)
213{
214 /*
215 * Last reference is being released. Remove from the table,
216 * and reclaim resources.
217 */
218
219 struct memrar_buffer_info * const node =
220 container_of(ref, struct memrar_buffer_info, refcount);
221
222 struct RAR_block_info * const user_info =
223 &node->buffer.info;
224
225 struct memrar_allocator * const allocator =
226 memrars[user_info->type].allocator;
227
228 list_del(&node->list);
229
230 memrar_allocator_free(allocator, user_info->handle);
231
232 kfree(node);
233}
234
235/**
236 * memrar_init_rar_resources - configure a RAR
237 * @rarnum: rar that has been allocated
238 * @devname: name of our device
239 *
240 * Initialize RAR parameters, such as bus addresses, etc and make
241 * the resource accessible.
242 */
243static int memrar_init_rar_resources(int rarnum, char const *devname)
244{
245 /* ---- Sanity Checks ----
246 * 1. RAR bus addresses in both Lincroft and Langwell RAR
247 * registers should be the same.
248 * a. There's no way we can do this through IA.
249 *
250 * 2. Secure device ID in Langwell RAR registers should be set
251 * appropriately, e.g. only LPE DMA for the audio RAR, and
252 * security for the other Langwell based RAR registers.
253 * a. There's no way we can do this through IA.
254 *
255 * 3. Audio and video RAR registers and RAR access should be
256 * locked down. If not, enable RAR access control. Except
257 * for debugging purposes, there is no reason for them to
258 * be unlocked.
259 * a. We can only do this for the Lincroft (IA) side.
260 *
261 * @todo Should the RAR handler driver even be aware of audio
262 * and video RAR settings?
263 */
264
265 /*
266 * RAR buffer block size.
267 *
268 * We choose it to be the size of a page to simplify the
269 * /dev/memrar mmap() implementation and usage. Otherwise
270 * paging is not involved once an RAR is locked down.
271 */
272 static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;
273
274 dma_addr_t low, high;
275 struct memrar_rar_info * const rar = &memrars[rarnum];
276
277 BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
278 BUG_ON(!memrar_is_valid_rar_type(rarnum));
279 BUG_ON(rar->allocated);
280
281 if (rar_get_address(rarnum, &low, &high) != 0)
282 /* No RAR is available. */
283 return -ENODEV;
284
285 if (low == 0 || high == 0) {
286 rar->base = 0;
287 rar->length = 0;
288 rar->iobase = NULL;
289 rar->allocator = NULL;
290 return -ENOSPC;
291 }
292
293 /*
294 * @todo Verify that LNC and LNW RAR register contents
295 * (addresses, security, etc.) are compatible and
296 * consistent.
297 */
298
299 rar->length = high - low + 1;
300
301 /* Claim RAR memory as our own. */
302 if (request_mem_region(low, rar->length, devname) == NULL) {
303 rar->length = 0;
304 pr_err("%s: Unable to claim RAR[%d] memory.\n",
305 devname, rarnum);
306 pr_err("%s: RAR[%d] disabled.\n", devname, rarnum);
307 return -EBUSY;
308 }
309
310 rar->base = low;
311
312 /*
313 * Now map it into the kernel address space.
314 *
315 * Note that the RAR memory may only be accessed by IA
316 * when debugging. Otherwise attempts to access the
317 * RAR memory when it is locked down will result in
318 * behavior similar to writing to /dev/null and
319 * reading from /dev/zero. This behavior is enforced
320 * by the hardware. Even if we don't access the
321 * memory, mapping it into the kernel provides us with
322 * a convenient RAR handle to bus address mapping.
323 */
324 rar->iobase = ioremap_nocache(rar->base, rar->length);
325 if (rar->iobase == NULL) {
326 pr_err("%s: Unable to map RAR memory.\n", devname);
327 release_mem_region(low, rar->length);
328 return -ENOMEM;
329 }
330
331 /* Initialize corresponding memory allocator. */
332 rar->allocator = memrar_create_allocator((unsigned long) rar->iobase,
333 rar->length, RAR_BLOCK_SIZE);
334 if (rar->allocator == NULL) {
335 iounmap(rar->iobase);
336 release_mem_region(low, rar->length);
337 return -ENOMEM;
338 }
339
340 pr_info("%s: BRAR[%d] bus address range = [0x%lx, 0x%lx]\n",
341 devname, rarnum, (unsigned long) low, (unsigned long) high);
342
343 pr_info("%s: BRAR[%d] size = %zu KiB\n",
344 devname, rarnum, rar->allocator->capacity / 1024);
345
346 rar->allocated = 1;
347 return 0;
348}
349
350/**
351 * memrar_fini_rar_resources - free up RAR resources
352 *
353 * Finalize RAR resources. Free up the resource tables, hand the memory
354 * back to the kernel, unmap the device and release the address space.
355 */
356static void memrar_fini_rar_resources(void)
357{
358 int z;
359 struct memrar_buffer_info *pos;
360 struct memrar_buffer_info *tmp;
361
362 /*
363 * @todo Do we need to hold a lock at this point in time?
364 * (module initialization failure or exit?)
365 */
366
367 for (z = MRST_NUM_RAR; z-- != 0; ) {
368 struct memrar_rar_info * const rar = &memrars[z];
369
370 if (!rar->allocated)
371 continue;
372
373 /* Clean up remaining resources. */
374
375 list_for_each_entry_safe(pos,
376 tmp,
377 &rar->buffers.list,
378 list) {
379 kref_put(&pos->refcount, memrar_release_block_i);
380 }
381
382 memrar_destroy_allocator(rar->allocator);
383 rar->allocator = NULL;
384
385 iounmap(rar->iobase);
386 release_mem_region(rar->base, rar->length);
387
388 rar->iobase = NULL;
389 rar->base = 0;
390 rar->length = 0;
391
392 unregister_rar(z);
393 }
394}
395
396/**
397 * memrar_reserve_block - handle an allocation request
398 * @request: block being requested
399 * @filp: owner it is tied to
400 *
401 * Allocate a block of the requested RAR. If successful return the
402 * request object filled in and zero; if not, report an error code
403 */
404
405static long memrar_reserve_block(struct RAR_buffer *request,
406 struct file *filp)
407{
408 struct RAR_block_info * const rinfo = &request->info;
409 struct RAR_buffer *buffer;
410 struct memrar_buffer_info *buffer_info;
411 u32 handle;
412 struct memrar_rar_info *rar = NULL;
413
414 /* Prevent array overflow. */
415 if (!memrar_is_valid_rar_type(rinfo->type))
416 return -EINVAL;
417
418 rar = &memrars[rinfo->type];
419 if (!rar->allocated)
420 return -ENODEV;
421
422 /* Reserve memory in RAR. */
423 handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
424 if (handle == 0)
425 return -ENOMEM;
426
427 buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);
428
429 if (buffer_info == NULL) {
430 memrar_allocator_free(rar->allocator, handle);
431 return -ENOMEM;
432 }
433
434 buffer = &buffer_info->buffer;
435 buffer->info.type = rinfo->type;
436 buffer->info.size = rinfo->size;
437
438 /* Memory handle corresponding to the bus address. */
439 buffer->info.handle = handle;
440 buffer->bus_address = memrar_get_bus_address(rar, handle);
441
442 /*
443 * Keep track of owner so that we can later cleanup if
444 * necessary.
445 */
446 buffer_info->owner = filp;
447
448 kref_init(&buffer_info->refcount);
449
450 mutex_lock(&rar->lock);
451 list_add(&buffer_info->list, &rar->buffers.list);
452 mutex_unlock(&rar->lock);
453
454 rinfo->handle = buffer->info.handle;
455 request->bus_address = buffer->bus_address;
456
457 return 0;
458}
459
460/**
461 * memrar_release_block - release a RAR block
462 * @addr: address in RAR space
463 *
464 * Release a previously allocated block. Releases act on complete
465 * blocks; partially freeing a block is not supported.
466 */
467
468static long memrar_release_block(u32 addr)
469{
470 struct memrar_buffer_info *pos;
471 struct memrar_buffer_info *tmp;
472 struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
473 long result = -EINVAL;
474
475 if (rar == NULL)
476 return -ENOENT;
477
478 mutex_lock(&rar->lock);
479
480 /*
481 * Iterate through the buffer list to find the corresponding
482 * buffer to be released.
483 */
484 list_for_each_entry_safe(pos,
485 tmp,
486 &rar->buffers.list,
487 list) {
488 struct RAR_block_info * const info =
489 &pos->buffer.info;
490
491 /*
492 * Take into account handle offsets that may have been
493 * added to the base handle, such as in the following
494 * scenario:
495 *
496 * u32 handle = base + offset;
497 * rar_handle_to_bus(handle);
498 * rar_release(handle);
499 */
500 if (addr >= info->handle
501 && addr < (info->handle + info->size)
502 && memrar_is_valid_rar_type(info->type)) {
503 kref_put(&pos->refcount, memrar_release_block_i);
504 result = 0;
505 break;
506 }
507 }
508
509 mutex_unlock(&rar->lock);
510
511 return result;
512}
513
514/**
515 * memrar_get_stat - read statistics for a RAR
516 * @r: statistics to be filled in
517 *
518 * Returns the statistics data for the RAR, or an error code if
519 * the request cannot be completed
520 */
521static long memrar_get_stat(struct RAR_stat *r)
522{
523 struct memrar_allocator *allocator;
524
525 if (!memrar_is_valid_rar_type(r->type))
526 return -EINVAL;
527
528 if (!memrars[r->type].allocated)
529 return -ENODEV;
530
531 allocator = memrars[r->type].allocator;
532
533 BUG_ON(allocator == NULL);
534
535 /*
536 * Allocator capacity doesn't change over time. No
537 * need to synchronize.
538 */
539 r->capacity = allocator->capacity;
540
541 mutex_lock(&allocator->lock);
542 r->largest_block_size = allocator->largest_free_area;
543 mutex_unlock(&allocator->lock);
544 return 0;
545}
546
547/**
548 * memrar_ioctl - ioctl callback
549 * @filp: file issuing the request
550 * @cmd: command
551 * @arg: pointer to control information
552 *
553 * Perform one of the ioctls supported by the memrar device
554 */
555
556static long memrar_ioctl(struct file *filp,
557 unsigned int cmd,
558 unsigned long arg)
559{
560 void __user *argp = (void __user *)arg;
561 long result = 0;
562
563 struct RAR_buffer buffer;
564 struct RAR_block_info * const request = &buffer.info;
565 struct RAR_stat rar_info;
566 u32 rar_handle;
567
568 switch (cmd) {
569 case RAR_HANDLER_RESERVE:
570 if (copy_from_user(request,
571 argp,
572 sizeof(*request)))
573 return -EFAULT;
574
575 result = memrar_reserve_block(&buffer, filp);
576 if (result != 0)
577 return result;
578
579 return copy_to_user(argp, request, sizeof(*request)) ? -EFAULT : 0;
580
581 case RAR_HANDLER_RELEASE:
582 if (copy_from_user(&rar_handle,
583 argp,
584 sizeof(rar_handle)))
585 return -EFAULT;
586
587 return memrar_release_block(rar_handle);
588
589 case RAR_HANDLER_STAT:
590 if (copy_from_user(&rar_info,
591 argp,
592 sizeof(rar_info)))
593 return -EFAULT;
594
595 /*
596 * Populate the RAR_stat structure based on the RAR
597 * type given by the user
598 */
599 if (memrar_get_stat(&rar_info) != 0)
600 return -EINVAL;
601
602 /*
603 * @todo Do we need to verify destination pointer
604 * "argp" is non-zero? Is that already done by
605 * copy_to_user()?
606 */
607 return copy_to_user(argp,
608 &rar_info,
609 sizeof(rar_info)) ? -EFAULT : 0;
610
611 default:
612 return -ENOTTY;
613 }
614
615 return 0;
616}
617
618/**
619 * memrar_mmap - mmap helper for debugging
620 * @filp: handle doing the mapping
621 * @vma: memory area
622 *
623 * Support the mmap operation on the RAR space for debugging systems
624 * when the memory is not locked down.
625 */
626
627static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
628{
629 /*
630 * This mmap() implementation is predominantly useful for
631 * debugging since the CPU will be prevented from accessing
632 * RAR memory by the hardware when RAR is properly locked
633 * down.
634 *
635 * In order for this implementation to be useful RAR memory
636 * must not be locked down. However, we only want to do
637 * that when debugging. DO NOT leave RAR memory unlocked in a
638 * deployed device that utilizes RAR.
639 */
640
641 size_t const size = vma->vm_end - vma->vm_start;
642
643 /* Users pass the RAR handle as the mmap() offset parameter. */
644 unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;
645
646 struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
647 unsigned long pfn;
648
649 /* Only allow privileged apps to go poking around this way */
650 if (!capable(CAP_SYS_RAWIO))
651 return -EPERM;
652
653 /* Invalid RAR handle or size passed to mmap(). */
654 if (rar == NULL
655 || handle == 0
656 || size > rar->length - (handle - (unsigned long) rar->iobase))
657 return -EINVAL;
658
659 /*
660 * Retrieve physical address corresponding to the RAR handle,
661 * and convert it to a page frame.
662 */
663 pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;
664
665
666 pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
667 handle,
668 handle + size);
669
670 /*
671 * Map RAR memory into user space. This is really only useful
672 * for debugging purposes since the memory won't be
673 * accessible, i.e. reads return zero and writes are ignored,
674 * when RAR access control is enabled.
675 */
676 if (remap_pfn_range(vma,
677 vma->vm_start,
678 pfn,
679 size,
680 vma->vm_page_prot))
681 return -EAGAIN;
682
683 /* vma->vm_ops = &memrar_mem_ops; */
684
685 return 0;
686}
687
688/**
689 * memrar_open - device open method
690 * @inode: inode to open
691 * @filp: file handle
692 *
693 * As we support multiple arbitrary opens there is no work to be done
694 * really.
695 */
696
697static int memrar_open(struct inode *inode, struct file *filp)
698{
699 nonseekable_open(inode, filp);
700 return 0;
701}
702
703/**
704 * memrar_release - close method for miscdev
705 * @inode: inode of device
706 * @filp: handle that is going away
707 *
708 * Free up all the regions that belong to this file handle. We use
709 * the handle as a natural Linux style 'lifetime' indicator and to
710 * ensure resources are not leaked when their owner explodes in an
711 * unplanned fashion.
712 */
713
714static int memrar_release(struct inode *inode, struct file *filp)
715{
716 /* Free all regions associated with the given file handle. */
717
718 struct memrar_buffer_info *pos;
719 struct memrar_buffer_info *tmp;
720 int z;
721
722 for (z = 0; z != MRST_NUM_RAR; ++z) {
723 struct memrar_rar_info * const rar = &memrars[z];
724
725 mutex_lock(&rar->lock);
726
727 list_for_each_entry_safe(pos,
728 tmp,
729 &rar->buffers.list,
730 list) {
731 if (filp == pos->owner)
732 kref_put(&pos->refcount,
733 memrar_release_block_i);
734 }
735
736 mutex_unlock(&rar->lock);
737 }
738
739 return 0;
740}
741
742/**
743 * rar_reserve - reserve RAR memory
744 * @buffers: buffers to reserve
745 * @count: number wanted
746 *
747 * Reserve a series of buffers in the RAR space. Returns the number of
748 * buffers successfully allocated
749 */
750
751size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
752{
753 struct RAR_buffer * const end =
754 (buffers == NULL ? buffers : buffers + count);
755 struct RAR_buffer *i;
756
757 size_t reserve_count = 0;
758
759 for (i = buffers; i != end; ++i) {
760 if (memrar_reserve_block(i, NULL) == 0)
761 ++reserve_count;
762 else
763 i->bus_address = 0;
764 }
765
766 return reserve_count;
767}
768EXPORT_SYMBOL(rar_reserve);
769
770/**
771 * rar_release - return RAR buffers
772 * @buffers: buffers to release
773 * @count: number of buffers to release
774 *
775 * Return a set of buffers to the RAR pool
776 */
777
778size_t rar_release(struct RAR_buffer *buffers, size_t count)
779{
780 struct RAR_buffer * const end =
781 (buffers == NULL ? buffers : buffers + count);
782 struct RAR_buffer *i;
783
784 size_t release_count = 0;
785
786 for (i = buffers; i != end; ++i) {
787 u32 * const handle = &i->info.handle;
788 if (memrar_release_block(*handle) == 0) {
789 /*
790 * @todo We assume we should do this each time
791 * the ref count is decremented. Should
792 * we instead only do this when the ref
793 * count has dropped to zero, and the
794 * buffer has been completely
795 * released/unmapped?
796 */
797 *handle = 0;
798 ++release_count;
799 }
800 }
801
802 return release_count;
803}
804EXPORT_SYMBOL(rar_release);
805
806/**
807 * rar_handle_to_bus - RAR to bus address
808 * @buffers: RAR buffer structure
809 * @count: number of buffers to convert
810 *
811 * Turn a list of RAR handle mappings into actual bus addresses. Note
812 * that when the device is locked down the bus addresses in question
813 * are not CPU accessible.
814 */
815
816size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
817{
818 struct RAR_buffer * const end =
819 (buffers == NULL ? buffers : buffers + count);
820 struct RAR_buffer *i;
821 struct memrar_buffer_info *pos;
822
823 size_t conversion_count = 0;
824
825 /*
826 * Find all bus addresses corresponding to the given handles.
827 *
828 * @todo Not liking this nested loop. Optimize.
829 */
830 for (i = buffers; i != end; ++i) {
831 struct memrar_rar_info * const rar =
832 memrar_get_rar_info(i->info.handle);
833
834 /*
835 * Check if we have a bogus handle, and then continue
836 * with remaining buffers.
837 */
838 if (rar == NULL) {
839 i->bus_address = 0;
840 continue;
841 }
842
843 mutex_lock(&rar->lock);
844
845 list_for_each_entry(pos, &rar->buffers.list, list) {
846 struct RAR_block_info * const user_info =
847 &pos->buffer.info;
848
849 /*
850 * Take into account handle offsets that may
851 * have been added to the base handle, such as
852 * in the following scenario:
853 *
854 * u32 handle = base + offset;
855 * rar_handle_to_bus(handle);
856 */
857
858 if (i->info.handle >= user_info->handle
859 && i->info.handle < (user_info->handle
860 + user_info->size)) {
861 u32 const offset =
862 i->info.handle - user_info->handle;
863
864 i->info.type = user_info->type;
865 i->info.size = user_info->size - offset;
866 i->bus_address =
867 pos->buffer.bus_address
868 + offset;
869
870 /* Increment the reference count. */
871 kref_get(&pos->refcount);
872
873 ++conversion_count;
874 break;
875 } else {
876 i->bus_address = 0;
877 }
878 }
879
880 mutex_unlock(&rar->lock);
881 }
882
883 return conversion_count;
884}
885EXPORT_SYMBOL(rar_handle_to_bus);
886
887static const struct file_operations memrar_fops = {
888 .owner = THIS_MODULE,
889 .unlocked_ioctl = memrar_ioctl,
890 .mmap = memrar_mmap,
891 .open = memrar_open,
892 .release = memrar_release,
893 .llseek = no_llseek,
894};
895
896static struct miscdevice memrar_miscdev = {
897 .minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */
898 .name = "memrar", /* /dev/memrar */
899 .fops = &memrar_fops
900};
901
902static char const banner[] __initdata =
903 KERN_INFO
904 "Intel RAR Handler: " MEMRAR_VER " initialized.\n";
905
906/**
907 * memrar_registration_callback - RAR obtained
908 * @rar: RAR number
909 *
910 * We have been granted ownership of the RAR. Add it to our memory
911 * management tables
912 */
913
914static int memrar_registration_callback(unsigned long rar)
915{
916 /*
917 * We initialize the RAR parameters early on so that we can
918 * discontinue memrar device initialization and registration
919 * if suitably configured RARs are not available.
920 */
921 return memrar_init_rar_resources(rar, memrar_miscdev.name);
922}
923
924/**
925 * memrar_init - initialise RAR support
926 *
927 * Initialise support for RAR handlers. This may get loaded before
928 * the RAR support is activated, but the callbacks on the registration
929 * will handle that situation for us anyway.
930 */
931
932static int __init memrar_init(void)
933{
934 int err;
935 int i;
936
937 printk(banner);
938
939 /*
940 * Some delayed initialization is performed in this driver.
941 * Make sure resources that are used during driver clean-up
942 * (e.g. during driver's release() function) are fully
943 * initialized before first use. This is particularly
944 * important for the case when the delayed initialization
945 * isn't completed, leaving behind a partially initialized
946 * driver.
947 *
948 * Such a scenario can occur when RAR is not available on the
949 * platform, and the driver is release()d.
950 */
951 for (i = 0; i != ARRAY_SIZE(memrars); ++i) {
952 struct memrar_rar_info * const rar = &memrars[i];
953 mutex_init(&rar->lock);
954 INIT_LIST_HEAD(&rar->buffers.list);
955 }
956
957 err = misc_register(&memrar_miscdev);
958 if (err)
959 return err;
960
961 /* Now claim the two RARs we want */
962 err = register_rar(0, memrar_registration_callback, 0);
963 if (err)
964 goto fail;
965
966 err = register_rar(1, memrar_registration_callback, 1);
967 if (err == 0)
968 return 0;
969
970 /* It is possible that RAR 0 registered and allocated resources and
971 then RAR 1 failed, so do a full resource free. */
972 memrar_fini_rar_resources();
973fail:
974 misc_deregister(&memrar_miscdev);
975 return err;
976}
977
978/**
979 * memrar_exit - unregister and unload
980 *
981 * Unregister the device and then unload any mappings and release
982 * the RAR resources
983 */
984
985static void __exit memrar_exit(void)
986{
987 misc_deregister(&memrar_miscdev);
988 memrar_fini_rar_resources();
989}
990
991
992module_init(memrar_init);
993module_exit(memrar_exit);
994
995
996MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
997MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
998MODULE_LICENSE("GPL");
999MODULE_VERSION(MEMRAR_VER);
1000
1001
1002
1003/*
1004 Local Variables:
1005 c-file-style: "linux"
1006 End:
1007*/
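For in-kernel clients the three EXPORT_SYMBOL helpers above (rar_reserve(), rar_handle_to_bus(), rar_release()) are the whole API. A minimal sketch of that flow, assuming the struct RAR_buffer layout and RAR_TYPE_* constants from the memrar.h header that this patch also deletes; the buffer size is an arbitrary example value:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>

#include "memrar.h"	/* struct RAR_buffer, RAR_TYPE_AUDIO (also removed by this patch) */

/* Sketch only: reserve one audio-RAR buffer, translate it, release it. */
static int rar_client_demo(void)
{
	struct RAR_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.info.type = RAR_TYPE_AUDIO;
	buf.info.size = 128 * 1024;

	/* Return value is the number of buffers successfully reserved. */
	if (rar_reserve(&buf, 1) != 1)
		return -ENOMEM;

	/*
	 * buf.info.handle is now a RAR handle; translating it fills in
	 * buf.bus_address and takes an extra reference (see the @todo in
	 * rar_release() above about when that reference should drop).
	 */
	if (rar_handle_to_bus(&buf, 1) != 1) {
		rar_release(&buf, 1);
		return -EFAULT;
	}

	/* ... program buf.bus_address into the device ... */

	rar_release(&buf, 1);
	return 0;
}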
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index f1082f50fdc..b0530676687 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -9,7 +9,7 @@ config FB_OLPC_DCON
9 9
10config FB_OLPC_DCON_1 10config FB_OLPC_DCON_1
11 bool "OLPC XO-1 DCON support" 11 bool "OLPC XO-1 DCON support"
12 depends on FB_OLPC_DCON 12 depends on FB_OLPC_DCON && GPIO_CS5535
13 default y 13 default y
14 ---help--- 14 ---help---
15 Enable support for the DCON in XO-1 model laptops. The kernel 15 Enable support for the DCON in XO-1 model laptops. The kernel
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index e213b63f811..7aa9b1a45bd 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/acpi.h> 9#include <linux/acpi.h>
10#include <linux/delay.h>
10#include <linux/pci.h> 11#include <linux/pci.h>
11#include <linux/gpio.h> 12#include <linux/gpio.h>
12#include <asm/olpc.h> 13#include <asm/olpc.h>
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 1b3103fbf29..3162aabbeb0 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -1,6 +1,7 @@
1config R8187SE 1config R8187SE
2 tristate "RealTek RTL8187SE Wireless LAN NIC driver" 2 tristate "RealTek RTL8187SE Wireless LAN NIC driver"
3 depends on PCI && WLAN 3 depends on PCI && WLAN
4 depends on m
4 select WIRELESS_EXT 5 select WIRELESS_EXT
5 select WEXT_PRIV 6 select WEXT_PRIV
6 select EEPROM_93CX6 7 select EEPROM_93CX6
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 2e64b239e24..750c347bfbe 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,6 +1,7 @@
1config RTL8192E 1config RTL8192E
2 tristate "RealTek RTL8192E Wireless LAN NIC driver" 2 tristate "RealTek RTL8192E Wireless LAN NIC driver"
3 depends on PCI && WLAN 3 depends on PCI && WLAN
4 depends on m
4 select WIRELESS_EXT 5 select WIRELESS_EXT
5 select WEXT_PRIV 6 select WEXT_PRIV
6 select CRYPTO 7 select CRYPTO
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 28969198e7e..3f055091b35 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -1,6 +1,7 @@
1config RTL8192U 1config RTL8192U
2 tristate "RealTek RTL8192U Wireless LAN NIC driver" 2 tristate "RealTek RTL8192U Wireless LAN NIC driver"
3 depends on PCI && WLAN && USB 3 depends on PCI && WLAN && USB
4 depends on m
4 select WIRELESS_EXT 5 select WIRELESS_EXT
5 select WEXT_PRIV 6 select WEXT_PRIV
6 select CRYPTO 7 select CRYPTO
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 4514419a5fb..02525d57ba8 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -824,13 +824,13 @@ static void rtsx_init_options(struct rtsx_chip *chip)
824 chip->fpga_ms_hg_clk = CLK_80; 824 chip->fpga_ms_hg_clk = CLK_80;
825 chip->fpga_ms_4bit_clk = CLK_80; 825 chip->fpga_ms_4bit_clk = CLK_80;
826 chip->fpga_ms_1bit_clk = CLK_40; 826 chip->fpga_ms_1bit_clk = CLK_40;
827 chip->asic_sd_sdr104_clk = 207; 827 chip->asic_sd_sdr104_clk = 203;
828 chip->asic_sd_sdr50_clk = 99; 828 chip->asic_sd_sdr50_clk = 98;
829 chip->asic_sd_ddr50_clk = 99; 829 chip->asic_sd_ddr50_clk = 98;
830 chip->asic_sd_hs_clk = 99; 830 chip->asic_sd_hs_clk = 98;
831 chip->asic_mmc_52m_clk = 99; 831 chip->asic_mmc_52m_clk = 98;
832 chip->asic_ms_hg_clk = 119; 832 chip->asic_ms_hg_clk = 117;
833 chip->asic_ms_4bit_clk = 79; 833 chip->asic_ms_4bit_clk = 78;
834 chip->asic_ms_1bit_clk = 39; 834 chip->asic_ms_1bit_clk = 39;
835 chip->ssc_depth_sd_sdr104 = SSC_DEPTH_2M; 835 chip->ssc_depth_sd_sdr104 = SSC_DEPTH_2M;
836 chip->ssc_depth_sd_sdr50 = SSC_DEPTH_2M; 836 chip->ssc_depth_sd_sdr50 = SSC_DEPTH_2M;
diff --git a/drivers/staging/rts_pstor/rtsx_chip.c b/drivers/staging/rts_pstor/rtsx_chip.c
index f443d97a56f..d2f1c715a68 100644
--- a/drivers/staging/rts_pstor/rtsx_chip.c
+++ b/drivers/staging/rts_pstor/rtsx_chip.c
@@ -684,6 +684,11 @@ static int rts5209_init(struct rtsx_chip *chip)
684 RTSX_DEBUGP("dw in 0x724: 0x%x\n", lval); 684 RTSX_DEBUGP("dw in 0x724: 0x%x\n", lval);
685 val = (u8)lval; 685 val = (u8)lval;
686 if (!(val & 0x80)) { 686 if (!(val & 0x80)) {
687 if (val & 0x08)
688 chip->lun_mode = DEFAULT_SINGLE;
689 else
690 chip->lun_mode = SD_MS_2LUN;
691
687 if (val & 0x04) { 692 if (val & 0x04) {
688 SET_SDIO_EXIST(chip); 693 SET_SDIO_EXIST(chip);
689 } else { 694 } else {
@@ -705,12 +710,6 @@ static int rts5209_init(struct rtsx_chip *chip)
705 710
706 chip->aspm_l0s_l1_en = (val >> 5) & 0x03; 711 chip->aspm_l0s_l1_en = (val >> 5) & 0x03;
707 712
708 if (val & 0x08) {
709 chip->lun_mode = DEFAULT_SINGLE;
710 } else {
711 chip->lun_mode = SD_MS_2LUN;
712 }
713
714 val = (u8)(lval >> 8); 713 val = (u8)(lval >> 8);
715 714
716 clk = (val >> 5) & 0x07; 715 clk = (val >> 5) & 0x07;
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index c2f2664b61a..890eede5e3f 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -55,8 +55,6 @@
55#include <linux/jiffies.h> 55#include <linux/jiffies.h>
56#include <linux/rar_register.h> 56#include <linux/rar_register.h>
57 57
58#include "../memrar/memrar.h"
59
60#include "sep_driver_hw_defs.h" 58#include "sep_driver_hw_defs.h"
61#include "sep_driver_config.h" 59#include "sep_driver_config.h"
62#include "sep_driver_api.h" 60#include "sep_driver_api.h"
@@ -2372,7 +2370,6 @@ static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2372 int error = 0; 2370 int error = 0;
2373 /* Command args */ 2371 /* Command args */
2374 struct rar_hndl_to_bus_struct command_args; 2372 struct rar_hndl_to_bus_struct command_args;
2375 struct RAR_buffer rar_buf;
2376 /* Bus address */ 2373 /* Bus address */
2377 dma_addr_t rar_bus = 0; 2374 dma_addr_t rar_bus = 0;
2378 /* Holds the RAR address in the system memory offset */ 2375 /* Holds the RAR address in the system memory offset */
@@ -2386,16 +2383,8 @@ static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2386 } 2383 }
2387 2384
2388 /* Call to translation function only if user handle is not NULL */ 2385 /* Call to translation function only if user handle is not NULL */
2389 if (command_args.rar_handle) { 2386 if (command_args.rar_handle)
2390 memset(&rar_buf, 0, sizeof(rar_buf)); 2387 return -EOPNOTSUPP;
2391 rar_buf.info.handle = (u32)command_args.rar_handle;
2392
2393 if (rar_handle_to_bus(&rar_buf, 1) != 1) {
2394 error = -EFAULT;
2395 goto end_function;
2396 }
2397 rar_bus = rar_buf.bus_address;
2398 }
2399 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus); 2388 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2400 2389
2401 /* Set value in the SYSTEM MEMORY offset */ 2390 /* Set value in the SYSTEM MEMORY offset */
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 78a16a76850..3e2230f0879 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -26,10 +26,6 @@
26 * Boyod.yang <boyod.yang@siliconmotion.com.cn> 26 * Boyod.yang <boyod.yang@siliconmotion.com.cn>
27 */ 27 */
28 28
29#ifndef __KERNEL__
30#define __KERNEL__
31#endif
32
33#include <linux/io.h> 29#include <linux/io.h>
34#include <linux/fb.h> 30#include <linux/fb.h>
35#include <linux/pci.h> 31#include <linux/pci.h>
@@ -1019,6 +1015,7 @@ static void __devexit smtcfb_pci_remove(struct pci_dev *pdev)
1019 smtc_free_fb_info(sfb); 1015 smtc_free_fb_info(sfb);
1020} 1016}
1021 1017
1018#ifdef CONFIG_PM
1022/* Jason (08/14/2009) 1019/* Jason (08/14/2009)
1023 * suspend function, called when the suspend event is triggered 1020 * suspend function, called when the suspend event is triggered
1024 */ 1021 */
@@ -1111,6 +1108,7 @@ static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
1111 1108
1112 return 0; 1109 return 0;
1113} 1110}
1111#endif
1114 1112
1115/* Jason (08/13/2009) 1113/* Jason (08/13/2009)
1116 * pci_driver struct used to wrap the original driver 1114 * pci_driver struct used to wrap the original driver
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 8214c353d9f..bce7d039346 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -220,8 +220,10 @@ static void stub_shutdown_connection(struct usbip_device *ud)
220 } 220 }
221 221
222 /* 1. stop threads */ 222 /* 1. stop threads */
223 kthread_stop(ud->tcp_rx); 223 if (ud->tcp_rx && !task_is_dead(ud->tcp_rx))
224 kthread_stop(ud->tcp_tx); 224 kthread_stop(ud->tcp_rx);
225 if (ud->tcp_tx && !task_is_dead(ud->tcp_tx))
226 kthread_stop(ud->tcp_tx);
225 227
226 /* 2. close the socket */ 228 /* 2. close the socket */
227 /* 229 /*
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 6445f12cb4f..51fbd098647 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -171,33 +171,23 @@ static int tweak_set_configuration_cmd(struct urb *urb)
171 171
172static int tweak_reset_device_cmd(struct urb *urb) 172static int tweak_reset_device_cmd(struct urb *urb)
173{ 173{
174 struct usb_ctrlrequest *req; 174 struct stub_priv *priv = (struct stub_priv *) urb->context;
175 __u16 value; 175 struct stub_device *sdev = priv->sdev;
176 __u16 index;
177 int ret;
178
179 req = (struct usb_ctrlrequest *) urb->setup_packet;
180 value = le16_to_cpu(req->wValue);
181 index = le16_to_cpu(req->wIndex);
182
183 usbip_uinfo("reset_device (port %d) to %s\n", index,
184 dev_name(&urb->dev->dev));
185 176
186 /* all interfaces should be owned by usbip driver, so just reset it. */ 177 usbip_uinfo("reset_device %s\n", dev_name(&urb->dev->dev));
187 ret = usb_lock_device_for_reset(urb->dev, NULL);
188 if (ret < 0) {
189 dev_err(&urb->dev->dev, "lock for reset\n");
190 return ret;
191 }
192
193 /* try to reset the device */
194 ret = usb_reset_device(urb->dev);
195 if (ret < 0)
196 dev_err(&urb->dev->dev, "device reset\n");
197 178
198 usb_unlock_device(urb->dev); 179 /*
199 180 * usb_lock_device_for_reset caused a deadlock: it causes the driver
200 return ret; 181 * to unbind. In the shutdown the rx thread is signalled to shut down
182 * but this thread is pending in the usb_lock_device_for_reset.
183 *
184 * Instead queue the reset.
185 *
186 * Unfortunately an existing usbip connection will be dropped due to
187 * driver unbinding.
188 */
189 usb_queue_reset_device(sdev->interface);
190 return 0;
201} 191}
202 192
203/* 193/*
diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c
index 5523f25998e..64a52b26dcf 100644
--- a/drivers/staging/usbip/stub_tx.c
+++ b/drivers/staging/usbip/stub_tx.c
@@ -170,7 +170,6 @@ static int stub_send_ret_submit(struct stub_device *sdev)
170 struct stub_priv *priv, *tmp; 170 struct stub_priv *priv, *tmp;
171 171
172 struct msghdr msg; 172 struct msghdr msg;
173 struct kvec iov[3];
174 size_t txsize; 173 size_t txsize;
175 174
176 size_t total_size = 0; 175 size_t total_size = 0;
@@ -180,28 +179,73 @@ static int stub_send_ret_submit(struct stub_device *sdev)
180 struct urb *urb = priv->urb; 179 struct urb *urb = priv->urb;
181 struct usbip_header pdu_header; 180 struct usbip_header pdu_header;
182 void *iso_buffer = NULL; 181 void *iso_buffer = NULL;
182 struct kvec *iov = NULL;
183 int iovnum = 0;
183 184
184 txsize = 0; 185 txsize = 0;
185 memset(&pdu_header, 0, sizeof(pdu_header)); 186 memset(&pdu_header, 0, sizeof(pdu_header));
186 memset(&msg, 0, sizeof(msg)); 187 memset(&msg, 0, sizeof(msg));
187 memset(&iov, 0, sizeof(iov));
188 188
189 usbip_dbg_stub_tx("setup txdata urb %p\n", urb); 189 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
190 iovnum = 2 + urb->number_of_packets;
191 else
192 iovnum = 2;
193
194 iov = kzalloc(iovnum * sizeof(struct kvec), GFP_KERNEL);
190 195
196 if (!iov) {
197 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
198 return -1;
199 }
200
201 iovnum = 0;
191 202
192 /* 1. setup usbip_header */ 203 /* 1. setup usbip_header */
193 setup_ret_submit_pdu(&pdu_header, urb); 204 setup_ret_submit_pdu(&pdu_header, urb);
205 usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
206 pdu_header.base.seqnum, urb);
207 /*usbip_dump_header(pdu_header);*/
194 usbip_header_correct_endian(&pdu_header, 1); 208 usbip_header_correct_endian(&pdu_header, 1);
195 209
196 iov[0].iov_base = &pdu_header; 210 iov[iovnum].iov_base = &pdu_header;
197 iov[0].iov_len = sizeof(pdu_header); 211 iov[iovnum].iov_len = sizeof(pdu_header);
212 iovnum++;
198 txsize += sizeof(pdu_header); 213 txsize += sizeof(pdu_header);
199 214
200 /* 2. setup transfer buffer */ 215 /* 2. setup transfer buffer */
201 if (usb_pipein(urb->pipe) && urb->actual_length > 0) { 216 if (usb_pipein(urb->pipe) &&
202 iov[1].iov_base = urb->transfer_buffer; 217 usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
203 iov[1].iov_len = urb->actual_length; 218 urb->actual_length > 0) {
219 iov[iovnum].iov_base = urb->transfer_buffer;
220 iov[iovnum].iov_len = urb->actual_length;
221 iovnum++;
204 txsize += urb->actual_length; 222 txsize += urb->actual_length;
223 } else if (usb_pipein(urb->pipe) &&
224 usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
225 /*
226 * For isochronous packets: actual length is the sum of
227 * the actual lengths of the individual packets, but as
228 * the packet offsets are not changed there will be
229 * padding between the packets. To optimally use the
230 * bandwidth the padding is not transmitted.
231 */
232
233 int i;
234 for (i = 0; i < urb->number_of_packets; i++) {
235 iov[iovnum].iov_base = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
236 iov[iovnum].iov_len = urb->iso_frame_desc[i].actual_length;
237 iovnum++;
238 txsize += urb->iso_frame_desc[i].actual_length;
239 }
240
241 if (txsize != sizeof(pdu_header) + urb->actual_length) {
242 dev_err(&sdev->interface->dev,
243 "actual length of urb (%d) does not match iso packet sizes (%zd)\n",
244 urb->actual_length, txsize-sizeof(pdu_header));
245 kfree(iov);
246 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
247 return -1;
248 }
205 } 249 }
206 250
207 /* 3. setup iso_packet_descriptor */ 251 /* 3. setup iso_packet_descriptor */
@@ -212,32 +256,34 @@ static int stub_send_ret_submit(struct stub_device *sdev)
212 if (!iso_buffer) { 256 if (!iso_buffer) {
213 usbip_event_add(&sdev->ud, 257 usbip_event_add(&sdev->ud,
214 SDEV_EVENT_ERROR_MALLOC); 258 SDEV_EVENT_ERROR_MALLOC);
259 kfree(iov);
215 return -1; 260 return -1;
216 } 261 }
217 262
218 iov[2].iov_base = iso_buffer; 263 iov[iovnum].iov_base = iso_buffer;
219 iov[2].iov_len = len; 264 iov[iovnum].iov_len = len;
220 txsize += len; 265 txsize += len;
266 iovnum++;
221 } 267 }
222 268
223 ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov, 269 ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
224 3, txsize); 270 iov, iovnum, txsize);
225 if (ret != txsize) { 271 if (ret != txsize) {
226 dev_err(&sdev->interface->dev, 272 dev_err(&sdev->interface->dev,
227 "sendmsg failed!, retval %d for %zd\n", 273 "sendmsg failed!, retval %d for %zd\n",
228 ret, txsize); 274 ret, txsize);
275 kfree(iov);
229 kfree(iso_buffer); 276 kfree(iso_buffer);
230 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); 277 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
231 return -1; 278 return -1;
232 } 279 }
233 280
281 kfree(iov);
234 kfree(iso_buffer); 282 kfree(iso_buffer);
235 usbip_dbg_stub_tx("send txdata\n");
236 283
237 total_size += txsize; 284 total_size += txsize;
238 } 285 }
239 286
240
241 spin_lock_irqsave(&sdev->priv_lock, flags); 287 spin_lock_irqsave(&sdev->priv_lock, flags);
242 288
243 list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) { 289 list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
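The comment in the hunk above is the heart of the change: for an inbound isochronous URB the data of packet i stays at iso_frame_desc[i].offset in transfer_buffer, so only the used bytes of each packet are queued, one kvec per packet, after the kvec holding the usbip_header. A hypothetical two-packet example (numbers invented): with iso_frame_desc[0] = {offset 0, actual_length 100} and iso_frame_desc[1] = {offset 192, actual_length 60}, kernel_sendmsg() is given three kvecs — the header, 100 bytes at transfer_buffer + 0, and 60 bytes at transfer_buffer + 192 — so txsize comes out to sizeof(pdu_header) + 160, which matches urb->actual_length, and the 92 bytes of padding between the packets never reach the socket.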
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 337abc48f71..7b1fe45bf7d 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -333,10 +333,11 @@ void usbip_dump_header(struct usbip_header *pdu)
333 usbip_udbg("CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum); 333 usbip_udbg("CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum);
334 break; 334 break;
335 case USBIP_RET_SUBMIT: 335 case USBIP_RET_SUBMIT:
336 usbip_udbg("RET_SUBMIT: st %d al %u sf %d ec %d\n", 336 usbip_udbg("RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n",
337 pdu->u.ret_submit.status, 337 pdu->u.ret_submit.status,
338 pdu->u.ret_submit.actual_length, 338 pdu->u.ret_submit.actual_length,
339 pdu->u.ret_submit.start_frame, 339 pdu->u.ret_submit.start_frame,
340 pdu->u.ret_submit.number_of_packets,
340 pdu->u.ret_submit.error_count); 341 pdu->u.ret_submit.error_count);
341 case USBIP_RET_UNLINK: 342 case USBIP_RET_UNLINK:
342 usbip_udbg("RET_UNLINK: status %d\n", pdu->u.ret_unlink.status); 343 usbip_udbg("RET_UNLINK: status %d\n", pdu->u.ret_unlink.status);
@@ -520,6 +521,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb,
520 rpdu->status = urb->status; 521 rpdu->status = urb->status;
521 rpdu->actual_length = urb->actual_length; 522 rpdu->actual_length = urb->actual_length;
522 rpdu->start_frame = urb->start_frame; 523 rpdu->start_frame = urb->start_frame;
524 rpdu->number_of_packets = urb->number_of_packets;
523 rpdu->error_count = urb->error_count; 525 rpdu->error_count = urb->error_count;
524 } else { 526 } else {
525 /* vhci_rx.c */ 527 /* vhci_rx.c */
@@ -527,6 +529,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb,
527 urb->status = rpdu->status; 529 urb->status = rpdu->status;
528 urb->actual_length = rpdu->actual_length; 530 urb->actual_length = rpdu->actual_length;
529 urb->start_frame = rpdu->start_frame; 531 urb->start_frame = rpdu->start_frame;
532 urb->number_of_packets = rpdu->number_of_packets;
530 urb->error_count = rpdu->error_count; 533 urb->error_count = rpdu->error_count;
531 } 534 }
532} 535}
@@ -595,11 +598,13 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu,
595 cpu_to_be32s(&pdu->status); 598 cpu_to_be32s(&pdu->status);
596 cpu_to_be32s(&pdu->actual_length); 599 cpu_to_be32s(&pdu->actual_length);
597 cpu_to_be32s(&pdu->start_frame); 600 cpu_to_be32s(&pdu->start_frame);
601 cpu_to_be32s(&pdu->number_of_packets);
598 cpu_to_be32s(&pdu->error_count); 602 cpu_to_be32s(&pdu->error_count);
599 } else { 603 } else {
600 be32_to_cpus(&pdu->status); 604 be32_to_cpus(&pdu->status);
601 be32_to_cpus(&pdu->actual_length); 605 be32_to_cpus(&pdu->actual_length);
602 be32_to_cpus(&pdu->start_frame); 606 be32_to_cpus(&pdu->start_frame);
607 be32_to_cpus(&pdu->number_of_packets);
603 be32_to_cpus(&pdu->error_count); 608 be32_to_cpus(&pdu->error_count);
604 } 609 }
605} 610}
@@ -725,6 +730,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
725 int size = np * sizeof(*iso); 730 int size = np * sizeof(*iso);
726 int i; 731 int i;
727 int ret; 732 int ret;
733 int total_length = 0;
728 734
729 if (!usb_pipeisoc(urb->pipe)) 735 if (!usb_pipeisoc(urb->pipe))
730 return 0; 736 return 0;
@@ -754,19 +760,75 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
754 return -EPIPE; 760 return -EPIPE;
755 } 761 }
756 762
763
757 for (i = 0; i < np; i++) { 764 for (i = 0; i < np; i++) {
758 iso = buff + (i * sizeof(*iso)); 765 iso = buff + (i * sizeof(*iso));
759 766
760 usbip_iso_pakcet_correct_endian(iso, 0); 767 usbip_iso_pakcet_correct_endian(iso, 0);
761 usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0); 768 usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0);
769 total_length += urb->iso_frame_desc[i].actual_length;
762 } 770 }
763 771
764 kfree(buff); 772 kfree(buff);
765 773
774 if (total_length != urb->actual_length) {
775 dev_err(&urb->dev->dev,
776 "total length of iso packets (%d) not equal to actual length of buffer (%d)\n",
777 total_length, urb->actual_length);
778
779 if (ud->side == USBIP_STUB)
780 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
781 else
782 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
783
784 return -EPIPE;
785 }
786
766 return ret; 787 return ret;
767} 788}
768EXPORT_SYMBOL_GPL(usbip_recv_iso); 789EXPORT_SYMBOL_GPL(usbip_recv_iso);
769 790
791/*
792 * This function restores the padding which was removed for optimizing
793 * the bandwidth during transfer over tcp/ip
794 *
795 * buffer and iso packets need to be stored, in proper endianness, in the urb
796 * before calling this function
797 */
798int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
799{
800 int np = urb->number_of_packets;
801 int i;
802 int ret = 0;
803 int actualoffset = urb->actual_length;
804
805 if (!usb_pipeisoc(urb->pipe))
806 return 0;
807
808 /* if no packets or length of data is 0, then nothing to unpack */
809 if (np == 0 || urb->actual_length == 0)
810 return 0;
811
812 /*
813 * if actual_length is transfer_buffer_length then no padding is
814 * present.
815 */
816 if (urb->actual_length == urb->transfer_buffer_length)
817 return 0;
818
819 /*
820 * loop over all packets from last to first (to prevent overwriting
821 * memory when padding) and move them into the proper place
822 */
823 for (i = np-1; i > 0; i--) {
824 actualoffset -= urb->iso_frame_desc[i].actual_length;
825 memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset,
826 urb->transfer_buffer + actualoffset,
827 urb->iso_frame_desc[i].actual_length);
828 }
829 return ret;
830}
831EXPORT_SYMBOL_GPL(usbip_pad_iso);
770 832
771/* some members of urb must be substituted before. */ 833/* some members of urb must be substituted before. */
772int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) 834int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
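Carrying the hypothetical two-packet example from the stub_tx.c hunk over to the receive side: vhci reads the 160 data bytes packed back to back from offset 0 of transfer_buffer, so usbip_pad_iso() walks the packets from last to first — it memmove()s the 60 bytes now sitting at transfer_buffer + 100 up to transfer_buffer + 192 (packet 1's original offset) and leaves packet 0 where it is — restoring the in-buffer layout the USB core expects. Iterating backwards is what keeps a higher-offset packet from being overwritten before it has been moved.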
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 9f809c315d9..c767f52be5f 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -379,6 +379,8 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send);
379int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb); 379int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
380/* some members of urb must be substituted before. */ 380/* some members of urb must be substituted before. */
381int usbip_recv_iso(struct usbip_device *ud, struct urb *urb); 381int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
382/* some members of urb must be substituted before. */
383int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
382void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen); 384void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
383 385
384 386
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 09bf2355934..2ffc96a4c0d 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -100,6 +100,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
100 if (usbip_recv_iso(ud, urb) < 0) 100 if (usbip_recv_iso(ud, urb) < 0)
101 return; 101 return;
102 102
103 /* restore the padding in iso packets */
104 if (usbip_pad_iso(ud, urb) < 0)
105 return;
103 106
104 if (usbip_dbg_flag_vhci_rx) 107 if (usbip_dbg_flag_vhci_rx)
105 usbip_dump_urb(urb); 108 usbip_dump_urb(urb);
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index 061e730df2d..c3ba693a8ca 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,6 +1,6 @@
1config VT6655 1config VT6655
2 tristate "VIA Technologies VT6655 support" 2 tristate "VIA Technologies VT6655 support"
3 depends on PCI && WLAN 3 depends on PCI && WLAN && m
4 select WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV 5 select WEXT_PRIV
6 ---help--- 6 ---help---
diff --git a/drivers/staging/vt6656/Kconfig b/drivers/staging/vt6656/Kconfig
index a441ba513c4..f89ab205c8e 100644
--- a/drivers/staging/vt6656/Kconfig
+++ b/drivers/staging/vt6656/Kconfig
@@ -1,6 +1,6 @@
1config VT6656 1config VT6656
2 tristate "VIA Technologies VT6656 support" 2 tristate "VIA Technologies VT6656 support"
3 depends on USB && WLAN 3 depends on USB && WLAN && m
4 select WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV 5 select WEXT_PRIV
6 select FW_LOADER 6 select FW_LOADER
diff --git a/drivers/staging/westbridge/astoria/gadget/cyasgadget.c b/drivers/staging/westbridge/astoria/gadget/cyasgadget.c
index defa05cd5e5..be851ca54ce 100644
--- a/drivers/staging/westbridge/astoria/gadget/cyasgadget.c
+++ b/drivers/staging/westbridge/astoria/gadget/cyasgadget.c
@@ -587,6 +587,7 @@ static int cyasgadget_enable(
587 "cy_as_usb_end_point_config EP %s mismatch " 587 "cy_as_usb_end_point_config EP %s mismatch "
588 "on enabled\n", an_ep->usb_ep_inst.name); 588 "on enabled\n", an_ep->usb_ep_inst.name);
589 #endif 589 #endif
590 spin_unlock_irqrestore(&an_dev->lock, flags);
590 return -EINVAL; 591 return -EINVAL;
591 } 592 }
592 593