author    Russell King <rmk@dyn-67.arm.linux.org.uk>  2005-07-28 04:30:20 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2005-07-28 04:30:20 -0400
commit    661299d9d0437a0ff72240f3d60016ac3a361a6e
tree      765512576314fc3612b503f182b9ae4e60fcf849 /drivers
parent    05caac585f8abd6c0113856bc8858e3ef214d8a6
parent    41c018b7ecb60b1c2c4d5dee0cd37d32a94c45af
Merge with Linus' 2.6 tree
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/as-iosched.c | 10
-rw-r--r--  drivers/block/sx8.c | 4
-rw-r--r--  drivers/block/ub.c | 211
-rw-r--r--  drivers/cdrom/isp16.c | 2
-rw-r--r--  drivers/cdrom/mcdx.c | 8
-rw-r--r--  drivers/cdrom/optcd.c | 28
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/char/drm/via_dma.c | 10
-rw-r--r--  drivers/char/drm/via_drm.h | 2
-rw-r--r--  drivers/char/drm/via_ds.c | 4
-rw-r--r--  drivers/char/drm/via_ds.h | 4
-rw-r--r--  drivers/char/drm/via_map.c | 3
-rw-r--r--  drivers/char/drm/via_mm.c | 15
-rw-r--r--  drivers/char/drm/via_video.c | 3
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 6
-rw-r--r--  drivers/char/rio/rioboot.c | 12
-rw-r--r--  drivers/char/rio/rioroute.c | 2
-rw-r--r--  drivers/char/rio/riotable.c | 2
-rw-r--r--  drivers/char/tpm/Kconfig | 11
-rw-r--r--  drivers/char/tpm/Makefile | 2
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 467
-rw-r--r--  drivers/char/watchdog/acquirewdt.c | 7
-rw-r--r--  drivers/char/watchdog/advantechwdt.c | 7
-rw-r--r--  drivers/char/watchdog/alim1535_wdt.c | 9
-rw-r--r--  drivers/char/watchdog/alim7101_wdt.c | 7
-rw-r--r--  drivers/char/watchdog/eurotechwdt.c | 7
-rw-r--r--  drivers/char/watchdog/i8xx_tco.c | 7
-rw-r--r--  drivers/char/watchdog/ib700wdt.c | 7
-rw-r--r--  drivers/char/watchdog/indydog.c | 7
-rw-r--r--  drivers/char/watchdog/ixp2000_wdt.c | 6
-rw-r--r--  drivers/char/watchdog/ixp4xx_wdt.c | 6
-rw-r--r--  drivers/char/watchdog/machzwd.c | 7
-rw-r--r--  drivers/char/watchdog/mixcomwd.c | 7
-rw-r--r--  drivers/char/watchdog/pcwd.c | 7
-rw-r--r--  drivers/char/watchdog/pcwd_pci.c | 7
-rw-r--r--  drivers/char/watchdog/pcwd_usb.c | 7
-rw-r--r--  drivers/char/watchdog/s3c2410_wdt.c | 7
-rw-r--r--  drivers/char/watchdog/sa1100_wdt.c | 6
-rw-r--r--  drivers/char/watchdog/sbc60xxwdt.c | 7
-rw-r--r--  drivers/char/watchdog/sc1200wdt.c | 7
-rw-r--r--  drivers/char/watchdog/sc520_wdt.c | 7
-rw-r--r--  drivers/char/watchdog/scx200_wdt.c | 6
-rw-r--r--  drivers/char/watchdog/shwdt.c | 6
-rw-r--r--  drivers/char/watchdog/softdog.c | 7
-rw-r--r--  drivers/char/watchdog/w83627hf_wdt.c | 7
-rw-r--r--  drivers/char/watchdog/w83877f_wdt.c | 7
-rw-r--r--  drivers/char/watchdog/wafer5823wdt.c | 7
-rw-r--r--  drivers/char/watchdog/wdt.c | 7
-rw-r--r--  drivers/char/watchdog/wdt977.c | 7
-rw-r--r--  drivers/char/watchdog/wdt_pci.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-mpc.c | 94
-rw-r--r--  drivers/ide/cris/Makefile | 2
-rw-r--r--  drivers/ide/cris/ide-cris.c | 1107
-rw-r--r--  drivers/ide/cris/ide-v10.c | 842
-rw-r--r--  drivers/ide/pci/cmd640.c | 2
-rw-r--r--  drivers/ide/pci/trm290.c | 2
-rw-r--r--  drivers/ieee1394/sbp2.c | 1
-rw-r--r--  drivers/infiniband/core/Makefile | 9
-rw-r--r--  drivers/infiniband/core/agent.c | 22
-rw-r--r--  drivers/infiniband/core/agent_priv.h | 3
-rw-r--r--  drivers/infiniband/core/cm.c | 3324
-rw-r--r--  drivers/infiniband/core/cm_msgs.h | 819
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 7
-rw-r--r--  drivers/infiniband/core/mad.c | 600
-rw-r--r--  drivers/infiniband/core/mad_priv.h | 33
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c | 765
-rw-r--r--  drivers/infiniband/core/mad_rmpp.h | 58
-rw-r--r--  drivers/infiniband/core/sa_query.c | 206
-rw-r--r--  drivers/infiniband/core/ucm.c | 1393
-rw-r--r--  drivers/infiniband/core/ucm.h | 89
-rw-r--r--  drivers/infiniband/core/user_mad.c | 299
-rw-r--r--  drivers/infiniband/core/verbs.c | 35
-rw-r--r--  drivers/infiniband/include/ib_cm.h | 568
-rw-r--r--  drivers/infiniband/include/ib_fmr_pool.h | 5
-rw-r--r--  drivers/infiniband/include/ib_mad.h | 213
-rw-r--r--  drivers/infiniband/include/ib_sa.h | 87
-rw-r--r--  drivers/infiniband/include/ib_user_cm.h | 328
-rw-r--r--  drivers/infiniband/include/ib_user_mad.h | 28
-rw-r--r--  drivers/infiniband/include/ib_verbs.h | 25
-rw-r--r--  drivers/isdn/hisax/avm_a1.c | 2
-rw-r--r--  drivers/isdn/hisax/config.c | 1
-rw-r--r--  drivers/isdn/hisax/gazel.c | 9
-rw-r--r--  drivers/isdn/hisax/isdnl2.c | 2
-rw-r--r--  drivers/isdn/hisax/l3dss1.c | 8
-rw-r--r--  drivers/isdn/hisax/teles3.c | 2
-rw-r--r--  drivers/macintosh/Kconfig | 2
-rw-r--r--  drivers/md/bitmap.c | 8
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/raid1.c | 1
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/md/raid6main.c | 1
-rw-r--r--  drivers/media/dvb/frontends/Kconfig | 4
-rw-r--r--  drivers/media/dvb/frontends/Makefile | 2
-rw-r--r--  drivers/media/dvb/frontends/dvb-pll.c | 11
-rw-r--r--  drivers/media/dvb/frontends/lgdt330x.c (renamed from drivers/media/dvb/frontends/lgdt3302.c) | 193
-rw-r--r--  drivers/media/dvb/frontends/lgdt330x.h (renamed from drivers/media/dvb/frontends/lgdt3302.h) | 20
-rw-r--r--  drivers/media/dvb/frontends/lgdt330x_priv.h (renamed from drivers/media/dvb/frontends/lgdt3302_priv.h) | 10
-rw-r--r--  drivers/media/radio/radio-maestro.c | 4
-rw-r--r--  drivers/media/radio/radio-maxiradio.c | 2
-rw-r--r--  drivers/media/video/Kconfig | 4
-rw-r--r--  drivers/media/video/bttv-cards.c | 4
-rw-r--r--  drivers/media/video/cx88/Makefile | 12
-rw-r--r--  drivers/media/video/cx88/cx88-dvb.c | 90
-rw-r--r--  drivers/media/video/cx88/cx88-i2c.c | 4
-rw-r--r--  drivers/media/video/mxb.c | 39
-rw-r--r--  drivers/media/video/saa7134/Makefile | 6
-rw-r--r--  drivers/media/video/saa7134/saa7134-dvb.c | 20
-rw-r--r--  drivers/media/video/tvaudio.c | 1
-rw-r--r--  drivers/media/video/tveeprom.c | 6
-rw-r--r--  drivers/mmc/wbsd.c | 2
-rw-r--r--  drivers/mtd/devices/docecc.c | 1
-rw-r--r--  drivers/net/3c505.c | 2
-rw-r--r--  drivers/net/8139too.c | 6
-rw-r--r--  drivers/net/Kconfig | 38
-rwxr-xr-x  drivers/net/amd8111e.c | 2
-rw-r--r--  drivers/net/ne.c | 4
-rw-r--r--  drivers/net/plip.c | 29
-rw-r--r--  drivers/net/via-velocity.h | 4
-rw-r--r--  drivers/net/wireless/airo.c | 2
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 23
-rw-r--r--  drivers/pnp/pnpbios/rsparser.c | 2
-rw-r--r--  drivers/s390/block/dasd.c | 6
-rw-r--r--  drivers/s390/block/dasd_fba.c | 4
-rw-r--r--  drivers/s390/char/tape.h | 7
-rw-r--r--  drivers/s390/char/tape_core.c | 299
-rw-r--r--  drivers/s390/char/vmcp.c | 6
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 6
-rw-r--r--  drivers/s390/cio/chsc.c | 10
-rw-r--r--  drivers/s390/cio/device_status.c | 5
-rw-r--r--  drivers/s390/cio/qdio.c | 20
-rw-r--r--  drivers/s390/net/qeth.h | 26
-rw-r--r--  drivers/scsi/NCR53c406a.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 2
-rw-r--r--  drivers/scsi/dc395x.c | 2
-rw-r--r--  drivers/scsi/dpt/dptsig.h | 4
-rw-r--r--  drivers/scsi/dtc.c | 4
-rw-r--r--  drivers/scsi/dtc.h | 4
-rw-r--r--  drivers/scsi/fdomain.c | 2
-rw-r--r--  drivers/scsi/initio.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_compat.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 4
-rw-r--r--  drivers/scsi/pas16.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/Kconfig | 1
-rw-r--r--  drivers/scsi/qla2xxx/Makefile | 1
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.h | 16
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_nvram.c | 2
-rw-r--r--  drivers/scsi/t128.h | 1
-rw-r--r--  drivers/serial/8250_pci.c | 20
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_cpm1.c | 2
-rw-r--r--  drivers/serial/jsm/jsm.h | 2
-rw-r--r--  drivers/serial/jsm/jsm_driver.c | 3
-rw-r--r--  drivers/serial/jsm/jsm_neo.c | 30
-rw-r--r--  drivers/usb/image/microtek.c | 3
-rw-r--r--  drivers/video/Kconfig | 4
-rw-r--r--  drivers/video/aty/radeon_base.c | 5
-rw-r--r--  drivers/video/console/fbcon.c | 3
-rw-r--r--  drivers/video/fbcmap.c | 96
-rw-r--r--  drivers/video/fbmem.c | 1
-rw-r--r--  drivers/video/fbmon.c | 2
-rw-r--r--  drivers/video/fbsysfs.c | 82
-rw-r--r--  drivers/video/pm2fb.c | 16
-rw-r--r--  drivers/video/riva/fbdev.c | 2
165 files changed, 11119 insertions, 2161 deletions
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 91aeb678135d..95c0a3690b0f 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -1935,23 +1935,15 @@ struct as_fs_entry {
 static ssize_t
 as_var_show(unsigned int var, char *page)
 {
-	var = (var * 1000) / HZ;
 	return sprintf(page, "%d\n", var);
 }
 
 static ssize_t
 as_var_store(unsigned long *var, const char *page, size_t count)
 {
-	unsigned long tmp;
 	char *p = (char *) page;
 
-	tmp = simple_strtoul(p, &p, 10);
-	if (tmp != 0) {
-		tmp = (tmp * HZ) / 1000;
-		if (tmp == 0)
-			tmp = 1;
-	}
-	*var = tmp;
+	*var = simple_strtoul(p, &p, 10);
 	return count;
 }
 
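The as-iosched.c hunk above removes a unit conversion: as_var_show() used to translate an internal jiffies value to milliseconds, and as_var_store() translated back, rounding any non-zero request up to at least one tick, so after the change these sysfs attributes pass the value through unconverted. A minimal sketch of the removed round-trip, assuming only HZ from the kernel headers (the helper names are hypothetical):

static unsigned long ms_to_jiffies_sketch(unsigned long ms)
{
	unsigned long j = (ms * HZ) / 1000;	/* milliseconds -> ticks */

	if (ms != 0 && j == 0)
		j = 1;		/* round a non-zero request up to one tick */
	return j;
}

static unsigned long jiffies_to_ms_sketch(unsigned long j)
{
	return (j * 1000) / HZ;			/* ticks -> milliseconds */
}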
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 9db0a9e3e59c..d57007b92f77 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1582,7 +1582,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto err_out;
 
-#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
 	rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
 	if (!rc) {
 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
@@ -1601,7 +1601,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_regions;
 		}
 		pci_dac = 0;
-#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
 	}
 #endif
 
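The sx8.c hunks above, and the matching changes in mcdx.c, rioboot.c, rioroute.c and riotable.c further down, replace `#if SYMBOL` with `#ifdef SYMBOL` for symbols that are never defined. The distinction matters: `#if` evaluates its operand as an integer expression, so an undefined name is silently treated as 0 (and -Wundef warns about it), while a defined-but-empty name is a preprocessing error; `#ifdef` only tests whether the name is defined, which is what these compile-time switches intend. A minimal illustration, with hypothetical macro names:

#define FEATURE_A		/* defined, but expands to nothing */

#ifdef FEATURE_A		/* true: tests definedness only */
#endif

#if FEATURE_A			/* error: an empty expansion is not an expression */
#endif

#if FEATURE_B			/* FEATURE_B undefined: silently evaluates as 0 */
#endif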
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 685f061e69b2..a026567f5d18 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -23,6 +23,7 @@
  * -- Exterminate P3 printks
  * -- Resove XXX's
  * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
+ * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -38,6 +39,73 @@
 #define UB_MAJOR 180
 
 /*
+ * The command state machine is the key model for understanding of this driver.
+ *
+ * The general rule is that all transitions are done towards the bottom
+ * of the diagram, thus preventing any loops.
+ *
+ * An exception to that is how the STAT state is handled. A counter allows it
+ * to be re-entered along the path marked with [C].
+ *
+ *       +--------+
+ *       ! INIT   !
+ *       +--------+
+ *           !
+ *        ub_scsi_cmd_start fails ->-------------------------------------\
+ *           !                                                           !
+ *           V                                                           !
+ *       +--------+                                                      !
+ *       ! CMD    !                                                      !
+ *       +--------+                                                      !
+ *           !                                        +--------+         !
+ *           was -EPIPE -->-------------------------->! CLEAR  !         !
+ *           !                                        +--------+         !
+ *           !                                            !              !
+ *           was error -->------------------------------- ! ----------->\
+ *           !                                            !              !
+ *  /--<-- cmd->dir == NONE ?                             !              !
+ *  !        !                                            !              !
+ *  !        V                                            !              !
+ *  !    +--------+                                       !              !
+ *  !    ! DATA   !                                       !              !
+ *  !    +--------+                                       !              !
+ *  !        !                 +---------+                !              !
+ *  !        was -EPIPE -->--->! CLR2STS !                !              !
+ *  !        !                 +---------+                !              !
+ *  !        !                      !                     !              !
+ *  !        !                      was error -->-------- ! ----------->\
+ *  !        was error -->--------- ! ------------------- ! ----------->\
+ *  !        !                      !                     !              !
+ *  !        V                      !                     !              !
+ *  \--->+--------+                 !                     !              !
+ *       ! STAT   !<----------------/                     !              !
+ *  /--->+--------+                                       !              !
+ *  !        !                                            !              !
+ * [C]       was -EPIPE -->-----------\                   !              !
+ *  !        !                        !                   !              !
+ *  +<----   len == 0                 !                   !              !
+ *  !        !                        !                   !              !
+ *  !        was error -->----------- ! ----------------- ! ----------->\
+ *  !        !                        !                   !              !
+ *  +<----   bad CSW                  !                   !              !
+ *  +<----   bad tag                  !                   !              !
+ *  !        !                        V                   !              !
+ *  !        !                   +--------+               !              !
+ *  !        !                   ! CLRRS  !               !              !
+ *  !        !                   +--------+               !              !
+ *  !        !                        !                   !              !
+ *  \------- ! -----------------[C]---------\             !              !
+ *           !                              !             !              !
+ *    cmd->error ---\                  +--------+         !              !
+ *           !       +---------------->! SENSE  !<--------/              !
+ *    STAT_FAIL ----/                  +--------+                        !
+ *           !                              !                            V
+ *           !                              V                       +--------+
+ *           \------------------------------\---------------------->! DONE   !
+ *                                                                  +--------+
+ */
+
+/*
  * Definitions which have to be scattered once we understand the layout better.
  */
 
@@ -91,8 +159,6 @@ struct bulk_cs_wrap {
 
 #define US_BULK_CS_WRAP_LEN	13
 #define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
-/* This is for Olympus Camedia digital cameras */
-#define US_BULK_CS_OLYMPUS_SIGN	0x55425355	/* spells out 'USBU' */
 #define US_BULK_STAT_OK		0
 #define US_BULK_STAT_FAIL	1
 #define US_BULK_STAT_PHASE	2
@@ -135,6 +201,7 @@ enum ub_scsi_cmd_state {
 	UB_CMDST_CLR2STS,	/* Clearing before requesting status */
 	UB_CMDST_STAT,		/* Status phase */
 	UB_CMDST_CLEAR,		/* Clearing a stall (halt, actually) */
+	UB_CMDST_CLRRS,		/* Clearing before retrying status */
 	UB_CMDST_SENSE,		/* Sending Request Sense */
 	UB_CMDST_DONE		/* Final state */
 };
@@ -146,6 +213,7 @@ static char *ub_scsi_cmd_stname[] = {
 	"c2s",
 	"sts",
 	"clr",
+	"crs",
 	"Sen",
 	"fin"
 };
@@ -316,6 +384,7 @@ struct ub_dev {
 	struct urb work_urb;
 	struct timer_list work_timer;
 	int last_pipe;			/* What might need clearing */
+	__le32 signature;		/* Learned signature */
 	struct bulk_cb_wrap work_bcb;
 	struct bulk_cs_wrap work_bcs;
 	struct usb_ctrlrequest work_cr;
@@ -339,8 +408,9 @@ static void ub_scsi_action(unsigned long _dev);
 static void ub_scsi_dispatch(struct ub_dev *sc);
 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
-static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 	int stalled_pipe);
@@ -1085,6 +1155,28 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 		ub_state_stat(sc, cmd);
 
+	} else if (cmd->state == UB_CMDST_CLRRS) {
+		if (urb->status == -EPIPE) {
+			/*
+			 * STALL while clearning STALL.
+			 * The control pipe clears itself - nothing to do.
+			 * XXX Might try to reset the device here and retry.
+			 */
+			printk(KERN_NOTICE "%s: stall on control pipe\n",
+			    sc->name);
+			goto Bad_End;
+		}
+
+		/*
+		 * We ignore the result for the halt clear.
+		 */
+
+		/* reset the endpoint toggle */
+		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
+			usb_pipeout(sc->last_pipe), 0);
+
+		ub_state_stat_counted(sc, cmd);
+
 	} else if (cmd->state == UB_CMDST_CMD) {
 		if (urb->status == -EPIPE) {
 			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
@@ -1190,52 +1282,57 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 			 */
 			goto Bad_End;
 		}
-		cmd->state = UB_CMDST_CLEAR;
+
+		/*
+		 * Having a stall when getting CSW is an error, so
+		 * make sure uppper levels are not oblivious to it.
+		 */
+		cmd->error = -EIO;		/* A cheap trick... */
+
+		cmd->state = UB_CMDST_CLRRS;
 		ub_cmdtr_state(sc, cmd);
 		return;
 	}
+	if (urb->status == -EOVERFLOW) {
+		/*
+		 * XXX We are screwed here. Retrying is pointless,
+		 * because the pipelined data will not get in until
+		 * we read with a big enough buffer. We must reset XXX.
+		 */
+		goto Bad_End;
+	}
 	if (urb->status != 0)
 		goto Bad_End;
 
 	if (urb->actual_length == 0) {
-		/*
-		 * Some broken devices add unnecessary zero-length
-		 * packets to the end of their data transfers.
-		 * Such packets show up as 0-length CSWs. If we
-		 * encounter such a thing, try to read the CSW again.
-		 */
-		if (++cmd->stat_count >= 4) {
-			printk(KERN_NOTICE "%s: unable to get CSW\n",
-			    sc->name);
-			goto Bad_End;
-		}
-		__ub_state_stat(sc, cmd);
+		ub_state_stat_counted(sc, cmd);
 		return;
 	}
 
 	/*
 	 * Check the returned Bulk protocol status.
+	 * The status block has to be validated first.
 	 */
 
 	bcs = &sc->work_bcs;
-	rc = le32_to_cpu(bcs->Residue);
-	if (rc != cmd->len - cmd->act_len) {
-		/*
-		 * It is all right to transfer less, the caller has
-		 * to check. But it's not all right if the device
-		 * counts disagree with our counts.
-		 */
-		/* P3 */ printk("%s: resid %d len %d act %d\n",
-		    sc->name, rc, cmd->len, cmd->act_len);
-		goto Bad_End;
-	}
-
-#if 0
-	if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) &&
-	    bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) {
-		/* Windows ignores signatures, so do we. */
-	}
-#endif
+
+	if (sc->signature == cpu_to_le32(0)) {
+		/*
+		 * This is the first reply, so do not perform the check.
+		 * Instead, remember the signature the device uses
+		 * for future checks. But do not allow a nul.
+		 */
+		sc->signature = bcs->Signature;
+		if (sc->signature == cpu_to_le32(0)) {
+			ub_state_stat_counted(sc, cmd);
+			return;
+		}
+	} else {
+		if (bcs->Signature != sc->signature) {
+			ub_state_stat_counted(sc, cmd);
+			return;
+		}
+	}
 
 	if (bcs->Tag != cmd->tag) {
@@ -1245,16 +1342,22 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		 * commands and reply at commands we timed out before.
 		 * Without flushing these replies we loop forever.
 		 */
-		if (++cmd->stat_count >= 4) {
-			printk(KERN_NOTICE "%s: "
-			    "tag mismatch orig 0x%x reply 0x%x\n",
-			    sc->name, cmd->tag, bcs->Tag);
-			goto Bad_End;
-		}
-		__ub_state_stat(sc, cmd);
+		ub_state_stat_counted(sc, cmd);
 		return;
 	}
 
+	rc = le32_to_cpu(bcs->Residue);
+	if (rc != cmd->len - cmd->act_len) {
+		/*
+		 * It is all right to transfer less, the caller has
+		 * to check. But it's not all right if the device
+		 * counts disagree with our counts.
+		 */
+		/* P3 */ printk("%s: resid %d len %d act %d\n",
+		    sc->name, rc, cmd->len, cmd->act_len);
+		goto Bad_End;
+	}
+
 	switch (bcs->Status) {
 	case US_BULK_STAT_OK:
 		break;
@@ -1272,6 +1375,10 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	}
 
 	/* Not zeroing error to preserve a babble indicator */
+	if (cmd->error != 0) {
+		ub_state_sense(sc, cmd);
+		return;
+	}
 	cmd->state = UB_CMDST_DONE;
 	ub_cmdtr_state(sc, cmd);
 	ub_cmdq_pop(sc);
@@ -1310,7 +1417,7 @@ static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
  * Factorization helper for the command state machine:
  * Submit a CSW read.
  */
-static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	int rc;
 
@@ -1328,11 +1435,12 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		/* XXX Clear stalls */
 		ub_complete(&sc->work_done);
 		ub_state_done(sc, cmd, rc);
-		return;
+		return -1;
 	}
 
 	sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
 	add_timer(&sc->work_timer);
+	return 0;
 }
 
@@ -1341,7 +1449,9 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
  */
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
-	__ub_state_stat(sc, cmd);
+
+	if (__ub_state_stat(sc, cmd) != 0)
+		return;
 
 	cmd->stat_count = 0;
 	cmd->state = UB_CMDST_STAT;
@@ -1350,6 +1460,25 @@ static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1350 1460
1351/* 1461/*
1352 * Factorization helper for the command state machine: 1462 * Factorization helper for the command state machine:
1463 * Submit a CSW read and go to STAT state with counter (along [C] path).
1464 */
1465static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1466{
1467
1468 if (++cmd->stat_count >= 4) {
1469 ub_state_sense(sc, cmd);
1470 return;
1471 }
1472
1473 if (__ub_state_stat(sc, cmd) != 0)
1474 return;
1475
1476 cmd->state = UB_CMDST_STAT;
1477 ub_cmdtr_state(sc, cmd);
1478}
1479
1480/*
1481 * Factorization helper for the command state machine:
1353 * Submit a REQUEST SENSE and go to SENSE state. 1482 * Submit a REQUEST SENSE and go to SENSE state.
1354 */ 1483 */
1355static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1484static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
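The ub.c changes above reshape the status (CSW) phase: zero-length CSWs, signature mismatches and tag mismatches no longer count retries inline; they all funnel through the new ub_state_stat_counted(), which re-enters STAT along the [C] path at most four times and then falls through to SENSE instead of looping or failing hard. A minimal sketch of that bounded re-entry pattern, with hypothetical names:

#define MAX_STAT_RETRIES 4

struct sketch_cmd {
	int stat_count;		/* cleared whenever STAT is entered normally */
};

/* Returns 0 if the state may be re-entered, -1 once the budget is spent. */
static int reenter_stat_sketch(struct sketch_cmd *cmd)
{
	if (++cmd->stat_count >= MAX_STAT_RETRIES)
		return -1;	/* give up: recover via REQUEST SENSE */
	return 0;		/* resubmit the CSW read and stay in STAT */
}

The signature handling follows the same spirit: instead of hard-coding the 'USBS' and Olympus 'USBU' constants, the driver now records whatever non-zero signature the device returns first and afterwards rejects only CSWs that deviate from it.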
diff --git a/drivers/cdrom/isp16.c b/drivers/cdrom/isp16.c
index 8e68d858ce64..db0fd9a240e3 100644
--- a/drivers/cdrom/isp16.c
+++ b/drivers/cdrom/isp16.c
@@ -18,7 +18,7 @@
  *
  *	19 June 2004 -- check_region() converted to request_region()
  *			and return statement cleanups.
- *			Jesper Juhl <juhl-lkml@dif.dk>
+ *			- Jesper Juhl
  *
  * Detect cdrom interface on ISP16 sound card.
  * Configure cdrom interface.
diff --git a/drivers/cdrom/mcdx.c b/drivers/cdrom/mcdx.c
index 07bbd24e3c18..b89420e6d704 100644
--- a/drivers/cdrom/mcdx.c
+++ b/drivers/cdrom/mcdx.c
@@ -51,7 +51,7 @@
  */
 
 
-#if RCS
+#ifdef RCS
 static const char *mcdx_c_version
 		= "$Id: mcdx.c,v 1.21 1997/01/26 07:12:59 davem Exp $";
 #endif
@@ -706,7 +706,7 @@ static int mcdx_open(struct cdrom_device_info *cdi, int purpose)
 	xtrace(OPENCLOSE, "open() init irq generation\n");
 	if (-1 == mcdx_config(stuffp, 1))
 		return -EIO;
-#if FALLBACK
+#ifdef FALLBACK
 	/* Set the read speed */
 	xwarn("AAA %x AAA\n", stuffp->readcmd);
 	if (stuffp->readerrs)
@@ -1216,7 +1216,7 @@ static int __init mcdx_init_drive(int drive)
 	}
 
 
-#if WE_KNOW_WHY
+#ifdef WE_KNOW_WHY
 	/* irq 11 -> channel register */
 	outb(0x50, stuffp->wreg_chn);
 #endif
@@ -1294,7 +1294,7 @@ static int mcdx_transfer(struct s_drive_stuff *stuffp,
 
 	ans = mcdx_xfer(stuffp, p, sector, nr_sectors);
 	return ans;
-#if FALLBACK
+#ifdef FALLBACK
 	if (-1 == ans)
 		stuffp->readerrs++;
 	else
diff --git a/drivers/cdrom/optcd.c b/drivers/cdrom/optcd.c
index 7e69c54568bf..351a01dd503a 100644
--- a/drivers/cdrom/optcd.c
+++ b/drivers/cdrom/optcd.c
@@ -245,7 +245,7 @@ module_param(optcd_port, short, 0);
 
 
 /* Busy wait until FLAG goes low. Return 0 on timeout. */
-inline static int flag_low(int flag, unsigned long timeout)
+static inline int flag_low(int flag, unsigned long timeout)
 {
 	int flag_high;
 	unsigned long count = 0;
@@ -381,7 +381,7 @@ static int send_seek_params(struct cdrom_msf *params)
 
 /* Wait for command execution status. Choice between busy waiting
    and sleeping. Return value <0 indicates timeout. */
-inline static int get_exec_status(int busy_waiting)
+static inline int get_exec_status(int busy_waiting)
 {
 	unsigned char exec_status;
 
@@ -398,7 +398,7 @@ inline static int get_exec_status(int busy_waiting)
 
 /* Wait busy for extra byte of data that a command returns.
    Return value <0 indicates timeout. */
-inline static int get_data(int short_timeout)
+static inline int get_data(int short_timeout)
 {
 	unsigned char data;
 
@@ -441,14 +441,14 @@ static int reset_drive(void)
 /* Facilities for asynchronous operation */
 
 /* Read status/data availability flags FL_STEN and FL_DTEN */
-inline static int stdt_flags(void)
+static inline int stdt_flags(void)
 {
 	return inb(STATUS_PORT) & FL_STDT;
 }
 
 
 /* Fetch status that has previously been waited for. <0 means not available */
-inline static int fetch_status(void)
+static inline int fetch_status(void)
 {
 	unsigned char status;
 
@@ -462,7 +462,7 @@ inline static int fetch_status(void)
 
 
 /* Fetch data that has previously been waited for. */
-inline static void fetch_data(char *buf, int n)
+static inline void fetch_data(char *buf, int n)
 {
 	insb(DATA_PORT, buf, n);
 	DEBUG((DEBUG_DRIVE_IF, "fetched 0x%x bytes", n));
@@ -470,7 +470,7 @@ inline static void fetch_data(char *buf, int n)
 
 
 /* Flush status and data fifos */
-inline static void flush_data(void)
+static inline void flush_data(void)
 {
 	while ((inb(STATUS_PORT) & FL_STDT) != FL_STDT)
 		inb(DATA_PORT);
@@ -482,7 +482,7 @@ inline static void flush_data(void)
 
 /* Send a simple command and wait for response. Command codes < COMFETCH
    are quick response commands */
-inline static int exec_cmd(int cmd)
+static inline int exec_cmd(int cmd)
 {
 	int ack = send_cmd(cmd);
 	if (ack < 0)
@@ -493,7 +493,7 @@ inline static int exec_cmd(int cmd)
 
 /* Send a command with parameters. Don't wait for the response,
  * which consists of data blocks read from the CD. */
-inline static int exec_read_cmd(int cmd, struct cdrom_msf *params)
+static inline int exec_read_cmd(int cmd, struct cdrom_msf *params)
 {
 	int ack = send_cmd(cmd);
 	if (ack < 0)
@@ -503,7 +503,7 @@ inline static int exec_read_cmd(int cmd, struct cdrom_msf *params)
 
 
 /* Send a seek command with parameters and wait for response */
-inline static int exec_seek_cmd(int cmd, struct cdrom_msf *params)
+static inline int exec_seek_cmd(int cmd, struct cdrom_msf *params)
 {
 	int ack = send_cmd(cmd);
 	if (ack < 0)
@@ -516,7 +516,7 @@ inline static int exec_seek_cmd(int cmd, struct cdrom_msf *params)
 
 
 /* Send a command with parameters and wait for response */
-inline static int exec_long_cmd(int cmd, struct cdrom_msf *params)
+static inline int exec_long_cmd(int cmd, struct cdrom_msf *params)
 {
 	int ack = exec_read_cmd(cmd, params);
 	if (ack < 0)
@@ -528,7 +528,7 @@ inline static int exec_long_cmd(int cmd, struct cdrom_msf *params)
 
 
 /* Binary to BCD (2 digits) */
-inline static void single_bin2bcd(u_char *p)
+static inline void single_bin2bcd(u_char *p)
 {
 	DEBUG((DEBUG_CONV, "bin2bcd %02d", *p));
 	*p = (*p % 10) | ((*p / 10) << 4);
@@ -565,7 +565,7 @@ static void lba2msf(int lba, struct cdrom_msf *msf)
 
 
 /* Two BCD digits to binary */
-inline static u_char bcd2bin(u_char bcd)
+static inline u_char bcd2bin(u_char bcd)
 {
 	DEBUG((DEBUG_CONV, "bcd2bin %x%02x", bcd));
 	return (bcd >> 4) * 10 + (bcd & 0x0f);
@@ -988,7 +988,7 @@ static char buf[CD_FRAMESIZE * N_BUFS];
 static volatile int buf_bn[N_BUFS], next_bn;
 static volatile int buf_in = 0, buf_out = NOBUF;
 
-inline static void opt_invalidate_buffers(void)
+static inline void opt_invalidate_buffers(void)
 {
 	int i;
 
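Every optcd.c hunk above is the same mechanical fix, and ipmi_si_intf.c below gets the identical treatment: `inline static` becomes `static inline`. Both orders are accepted by GCC, but C99 marks storage-class specifiers that do not lead the declaration as obsolescent, and kernel style settled on `static inline`. Illustrative form only, with a hypothetical function:

/* preferred ordering: storage class first, then the inline specifier */
static inline int lowest_bit_sketch(int v)
{
	return v & 1;
}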
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 43d0cb19ef6a..4f27e5519296 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -735,7 +735,7 @@ config SGI_IP27_RTC
 
 config GEN_RTC
 	tristate "Generic /dev/rtc emulation"
-	depends on RTC!=y && !IA64 && !ARM
+	depends on RTC!=y && !IA64 && !ARM && !PPC64
 	---help---
 	  If you say Y here and create a character special file /dev/rtc with
 	  major number 10 and minor number 135 using mknod ("man mknod"), you
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index 82f839451622..4f60f7f4193d 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -231,7 +231,7 @@ int via_dma_init(DRM_IOCTL_ARGS)
 	drm_via_dma_init_t init;
 	int retcode = 0;
 
-	DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *) data,
+	DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t __user *) data,
 				 sizeof(init));
 
 	switch (init.func) {
@@ -343,7 +343,7 @@ int via_cmdbuffer(DRM_IOCTL_ARGS)
 
 	LOCK_TEST_WITH_RETURN( dev, filp );
 
-	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
+	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
 				 sizeof(cmdbuf));
 
 	DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);
@@ -386,7 +386,7 @@ int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
 
 	LOCK_TEST_WITH_RETURN( dev, filp );
 
-	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
+	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
 				 sizeof(cmdbuf));
 
 	DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
@@ -701,7 +701,7 @@ via_cmdbuf_size(DRM_IOCTL_ARGS)
 		return DRM_ERR(EFAULT);
 	}
 
-	DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t *) data,
+	DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data,
 				 sizeof(d_siz));
 
 
@@ -735,7 +735,7 @@ via_cmdbuf_size(DRM_IOCTL_ARGS)
 	}
 	d_siz.size = tmp_size;
 
-	DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t *) data, d_siz,
+	DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t __user *) data, d_siz,
 			       sizeof(d_siz));
 	return ret;
 }
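The via_dma.c hunks above, and the via_drm.h, via_map.c, via_mm.c and via_video.c ones below, add `__user` to pointers that refer to userspace memory. The annotation expands to nothing for the compiler but lets the sparse checker verify that such pointers are never dereferenced directly and only cross the kernel boundary through copying helpers. A minimal sketch of the idea, assuming only copy_from_user() from the kernel API (the struct and function names are hypothetical):

struct sketch_args {
	char __user *buf;	/* points into userspace: never dereference */
	unsigned long size;
};

static long fetch_args_sketch(struct sketch_args *dst, void __user *arg)
{
	if (copy_from_user(dst, arg, sizeof(*dst)))
		return -EFAULT;	/* some bytes could not be copied */
	return 0;
}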
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index 4588c9bd1816..be346bb0a26a 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -158,7 +158,7 @@ typedef struct _drm_via_dma_init {
 } drm_via_dma_init_t;
 
 typedef struct _drm_via_cmdbuffer {
-	char *buf;
+	char __user *buf;
 	unsigned long size;
 } drm_via_cmdbuffer_t;
 
diff --git a/drivers/char/drm/via_ds.c b/drivers/char/drm/via_ds.c
index daf3df75a20e..5c71e089246c 100644
--- a/drivers/char/drm/via_ds.c
+++ b/drivers/char/drm/via_ds.c
@@ -133,7 +133,7 @@ memHeap_t *via_mmInit(int ofs, int size)
 	PMemBlock blocks;
 
 	if (size <= 0)
-		return 0;
+		return NULL;
 
 	blocks = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
 
@@ -143,7 +143,7 @@ memHeap_t *via_mmInit(int ofs, int size)
 		blocks->free = 1;
 		return (memHeap_t *) blocks;
 	} else
-		return 0;
+		return NULL;
 }
 
 static TMemBlock *SliceBlock(TMemBlock * p,
diff --git a/drivers/char/drm/via_ds.h b/drivers/char/drm/via_ds.h
index be9c7f9f1aee..d2bb9f37ca38 100644
--- a/drivers/char/drm/via_ds.h
+++ b/drivers/char/drm/via_ds.h
@@ -61,8 +61,8 @@ struct mem_block_t {
 	struct mem_block_t *heap;
 	int ofs, size;
 	int align;
-	int free:1;
-	int reserved:1;
+	unsigned int free:1;
+	unsigned int reserved:1;
 };
 typedef struct mem_block_t TMemBlock;
 typedef struct mem_block_t *PMemBlock;
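The via_ds.h hunk above changes the two one-bit bitfields from plain int to unsigned int. Whether a plain int bitfield is signed is implementation-defined, and GCC treats it as signed, so a one-bit field can only represent 0 and -1: storing 1 reads back as -1, and a test like `p->free == 1` quietly fails. Illustrative only:

struct bits_sketch {
	int sfree:1;		/* signed: assigning 1 reads back as -1 */
	unsigned int ufree:1;	/* unsigned: assigning 1 reads back as 1 */
};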
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 0be829b6ec65..bb171139e737 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -95,7 +95,8 @@ int via_map_init(DRM_IOCTL_ARGS)
 
 	DRM_DEBUG("%s\n", __FUNCTION__);
 
-	DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t *) data, sizeof(init));
+	DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t __user *) data,
+				 sizeof(init));
 
 	switch (init.func) {
 	case VIA_INIT_MAP:
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index c22712f44d42..13921f3c0ec2 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -76,7 +76,8 @@ int via_agp_init(DRM_IOCTL_ARGS)
 {
 	drm_via_agp_t agp;
 
-	DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t *) data, sizeof(agp));
+	DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data,
+				 sizeof(agp));
 
 	AgpHeap = via_mmInit(agp.offset, agp.size);
 
@@ -92,7 +93,7 @@ int via_fb_init(DRM_IOCTL_ARGS)
 {
 	drm_via_fb_t fb;
 
-	DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t *) data, sizeof(fb));
+	DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb));
 
 	FBHeap = via_mmInit(fb.offset, fb.size);
 
@@ -193,19 +194,20 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
 {
 	drm_via_mem_t mem;
 
-	DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t *) data, sizeof(mem));
+	DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
+				 sizeof(mem));
 
 	switch (mem.type) {
 	case VIDEO:
 		if (via_fb_alloc(&mem) < 0)
 			return -EFAULT;
-		DRM_COPY_TO_USER_IOCTL((drm_via_mem_t *) data, mem,
+		DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
 				       sizeof(mem));
 		return 0;
 	case AGP:
 		if (via_agp_alloc(&mem) < 0)
 			return -EFAULT;
-		DRM_COPY_TO_USER_IOCTL((drm_via_mem_t *) data, mem,
+		DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
 				       sizeof(mem));
 		return 0;
 	}
@@ -289,7 +291,8 @@ int via_mem_free(DRM_IOCTL_ARGS)
 {
 	drm_via_mem_t mem;
 
-	DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t *) data, sizeof(mem));
+	DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
+				 sizeof(mem));
 
 	switch (mem.type) {
 
diff --git a/drivers/char/drm/via_video.c b/drivers/char/drm/via_video.c
index 37a61c67b292..1e2d444587bf 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/char/drm/via_video.c
@@ -76,7 +76,8 @@ via_decoder_futex(DRM_IOCTL_ARGS)
 
 	DRM_DEBUG("%s\n", __FUNCTION__);
 
-	DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t *) data, sizeof(fx));
+	DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t __user *) data,
+				 sizeof(fx));
 
 	if (fx.lock > VIA_NR_XVMC_LOCKS)
 		return -EFAULT;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 298574e16061..a44b97304e95 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1726,7 +1726,7 @@ static int dmi_table(u32 base, int len, int num)
 	return status;
 }
 
-inline static int dmi_checksum(u8 *buf)
+static inline int dmi_checksum(u8 *buf)
 {
 	u8 sum=0;
 	int a;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index fcd1c02a32cb..d35a953961cb 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -131,11 +131,7 @@
131#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int) 131#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
132#endif 132#endif
133 133
134#ifdef CONFIG_WATCHDOG_NOWAYOUT 134static int nowayout = WATCHDOG_NOWAYOUT;
135static int nowayout = 1;
136#else
137static int nowayout;
138#endif
139 135
140static ipmi_user_t watchdog_user = NULL; 136static ipmi_user_t watchdog_user = NULL;
141 137
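The ipmi_watchdog.c hunk above replaces the open-coded #ifdef block with the WATCHDOG_NOWAYOUT constant; the many watchdog drivers in the diffstat each shrink by a few lines, presumably from the same cleanup. The definition in linux/watchdog.h of this era is roughly:

#ifdef CONFIG_WATCHDOG_NOWAYOUT
#define WATCHDOG_NOWAYOUT	1
#else
#define WATCHDOG_NOWAYOUT	0
#endif

so `static int nowayout = WATCHDOG_NOWAYOUT;` keeps the old behaviour in a single unconditional line.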
diff --git a/drivers/char/rio/rioboot.c b/drivers/char/rio/rioboot.c
index a8be11dfcba3..34cbb13aad4b 100644
--- a/drivers/char/rio/rioboot.c
+++ b/drivers/char/rio/rioboot.c
@@ -902,7 +902,7 @@ static int RIOBootComplete( struct rio_info *p, struct Host *HostP, uint Rup, st
 		    (HostP->Mapping[entry].RtaUniqueNum==RtaUniq))
 		{
 			HostP->Mapping[entry].Flags |= RTA_BOOTED|RTA_NEWBOOT;
-#if NEED_TO_FIX
+#ifdef NEED_TO_FIX
 			RIO_SV_BROADCAST(HostP->svFlags[entry]);
 #endif
 			if ( (sysport=HostP->Mapping[entry].SysPort) != NO_PORT )
@@ -918,7 +918,7 @@ static int RIOBootComplete( struct rio_info *p, struct Host *HostP, uint Rup, st
 			{
 				entry2 = HostP->Mapping[entry].ID2 - 1;
 				HostP->Mapping[entry2].Flags |= RTA_BOOTED|RTA_NEWBOOT;
-#if NEED_TO_FIX
+#ifdef NEED_TO_FIX
 				RIO_SV_BROADCAST(HostP->svFlags[entry2]);
 #endif
 				sysport = HostP->Mapping[entry2].SysPort;
@@ -1143,7 +1143,7 @@ static int RIOBootComplete( struct rio_info *p, struct Host *HostP, uint Rup, st
 		CCOPY( MapP->Name, HostP->Mapping[entry].Name, MAX_NAME_LEN );
 		HostP->Mapping[entry].Flags =
 			SLOT_IN_USE | RTA_BOOTED | RTA_NEWBOOT;
-#if NEED_TO_FIX
+#ifdef NEED_TO_FIX
 		RIO_SV_BROADCAST(HostP->svFlags[entry]);
 #endif
 		RIOReMapPorts( p, HostP, &HostP->Mapping[entry] );
@@ -1159,7 +1159,7 @@ static int RIOBootComplete( struct rio_info *p, struct Host *HostP, uint Rup, st
 			"This RTA has a tentative entry on another host - delete that entry (1)\n");
 		HostP->Mapping[entry].Flags =
 			SLOT_TENTATIVE | RTA_BOOTED | RTA_NEWBOOT;
-#if NEED_TO_FIX
+#ifdef NEED_TO_FIX
 		RIO_SV_BROADCAST(HostP->svFlags[entry]);
 #endif
 	}
@@ -1169,7 +1169,7 @@ static int RIOBootComplete( struct rio_info *p, struct Host *HostP, uint Rup, st
 	{
 		HostP->Mapping[entry2].Flags = SLOT_IN_USE |
 			RTA_BOOTED | RTA_NEWBOOT | RTA16_SECOND_SLOT;
-#if NEED_TO_FIX
+#ifdef NEED_TO_FIX
 		RIO_SV_BROADCAST(HostP->svFlags[entry2]);
 #endif
 		HostP->Mapping[entry2].SysPort = MapP2->SysPort;
@@ -1188,7 +1188,7 @@ static int RIOBootComplete( struct rio_info *p, struct Host *HostP, uint Rup, st
 		else
 			HostP->Mapping[entry2].Flags = SLOT_TENTATIVE |
 				RTA_BOOTED | RTA_NEWBOOT | RTA16_SECOND_SLOT;
-#if NEED_TO_FIX
+#ifdef NEED_TO_FIX
 		RIO_SV_BROADCAST(HostP->svFlags[entry2]);
 #endif
 		bzero( (caddr_t)MapP2, sizeof(struct Map) );
diff --git a/drivers/char/rio/rioroute.c b/drivers/char/rio/rioroute.c
index 106b31f48a21..e9564c9fb37c 100644
--- a/drivers/char/rio/rioroute.c
+++ b/drivers/char/rio/rioroute.c
@@ -1023,7 +1023,7 @@ RIOFreeDisconnected(struct rio_info *p, struct Host *HostP, int unit)
1023 if (link < LINKS_PER_UNIT) 1023 if (link < LINKS_PER_UNIT)
1024 return 1; 1024 return 1;
1025 1025
1026#if NEED_TO_FIX_THIS 1026#ifdef NEED_TO_FIX_THIS
1027 /* Ok so all the links are disconnected. But we may have only just 1027 /* Ok so all the links are disconnected. But we may have only just
1028 ** made this slot tentative and not yet received a topology update. 1028 ** made this slot tentative and not yet received a topology update.
1029 ** Lets check how long ago we made it tentative. 1029 ** Lets check how long ago we made it tentative.
diff --git a/drivers/char/rio/riotable.c b/drivers/char/rio/riotable.c
index 8fb26ad2aa12..e45bc275907a 100644
--- a/drivers/char/rio/riotable.c
+++ b/drivers/char/rio/riotable.c
@@ -771,7 +771,7 @@ int RIOAssignRta( struct rio_info *p, struct Map *MapP )
771 if ((MapP->Flags & RTA16_SECOND_SLOT) == 0) 771 if ((MapP->Flags & RTA16_SECOND_SLOT) == 0)
772 CCOPY( MapP->Name, HostMapP->Name, MAX_NAME_LEN ); 772 CCOPY( MapP->Name, HostMapP->Name, MAX_NAME_LEN );
773 HostMapP->Flags = SLOT_IN_USE | RTA_BOOTED; 773 HostMapP->Flags = SLOT_IN_USE | RTA_BOOTED;
774#if NEED_TO_FIX 774#ifdef NEED_TO_FIX
775 RIO_SV_BROADCAST(p->RIOHosts[host].svFlags[MapP->ID-1]); 775 RIO_SV_BROADCAST(p->RIOHosts[host].svFlags[MapP->ID-1]);
776#endif 776#endif
777 if (MapP->Flags & RTA16_SECOND_SLOT) 777 if (MapP->Flags & RTA16_SECOND_SLOT)
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 7a969778915a..94a3b3e20bf9 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -35,5 +35,16 @@ config TCG_ATMEL
35 will be accessible from within Linux. To compile this driver 35 will be accessible from within Linux. To compile this driver
36 as a module, choose M here; the module will be called tpm_atmel. 36 as a module, choose M here; the module will be called tpm_atmel.
37 37
38config TCG_INFINEON
39 tristate "Infineon Technologies SLD 9630 TPM Interface"
40 depends on TCG_TPM
41 ---help---
42 If you have a TPM security chip from Infineon Technologies
43 say Yes and it will be accessible from within Linux. To
44 compile this driver as a module, choose M here; the module
45 will be called tpm_infineon.
46 Further information on this driver and the supported hardware
47 can be found at http://www.prosec.rub.de/tpm
48
38endmenu 49endmenu
39 50
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 736d3df266f5..2392e404e8d1 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -4,4 +4,4 @@
4obj-$(CONFIG_TCG_TPM) += tpm.o 4obj-$(CONFIG_TCG_TPM) += tpm.o
5obj-$(CONFIG_TCG_NSC) += tpm_nsc.o 5obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
6obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o 6obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
7 7obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
new file mode 100644
index 000000000000..0e3241645c19
--- /dev/null
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -0,0 +1,467 @@
1/*
2 * Description:
3 * Device Driver for the Infineon Technologies
4 * SLD 9630 TT Trusted Platform Module
5 * Specifications at www.trustedcomputinggroup.org
6 *
7 * Copyright (C) 2005, Marcel Selhorst <selhorst@crypto.rub.de>
8 * Applied Data Security Group, Ruhr-University Bochum, Germany
9 * Project-Homepage: http://www.prosec.rub.de/tpm
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation, version 2 of the
14 * License.
15 *
16 */
17
18#include "tpm.h"
19
20/* Infineon specific definitions */
21/* maximum number of WTX-packages */
22#define TPM_MAX_WTX_PACKAGES 50
23/* msleep-Time for WTX-packages */
24#define TPM_WTX_MSLEEP_TIME 20
25/* msleep-Time --> Interval to check status register */
26#define TPM_MSLEEP_TIME 3
27/* gives number of max. msleep()-calls before throwing timeout */
28#define TPM_MAX_TRIES 5000
29#define TCPA_INFINEON_DEV_VEN_VALUE 0x15D1
30#define TPM_DATA (TPM_ADDR + 1) & 0xff
31
32/* TPM header definitions */
33enum infineon_tpm_header {
34 TPM_VL_VER = 0x01,
35 TPM_VL_CHANNEL_CONTROL = 0x07,
36 TPM_VL_CHANNEL_PERSONALISATION = 0x0A,
37 TPM_VL_CHANNEL_TPM = 0x0B,
38 TPM_VL_CONTROL = 0x00,
39 TPM_INF_NAK = 0x15,
40 TPM_CTRL_WTX = 0x10,
41 TPM_CTRL_WTX_ABORT = 0x18,
42 TPM_CTRL_WTX_ABORT_ACK = 0x18,
43 TPM_CTRL_ERROR = 0x20,
44 TPM_CTRL_CHAININGACK = 0x40,
45 TPM_CTRL_CHAINING = 0x80,
46 TPM_CTRL_DATA = 0x04,
47 TPM_CTRL_DATA_CHA = 0x84,
48 TPM_CTRL_DATA_CHA_ACK = 0xC4
49};
50
51enum infineon_tpm_register {
52 WRFIFO = 0x00,
53 RDFIFO = 0x01,
54 STAT = 0x02,
55 CMD = 0x03
56};
57
58enum infineon_tpm_command_bits {
59 CMD_DIS = 0x00,
60 CMD_LP = 0x01,
61 CMD_RES = 0x02,
62 CMD_IRQC = 0x06
63};
64
65enum infineon_tpm_status_bits {
66 STAT_XFE = 0x00,
67 STAT_LPA = 0x01,
68 STAT_FOK = 0x02,
69 STAT_TOK = 0x03,
70 STAT_IRQA = 0x06,
71 STAT_RDA = 0x07
72};
73
74/* some outgoing values */
75enum infineon_tpm_values {
76 CHIP_ID1 = 0x20,
77 CHIP_ID2 = 0x21,
78 TPM_DAR = 0x30,
79 RESET_LP_IRQC_DISABLE = 0x41,
80 ENABLE_REGISTER_PAIR = 0x55,
81 IOLIMH = 0x60,
82 IOLIML = 0x61,
83 DISABLE_REGISTER_PAIR = 0xAA,
84 IDVENL = 0xF1,
85 IDVENH = 0xF2,
86 IDPDL = 0xF3,
87 IDPDH = 0xF4
88};
89
90static int number_of_wtx;
91
92static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo)
93{
94 int status;
95 int check = 0;
96 int i;
97
98 if (clear_wrfifo) {
99 for (i = 0; i < 4096; i++) {
100 status = inb(chip->vendor->base + WRFIFO);
101 if (status == 0xff) {
102 if (check == 5)
103 break;
104 else
105 check++;
106 }
107 }
108 }
109 /* Note: The values which are currently in the FIFO of the TPM
110 are thrown away since there is no usage for them. Usually,
111 this has nothing to say, since the TPM will give its answer
112 immediately or will be aborted anyway, so the data here is
113 usually garbage and useless.
114 We have to clean this, because the next communication with
115 the TPM would be rubbish, if there is still some old data
116 in the Read FIFO.
117 */
118 i = 0;
119 do {
120 status = inb(chip->vendor->base + RDFIFO);
121 status = inb(chip->vendor->base + STAT);
122 i++;
123 if (i == TPM_MAX_TRIES)
124 return -EIO;
125 } while ((status & (1 << STAT_RDA)) != 0);
126 return 0;
127}
128
129static int wait(struct tpm_chip *chip, int wait_for_bit)
130{
131 int status;
132 int i;
133 for (i = 0; i < TPM_MAX_TRIES; i++) {
134 status = inb(chip->vendor->base + STAT);
135 /* check the status-register if wait_for_bit is set */
136 if (status & 1 << wait_for_bit)
137 break;
138 msleep(TPM_MSLEEP_TIME);
139 }
140 if (i == TPM_MAX_TRIES) { /* timeout occurs */
141 if (wait_for_bit == STAT_XFE)
142 dev_err(&chip->pci_dev->dev,
143 "Timeout in wait(STAT_XFE)\n");
144 if (wait_for_bit == STAT_RDA)
145 dev_err(&chip->pci_dev->dev,
146 "Timeout in wait(STAT_RDA)\n");
147 return -EIO;
148 }
149 return 0;
150};
151
152static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
153{
154 wait(chip, STAT_XFE);
155 outb(sendbyte, chip->vendor->base + WRFIFO);
156}
157
158 /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
159 calculation time, it sends a WTX-package, which has to be acknowledged
160 or aborted. This usually occurs if you are hammering the TPM with key
161 creation. Set the maximum number of WTX-packages in the definitions
162 above, if the number is reached, the waiting-time will be denied
163 and the TPM command has to be resend.
164 */
165
166static void tpm_wtx(struct tpm_chip *chip)
167{
168 number_of_wtx++;
169 dev_info(&chip->pci_dev->dev, "Granting WTX (%02d / %02d)\n",
170 number_of_wtx, TPM_MAX_WTX_PACKAGES);
171 wait_and_send(chip, TPM_VL_VER);
172 wait_and_send(chip, TPM_CTRL_WTX);
173 wait_and_send(chip, 0x00);
174 wait_and_send(chip, 0x00);
175 msleep(TPM_WTX_MSLEEP_TIME);
176}
177
178static void tpm_wtx_abort(struct tpm_chip *chip)
179{
180 dev_info(&chip->pci_dev->dev, "Aborting WTX\n");
181 wait_and_send(chip, TPM_VL_VER);
182 wait_and_send(chip, TPM_CTRL_WTX_ABORT);
183 wait_and_send(chip, 0x00);
184 wait_and_send(chip, 0x00);
185 number_of_wtx = 0;
186 msleep(TPM_WTX_MSLEEP_TIME);
187}
188
189static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
190{
191 int i;
192 int ret;
193 u32 size = 0;
194
195recv_begin:
196 /* start receiving header */
197 for (i = 0; i < 4; i++) {
198 ret = wait(chip, STAT_RDA);
199 if (ret)
200 return -EIO;
201 buf[i] = inb(chip->vendor->base + RDFIFO);
202 }
203
204 if (buf[0] != TPM_VL_VER) {
205 dev_err(&chip->pci_dev->dev,
206 "Wrong transport protocol implementation!\n");
207 return -EIO;
208 }
209
210 if (buf[1] == TPM_CTRL_DATA) {
211 /* size of the data received */
212 size = ((buf[2] << 8) | buf[3]);
213
214 for (i = 0; i < size; i++) {
215 wait(chip, STAT_RDA);
216 buf[i] = inb(chip->vendor->base + RDFIFO);
217 }
218
219 if ((size == 0x6D00) && (buf[1] == 0x80)) {
220 dev_err(&chip->pci_dev->dev,
221 "Error handling on vendor layer!\n");
222 return -EIO;
223 }
224
225 for (i = 0; i < size; i++)
226 buf[i] = buf[i + 6];
227
228 size = size - 6;
229 return size;
230 }
231
232 if (buf[1] == TPM_CTRL_WTX) {
233 dev_info(&chip->pci_dev->dev, "WTX-package received\n");
234 if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
235 tpm_wtx(chip);
236 goto recv_begin;
237 } else {
238 tpm_wtx_abort(chip);
239 goto recv_begin;
240 }
241 }
242
243 if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
244 dev_info(&chip->pci_dev->dev, "WTX-abort acknowledged\n");
245 return size;
246 }
247
248 if (buf[1] == TPM_CTRL_ERROR) {
249 dev_err(&chip->pci_dev->dev, "ERROR-package received:\n");
250 if (buf[4] == TPM_INF_NAK)
251 dev_err(&chip->pci_dev->dev,
252 "-> Negative acknowledgement"
253 " - retransmit command!\n");
254 return -EIO;
255 }
256 return -EIO;
257}
258
259static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
260{
261 int i;
262 int ret;
263 u8 count_high, count_low, count_4, count_3, count_2, count_1;
264
265 /* Disabling Reset, LP and IRQC */
266 outb(RESET_LP_IRQC_DISABLE, chip->vendor->base + CMD);
267
268 ret = empty_fifo(chip, 1);
269 if (ret) {
270 dev_err(&chip->pci_dev->dev, "Timeout while clearing FIFO\n");
271 return -EIO;
272 }
273
274 ret = wait(chip, STAT_XFE);
275 if (ret)
276 return -EIO;
277
278 count_4 = (count & 0xff000000) >> 24;
279 count_3 = (count & 0x00ff0000) >> 16;
280 count_2 = (count & 0x0000ff00) >> 8;
281 count_1 = (count & 0x000000ff);
282 count_high = ((count + 6) & 0xffffff00) >> 8;
283 count_low = ((count + 6) & 0x000000ff);
284
285 /* Sending Header */
286 wait_and_send(chip, TPM_VL_VER);
287 wait_and_send(chip, TPM_CTRL_DATA);
288 wait_and_send(chip, count_high);
289 wait_and_send(chip, count_low);
290
291 /* Sending Data Header */
292 wait_and_send(chip, TPM_VL_VER);
293 wait_and_send(chip, TPM_VL_CHANNEL_TPM);
294 wait_and_send(chip, count_4);
295 wait_and_send(chip, count_3);
296 wait_and_send(chip, count_2);
297 wait_and_send(chip, count_1);
298
299 /* Sending Data */
300 for (i = 0; i < count; i++) {
301 wait_and_send(chip, buf[i]);
302 }
303 return count;
304}
305
306static void tpm_inf_cancel(struct tpm_chip *chip)
307{
308 /* Nothing yet!
309 This has something to do with the internal functions
310 of the TPM. Abort isn't really necessary...
311 */
312}
313
314static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
315static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
316static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
317static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
318
319static struct attribute *inf_attrs[] = {
320 &dev_attr_pubek.attr,
321 &dev_attr_pcrs.attr,
322 &dev_attr_caps.attr,
323 &dev_attr_cancel.attr,
324 NULL,
325};
326
327static struct attribute_group inf_attr_grp = {.attrs = inf_attrs };
328
329static struct file_operations inf_ops = {
330 .owner = THIS_MODULE,
331 .llseek = no_llseek,
332 .open = tpm_open,
333 .read = tpm_read,
334 .write = tpm_write,
335 .release = tpm_release,
336};
337
338static struct tpm_vendor_specific tpm_inf = {
339 .recv = tpm_inf_recv,
340 .send = tpm_inf_send,
341 .cancel = tpm_inf_cancel,
342 .req_complete_mask = 0,
343 .req_complete_val = 0,
344 .attr_group = &inf_attr_grp,
345 .miscdev = {.fops = &inf_ops,},
346};
347
348static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
349 const struct pci_device_id *pci_id)
350{
351 int rc = 0;
352 u8 iol, ioh;
353 int vendorid[2];
354 int version[2];
355 int productid[2];
356
357 if (pci_enable_device(pci_dev))
358 return -EIO;
359
360 dev_info(&pci_dev->dev, "LPC-bus found at 0x%x\n", pci_id->device);
361
362	/* query chip for its vendor ID, its version number, and so on */
363 outb(ENABLE_REGISTER_PAIR, TPM_ADDR);
364 outb(IDVENL, TPM_ADDR);
365 vendorid[1] = inb(TPM_DATA);
366 outb(IDVENH, TPM_ADDR);
367 vendorid[0] = inb(TPM_DATA);
368 outb(IDPDL, TPM_ADDR);
369 productid[1] = inb(TPM_DATA);
370 outb(IDPDH, TPM_ADDR);
371 productid[0] = inb(TPM_DATA);
372 outb(CHIP_ID1, TPM_ADDR);
373 version[1] = inb(TPM_DATA);
374 outb(CHIP_ID2, TPM_ADDR);
375 version[0] = inb(TPM_DATA);
376
377 if ((vendorid[0] << 8 | vendorid[1]) == (TCPA_INFINEON_DEV_VEN_VALUE)) {
378
379 /* read IO-ports from TPM */
380 outb(IOLIMH, TPM_ADDR);
381 ioh = inb(TPM_DATA);
382 outb(IOLIML, TPM_ADDR);
383 iol = inb(TPM_DATA);
384 tpm_inf.base = (ioh << 8) | iol;
385
386 if (tpm_inf.base == 0) {
387 dev_err(&pci_dev->dev, "No IO-ports set!\n");
388 pci_disable_device(pci_dev);
389 return -ENODEV;
390 }
391
392 /* activate register */
393 outb(TPM_DAR, TPM_ADDR);
394 outb(0x01, TPM_DATA);
395 outb(DISABLE_REGISTER_PAIR, TPM_ADDR);
396
397 /* disable RESET, LP and IRQC */
398 outb(RESET_LP_IRQC_DISABLE, tpm_inf.base + CMD);
399
400	/* Finally, we're done; print some info */
401 dev_info(&pci_dev->dev, "TPM found: "
402 "io base 0x%x, "
403 "chip version %02x%02x, "
404 "vendor id %x%x (Infineon), "
405 "product id %02x%02x"
406 "%s\n",
407 tpm_inf.base,
408 version[0], version[1],
409 vendorid[0], vendorid[1],
410	 productid[0], productid[1],
411	 ((productid[0] == 0) &&
412	 (productid[1] == 6)) ?
413	 " (SLD 9630 TT 1.1)" : "");
414
415 rc = tpm_register_hardware(pci_dev, &tpm_inf);
416 if (rc < 0) {
417 pci_disable_device(pci_dev);
418 return -ENODEV;
419 }
420 return 0;
421 } else {
422 dev_info(&pci_dev->dev, "No Infineon TPM found!\n");
423 pci_disable_device(pci_dev);
424 return -ENODEV;
425 }
426}
427
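The probe routine above reads the chip's identification bytes through an index/data register pair at TPM_ADDR/TPM_DATA. A condensed sketch of that access idiom (the helper name is hypothetical; the register defines are the driver's):

	static u8 tpm_cfg_read(u8 index)
	{
		outb(index, TPM_ADDR);	/* select a configuration register */
		return inb(TPM_DATA);	/* read the selected register */
	}

	/* e.g. vendor id: (tpm_cfg_read(IDVENH) << 8) | tpm_cfg_read(IDVENL) */
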
428static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
429 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
430 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
431 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
432 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
433 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
434 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
435 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
436 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2)},
437 {0,}
438};
439
440MODULE_DEVICE_TABLE(pci, tpm_pci_tbl);
441
442static struct pci_driver inf_pci_driver = {
443 .name = "tpm_inf",
444 .id_table = tpm_pci_tbl,
445 .probe = tpm_inf_probe,
446 .remove = __devexit_p(tpm_remove),
447 .suspend = tpm_pm_suspend,
448 .resume = tpm_pm_resume,
449};
450
451static int __init init_inf(void)
452{
453 return pci_register_driver(&inf_pci_driver);
454}
455
456static void __exit cleanup_inf(void)
457{
458 pci_unregister_driver(&inf_pci_driver);
459}
460
461module_init(init_inf);
462module_exit(cleanup_inf);
463
464MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>");
465MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT");
466MODULE_VERSION("1.4");
467MODULE_LICENSE("GPL");
diff --git a/drivers/char/watchdog/acquirewdt.c b/drivers/char/watchdog/acquirewdt.c
index 8f302121741b..7289f4af93d0 100644
--- a/drivers/char/watchdog/acquirewdt.c
+++ b/drivers/char/watchdog/acquirewdt.c
@@ -82,12 +82,7 @@ static int wdt_start = 0x443;
 module_param(wdt_start, int, 0);
 MODULE_PARM_DESC(wdt_start, "Acquire WDT 'start' io port (default 0x443)");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
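This hunk, and each watchdog hunk that follows, replaces the open-coded #ifdef block with a single WATCHDOG_NOWAYOUT macro. That macro is presumably supplied by <linux/watchdog.h> along these lines (a sketch of the assumed definition; the header change itself is not part of this diff):

	#ifdef CONFIG_WATCHDOG_NOWAYOUT
	#define WATCHDOG_NOWAYOUT	1
	#else
	#define WATCHDOG_NOWAYOUT	0
	#endif
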
diff --git a/drivers/char/watchdog/advantechwdt.c b/drivers/char/watchdog/advantechwdt.c
index ea73c8379bdd..194a3fd36b91 100644
--- a/drivers/char/watchdog/advantechwdt.c
+++ b/drivers/char/watchdog/advantechwdt.c
@@ -73,12 +73,7 @@ static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=63, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/alim1535_wdt.c b/drivers/char/watchdog/alim1535_wdt.c
index 35dcbf8be7d1..8338ca300e2e 100644
--- a/drivers/char/watchdog/alim1535_wdt.c
+++ b/drivers/char/watchdog/alim1535_wdt.c
@@ -38,12 +38,7 @@ static int timeout = WATCHDOG_TIMEOUT;
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (0<timeout<18000, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
@@ -317,7 +312,7 @@ static int ali_notify_sys(struct notifier_block *this, unsigned long code, void
  */
 
 static struct pci_device_id ali_pci_tbl[] = {
-	{ PCI_VENDOR_ID_AL, 1535, PCI_ANY_ID, PCI_ANY_ID,},
+	{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, ali_pci_tbl);
diff --git a/drivers/char/watchdog/alim7101_wdt.c b/drivers/char/watchdog/alim7101_wdt.c
index 90c091d9e0f5..c05ac188a4d7 100644
--- a/drivers/char/watchdog/alim7101_wdt.c
+++ b/drivers/char/watchdog/alim7101_wdt.c
@@ -75,12 +75,7 @@ static unsigned long wdt_is_open;
 static char wdt_expect_close;
 static struct pci_dev *alim7101_pmu;
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/eurotechwdt.c b/drivers/char/watchdog/eurotechwdt.c
index 2a29a511df7f..25c2f2575611 100644
--- a/drivers/char/watchdog/eurotechwdt.c
+++ b/drivers/char/watchdog/eurotechwdt.c
@@ -72,12 +72,7 @@ static char *ev = "int";
 
 #define WDT_TIMEOUT 60 /* 1 minute */
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index 5d07ee59679d..f975dab1ddf9 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -105,12 +105,7 @@ static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<heartbeat<39, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/ib700wdt.c b/drivers/char/watchdog/ib700wdt.c
index d974f16e84d2..cf60329eec85 100644
--- a/drivers/char/watchdog/ib700wdt.c
+++ b/drivers/char/watchdog/ib700wdt.c
@@ -117,12 +117,7 @@ static int wd_times[] = {
 
 static int wd_margin = WD_TIMO;
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/indydog.c b/drivers/char/watchdog/indydog.c
index 6af2c799b57e..b4b94daba67e 100644
--- a/drivers/char/watchdog/indydog.c
+++ b/drivers/char/watchdog/indydog.c
@@ -29,14 +29,9 @@
 #define PFX "indydog: "
 static int indydog_alive;
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
 #define WATCHDOG_TIMEOUT 30		/* 30 sec default timeout */
 
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/ixp2000_wdt.c b/drivers/char/watchdog/ixp2000_wdt.c
index 4b039516cc86..e7640bc4904b 100644
--- a/drivers/char/watchdog/ixp2000_wdt.c
+++ b/drivers/char/watchdog/ixp2000_wdt.c
@@ -30,11 +30,7 @@
 #include <asm/hardware.h>
 #include <asm/uaccess.h>
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
+static int nowayout = WATCHDOG_NOWAYOUT;
 static unsigned int heartbeat = 60;	/* (secs) Default is 1 minute */
 static unsigned long wdt_status;
 
diff --git a/drivers/char/watchdog/ixp4xx_wdt.c b/drivers/char/watchdog/ixp4xx_wdt.c
index 83df369113a4..8d916afbf4fa 100644
--- a/drivers/char/watchdog/ixp4xx_wdt.c
+++ b/drivers/char/watchdog/ixp4xx_wdt.c
@@ -27,11 +27,7 @@
 #include <asm/hardware.h>
 #include <asm/uaccess.h>
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
+static int nowayout = WATCHDOG_NOWAYOUT;
 static int heartbeat = 60;	/* (secs) Default is 1 minute */
 static unsigned long wdt_status;
 static unsigned long boot_status;
diff --git a/drivers/char/watchdog/machzwd.c b/drivers/char/watchdog/machzwd.c
index 9da395fa7794..a9a20aad61e7 100644
--- a/drivers/char/watchdog/machzwd.c
+++ b/drivers/char/watchdog/machzwd.c
@@ -94,12 +94,7 @@ MODULE_DESCRIPTION("MachZ ZF-Logic Watchdog driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/mixcomwd.c b/drivers/char/watchdog/mixcomwd.c
index 3143e4a07535..c9b301dccec3 100644
--- a/drivers/char/watchdog/mixcomwd.c
+++ b/drivers/char/watchdog/mixcomwd.c
@@ -62,12 +62,7 @@ static int mixcomwd_timer_alive;
 static struct timer_list mixcomwd_timer = TIMER_INITIALIZER(NULL, 0, 0);
 static char expect_close;
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/pcwd.c b/drivers/char/watchdog/pcwd.c
index 6ebce3f2ef9c..427ad51b7a35 100644
--- a/drivers/char/watchdog/pcwd.c
+++ b/drivers/char/watchdog/pcwd.c
@@ -146,12 +146,7 @@ static int heartbeat = WATCHDOG_HEARTBEAT;
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<=heartbeat<=7200, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/pcwd_pci.c b/drivers/char/watchdog/pcwd_pci.c
index 8ce066627326..2b13afb09c5d 100644
--- a/drivers/char/watchdog/pcwd_pci.c
+++ b/drivers/char/watchdog/pcwd_pci.c
@@ -103,12 +103,7 @@ static int heartbeat = WATCHDOG_HEARTBEAT;
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (0<heartbeat<65536, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 1127201d73b8..092e9b133750 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -79,12 +79,7 @@ static int heartbeat = WATCHDOG_HEARTBEAT;
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (0<heartbeat<65536, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 1699d2c28ce5..f85ac898a49a 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -62,12 +62,7 @@
 #define CONFIG_S3C2410_WATCHDOG_ATBOOT		(0)
 #define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME	(15)
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 static int tmr_margin = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME;
 static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT;
 static int soft_noboot = 0;
diff --git a/drivers/char/watchdog/sa1100_wdt.c b/drivers/char/watchdog/sa1100_wdt.c
index 34e8f7b15e30..1b2132617dc3 100644
--- a/drivers/char/watchdog/sa1100_wdt.c
+++ b/drivers/char/watchdog/sa1100_wdt.c
@@ -42,11 +42,7 @@ static unsigned long sa1100wdt_users;
 static int expect_close;
 static int pre_margin;
 static int boot_status;
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
+static int nowayout = WATCHDOG_NOWAYOUT;
 
 /*
  * Allow only one person to hold it open
diff --git a/drivers/char/watchdog/sbc60xxwdt.c b/drivers/char/watchdog/sbc60xxwdt.c
index d7de9880605a..ed0bd55fbfc1 100644
--- a/drivers/char/watchdog/sbc60xxwdt.c
+++ b/drivers/char/watchdog/sbc60xxwdt.c
@@ -98,12 +98,7 @@ static int timeout = WATCHDOG_TIMEOUT; /* in seconds, will be multiplied by HZ t
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/sc1200wdt.c b/drivers/char/watchdog/sc1200wdt.c
index 24401e84729e..515ce7572049 100644
--- a/drivers/char/watchdog/sc1200wdt.c
+++ b/drivers/char/watchdog/sc1200wdt.c
@@ -91,12 +91,7 @@ MODULE_PARM_DESC(io, "io port");
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "range is 0-255 minutes, default is 1");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/sc520_wdt.c b/drivers/char/watchdog/sc520_wdt.c
index f6d143e1900d..72501be79b0c 100644
--- a/drivers/char/watchdog/sc520_wdt.c
+++ b/drivers/char/watchdog/sc520_wdt.c
@@ -94,12 +94,7 @@ static int timeout = WATCHDOG_TIMEOUT; /* in seconds, will be multiplied by HZ t
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/scx200_wdt.c b/drivers/char/watchdog/scx200_wdt.c
index b569670e4ed5..c4568569f3a8 100644
--- a/drivers/char/watchdog/scx200_wdt.c
+++ b/drivers/char/watchdog/scx200_wdt.c
@@ -39,15 +39,11 @@ MODULE_DESCRIPTION("NatSemi SCx200 Watchdog Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
 
-#ifndef CONFIG_WATCHDOG_NOWAYOUT
-#define CONFIG_WATCHDOG_NOWAYOUT 0
-#endif
-
 static int margin = 60;		/* in seconds */
 module_param(margin, int, 0);
 MODULE_PARM_DESC(margin, "Watchdog margin in seconds");
 
-static int nowayout = CONFIG_WATCHDOG_NOWAYOUT;
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
 
diff --git a/drivers/char/watchdog/shwdt.c b/drivers/char/watchdog/shwdt.c
index 3bc9272a474c..1f4cab55b2ef 100644
--- a/drivers/char/watchdog/shwdt.c
+++ b/drivers/char/watchdog/shwdt.c
@@ -75,11 +75,7 @@ static unsigned long next_heartbeat;
 #define WATCHDOG_HEARTBEAT 30			/* 30 sec default heartbeat */
 static int heartbeat = WATCHDOG_HEARTBEAT;	/* in seconds */
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
+static int nowayout = WATCHDOG_NOWAYOUT;
 
 /**
  * sh_wdt_start - Start the Watchdog
diff --git a/drivers/char/watchdog/softdog.c b/drivers/char/watchdog/softdog.c
index 98c7578740e2..4d7ed931f5c6 100644
--- a/drivers/char/watchdog/softdog.c
+++ b/drivers/char/watchdog/softdog.c
@@ -56,12 +56,7 @@ static int soft_margin = TIMER_MARGIN; /* in seconds */
 module_param(soft_margin, int, 0);
 MODULE_PARM_DESC(soft_margin, "Watchdog soft_margin in seconds. (0<soft_margin<65536, default=" __MODULE_STRING(TIMER_MARGIN) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/w83627hf_wdt.c b/drivers/char/watchdog/w83627hf_wdt.c
index 813c97038f84..465e0fd0423d 100644
--- a/drivers/char/watchdog/w83627hf_wdt.c
+++ b/drivers/char/watchdog/w83627hf_wdt.c
@@ -54,12 +54,7 @@ static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=63, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/w83877f_wdt.c b/drivers/char/watchdog/w83877f_wdt.c
index bccbd4d6ac2d..52a8bd0a5988 100644
--- a/drivers/char/watchdog/w83877f_wdt.c
+++ b/drivers/char/watchdog/w83877f_wdt.c
@@ -85,12 +85,7 @@ module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
 
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/wafer5823wdt.c b/drivers/char/watchdog/wafer5823wdt.c
index abb0bea45c02..7cf6c9bbf486 100644
--- a/drivers/char/watchdog/wafer5823wdt.c
+++ b/drivers/char/watchdog/wafer5823wdt.c
@@ -63,12 +63,7 @@ static int timeout = WD_TIMO; /* in seconds */
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=255, default=" __MODULE_STRING(WD_TIMO) ".");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/wdt.c b/drivers/char/watchdog/wdt.c
index 1210ca0c425b..ec7e401228ee 100644
--- a/drivers/char/watchdog/wdt.c
+++ b/drivers/char/watchdog/wdt.c
@@ -63,12 +63,7 @@ static int wd_heartbeat;
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (0<heartbeat<65536, default=" __MODULE_STRING(WD_TIMO) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/wdt977.c b/drivers/char/watchdog/wdt977.c
index 072e9b214759..44d49dfacbb3 100644
--- a/drivers/char/watchdog/wdt977.c
+++ b/drivers/char/watchdog/wdt977.c
@@ -53,12 +53,7 @@ MODULE_PARM_DESC(timeout,"Watchdog timeout in seconds (60..15300), default=" __M
 module_param(testmode, int, 0);
 MODULE_PARM_DESC(testmode,"Watchdog testmode (1 = no reboot), default=0");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/char/watchdog/wdt_pci.c b/drivers/char/watchdog/wdt_pci.c
index c80cb77b92fb..4b3311993d48 100644
--- a/drivers/char/watchdog/wdt_pci.c
+++ b/drivers/char/watchdog/wdt_pci.c
@@ -89,12 +89,7 @@ static int wd_heartbeat;
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (0<heartbeat<65536, default=" __MODULE_STRING(WD_TIMO) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-
+static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 03c23ce98edb..9ad3e9262e8a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -288,6 +288,100 @@ static struct i2c_adapter mpc_ops = {
 	.retries = 1
 };
 
+static int fsl_i2c_probe(struct device *device)
+{
+	int result = 0;
+	struct mpc_i2c *i2c;
+	struct platform_device *pdev = to_platform_device(device);
+	struct fsl_i2c_platform_data *pdata;
+	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	pdata = (struct fsl_i2c_platform_data *) pdev->dev.platform_data;
+
+	if (!(i2c = kmalloc(sizeof(*i2c), GFP_KERNEL))) {
+		return -ENOMEM;
+	}
+	memset(i2c, 0, sizeof(*i2c));
+
+	i2c->irq = platform_get_irq(pdev, 0);
+	i2c->flags = pdata->device_flags;
+	init_waitqueue_head(&i2c->queue);
+
+	i2c->base = ioremap((phys_addr_t)r->start, MPC_I2C_REGION);
+
+	if (!i2c->base) {
+		printk(KERN_ERR "i2c-mpc - failed to map controller\n");
+		result = -ENOMEM;
+		goto fail_map;
+	}
+
+	if (i2c->irq != 0)
+		if ((result = request_irq(i2c->irq, mpc_i2c_isr,
+					  SA_SHIRQ, "i2c-mpc", i2c)) < 0) {
+			printk(KERN_ERR
+			       "i2c-mpc - failed to attach interrupt\n");
+			goto fail_irq;
+		}
+
+	mpc_i2c_setclock(i2c);
+	dev_set_drvdata(device, i2c);
+
+	i2c->adap = mpc_ops;
+	i2c_set_adapdata(&i2c->adap, i2c);
+	i2c->adap.dev.parent = &pdev->dev;
+	if ((result = i2c_add_adapter(&i2c->adap)) < 0) {
+		printk(KERN_ERR "i2c-mpc - failed to add adapter\n");
+		goto fail_add;
+	}
+
+	return result;
+
+      fail_add:
+	if (i2c->irq != 0)
+		free_irq(i2c->irq, NULL);
+      fail_irq:
+	iounmap(i2c->base);
+      fail_map:
+	kfree(i2c);
+	return result;
+};
+
+static int fsl_i2c_remove(struct device *device)
+{
+	struct mpc_i2c *i2c = dev_get_drvdata(device);
+
+	i2c_del_adapter(&i2c->adap);
+	dev_set_drvdata(device, NULL);
+
+	if (i2c->irq != 0)
+		free_irq(i2c->irq, i2c);
+
+	iounmap(i2c->base);
+	kfree(i2c);
+	return 0;
+};
+
+/* Structure for a device driver */
+static struct device_driver fsl_i2c_driver = {
+	.name = "fsl-i2c",
+	.bus = &platform_bus_type,
+	.probe = fsl_i2c_probe,
+	.remove = fsl_i2c_remove,
+};
+
+static int __init fsl_i2c_init(void)
+{
+	return driver_register(&fsl_i2c_driver);
+}
+
+static void __exit fsl_i2c_exit(void)
+{
+	driver_unregister(&fsl_i2c_driver);
+}
+
+module_init(fsl_i2c_init);
+module_exit(fsl_i2c_exit);
+
 MODULE_AUTHOR("Adrian Cox <adrian@humboldt.co.uk>");
 MODULE_DESCRIPTION
     ("I2C-Bus adapter for MPC107 bridge and MPC824x/85xx/52xx processors");
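fsl_i2c_probe() above pulls its register window, IRQ, and flags from the platform bus, so it only binds when board code registers a matching device. A minimal sketch of that board-code side (the device name must match "fsl-i2c"; the base address, IRQ number, and flags below are made-up examples, not values from this patch):

	static struct resource fsl_i2c_resources[] = {
		{
			.start = 0xe0003000,	/* example register base */
			.end   = 0xe0003fff,
			.flags = IORESOURCE_MEM,
		},
		{
			.start = 43,		/* example interrupt line */
			.end   = 43,
			.flags = IORESOURCE_IRQ,
		},
	};

	static struct fsl_i2c_platform_data fsl_i2c_pdata = {
		.device_flags = 0,		/* example flags */
	};

	static struct platform_device fsl_i2c_device = {
		.name		= "fsl-i2c",	/* matched against fsl_i2c_driver.name */
		.id		= 0,
		.dev		= { .platform_data = &fsl_i2c_pdata, },
		.num_resources	= ARRAY_SIZE(fsl_i2c_resources),
		.resource	= fsl_i2c_resources,
	};

	/* board init would then call platform_device_register(&fsl_i2c_device); */
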
diff --git a/drivers/ide/cris/Makefile b/drivers/ide/cris/Makefile
index fdc294325d00..6176e8d6b2e6 100644
--- a/drivers/ide/cris/Makefile
+++ b/drivers/ide/cris/Makefile
@@ -1,3 +1,3 @@
 EXTRA_CFLAGS += -Idrivers/ide
 
-obj-$(CONFIG_ETRAX_ARCH_V10) += ide-v10.o
+obj-y += ide-cris.o
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
new file mode 100644
index 000000000000..cd15e6260510
--- /dev/null
+++ b/drivers/ide/cris/ide-cris.c
@@ -0,0 +1,1107 @@
1/* $Id: cris-ide-driver.patch,v 1.1 2005/06/29 21:39:07 akpm Exp $
2 *
3 * Etrax specific IDE functions, like init and PIO-mode setting etc.
4 * Almost the entire ide.c is used for the rest of the Etrax ATA driver.
5 * Copyright (c) 2000-2005 Axis Communications AB
6 *
7 * Authors: Bjorn Wesen (initial version)
8 * Mikael Starvik (crisv32 port)
9 */
10
11/* Regarding DMA:
12 *
13 * There are two forms of DMA - "DMA handshaking" between the interface and the drive,
14 * and DMA between the memory and the interface. We can ALWAYS use the latter, since it's
15 * something built-in in the Etrax. However only some drives support the DMA-mode handshaking
16 * on the ATA-bus. The normal PC driver and Triton interface disable memory-interface DMA when the
17 * device can't do DMA handshaking for some stupid reason. We don't need to do that.
18 */
19
20#undef REALLY_SLOW_IO /* most systems can safely undef this */
21
22#include <linux/config.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/timer.h>
26#include <linux/mm.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/blkdev.h>
30#include <linux/hdreg.h>
31#include <linux/ide.h>
32#include <linux/init.h>
33
34#include <asm/io.h>
35#include <asm/dma.h>
36
37/* number of DMA descriptors */
38#define MAX_DMA_DESCRS 64
39
40/* number of times to retry busy-flags when reading/writing IDE-registers
41 * this can't be too high because a hung harddisk might cause the watchdog
42 * to trigger (sometimes INB and OUTB are called with irq's disabled)
43 */
44
45#define IDE_REGISTER_TIMEOUT 300
46
47#define LOWDB(x)
48#define D(x)
49
50enum /* Transfer types */
51{
52 TYPE_PIO,
53 TYPE_DMA,
54 TYPE_UDMA
55};
56
57/* CRISv32 specifics */
58#ifdef CONFIG_ETRAX_ARCH_V32
59#include <asm/arch/hwregs/ata_defs.h>
60#include <asm/arch/hwregs/dma_defs.h>
61#include <asm/arch/hwregs/dma.h>
62#include <asm/arch/pinmux.h>
63
64#define ATA_UDMA2_CYC 2
65#define ATA_UDMA2_DVS 3
66#define ATA_UDMA1_CYC 2
67#define ATA_UDMA1_DVS 4
68#define ATA_UDMA0_CYC 4
69#define ATA_UDMA0_DVS 6
70#define ATA_DMA2_STROBE 7
71#define ATA_DMA2_HOLD 1
72#define ATA_DMA1_STROBE 8
73#define ATA_DMA1_HOLD 3
74#define ATA_DMA0_STROBE 25
75#define ATA_DMA0_HOLD 19
76#define ATA_PIO4_SETUP 3
77#define ATA_PIO4_STROBE 7
78#define ATA_PIO4_HOLD 1
79#define ATA_PIO3_SETUP 3
80#define ATA_PIO3_STROBE 9
81#define ATA_PIO3_HOLD 3
82#define ATA_PIO2_SETUP 3
83#define ATA_PIO2_STROBE 13
84#define ATA_PIO2_HOLD 5
85#define ATA_PIO1_SETUP 5
86#define ATA_PIO1_STROBE 23
87#define ATA_PIO1_HOLD 9
88#define ATA_PIO0_SETUP 9
89#define ATA_PIO0_STROBE 39
90#define ATA_PIO0_HOLD 9
91
92int
93cris_ide_ack_intr(ide_hwif_t* hwif)
94{
95 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2,
96 int, hwif->io_ports[0]);
97 REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
98 return 1;
99}
100
101static inline int
102cris_ide_busy(void)
103{
104 reg_ata_rs_stat_data stat_data;
105 stat_data = REG_RD(ata, regi_ata, rs_stat_data);
106 return stat_data.busy;
107}
108
109static inline int
110cris_ide_ready(void)
111{
112 return !cris_ide_busy();
113}
114
115static inline int
116cris_ide_data_available(unsigned short* data)
117{
118 reg_ata_rs_stat_data stat_data;
119 stat_data = REG_RD(ata, regi_ata, rs_stat_data);
120 *data = stat_data.data;
121 return stat_data.dav;
122}
123
124static void
125cris_ide_write_command(unsigned long command)
126{
127 REG_WR_INT(ata, regi_ata, rw_ctrl2, command); /* write data to the drive's register */
128}
129
130static void
131cris_ide_set_speed(int type, int setup, int strobe, int hold)
132{
133 reg_ata_rw_ctrl0 ctrl0 = REG_RD(ata, regi_ata, rw_ctrl0);
134 reg_ata_rw_ctrl1 ctrl1 = REG_RD(ata, regi_ata, rw_ctrl1);
135
136 if (type == TYPE_PIO) {
137 ctrl0.pio_setup = setup;
138 ctrl0.pio_strb = strobe;
139 ctrl0.pio_hold = hold;
140 } else if (type == TYPE_DMA) {
141 ctrl0.dma_strb = strobe;
142 ctrl0.dma_hold = hold;
143 } else if (type == TYPE_UDMA) {
144 ctrl1.udma_tcyc = setup;
145 ctrl1.udma_tdvs = strobe;
146 }
147 REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
148 REG_WR(ata, regi_ata, rw_ctrl1, ctrl1);
149}
150
151static unsigned long
152cris_ide_base_address(int bus)
153{
154 reg_ata_rw_ctrl2 ctrl2 = {0};
155 ctrl2.sel = bus;
156 return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
157}
158
159static unsigned long
160cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
161{
162 reg_ata_rw_ctrl2 ctrl2 = {0};
163 ctrl2.addr = addr;
164 ctrl2.cs1 = cs1;
165 ctrl2.cs0 = cs0;
166 return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
167}
168
169static __init void
170cris_ide_reset(unsigned val)
171{
172 reg_ata_rw_ctrl0 ctrl0 = {0};
173 ctrl0.rst = val ? regk_ata_active : regk_ata_inactive;
174 REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
175}
176
177static __init void
178cris_ide_init(void)
179{
180 reg_ata_rw_ctrl0 ctrl0 = {0};
181 reg_ata_rw_intr_mask intr_mask = {0};
182
183 ctrl0.en = regk_ata_yes;
184 REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
185
186 intr_mask.bus0 = regk_ata_yes;
187 intr_mask.bus1 = regk_ata_yes;
188 intr_mask.bus2 = regk_ata_yes;
189 intr_mask.bus3 = regk_ata_yes;
190
191 REG_WR(ata, regi_ata, rw_intr_mask, intr_mask);
192
193 crisv32_request_dma(2, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
194 crisv32_request_dma(3, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
195
196 crisv32_pinmux_alloc_fixed(pinmux_ata);
197 crisv32_pinmux_alloc_fixed(pinmux_ata0);
198 crisv32_pinmux_alloc_fixed(pinmux_ata1);
199 crisv32_pinmux_alloc_fixed(pinmux_ata2);
200 crisv32_pinmux_alloc_fixed(pinmux_ata3);
201
202 DMA_RESET(regi_dma2);
203 DMA_ENABLE(regi_dma2);
204 DMA_RESET(regi_dma3);
205 DMA_ENABLE(regi_dma3);
206
207 DMA_WR_CMD (regi_dma2, regk_dma_set_w_size2);
208 DMA_WR_CMD (regi_dma3, regk_dma_set_w_size2);
209}
210
211static dma_descr_context mycontext __attribute__ ((__aligned__(32)));
212
213#define cris_dma_descr_type dma_descr_data
214#define cris_pio_read regk_ata_rd
215#define cris_ultra_mask 0x7
216#define MAX_DESCR_SIZE 0xffffffffUL
217
218static unsigned long
219cris_ide_get_reg(unsigned long reg)
220{
221 return (reg & 0x0e000000) >> 25;
222}
223
224static void
225cris_ide_fill_descriptor(cris_dma_descr_type *d, void* buf, unsigned int len, int last)
226{
227 d->buf = (char*)virt_to_phys(buf);
228 d->after = d->buf + len;
229 d->eol = last;
230}
231
232static void
233cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,int len)
234{
235 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, IDE_DATA_REG);
236 reg_ata_rw_trf_cnt trf_cnt = {0};
237
238 mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
239 mycontext.saved_data_buf = d->buf;
240 /* start the dma channel */
241 DMA_START_CONTEXT(dir ? regi_dma3 : regi_dma2, virt_to_phys(&mycontext));
242
243 /* initiate a multi word dma read using PIO handshaking */
244 trf_cnt.cnt = len >> 1;
245 /* Due to a "feature" the transfer count has to be one extra word for UDMA. */
246 if (type == TYPE_UDMA)
247 trf_cnt.cnt++;
248 REG_WR(ata, regi_ata, rw_trf_cnt, trf_cnt);
249
250 ctrl2.rw = dir ? regk_ata_rd : regk_ata_wr;
251 ctrl2.trf_mode = regk_ata_dma;
252 ctrl2.hsh = type == TYPE_PIO ? regk_ata_pio :
253 type == TYPE_DMA ? regk_ata_dma : regk_ata_udma;
254 ctrl2.multi = regk_ata_yes;
255 ctrl2.dma_size = regk_ata_word;
256 REG_WR(ata, regi_ata, rw_ctrl2, ctrl2);
257}
258
259static void
260cris_ide_wait_dma(int dir)
261{
262 reg_dma_rw_stat status;
263 do
264 {
265 status = REG_RD(dma, dir ? regi_dma3 : regi_dma2, rw_stat);
266 } while(status.list_state != regk_dma_data_at_eol);
267}
268
269static int cris_dma_test_irq(ide_drive_t *drive)
270{
271 int intr = REG_RD_INT(ata, regi_ata, r_intr);
272 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, IDE_DATA_REG);
273 return intr & (1 << ctrl2.sel) ? 1 : 0;
274}
275
276static void cris_ide_initialize_dma(int dir)
277{
278}
279
280#else
281/* CRISv10 specifics */
282#include <asm/arch/svinto.h>
283#include <asm/arch/io_interface_mux.h>
284
285/* PIO timing (in R_ATA_CONFIG)
286 *
287 * _____________________________
288 * ADDRESS : ________/
289 *
290 * _______________
291 * DIOR : ____________/ \__________
292 *
293 * _______________
294 * DATA : XXXXXXXXXXXXXXXX_______________XXXXXXXX
295 *
296 *
297 * DIOR is unbuffered while address and data is buffered.
298 * This creates two problems:
299 * 1. The DIOR pulse is too early (because it is unbuffered)
300 * 2. The rise time of DIOR is long
301 *
302 * There are at least three different plausible solutions
303 * 1. Use a pad capable of larger currents in Etrax
304 * 2. Use an external buffer
305 * 3. Make the strobe pulse longer
306 *
307 * Some of the strobe timings below are modified to compensate
308 * for this. This implies a slight performance decrease.
309 *
310 * THIS SHOULD NEVER BE CHANGED!
311 *
312 * TODO: Is this true for the latest LX boards still ?
313 */
314
315#define ATA_UDMA2_CYC 0 /* No UDMA supported, just to make it compile. */
316#define ATA_UDMA2_DVS 0
317#define ATA_UDMA1_CYC 0
318#define ATA_UDMA1_DVS 0
319#define ATA_UDMA0_CYC 0
320#define ATA_UDMA0_DVS 0
321#define ATA_DMA2_STROBE 4
322#define ATA_DMA2_HOLD 0
323#define ATA_DMA1_STROBE 4
324#define ATA_DMA1_HOLD 1
325#define ATA_DMA0_STROBE 12
326#define ATA_DMA0_HOLD 9
327#define ATA_PIO4_SETUP 1
328#define ATA_PIO4_STROBE 5
329#define ATA_PIO4_HOLD 0
330#define ATA_PIO3_SETUP 1
331#define ATA_PIO3_STROBE 5
332#define ATA_PIO3_HOLD 1
333#define ATA_PIO2_SETUP 1
334#define ATA_PIO2_STROBE 6
335#define ATA_PIO2_HOLD 2
336#define ATA_PIO1_SETUP 2
337#define ATA_PIO1_STROBE 11
338#define ATA_PIO1_HOLD 4
339#define ATA_PIO0_SETUP 4
340#define ATA_PIO0_STROBE 19
341#define ATA_PIO0_HOLD 4
342
343int
344cris_ide_ack_intr(ide_hwif_t* hwif)
345{
346 return 1;
347}
348
349static inline int
350cris_ide_busy(void)
351{
352 return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy) ;
353}
354
355static inline int
356cris_ide_ready(void)
357{
358 return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy) ;
359}
360
361static inline int
362cris_ide_data_available(unsigned short* data)
363{
364 unsigned long status = *R_ATA_STATUS_DATA;
365 *data = (unsigned short)status;
366 return status & IO_MASK(R_ATA_STATUS_DATA, dav);
367}
368
369static void
370cris_ide_write_command(unsigned long command)
371{
372 *R_ATA_CTRL_DATA = command;
373}
374
375static void
376cris_ide_set_speed(int type, int setup, int strobe, int hold)
377{
378 static int pio_setup = ATA_PIO4_SETUP;
379 static int pio_strobe = ATA_PIO4_STROBE;
380 static int pio_hold = ATA_PIO4_HOLD;
381 static int dma_strobe = ATA_DMA2_STROBE;
382 static int dma_hold = ATA_DMA2_HOLD;
383
384 if (type == TYPE_PIO) {
385 pio_setup = setup;
386 pio_strobe = strobe;
387 pio_hold = hold;
388 } else if (type == TYPE_DMA) {
389 dma_strobe = strobe;
390 dma_hold = hold;
391 }
392 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
393 IO_FIELD( R_ATA_CONFIG, dma_strobe, dma_strobe ) |
394 IO_FIELD( R_ATA_CONFIG, dma_hold, dma_hold ) |
395 IO_FIELD( R_ATA_CONFIG, pio_setup, pio_setup ) |
396 IO_FIELD( R_ATA_CONFIG, pio_strobe, pio_strobe ) |
397 IO_FIELD( R_ATA_CONFIG, pio_hold, pio_hold ) );
398}
399
400static unsigned long
401cris_ide_base_address(int bus)
402{
403 return IO_FIELD(R_ATA_CTRL_DATA, sel, bus);
404}
405
406static unsigned long
407cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
408{
409 return IO_FIELD(R_ATA_CTRL_DATA, addr, addr) |
410 IO_FIELD(R_ATA_CTRL_DATA, cs0, cs0) |
411 IO_FIELD(R_ATA_CTRL_DATA, cs1, cs1);
412}
413
414static __init void
415cris_ide_reset(unsigned val)
416{
417#ifdef CONFIG_ETRAX_IDE_G27_RESET
418 REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, val);
419#endif
420#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
421 REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, val);
422#endif
423#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET
424 REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, val);
425#endif
426#ifdef CONFIG_ETRAX_IDE_PB7_RESET
427 port_pb_dir_shadow = port_pb_dir_shadow |
428 IO_STATE(R_PORT_PB_DIR, dir7, output);
429 *R_PORT_PB_DIR = port_pb_dir_shadow;
430 REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 7, val);
431#endif
432}
433
434static __init void
435cris_ide_init(void)
436{
437 volatile unsigned int dummy;
438
439 *R_ATA_CTRL_DATA = 0;
440 *R_ATA_TRANSFER_CNT = 0;
441 *R_ATA_CONFIG = 0;
442
443 if (cris_request_io_interface(if_ata, "ETRAX100LX IDE")) {
444 printk(KERN_CRIT "ide: Failed to get IO interface\n");
445 return;
446 } else if (cris_request_dma(ATA_TX_DMA_NBR,
447 "ETRAX100LX IDE TX",
448 DMA_VERBOSE_ON_ERROR,
449 dma_ata)) {
450 cris_free_io_interface(if_ata);
451 printk(KERN_CRIT "ide: Failed to get Tx DMA channel\n");
452 return;
453 } else if (cris_request_dma(ATA_RX_DMA_NBR,
454 "ETRAX100LX IDE RX",
455 DMA_VERBOSE_ON_ERROR,
456 dma_ata)) {
457 cris_free_dma(ATA_TX_DMA_NBR, "ETRAX100LX IDE Tx");
458 cris_free_io_interface(if_ata);
459 printk(KERN_CRIT "ide: Failed to get Rx DMA channel\n");
460 return;
461 }
462
463 /* make a dummy read to set the ata controller in a proper state */
464 dummy = *R_ATA_STATUS_DATA;
465
466 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ));
467 *R_ATA_CTRL_DATA = ( IO_STATE( R_ATA_CTRL_DATA, rw, read) |
468 IO_FIELD( R_ATA_CTRL_DATA, addr, 1 ) );
469
470 while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait for busy flag*/
471
472 *R_IRQ_MASK0_SET = ( IO_STATE( R_IRQ_MASK0_SET, ata_irq0, set ) |
473 IO_STATE( R_IRQ_MASK0_SET, ata_irq1, set ) |
474 IO_STATE( R_IRQ_MASK0_SET, ata_irq2, set ) |
475 IO_STATE( R_IRQ_MASK0_SET, ata_irq3, set ) );
476
477 /* reset the dma channels we will use */
478
479 RESET_DMA(ATA_TX_DMA_NBR);
480 RESET_DMA(ATA_RX_DMA_NBR);
481 WAIT_DMA(ATA_TX_DMA_NBR);
482 WAIT_DMA(ATA_RX_DMA_NBR);
483}
484
485#define cris_dma_descr_type etrax_dma_descr
486#define cris_pio_read IO_STATE(R_ATA_CTRL_DATA, rw, read)
487#define cris_ultra_mask 0x0
488#define MAX_DESCR_SIZE 0x10000UL
489
490static unsigned long
491cris_ide_get_reg(unsigned long reg)
492{
493 return (reg & 0x0e000000) >> 25;
494}
495
496static void
497cris_ide_fill_descriptor(cris_dma_descr_type *d, void* buf, unsigned int len, int last)
498{
499 d->buf = virt_to_phys(buf);
500 d->sw_len = len == MAX_DESCR_SIZE ? 0 : len;
501 if (last)
502 d->ctrl |= d_eol;
503}
504
505static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir, int type, int len)
506{
507 unsigned long cmd;
508
509 if (dir) {
510 /* need to do this before RX DMA due to a chip bug
511 * it is enough to just flush the part of the cache that
512 * corresponds to the buffers we start, but since HD transfers
513 * usually are more than 8 kB, it is easier to optimize for the
514	 * normal case and just flush the entire cache. It's the only
515 * way to be sure! (OB movie quote)
516 */
517 flush_etrax_cache();
518 *R_DMA_CH3_FIRST = virt_to_phys(d);
519 *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, start);
520
521 } else {
522 *R_DMA_CH2_FIRST = virt_to_phys(d);
523 *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start);
524 }
525
526 /* initiate a multi word dma read using DMA handshaking */
527
528 *R_ATA_TRANSFER_CNT =
529 IO_FIELD(R_ATA_TRANSFER_CNT, count, len >> 1);
530
531 cmd = dir ? IO_STATE(R_ATA_CTRL_DATA, rw, read) : IO_STATE(R_ATA_CTRL_DATA, rw, write);
532 cmd |= type == TYPE_PIO ? IO_STATE(R_ATA_CTRL_DATA, handsh, pio) :
533 IO_STATE(R_ATA_CTRL_DATA, handsh, dma);
534 *R_ATA_CTRL_DATA =
535 cmd |
536 IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) |
537 IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
538 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
539 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
540}
541
542static void
543cris_ide_wait_dma(int dir)
544{
545 if (dir)
546 WAIT_DMA(ATA_RX_DMA_NBR);
547 else
548 WAIT_DMA(ATA_TX_DMA_NBR);
549}
550
551static int cris_dma_test_irq(ide_drive_t *drive)
552{
553 int intr = *R_IRQ_MASK0_RD;
554 int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel, IDE_DATA_REG);
555 return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
556}
557
558
559static void cris_ide_initialize_dma(int dir)
560{
561 if (dir)
562 {
563	 RESET_DMA(ATA_RX_DMA_NBR); /* sometimes the DMA channel gets stuck so we need to do this */
564 WAIT_DMA(ATA_RX_DMA_NBR);
565 }
566 else
567 {
568	 RESET_DMA(ATA_TX_DMA_NBR); /* sometimes the DMA channel gets stuck so we need to do this */
569 WAIT_DMA(ATA_TX_DMA_NBR);
570 }
571}
572
573#endif
574
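Both halves of the #ifdef above implement the same small set of primitives consumed by the shared code from here on. For orientation, the implicit interface, with prototypes collected from the two implementations (a reading aid, not text from the file):

	int cris_ide_ack_intr(ide_hwif_t *hwif);
	static inline int cris_ide_busy(void);
	static inline int cris_ide_ready(void);
	static inline int cris_ide_data_available(unsigned short *data);
	static void cris_ide_write_command(unsigned long command);
	static void cris_ide_set_speed(int type, int setup, int strobe, int hold);
	static unsigned long cris_ide_base_address(int bus);
	static unsigned long cris_ide_reg_addr(unsigned long addr, int cs0, int cs1);
	static __init void cris_ide_reset(unsigned val);
	static __init void cris_ide_init(void);
	static unsigned long cris_ide_get_reg(unsigned long reg);
	static void cris_ide_fill_descriptor(cris_dma_descr_type *d, void *buf,
					     unsigned int len, int last);
	static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d,
				       int dir, int type, int len);
	static void cris_ide_wait_dma(int dir);
	static int cris_dma_test_irq(ide_drive_t *drive);
	static void cris_ide_initialize_dma(int dir);
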
575void
576cris_ide_outw(unsigned short data, unsigned long reg) {
577 int timeleft;
578
579 LOWDB(printk("ow: data 0x%x, reg 0x%x\n", data, reg));
580
581	/* Note the lack of timeout handling here: we stop waiting, but we don't
582	 * really notify anybody.
583 */
584
585 timeleft = IDE_REGISTER_TIMEOUT;
586 /* wait for busy flag */
587 do {
588 timeleft--;
589 } while(timeleft && cris_ide_busy());
590
591 /*
592 * Fall through at a timeout, so the ongoing command will be
593 * aborted by the write below, which is expected to be a dummy
594 * command to the command register. This happens when a faulty
595 * drive times out on a command. See comment on timeout in
596 * INB.
597 */
598 if(!timeleft)
599 printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data);
600
601 cris_ide_write_command(reg|data); /* write data to the drive's register */
602
603 timeleft = IDE_REGISTER_TIMEOUT;
604 /* wait for transmitter ready */
605 do {
606 timeleft--;
607 } while(timeleft && !cris_ide_ready());
608}
609
610void
611cris_ide_outb(unsigned char data, unsigned long reg)
612{
613 cris_ide_outw(data, reg);
614}
615
616void
617cris_ide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)
618{
619 cris_ide_outw(addr, port);
620}
621
622unsigned short
623cris_ide_inw(unsigned long reg) {
624 int timeleft;
625 unsigned short val;
626
627 timeleft = IDE_REGISTER_TIMEOUT;
628 /* wait for busy flag */
629 do {
630 timeleft--;
631 } while(timeleft && cris_ide_busy());
632
633 if(!timeleft) {
634 /*
635 * If we're asked to read the status register, like for
636 * example when a command does not complete for an
637 * extended time, but the ATA interface is stuck in a
638 * busy state at the *ETRAX* ATA interface level (as has
639 * happened repeatedly with at least one bad disk), then
640 * the best thing to do is to pretend that we read
641 * "busy" in the status register, so the IDE driver will
642 * time-out, abort the ongoing command and perform a
643 * reset sequence. Note that the subsequent OUT_BYTE
644 * call will also timeout on busy, but as long as the
645 * write is still performed, everything will be fine.
646 */
647 if (cris_ide_get_reg(reg) == IDE_STATUS_OFFSET)
648 return BUSY_STAT;
649 else
650 /* For other rare cases we assume 0 is good enough. */
651 return 0;
652 }
653
654 cris_ide_write_command(reg | cris_pio_read);
655
656 timeleft = IDE_REGISTER_TIMEOUT;
657 /* wait for available */
658 do {
659 timeleft--;
660 } while(timeleft && !cris_ide_data_available(&val));
661
662 if(!timeleft)
663 return 0;
664
665 LOWDB(printk("inb: 0x%x from reg 0x%x\n", val & 0xff, reg));
666
667 return val;
668}
669
670unsigned char
671cris_ide_inb(unsigned long reg)
672{
673 return (unsigned char)cris_ide_inw(reg);
674}
675
676static int cris_dma_check (ide_drive_t *drive);
677static int cris_dma_end (ide_drive_t *drive);
678static int cris_dma_setup (ide_drive_t *drive);
679static void cris_dma_exec_cmd (ide_drive_t *drive, u8 command);
680static int cris_dma_test_irq(ide_drive_t *drive);
681static void cris_dma_start(ide_drive_t *drive);
682static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
683static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
684static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
685static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
686static int cris_dma_off (ide_drive_t *drive);
687static int cris_dma_on (ide_drive_t *drive);
688
689static void tune_cris_ide(ide_drive_t *drive, u8 pio)
690{
691 int setup, strobe, hold;
692
693 switch(pio)
694 {
695 case 0:
696 setup = ATA_PIO0_SETUP;
697 strobe = ATA_PIO0_STROBE;
698 hold = ATA_PIO0_HOLD;
699 break;
700 case 1:
701 setup = ATA_PIO1_SETUP;
702 strobe = ATA_PIO1_STROBE;
703 hold = ATA_PIO1_HOLD;
704 break;
705 case 2:
706 setup = ATA_PIO2_SETUP;
707 strobe = ATA_PIO2_STROBE;
708 hold = ATA_PIO2_HOLD;
709 break;
710 case 3:
711 setup = ATA_PIO3_SETUP;
712 strobe = ATA_PIO3_STROBE;
713 hold = ATA_PIO3_HOLD;
714 break;
715 case 4:
716 setup = ATA_PIO4_SETUP;
717 strobe = ATA_PIO4_STROBE;
718 hold = ATA_PIO4_HOLD;
719 break;
720 default:
721 return;
722 }
723
724 cris_ide_set_speed(TYPE_PIO, setup, strobe, hold);
725}
726
727static int speed_cris_ide(ide_drive_t *drive, u8 speed)
728{
729 int cyc = 0, dvs = 0, strobe = 0, hold = 0;
730
731 if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) {
732 tune_cris_ide(drive, speed - XFER_PIO_0);
733 return 0;
734 }
735
736 switch(speed)
737 {
738 case XFER_UDMA_0:
739 cyc = ATA_UDMA0_CYC;
740 dvs = ATA_UDMA0_DVS;
741 break;
742 case XFER_UDMA_1:
743 cyc = ATA_UDMA1_CYC;
744 dvs = ATA_UDMA1_DVS;
745 break;
746 case XFER_UDMA_2:
747 cyc = ATA_UDMA2_CYC;
748 dvs = ATA_UDMA2_DVS;
749 break;
750 case XFER_MW_DMA_0:
751 strobe = ATA_DMA0_STROBE;
752 hold = ATA_DMA0_HOLD;
753 break;
754 case XFER_MW_DMA_1:
755 strobe = ATA_DMA1_STROBE;
756 hold = ATA_DMA1_HOLD;
757 break;
758 case XFER_MW_DMA_2:
759 strobe = ATA_DMA2_STROBE;
760 hold = ATA_DMA2_HOLD;
761 break;
762 default:
763 return 0;
764 }
765
766 if (speed >= XFER_UDMA_0)
767 cris_ide_set_speed(TYPE_UDMA, cyc, dvs, 0);
768 else
769 cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
770
771 return 0;
772}
773
774void __init
775init_e100_ide (void)
776{
777 hw_regs_t hw;
778 int ide_offsets[IDE_NR_PORTS];
779 int h;
780 int i;
781
782 printk("ide: ETRAX FS built-in ATA DMA controller\n");
783
784 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
785 ide_offsets[i] = cris_ide_reg_addr(i, 0, 1);
786
787 /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
788 ide_offsets[IDE_CONTROL_OFFSET] = cris_ide_reg_addr(6, 1, 0);
789
790 /* first fill in some stuff in the ide_hwifs fields */
791
792 for(h = 0; h < MAX_HWIFS; h++) {
793 ide_hwif_t *hwif = &ide_hwifs[h];
794 ide_setup_ports(&hw, cris_ide_base_address(h),
795 ide_offsets,
796 0, 0, cris_ide_ack_intr,
797 ide_default_irq(0));
798 ide_register_hw(&hw, &hwif);
799 hwif->mmio = 2;
800 hwif->chipset = ide_etrax100;
801 hwif->tuneproc = &tune_cris_ide;
802 hwif->speedproc = &speed_cris_ide;
803 hwif->ata_input_data = &cris_ide_input_data;
804 hwif->ata_output_data = &cris_ide_output_data;
805 hwif->atapi_input_bytes = &cris_atapi_input_bytes;
806 hwif->atapi_output_bytes = &cris_atapi_output_bytes;
807 hwif->ide_dma_check = &cris_dma_check;
808 hwif->ide_dma_end = &cris_dma_end;
809 hwif->dma_setup = &cris_dma_setup;
810 hwif->dma_exec_cmd = &cris_dma_exec_cmd;
811 hwif->ide_dma_test_irq = &cris_dma_test_irq;
812 hwif->dma_start = &cris_dma_start;
813 hwif->OUTB = &cris_ide_outb;
814 hwif->OUTW = &cris_ide_outw;
815 hwif->OUTBSYNC = &cris_ide_outbsync;
816 hwif->INB = &cris_ide_inb;
817 hwif->INW = &cris_ide_inw;
818 hwif->ide_dma_host_off = &cris_dma_off;
819 hwif->ide_dma_host_on = &cris_dma_on;
820 hwif->ide_dma_off_quietly = &cris_dma_off;
821 hwif->udma_four = 0;
822 hwif->ultra_mask = cris_ultra_mask;
823 hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
824 hwif->swdma_mask = 0x07; /* Singleword DMA 0-2 */
825 }
826
827 /* Reset pulse */
828 cris_ide_reset(0);
829 udelay(25);
830 cris_ide_reset(1);
831
832 cris_ide_init();
833
834 cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD);
835 cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD);
836 cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
837}
838
839static int cris_dma_off (ide_drive_t *drive)
840{
841 return 0;
842}
843
844static int cris_dma_on (ide_drive_t *drive)
845{
846 return 0;
847}
848
849
850static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16)));
851
852/*
853 * The following routines are mainly used by the ATAPI drivers.
854 *
855 * These routines will round up any request for an odd number of bytes,
856 * so if an odd bytecount is specified, be sure that there's at least one
857 * extra byte allocated for the buffer.
858 */
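/*
 * Worked example (illustrative): an ATAPI transfer of 17 bytes is
 * rounded up and moved as 18 bytes, so the caller must have allocated
 * at least 18 bytes or the final byte is written past the buffer.
 */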
859static void
860cris_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
861{
862 D(printk("atapi_input_bytes, buffer 0x%x, count %d\n",
863 buffer, bytecount));
864
865 if(bytecount & 1) {
866 printk("warning, odd bytecount in cdrom_in_bytes = %d.\n", bytecount);
867 bytecount++; /* to round off */
868 }
869
870 /* setup DMA and start transfer */
871
872 cris_ide_fill_descriptor(&mydescr, buffer, bytecount, 1);
873 cris_ide_start_dma(drive, &mydescr, 1, TYPE_PIO, bytecount);
874
875 /* wait for completion */
876 LED_DISK_READ(1);
877 cris_ide_wait_dma(1);
878 LED_DISK_READ(0);
879}
880
881static void
882cris_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
883{
884 D(printk("atapi_output_bytes, buffer 0x%x, count %d\n",
885 buffer, bytecount));
886
887 if(bytecount & 1) {
888 printk("odd bytecount %d in atapi_out_bytes!\n", bytecount);
889 bytecount++;
890 }
891
892 cris_ide_fill_descriptor(&mydescr, buffer, bytecount, 1);
893 cris_ide_start_dma(drive, &mydescr, 0, TYPE_PIO, bytecount);
894
895 /* wait for completion */
896
897 LED_DISK_WRITE(1);
898 LED_DISK_READ(1);
899 cris_ide_wait_dma(0);
900 LED_DISK_WRITE(0);
901}
902
903/*
904 * This is used for most PIO data transfers *from* the IDE interface
905 */
906static void
907cris_ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
908{
909 cris_atapi_input_bytes(drive, buffer, wcount << 2);
910}
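/*
 * As with the generic IDE data hooks, wcount here is a count of 32-bit
 * words, hence the << 2 above to convert it into a byte count for the
 * ATAPI routine.
 */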
911
912/*
913 * This is used for most PIO data transfers *to* the IDE interface
914 */
915static void
916cris_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
917{
918 cris_atapi_output_bytes(drive, buffer, wcount << 2);
919}
920
921/* we only have one DMA channel on the chip for ATA, so we can keep these statically allocated */
922static cris_dma_descr_type ata_descrs[MAX_DMA_DESCRS] __attribute__ ((__aligned__(16)));
923static unsigned int ata_tot_size;
924
925/*
926 * cris_ide_build_dmatable() prepares a dma request.
927 * Returns 0 if all went okay, returns 1 otherwise.
928 */
929static int cris_ide_build_dmatable (ide_drive_t *drive)
930{
931 ide_hwif_t *hwif = drive->hwif;
932 struct scatterlist* sg;
933 struct request *rq = drive->hwif->hwgroup->rq;
934 unsigned long size, addr;
935 unsigned int count = 0;
936 int i = 0;
937
938 sg = hwif->sg_table;
939
940 ata_tot_size = 0;
941
942 ide_map_sg(drive, rq);
943 i = hwif->sg_nents;
944
945 while(i) {
946 /*
947 * Determine addr and size of next buffer area. We assume that
948 * individual virtual buffers are always composed linearly in
949 * physical memory. For example, we assume that any 8kB buffer
950 * is always composed of two adjacent physical 4kB pages rather
951 * than two possibly non-adjacent physical 4kB pages.
952 */
953 /* group sequential buffers into one large buffer */
954 addr = page_to_phys(sg->page) + sg->offset;
955 size = sg_dma_len(sg);
956 while (sg++, --i) {
957 if ((addr + size) != page_to_phys(sg->page) + sg->offset)
958 break;
959 size += sg_dma_len(sg);
960 }
961
962 /* did we run out of descriptors? */
963
964 if(count >= MAX_DMA_DESCRS) {
965 printk("%s: too few DMA descriptors\n", drive->name);
966 return 1;
967 }
968
969 /* however, this case is more difficult - rw_trf_cnt cannot be more
970 than 65536 words per transfer, so in that case we need to either
971 1) use a DMA interrupt to re-trigger rw_trf_cnt and continue with
972 the descriptors, or
973 2) simply do the request here, and get dma_intr to only ide_end_request on
974 those blocks that were actually set-up for transfer.
975 */
976
977 if(ata_tot_size + size > 131072) {
978 printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size);
979 return 1;
980 }
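		/*
		 * Illustration: the 131072-byte cap is 65536 16-bit words,
		 * i.e. the largest transfer the hardware word counter
		 * described above can cover in one go.
		 */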
981
982		/* If size > MAX_DESCR_SIZE it has to be split into multiple descriptors. Since we
983		   don't handle size > 131072, only one split is necessary */
984
985 if(size > MAX_DESCR_SIZE) {
986 cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, MAX_DESCR_SIZE, 0);
987 count++;
988 ata_tot_size += MAX_DESCR_SIZE;
989 size -= MAX_DESCR_SIZE;
990 addr += MAX_DESCR_SIZE;
991 }
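		/*
		 * Illustration (assuming MAX_DESCR_SIZE is 65536, as the
		 * 131072-byte cap above suggests): a 100000-byte area yields
		 * one 65536-byte descriptor here and a 34464-byte descriptor
		 * just below.
		 */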
992
993		cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, size, i ? 0 : 1);
994 count++;
995 ata_tot_size += size;
996 }
997
998 if (count) {
999 /* return and say all is ok */
1000 return 0;
1001 }
1002
1003 printk("%s: empty DMA table?\n", drive->name);
1004 return 1; /* let the PIO routines handle this weirdness */
1005}
1006
1007static int cris_config_drive_for_dma (ide_drive_t *drive)
1008{
1009 u8 speed = ide_dma_speed(drive, 1);
1010
1011 if (!speed)
1012 return 0;
1013
1014 speed_cris_ide(drive, speed);
1015 ide_config_drive_speed(drive, speed);
1016
1017 return ide_dma_enable(drive);
1018}
1019
1020/*
1021 * cris_dma_intr() is the handler for disk read/write DMA interrupts
1022 */
1023static ide_startstop_t cris_dma_intr (ide_drive_t *drive)
1024{
1025 LED_DISK_READ(0);
1026 LED_DISK_WRITE(0);
1027
1028 return ide_dma_intr(drive);
1029}
1030
1031/*
1032 * The functions below initiate/abort DMA read/write operations on a drive.
1033 *
1034 * The caller is assumed to have selected the drive and programmed the drive's
1035 * sector address using CHS or LBA. All that remains is to prepare for DMA
1036 * and then issue the actual read/write DMA/PIO command to the drive.
1037 *
1038 * For ATAPI devices, we just prepare for DMA and return. The caller should
1039 * then issue the packet command to the drive and call us again with
1040 * cris_dma_start afterwards.
1041 *
1042 * Returns 0 if all went well.
1043 * Returns 1 if DMA read/write could not be started, in which case
1044 * the caller should revert to PIO for the current request.
1045 */
1046
1047static int cris_dma_check(ide_drive_t *drive)
1048{
1049 ide_hwif_t *hwif = drive->hwif;
1050 struct hd_driveid* id = drive->id;
1051
1052 if (id && (id->capability & 1)) {
1053 if (ide_use_dma(drive)) {
1054 if (cris_config_drive_for_dma(drive))
1055 return hwif->ide_dma_on(drive);
1056 }
1057 }
1058
1059 return hwif->ide_dma_off_quietly(drive);
1060}
1061
1062static int cris_dma_end(ide_drive_t *drive)
1063{
1064 drive->waiting_for_dma = 0;
1065 return 0;
1066}
1067
1068static int cris_dma_setup(ide_drive_t *drive)
1069{
1070 struct request *rq = drive->hwif->hwgroup->rq;
1071
1072 cris_ide_initialize_dma(!rq_data_dir(rq));
1073 if (cris_ide_build_dmatable (drive)) {
1074 ide_map_sg(drive, rq);
1075 return 1;
1076 }
1077
1078 drive->waiting_for_dma = 1;
1079 return 0;
1080}
1081
1082static void cris_dma_exec_cmd(ide_drive_t *drive, u8 command)
1083{
1084 /* set the irq handler which will finish the request when DMA is done */
1085 ide_set_handler(drive, &cris_dma_intr, WAIT_CMD, NULL);
1086
1087 /* issue cmd to drive */
1088 cris_ide_outb(command, IDE_COMMAND_REG);
1089}
1090
1091static void cris_dma_start(ide_drive_t *drive)
1092{
1093 struct request *rq = drive->hwif->hwgroup->rq;
1094 int writing = rq_data_dir(rq);
1095 int type = TYPE_DMA;
1096
1097 if (drive->current_speed >= XFER_UDMA_0)
1098 type = TYPE_UDMA;
1099
1100 cris_ide_start_dma(drive, &ata_descrs[0], writing ? 0 : 1, type, ata_tot_size);
1101
1102 if (writing) {
1103 LED_DISK_WRITE(1);
1104 } else {
1105 LED_DISK_READ(1);
1106 }
1107}
diff --git a/drivers/ide/cris/ide-v10.c b/drivers/ide/cris/ide-v10.c
deleted file mode 100644
index 5b40220d3ddc..000000000000
--- a/drivers/ide/cris/ide-v10.c
+++ /dev/null
@@ -1,842 +0,0 @@
1/* $Id: ide.c,v 1.4 2004/10/12 07:55:48 starvik Exp $
2 *
3 * Etrax specific IDE functions, like init and PIO-mode setting etc.
4 * Almost the entire ide.c is used for the rest of the Etrax ATA driver.
5 * Copyright (c) 2000-2004 Axis Communications AB
6 *
7 * Authors: Bjorn Wesen (initial version)
8 * Mikael Starvik (pio setup stuff, Linux 2.6 port)
9 */
10
11/* Regarding DMA:
12 *
13 * There are two forms of DMA - "DMA handshaking" between the interface and the drive,
14 * and DMA between the memory and the interface. We can ALWAYS use the latter, since it's
15 * something built into the Etrax. However, only some drives support the DMA-mode handshaking
16 * on the ATA bus. The normal PC driver and Triton interface disable memory-interface DMA when the
17 * device can't do DMA handshaking for some stupid reason. We don't need to do that.
18 */
19
20#undef REALLY_SLOW_IO /* most systems can safely undef this */
21
22#include <linux/config.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/timer.h>
26#include <linux/mm.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/blkdev.h>
30#include <linux/hdreg.h>
31#include <linux/ide.h>
32#include <linux/init.h>
33#include <linux/scatterlist.h>
34
35#include <asm/io.h>
36#include <asm/arch/svinto.h>
37#include <asm/dma.h>
38
39/* number of Etrax DMA descriptors */
40#define MAX_DMA_DESCRS 64
41
42/* number of times to retry busy-flags when reading/writing IDE-registers
43 * this can't be too high because a hung harddisk might cause the watchdog
44 * to trigger (sometimes INB and OUTB are called with irq's disabled)
45 */
46
47#define IDE_REGISTER_TIMEOUT 300
48
49static int e100_read_command = 0;
50
51#define LOWDB(x)
52#define D(x)
53
54static int e100_ide_build_dmatable (ide_drive_t *drive);
55static ide_startstop_t etrax_dma_intr (ide_drive_t *drive);
56
57void
58etrax100_ide_outw(unsigned short data, unsigned long reg) {
59 int timeleft;
60 LOWDB(printk("ow: data 0x%x, reg 0x%x\n", data, reg));
61
62	/* Note the lack of timeout handling here: we stop waiting, but we don't
63	 * really notify anybody.
64	 */
65
66 timeleft = IDE_REGISTER_TIMEOUT;
67 /* wait for busy flag */
68 while(timeleft && (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)))
69 timeleft--;
70
71 /*
72 * Fall through at a timeout, so the ongoing command will be
73 * aborted by the write below, which is expected to be a dummy
74 * command to the command register. This happens when a faulty
75 * drive times out on a command. See comment on timeout in
76 * INB.
77 */
78 if(!timeleft)
79 printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data);
80
81 *R_ATA_CTRL_DATA = reg | data; /* write data to the drive's register */
82
83 timeleft = IDE_REGISTER_TIMEOUT;
84 /* wait for transmitter ready */
85 while(timeleft && !(*R_ATA_STATUS_DATA &
86 IO_MASK(R_ATA_STATUS_DATA, tr_rdy)))
87 timeleft--;
88}
89
90void
91etrax100_ide_outb(unsigned char data, unsigned long reg)
92{
93 etrax100_ide_outw(data, reg);
94}
95
96void
97etrax100_ide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)
98{
99 etrax100_ide_outw(addr, port);
100}
101
102unsigned short
103etrax100_ide_inw(unsigned long reg) {
104 int status;
105 int timeleft;
106
107 timeleft = IDE_REGISTER_TIMEOUT;
108 /* wait for busy flag */
109 while(timeleft && (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)))
110 timeleft--;
111
112 if(!timeleft) {
113 /*
114 * If we're asked to read the status register, like for
115 * example when a command does not complete for an
116 * extended time, but the ATA interface is stuck in a
117 * busy state at the *ETRAX* ATA interface level (as has
118 * happened repeatedly with at least one bad disk), then
119 * the best thing to do is to pretend that we read
120 * "busy" in the status register, so the IDE driver will
121 * time-out, abort the ongoing command and perform a
122 * reset sequence. Note that the subsequent OUT_BYTE
123 * call will also timeout on busy, but as long as the
124 * write is still performed, everything will be fine.
125 */
126 if ((reg & IO_MASK (R_ATA_CTRL_DATA, addr))
127 == IO_FIELD (R_ATA_CTRL_DATA, addr, IDE_STATUS_OFFSET))
128 return BUSY_STAT;
129 else
130 /* For other rare cases we assume 0 is good enough. */
131 return 0;
132 }
133
134 *R_ATA_CTRL_DATA = reg | IO_STATE(R_ATA_CTRL_DATA, rw, read); /* read data */
135
136 timeleft = IDE_REGISTER_TIMEOUT;
137 /* wait for available */
138 while(timeleft && !((status = *R_ATA_STATUS_DATA) &
139 IO_MASK(R_ATA_STATUS_DATA, dav)))
140 timeleft--;
141
142 if(!timeleft)
143 return 0;
144
145 LOWDB(printk("inb: 0x%x from reg 0x%x\n", status & 0xff, reg));
146
147 return (unsigned short)status;
148}
149
150unsigned char
151etrax100_ide_inb(unsigned long reg)
152{
153 return (unsigned char)etrax100_ide_inw(reg);
154}
155
156/* PIO timing (in R_ATA_CONFIG)
157 *
158 * _____________________________
159 * ADDRESS : ________/
160 *
161 * _______________
162 * DIOR : ____________/ \__________
163 *
164 * _______________
165 * DATA : XXXXXXXXXXXXXXXX_______________XXXXXXXX
166 *
167 *
168 * DIOR is unbuffered while address and data are buffered.
169 * This creates two problems:
170 * 1. The DIOR pulse is too early (because it is unbuffered)
171 * 2. The rise time of DIOR is long
172 *
173 * There are at least three different plausible solutions
174 * 1. Use a pad capable of larger currents in Etrax
175 * 2. Use an external buffer
176 * 3. Make the strobe pulse longer
177 *
178 * Some of the strobe timings below are modified to compensate
179 * for this. This implies a slight performance decrease.
180 *
181 * THIS SHOULD NEVER BE CHANGED!
182 *
183 * TODO: Is this still true for the latest LX boards?
184 */
185
186#define ATA_DMA2_STROBE 4
187#define ATA_DMA2_HOLD 0
188#define ATA_DMA1_STROBE 4
189#define ATA_DMA1_HOLD 1
190#define ATA_DMA0_STROBE 12
191#define ATA_DMA0_HOLD 9
192#define ATA_PIO4_SETUP 1
193#define ATA_PIO4_STROBE 5
194#define ATA_PIO4_HOLD 0
195#define ATA_PIO3_SETUP 1
196#define ATA_PIO3_STROBE 5
197#define ATA_PIO3_HOLD 1
198#define ATA_PIO2_SETUP 1
199#define ATA_PIO2_STROBE 6
200#define ATA_PIO2_HOLD 2
201#define ATA_PIO1_SETUP 2
202#define ATA_PIO1_STROBE 11
203#define ATA_PIO1_HOLD 4
204#define ATA_PIO0_SETUP 4
205#define ATA_PIO0_STROBE 19
206#define ATA_PIO0_HOLD 4
207
208static int e100_dma_check (ide_drive_t *drive);
209static void e100_dma_start(ide_drive_t *drive);
210static int e100_dma_end (ide_drive_t *drive);
211static void e100_ide_input_data (ide_drive_t *drive, void *, unsigned int);
212static void e100_ide_output_data (ide_drive_t *drive, void *, unsigned int);
213static void e100_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
214static void e100_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
215static int e100_dma_off (ide_drive_t *drive);
216
217
218/*
219 * good_dma_drives() lists the model names (from "hdparm -i")
220 * of drives which do not support mword2 DMA but which are
221 * known to work fine with this interface under Linux.
222 */
223
224const char *good_dma_drives[] = {"Micropolis 2112A",
225 "CONNER CTMA 4000",
226 "CONNER CTT8000-A",
227 NULL};
228
229static void tune_e100_ide(ide_drive_t *drive, byte pio)
230{
231 pio = 4;
232 /* pio = ide_get_best_pio_mode(drive, pio, 4, NULL); */
233
234 /* set pio mode! */
235
236 switch(pio) {
237 case 0:
238 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
239 IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
240 IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) |
241 IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO0_SETUP ) |
242 IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO0_STROBE ) |
243 IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO0_HOLD ) );
244 break;
245 case 1:
246 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
247 IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
248 IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) |
249 IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO1_SETUP ) |
250 IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO1_STROBE ) |
251 IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO1_HOLD ) );
252 break;
253 case 2:
254 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
255 IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
256 IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) |
257 IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO2_SETUP ) |
258 IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO2_STROBE ) |
259 IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO2_HOLD ) );
260 break;
261 case 3:
262 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
263 IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
264 IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) |
265 IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO3_SETUP ) |
266 IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO3_STROBE ) |
267 IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO3_HOLD ) );
268 break;
269 case 4:
270 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
271 IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
272 IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) |
273 IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO4_SETUP ) |
274 IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO4_STROBE ) |
275 IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO4_HOLD ) );
276 break;
277 }
278}
279
280static int e100_dma_setup(ide_drive_t *drive)
281{
282 struct request *rq = drive->hwif->hwgroup->rq;
283
284 if (rq_data_dir(rq)) {
285 e100_read_command = 0;
286
287		RESET_DMA(ATA_TX_DMA_NBR); /* sometimes the DMA channel gets stuck, so we need to do this */
288 WAIT_DMA(ATA_TX_DMA_NBR);
289 } else {
290 e100_read_command = 1;
291
292		RESET_DMA(ATA_RX_DMA_NBR); /* sometimes the DMA channel gets stuck, so we need to do this */
293 WAIT_DMA(ATA_RX_DMA_NBR);
294 }
295
296 /* set up the Etrax DMA descriptors */
297 if (e100_ide_build_dmatable(drive)) {
298 ide_map_sg(drive, rq);
299 return 1;
300 }
301
302 return 0;
303}
304
305static void e100_dma_exec_cmd(ide_drive_t *drive, u8 command)
306{
307 /* set the irq handler which will finish the request when DMA is done */
308 ide_set_handler(drive, &etrax_dma_intr, WAIT_CMD, NULL);
309
310 /* issue cmd to drive */
311 etrax100_ide_outb(command, IDE_COMMAND_REG);
312}
313
314void __init
315init_e100_ide (void)
316{
317 volatile unsigned int dummy;
318 int h;
319
320 printk("ide: ETRAX 100LX built-in ATA DMA controller\n");
321
322 /* first fill in some stuff in the ide_hwifs fields */
323
324 for(h = 0; h < MAX_HWIFS; h++) {
325 ide_hwif_t *hwif = &ide_hwifs[h];
326 hwif->mmio = 2;
327 hwif->chipset = ide_etrax100;
328 hwif->tuneproc = &tune_e100_ide;
329 hwif->ata_input_data = &e100_ide_input_data;
330 hwif->ata_output_data = &e100_ide_output_data;
331 hwif->atapi_input_bytes = &e100_atapi_input_bytes;
332 hwif->atapi_output_bytes = &e100_atapi_output_bytes;
333 hwif->ide_dma_check = &e100_dma_check;
334 hwif->ide_dma_end = &e100_dma_end;
335 hwif->dma_setup = &e100_dma_setup;
336 hwif->dma_exec_cmd = &e100_dma_exec_cmd;
337 hwif->dma_start = &e100_dma_start;
338 hwif->OUTB = &etrax100_ide_outb;
339 hwif->OUTW = &etrax100_ide_outw;
340 hwif->OUTBSYNC = &etrax100_ide_outbsync;
341 hwif->INB = &etrax100_ide_inb;
342 hwif->INW = &etrax100_ide_inw;
343 hwif->ide_dma_off_quietly = &e100_dma_off;
344 }
345
346 /* actually reset and configure the etrax100 ide/ata interface */
347
348 *R_ATA_CTRL_DATA = 0;
349 *R_ATA_TRANSFER_CNT = 0;
350 *R_ATA_CONFIG = 0;
351
352 genconfig_shadow = (genconfig_shadow &
353 ~IO_MASK(R_GEN_CONFIG, dma2) &
354 ~IO_MASK(R_GEN_CONFIG, dma3) &
355 ~IO_MASK(R_GEN_CONFIG, ata)) |
356 ( IO_STATE( R_GEN_CONFIG, dma3, ata ) |
357 IO_STATE( R_GEN_CONFIG, dma2, ata ) |
358 IO_STATE( R_GEN_CONFIG, ata, select ) );
359
360 *R_GEN_CONFIG = genconfig_shadow;
361
362 /* pull the chosen /reset-line low */
363
364#ifdef CONFIG_ETRAX_IDE_G27_RESET
365 REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, 0);
366#endif
367#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
368 REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 0);
369#endif
370#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET
371 REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 0);
372#endif
373#ifdef CONFIG_ETRAX_IDE_PB7_RESET
374 port_pb_dir_shadow = port_pb_dir_shadow |
375 IO_STATE(R_PORT_PB_DIR, dir7, output);
376 *R_PORT_PB_DIR = port_pb_dir_shadow;
377 REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 7, 1);
378#endif
379
380 /* wait some */
381
382 udelay(25);
383
384 /* de-assert bus-reset */
385
386#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
387 REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 1);
388#endif
389#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET
390 REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 1);
391#endif
392#ifdef CONFIG_ETRAX_IDE_G27_RESET
393 REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, 1);
394#endif
395
396 /* make a dummy read to set the ata controller in a proper state */
397 dummy = *R_ATA_STATUS_DATA;
398
399 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
400 IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
401 IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) |
402 IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO4_SETUP ) |
403 IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO4_STROBE ) |
404 IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO4_HOLD ) );
405
406 *R_ATA_CTRL_DATA = ( IO_STATE( R_ATA_CTRL_DATA, rw, read) |
407 IO_FIELD( R_ATA_CTRL_DATA, addr, 1 ) );
408
409 while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait for busy flag*/
410
411 *R_IRQ_MASK0_SET = ( IO_STATE( R_IRQ_MASK0_SET, ata_irq0, set ) |
412 IO_STATE( R_IRQ_MASK0_SET, ata_irq1, set ) |
413 IO_STATE( R_IRQ_MASK0_SET, ata_irq2, set ) |
414 IO_STATE( R_IRQ_MASK0_SET, ata_irq3, set ) );
415
416 printk("ide: waiting %d seconds for drives to regain consciousness\n",
417 CONFIG_ETRAX_IDE_DELAY);
418
419 h = jiffies + (CONFIG_ETRAX_IDE_DELAY * HZ);
420 while(time_before(jiffies, h)) /* nothing */ ;
421
422 /* reset the dma channels we will use */
423
424 RESET_DMA(ATA_TX_DMA_NBR);
425 RESET_DMA(ATA_RX_DMA_NBR);
426 WAIT_DMA(ATA_TX_DMA_NBR);
427 WAIT_DMA(ATA_RX_DMA_NBR);
428
429}
430
431static int e100_dma_off (ide_drive_t *drive)
432{
433 return 0;
434}
435
436static etrax_dma_descr mydescr;
437
438/*
439 * The following routines are mainly used by the ATAPI drivers.
440 *
441 * These routines will round up any request for an odd number of bytes,
442 * so if an odd bytecount is specified, be sure that there's at least one
443 * extra byte allocated for the buffer.
444 */
445static void
446e100_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
447{
448 unsigned long data_reg = IDE_DATA_REG;
449
450 D(printk("atapi_input_bytes, dreg 0x%x, buffer 0x%x, count %d\n",
451 data_reg, buffer, bytecount));
452
453 if(bytecount & 1) {
454 printk("warning, odd bytecount in cdrom_in_bytes = %d.\n", bytecount);
455 bytecount++; /* to round off */
456 }
457
458 /* make sure the DMA channel is available */
459 RESET_DMA(ATA_RX_DMA_NBR);
460 WAIT_DMA(ATA_RX_DMA_NBR);
461
462 /* setup DMA descriptor */
463
464 mydescr.sw_len = bytecount;
465 mydescr.ctrl = d_eol;
466 mydescr.buf = virt_to_phys(buffer);
467
468 /* start the dma channel */
469
470 *R_DMA_CH3_FIRST = virt_to_phys(&mydescr);
471 *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, start);
472
473 /* initiate a multi word dma read using PIO handshaking */
474
475 *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1);
476
477 *R_ATA_CTRL_DATA = data_reg |
478 IO_STATE(R_ATA_CTRL_DATA, rw, read) |
479 IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
480 IO_STATE(R_ATA_CTRL_DATA, handsh, pio) |
481 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
482 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
483
484 /* wait for completion */
485
486 LED_DISK_READ(1);
487 WAIT_DMA(ATA_RX_DMA_NBR);
488 LED_DISK_READ(0);
489
490#if 0
491 /* old polled transfer code
492 * this should be moved into a new function that can do polled
493 * transfers if DMA is not available
494 */
495
496 /* initiate a multi word read */
497
498 *R_ATA_TRANSFER_CNT = wcount << 1;
499
500 *R_ATA_CTRL_DATA = data_reg |
501 IO_STATE(R_ATA_CTRL_DATA, rw, read) |
502 IO_STATE(R_ATA_CTRL_DATA, src_dst, register) |
503 IO_STATE(R_ATA_CTRL_DATA, handsh, pio) |
504 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
505 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
506
507 /* svinto has a latency until the busy bit actually is set */
508
509 nop(); nop();
510 nop(); nop();
511 nop(); nop();
512 nop(); nop();
513 nop(); nop();
514
515 /* unit should be busy during multi transfer */
516 while((status = *R_ATA_STATUS_DATA) & IO_MASK(R_ATA_STATUS_DATA, busy)) {
517 while(!(status & IO_MASK(R_ATA_STATUS_DATA, dav)))
518 status = *R_ATA_STATUS_DATA;
519 *ptr++ = (unsigned short)(status & 0xffff);
520 }
521#endif
522}
523
524static void
525e100_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
526{
527 unsigned long data_reg = IDE_DATA_REG;
528
529 D(printk("atapi_output_bytes, dreg 0x%x, buffer 0x%x, count %d\n",
530 data_reg, buffer, bytecount));
531
532 if(bytecount & 1) {
533 printk("odd bytecount %d in atapi_out_bytes!\n", bytecount);
534 bytecount++;
535 }
536
537 /* make sure the DMA channel is available */
538 RESET_DMA(ATA_TX_DMA_NBR);
539 WAIT_DMA(ATA_TX_DMA_NBR);
540
541 /* setup DMA descriptor */
542
543 mydescr.sw_len = bytecount;
544 mydescr.ctrl = d_eol;
545 mydescr.buf = virt_to_phys(buffer);
546
547 /* start the dma channel */
548
549 *R_DMA_CH2_FIRST = virt_to_phys(&mydescr);
550 *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start);
551
552 /* initiate a multi word dma write using PIO handshaking */
553
554 *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1);
555
556 *R_ATA_CTRL_DATA = data_reg |
557 IO_STATE(R_ATA_CTRL_DATA, rw, write) |
558 IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
559 IO_STATE(R_ATA_CTRL_DATA, handsh, pio) |
560 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
561 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
562
563 /* wait for completion */
564
565 LED_DISK_WRITE(1);
566 WAIT_DMA(ATA_TX_DMA_NBR);
567 LED_DISK_WRITE(0);
568
569#if 0
570 /* old polled write code - see comment in input_bytes */
571
572 /* wait for busy flag */
573 while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy));
574
575 /* initiate a multi word write */
576
577 *R_ATA_TRANSFER_CNT = bytecount >> 1;
578
579 ctrl = data_reg |
580 IO_STATE(R_ATA_CTRL_DATA, rw, write) |
581 IO_STATE(R_ATA_CTRL_DATA, src_dst, register) |
582 IO_STATE(R_ATA_CTRL_DATA, handsh, pio) |
583 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
584 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
585
586 LED_DISK_WRITE(1);
587
588 /* Etrax will set busy = 1 until the multi pio transfer has finished
589 * and tr_rdy = 1 after each successful word transfer.
590	 * When the last byte has been transferred Etrax will first set tr_rdy = 1
591 * and then busy = 0 (not in the same cycle). If we read busy before it
592 * has been set to 0 we will think that we should transfer more bytes
593 * and then tr_rdy would be 0 forever. This is solved by checking busy
594 * in the inner loop.
595 */
596
597 do {
598 *R_ATA_CTRL_DATA = ctrl | *ptr++;
599 while(!(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy)) &&
600 (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)));
601 } while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy));
602
603 LED_DISK_WRITE(0);
604#endif
605
606}
607
608/*
609 * This is used for most PIO data transfers *from* the IDE interface
610 */
611static void
612e100_ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
613{
614 e100_atapi_input_bytes(drive, buffer, wcount << 2);
615}
616
617/*
618 * This is used for most PIO data transfers *to* the IDE interface
619 */
620static void
621e100_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
622{
623 e100_atapi_output_bytes(drive, buffer, wcount << 2);
624}
625
626/* we only have one DMA channel on the chip for ATA, so we can keep these statically allocated */
627static etrax_dma_descr ata_descrs[MAX_DMA_DESCRS];
628static unsigned int ata_tot_size;
629
630/*
631 * e100_ide_build_dmatable() prepares a dma request.
632 * Returns 0 if all went okay, returns 1 otherwise.
633 */
634static int e100_ide_build_dmatable (ide_drive_t *drive)
635{
636 ide_hwif_t *hwif = HWIF(drive);
637 struct scatterlist* sg;
638 struct request *rq = HWGROUP(drive)->rq;
639 unsigned long size, addr;
640 unsigned int count = 0;
641 int i = 0;
642
643 sg = hwif->sg_table;
644
645 ata_tot_size = 0;
646
647 ide_map_sg(drive, rq);
648
649 i = hwif->sg_nents;
650
651 while(i) {
652 /*
653 * Determine addr and size of next buffer area. We assume that
654 * individual virtual buffers are always composed linearly in
655 * physical memory. For example, we assume that any 8kB buffer
656 * is always composed of two adjacent physical 4kB pages rather
657 * than two possibly non-adjacent physical 4kB pages.
658 */
659 /* group sequential buffers into one large buffer */
660 addr = page_to_phys(sg->page) + sg->offset;
661 size = sg_dma_len(sg);
662 while (sg++, --i) {
663 if ((addr + size) != page_to_phys(sg->page) + sg->offset)
664 break;
665 size += sg_dma_len(sg);
666 }
667
668 /* did we run out of descriptors? */
669
670 if(count >= MAX_DMA_DESCRS) {
671 printk("%s: too few DMA descriptors\n", drive->name);
672 return 1;
673 }
674
675 /* however, this case is more difficult - R_ATA_TRANSFER_CNT cannot be more
676 than 65536 words per transfer, so in that case we need to either
677 1) use a DMA interrupt to re-trigger R_ATA_TRANSFER_CNT and continue with
678 the descriptors, or
679 2) simply do the request here, and get dma_intr to only ide_end_request on
680 those blocks that were actually set-up for transfer.
681 */
682
683 if(ata_tot_size + size > 131072) {
684 printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size);
685 return 1;
686 }
687
688		/* If size > 65536 it has to be split into multiple descriptors. Since we don't handle
689		   size > 131072, only one split is necessary */
690
691 if(size > 65536) {
692 /* ok we want to do IO at addr, size bytes. set up a new descriptor entry */
693 ata_descrs[count].sw_len = 0; /* 0 means 65536, this is a 16-bit field */
694 ata_descrs[count].ctrl = 0;
695 ata_descrs[count].buf = addr;
696 ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]);
697 count++;
698 ata_tot_size += 65536;
699			/* size and addr now refer to the data not yet handled */
700 size -= 65536;
701 addr += 65536;
702 }
703 /* ok we want to do IO at addr, size bytes. set up a new descriptor entry */
704 if(size == 65536) {
705 ata_descrs[count].sw_len = 0; /* 0 means 65536, this is a 16-bit field */
706 } else {
707 ata_descrs[count].sw_len = size;
708 }
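			/*
			 * Illustration: sw_len is a 16-bit field, so a full
			 * 65536-byte descriptor is encoded as 0 (65536 mod
			 * 2^16); any smaller remainder is stored as-is.
			 */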
709 ata_descrs[count].ctrl = 0;
710 ata_descrs[count].buf = addr;
711 ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]);
712 count++;
713 ata_tot_size += size;
714 }
715
716 if (count) {
717 /* set the end-of-list flag on the last descriptor */
718 ata_descrs[count - 1].ctrl |= d_eol;
719 /* return and say all is ok */
720 return 0;
721 }
722
723 printk("%s: empty DMA table?\n", drive->name);
724 return 1; /* let the PIO routines handle this weirdness */
725}
726
727static int config_drive_for_dma (ide_drive_t *drive)
728{
729 const char **list;
730 struct hd_driveid *id = drive->id;
731
732 if (id && (id->capability & 1)) {
733 /* Enable DMA on any drive that supports mword2 DMA */
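		/* IDENTIFY word 63: bit 2 = mword2 supported, bit 10 =
		 * mword2 currently selected; 0x404 tests both at once. */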
734 if ((id->field_valid & 2) && (id->dma_mword & 0x404) == 0x404) {
735 drive->using_dma = 1;
736 return 0; /* DMA enabled */
737 }
738
739 /* Consult the list of known "good" drives */
740 list = good_dma_drives;
741 while (*list) {
742 if (!strcmp(*list++,id->model)) {
743 drive->using_dma = 1;
744 return 0; /* DMA enabled */
745 }
746 }
747 }
748 return 1; /* DMA not enabled */
749}
750
751/*
752 * etrax_dma_intr() is the handler for disk read/write DMA interrupts
753 */
754static ide_startstop_t etrax_dma_intr (ide_drive_t *drive)
755{
756 LED_DISK_READ(0);
757 LED_DISK_WRITE(0);
758
759 return ide_dma_intr(drive);
760}
761
762/*
763 * The functions below initiate/abort DMA read/write operations on a drive.
764 *
765 * The caller is assumed to have selected the drive and programmed the drive's
766 * sector address using CHS or LBA. All that remains is to prepare for DMA
767 * and then issue the actual read/write DMA/PIO command to the drive.
768 *
769 * Returns 0 if all went well.
770 * Returns 1 if DMA read/write could not be started, in which case
771 * the caller should revert to PIO for the current request.
772 */
773
774static int e100_dma_check(ide_drive_t *drive)
775{
776 return config_drive_for_dma (drive);
777}
778
779static int e100_dma_end(ide_drive_t *drive)
780{
781 /* TODO: check if something went wrong with the DMA */
782 return 0;
783}
784
785static void e100_dma_start(ide_drive_t *drive)
786{
787 if (e100_read_command) {
788 /* begin DMA */
789
790 /* need to do this before RX DMA due to a chip bug
791 * it is enough to just flush the part of the cache that
792 * corresponds to the buffers we start, but since HD transfers
793 * usually are more than 8 kB, it is easier to optimize for the
794	 * normal case and just flush the entire cache. It's the only
795 * way to be sure! (OB movie quote)
796 */
797 flush_etrax_cache();
798 *R_DMA_CH3_FIRST = virt_to_phys(ata_descrs);
799 *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, start);
800
801 /* initiate a multi word dma read using DMA handshaking */
802
803 *R_ATA_TRANSFER_CNT =
804 IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1);
805
806 *R_ATA_CTRL_DATA =
807 IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) |
808 IO_STATE(R_ATA_CTRL_DATA, rw, read) |
809 IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
810 IO_STATE(R_ATA_CTRL_DATA, handsh, dma) |
811 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
812 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
813
814 LED_DISK_READ(1);
815
816 D(printk("dma read of %d bytes.\n", ata_tot_size));
817
818 } else {
819 /* writing */
820 /* begin DMA */
821
822 *R_DMA_CH2_FIRST = virt_to_phys(ata_descrs);
823 *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start);
824
825 /* initiate a multi word dma write using DMA handshaking */
826
827 *R_ATA_TRANSFER_CNT =
828 IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1);
829
830 *R_ATA_CTRL_DATA =
831 IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) |
832 IO_STATE(R_ATA_CTRL_DATA, rw, write) |
833 IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
834 IO_STATE(R_ATA_CTRL_DATA, handsh, dma) |
835 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
836 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
837
838 LED_DISK_WRITE(1);
839
840 D(printk("dma write of %d bytes.\n", ata_tot_size));
841 }
842}
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index 92a2b7caed58..11d035f1983d 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -487,7 +487,7 @@ static void display_clocks (unsigned int index)
  * Pack active and recovery counts into single byte representation
  * used by controller
  */
-inline static u8 pack_nibbles (u8 upper, u8 lower)
+static inline u8 pack_nibbles (u8 upper, u8 lower)
 {
 	return ((upper & 0x0f) << 4) | (lower & 0x0f);
 }
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index 8b5eea5405ef..c26c8ca90dd4 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -5,7 +5,7 @@
  * May be copied or modified under the terms of the GNU General Public License
  *
  * June 22, 2004 - get rid of check_region
- *                 Jesper Juhl <juhl-lkml@dif.dk>
+ *                 - Jesper Juhl
  *
  */
 
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index fe3e1703fa61..627af507643a 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -169,6 +169,7 @@ MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
  * Debug levels, configured via kernel config, or enable here.
  */
 
+#define CONFIG_IEEE1394_SBP2_DEBUG 0
 /* #define CONFIG_IEEE1394_SBP2_DEBUG_ORBS */
 /* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
 /* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index e1a7cf3e8636..10be36731ed7 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,15 +1,20 @@
 EXTRA_CFLAGS += -Idrivers/infiniband/include
 
-obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_mad.o ib_sa.o ib_umad.o
+obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_mad.o ib_sa.o \
+					ib_cm.o ib_umad.o ib_ucm.o
 obj-$(CONFIG_INFINIBAND_USER_VERBS) +=	ib_uverbs.o
 
 ib_core-y :=			packer.o ud_header.o verbs.o sysfs.o \
 				device.o fmr_pool.o cache.o
 
-ib_mad-y :=			mad.o smi.o agent.o
+ib_mad-y :=			mad.o smi.o agent.o mad_rmpp.o
 
 ib_sa-y :=			sa_query.o
 
+ib_cm-y :=			cm.o
+
 ib_umad-y :=			user_mad.o
 
+ib_ucm-y :=			ucm.o
+
 ib_uverbs-y :=			uverbs_main.o uverbs_cmd.o uverbs_mem.o
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 23d1957c4b29..729f0b0d983a 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -134,7 +134,7 @@ static int agent_mad_send(struct ib_mad_agent *mad_agent,
 					   sizeof(mad_priv->mad),
 					   DMA_TO_DEVICE);
 	gather_list.length = sizeof(mad_priv->mad);
-	gather_list.lkey = (*port_priv->mr).lkey;
+	gather_list.lkey = mad_agent->mr->lkey;
 
 	send_wr.next = NULL;
 	send_wr.opcode = IB_WR_SEND;
@@ -156,10 +156,10 @@ static int agent_mad_send(struct ib_mad_agent *mad_agent,
 		/* Should sgid be looked up ? */
 		ah_attr.grh.sgid_index = 0;
 		ah_attr.grh.hop_limit = grh->hop_limit;
-		ah_attr.grh.flow_label = be32_to_cpup(
-					&grh->version_tclass_flow) & 0xfffff;
-		ah_attr.grh.traffic_class = (be32_to_cpup(
-					&grh->version_tclass_flow) >> 20) & 0xff;
+		ah_attr.grh.flow_label = be32_to_cpu(
+					grh->version_tclass_flow) & 0xfffff;
+		ah_attr.grh.traffic_class = (be32_to_cpu(
+					grh->version_tclass_flow) >> 20) & 0xff;
 		memcpy(ah_attr.grh.dgid.raw,
 		       grh->sgid.raw,
 		       sizeof(ah_attr.grh.dgid));
@@ -322,22 +322,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		goto error3;
 	}
 
-	port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd,
-				      IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(port_priv->mr)) {
-		printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
-		ret = PTR_ERR(port_priv->mr);
-		goto error4;
-	}
-
 	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
 	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
 	return 0;
 
-error4:
-	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
 error3:
 	ib_unregister_mad_agent(port_priv->smp_agent);
 error2:
@@ -361,8 +351,6 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	list_del(&port_priv->port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
-	ib_dereg_mr(port_priv->mr);
-
 	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
 	ib_unregister_mad_agent(port_priv->smp_agent);
 	kfree(port_priv);
diff --git a/drivers/infiniband/core/agent_priv.h b/drivers/infiniband/core/agent_priv.h
index 17a0cce5813c..17435af1e914 100644
--- a/drivers/infiniband/core/agent_priv.h
+++ b/drivers/infiniband/core/agent_priv.h
@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: agent_priv.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
  */
 
 #ifndef __IB_AGENT_PRIV_H__
@@ -57,7 +57,6 @@ struct ib_agent_port_private {
 	int port_num;
 	struct ib_mad_agent *smp_agent;	      /* SM class */
 	struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
-	struct ib_mr *mr;
 };
 
 #endif /* __IB_AGENT_PRIV_H__ */
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
new file mode 100644
index 000000000000..403ed125d8f4
--- /dev/null
+++ b/drivers/infiniband/core/cm.c
@@ -0,0 +1,3324 @@
1/*
2 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
36 */
37#include <linux/dma-mapping.h>
38#include <linux/err.h>
39#include <linux/idr.h>
40#include <linux/interrupt.h>
41#include <linux/pci.h>
42#include <linux/rbtree.h>
43#include <linux/spinlock.h>
44#include <linux/workqueue.h>
45
46#include <ib_cache.h>
47#include <ib_cm.h>
48#include "cm_msgs.h"
49
50MODULE_AUTHOR("Sean Hefty");
51MODULE_DESCRIPTION("InfiniBand CM");
52MODULE_LICENSE("Dual BSD/GPL");
53
54static void cm_add_one(struct ib_device *device);
55static void cm_remove_one(struct ib_device *device);
56
57static struct ib_client cm_client = {
58 .name = "cm",
59 .add = cm_add_one,
60 .remove = cm_remove_one
61};
62
63static struct ib_cm {
64 spinlock_t lock;
65 struct list_head device_list;
66 rwlock_t device_lock;
67 struct rb_root listen_service_table;
68 u64 listen_service_id;
69 /* struct rb_root peer_service_table; todo: fix peer to peer */
70 struct rb_root remote_qp_table;
71 struct rb_root remote_id_table;
72 struct rb_root remote_sidr_table;
73 struct idr local_id_table;
74 struct workqueue_struct *wq;
75} cm;
76
77struct cm_port {
78 struct cm_device *cm_dev;
79 struct ib_mad_agent *mad_agent;
80 u8 port_num;
81};
82
83struct cm_device {
84 struct list_head list;
85 struct ib_device *device;
86 u64 ca_guid;
87 struct cm_port port[0];
88};
89
90struct cm_av {
91 struct cm_port *port;
92 union ib_gid dgid;
93 struct ib_ah_attr ah_attr;
94 u16 pkey_index;
95 u8 packet_life_time;
96};
97
98struct cm_work {
99 struct work_struct work;
100 struct list_head list;
101 struct cm_port *port;
102 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
103 u32 local_id; /* Established / timewait */
104 u32 remote_id;
105 struct ib_cm_event cm_event;
106 struct ib_sa_path_rec path[0];
107};
108
109struct cm_timewait_info {
110 struct cm_work work; /* Must be first. */
111 struct rb_node remote_qp_node;
112 struct rb_node remote_id_node;
113 u64 remote_ca_guid;
114 u32 remote_qpn;
115 u8 inserted_remote_qp;
116 u8 inserted_remote_id;
117};
118
119struct cm_id_private {
120 struct ib_cm_id id;
121
122 struct rb_node service_node;
123 struct rb_node sidr_id_node;
124 spinlock_t lock;
125 wait_queue_head_t wait;
126 atomic_t refcount;
127
128 struct ib_mad_send_buf *msg;
129 struct cm_timewait_info *timewait_info;
130 /* todo: use alternate port on send failure */
131 struct cm_av av;
132 struct cm_av alt_av;
133
134 void *private_data;
135 u64 tid;
136 u32 local_qpn;
137 u32 remote_qpn;
138 u32 sq_psn;
139 u32 rq_psn;
140 int timeout_ms;
141 enum ib_mtu path_mtu;
142 u8 private_data_len;
143 u8 max_cm_retries;
144 u8 peer_to_peer;
145 u8 responder_resources;
146 u8 initiator_depth;
147 u8 local_ack_timeout;
148 u8 retry_count;
149 u8 rnr_retry_count;
150 u8 service_timeout;
151
152 struct list_head work_list;
153 atomic_t work_count;
154};
155
156static void cm_work_handler(void *data);
157
158static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
159{
160 if (atomic_dec_and_test(&cm_id_priv->refcount))
161 wake_up(&cm_id_priv->wait);
162}
163
164static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
165 struct ib_mad_send_buf **msg)
166{
167 struct ib_mad_agent *mad_agent;
168 struct ib_mad_send_buf *m;
169 struct ib_ah *ah;
170
171 mad_agent = cm_id_priv->av.port->mad_agent;
172 ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
173 if (IS_ERR(ah))
174 return PTR_ERR(ah);
175
176 m = ib_create_send_mad(mad_agent, 1, cm_id_priv->av.pkey_index,
177 ah, 0, sizeof(struct ib_mad_hdr),
178 sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
179 GFP_ATOMIC);
180 if (IS_ERR(m)) {
181 ib_destroy_ah(ah);
182 return PTR_ERR(m);
183 }
184
185 /* Timeout set by caller if response is expected. */
186 m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries;
187
188 atomic_inc(&cm_id_priv->refcount);
189 m->context[0] = cm_id_priv;
190 *msg = m;
191 return 0;
192}
193
194static int cm_alloc_response_msg(struct cm_port *port,
195 struct ib_mad_recv_wc *mad_recv_wc,
196 struct ib_mad_send_buf **msg)
197{
198 struct ib_mad_send_buf *m;
199 struct ib_ah *ah;
200
201 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
202 mad_recv_wc->recv_buf.grh, port->port_num);
203 if (IS_ERR(ah))
204 return PTR_ERR(ah);
205
206 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
207 ah, 0, sizeof(struct ib_mad_hdr),
208 sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
209 GFP_ATOMIC);
210 if (IS_ERR(m)) {
211 ib_destroy_ah(ah);
212 return PTR_ERR(m);
213 }
214 *msg = m;
215 return 0;
216}
217
218static void cm_free_msg(struct ib_mad_send_buf *msg)
219{
220 ib_destroy_ah(msg->send_wr.wr.ud.ah);
221 if (msg->context[0])
222 cm_deref_id(msg->context[0]);
223 ib_free_send_mad(msg);
224}
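/*
 * Pairing note: cm_alloc_msg() takes a cm_id reference and stores it in
 * context[0], and cm_free_msg() drops it above; response messages from
 * cm_alloc_response_msg() never set context[0], which is what the NULL
 * check guards against.
 */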
225
226static void * cm_copy_private_data(const void *private_data,
227 u8 private_data_len)
228{
229 void *data;
230
231 if (!private_data || !private_data_len)
232 return NULL;
233
234 data = kmalloc(private_data_len, GFP_KERNEL);
235 if (!data)
236 return ERR_PTR(-ENOMEM);
237
238 memcpy(data, private_data, private_data_len);
239 return data;
240}
241
242static void cm_set_private_data(struct cm_id_private *cm_id_priv,
243 void *private_data, u8 private_data_len)
244{
245 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
246 kfree(cm_id_priv->private_data);
247
248 cm_id_priv->private_data = private_data;
249 cm_id_priv->private_data_len = private_data_len;
250}
251
252static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
253 u16 dlid, u8 sl, u16 src_path_bits)
254{
255	memset(ah_attr, 0, sizeof *ah_attr);
256 ah_attr->dlid = be16_to_cpu(dlid);
257 ah_attr->sl = sl;
258 ah_attr->src_path_bits = src_path_bits;
259 ah_attr->port_num = port_num;
260}
261
262static void cm_init_av_for_response(struct cm_port *port,
263 struct ib_wc *wc, struct cm_av *av)
264{
265 av->port = port;
266 av->pkey_index = wc->pkey_index;
267 cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid),
268 wc->sl, wc->dlid_path_bits);
269}
270
271static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
272{
273 struct cm_device *cm_dev;
274 struct cm_port *port = NULL;
275 unsigned long flags;
276 int ret;
277 u8 p;
278
279 read_lock_irqsave(&cm.device_lock, flags);
280 list_for_each_entry(cm_dev, &cm.device_list, list) {
281 if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
282 &p, NULL)) {
283 port = &cm_dev->port[p-1];
284 break;
285 }
286 }
287 read_unlock_irqrestore(&cm.device_lock, flags);
288
289 if (!port)
290 return -EINVAL;
291
292 ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
293 be16_to_cpu(path->pkey), &av->pkey_index);
294 if (ret)
295 return ret;
296
297 av->port = port;
298 cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid,
299 path->sl, path->slid & 0x7F);
300 av->packet_life_time = path->packet_life_time;
301 return 0;
302}
303
304static int cm_alloc_id(struct cm_id_private *cm_id_priv)
305{
306 unsigned long flags;
307 int ret;
308
309 do {
310 spin_lock_irqsave(&cm.lock, flags);
311 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
312 (int *) &cm_id_priv->id.local_id);
313 spin_unlock_irqrestore(&cm.lock, flags);
314	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
315 return ret;
316}
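/*
 * Note on the loop above: idr_get_new_above() returns -EAGAIN when the
 * idr's preallocated layers run out, and idr_pre_get(..., GFP_KERNEL)
 * refills them; it may sleep, which is why it runs outside the spinlock.
 */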
317
318static void cm_free_id(u32 local_id)
319{
320 unsigned long flags;
321
322 spin_lock_irqsave(&cm.lock, flags);
323 idr_remove(&cm.local_id_table, (int) local_id);
324 spin_unlock_irqrestore(&cm.lock, flags);
325}
326
327static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
328{
329 struct cm_id_private *cm_id_priv;
330
331 cm_id_priv = idr_find(&cm.local_id_table, (int) local_id);
332 if (cm_id_priv) {
333 if (cm_id_priv->id.remote_id == remote_id)
334 atomic_inc(&cm_id_priv->refcount);
335 else
336 cm_id_priv = NULL;
337 }
338
339 return cm_id_priv;
340}
341
342static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id)
343{
344 struct cm_id_private *cm_id_priv;
345 unsigned long flags;
346
347 spin_lock_irqsave(&cm.lock, flags);
348 cm_id_priv = cm_get_id(local_id, remote_id);
349 spin_unlock_irqrestore(&cm.lock, flags);
350
351 return cm_id_priv;
352}
353
354static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
355{
356 struct rb_node **link = &cm.listen_service_table.rb_node;
357 struct rb_node *parent = NULL;
358 struct cm_id_private *cur_cm_id_priv;
359 u64 service_id = cm_id_priv->id.service_id;
360 u64 service_mask = cm_id_priv->id.service_mask;
361
362 while (*link) {
363 parent = *link;
364 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
365 service_node);
366 if ((cur_cm_id_priv->id.service_mask & service_id) ==
367 (service_mask & cur_cm_id_priv->id.service_id))
368 return cm_id_priv;
369 if (service_id < cur_cm_id_priv->id.service_id)
370 link = &(*link)->rb_left;
371 else
372 link = &(*link)->rb_right;
373 }
374 rb_link_node(&cm_id_priv->service_node, parent, link);
375 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
376 return NULL;
377}
378
379static struct cm_id_private * cm_find_listen(u64 service_id)
380{
381 struct rb_node *node = cm.listen_service_table.rb_node;
382 struct cm_id_private *cm_id_priv;
383
384 while (node) {
385 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
386 if ((cm_id_priv->id.service_mask & service_id) ==
387 (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
388 return cm_id_priv;
389 if (service_id < cm_id_priv->id.service_id)
390 node = node->rb_left;
391 else
392 node = node->rb_right;
393 }
394 return NULL;
395}
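/*
 * Illustrative example: a listener registered with service_id 0x1000 and
 * service_mask 0xFFFFFFFFFFFFF000 matches any incoming service_id in
 * 0x1000-0x1FFF; a mask of all ones matches exactly one service_id.
 */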
396
397static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
398 *timewait_info)
399{
400 struct rb_node **link = &cm.remote_id_table.rb_node;
401 struct rb_node *parent = NULL;
402 struct cm_timewait_info *cur_timewait_info;
403 u64 remote_ca_guid = timewait_info->remote_ca_guid;
404 u32 remote_id = timewait_info->work.remote_id;
405
406 while (*link) {
407 parent = *link;
408 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
409 remote_id_node);
410 if (remote_id < cur_timewait_info->work.remote_id)
411 link = &(*link)->rb_left;
412 else if (remote_id > cur_timewait_info->work.remote_id)
413 link = &(*link)->rb_right;
414 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
415 link = &(*link)->rb_left;
416 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
417 link = &(*link)->rb_right;
418 else
419 return cur_timewait_info;
420 }
421 timewait_info->inserted_remote_id = 1;
422 rb_link_node(&timewait_info->remote_id_node, parent, link);
423 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
424 return NULL;
425}
426
427static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid,
428 u32 remote_id)
429{
430 struct rb_node *node = cm.remote_id_table.rb_node;
431 struct cm_timewait_info *timewait_info;
432
433 while (node) {
434 timewait_info = rb_entry(node, struct cm_timewait_info,
435 remote_id_node);
436 if (remote_id < timewait_info->work.remote_id)
437 node = node->rb_left;
438 else if (remote_id > timewait_info->work.remote_id)
439 node = node->rb_right;
440 else if (remote_ca_guid < timewait_info->remote_ca_guid)
441 node = node->rb_left;
442 else if (remote_ca_guid > timewait_info->remote_ca_guid)
443 node = node->rb_right;
444 else
445 return timewait_info;
446 }
447 return NULL;
448}
449
450static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
451 *timewait_info)
452{
453 struct rb_node **link = &cm.remote_qp_table.rb_node;
454 struct rb_node *parent = NULL;
455 struct cm_timewait_info *cur_timewait_info;
456 u64 remote_ca_guid = timewait_info->remote_ca_guid;
457 u32 remote_qpn = timewait_info->remote_qpn;
458
459 while (*link) {
460 parent = *link;
461 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
462 remote_qp_node);
463 if (remote_qpn < cur_timewait_info->remote_qpn)
464 link = &(*link)->rb_left;
465 else if (remote_qpn > cur_timewait_info->remote_qpn)
466 link = &(*link)->rb_right;
467 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
468 link = &(*link)->rb_left;
469 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
470 link = &(*link)->rb_right;
471 else
472 return cur_timewait_info;
473 }
474 timewait_info->inserted_remote_qp = 1;
475 rb_link_node(&timewait_info->remote_qp_node, parent, link);
476 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
477 return NULL;
478}
479
480static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
481 *cm_id_priv)
482{
483 struct rb_node **link = &cm.remote_sidr_table.rb_node;
484 struct rb_node *parent = NULL;
485 struct cm_id_private *cur_cm_id_priv;
486 union ib_gid *port_gid = &cm_id_priv->av.dgid;
487 u32 remote_id = cm_id_priv->id.remote_id;
488
489 while (*link) {
490 parent = *link;
491 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
492 sidr_id_node);
493 if (remote_id < cur_cm_id_priv->id.remote_id)
494 link = &(*link)->rb_left;
495 else if (remote_id > cur_cm_id_priv->id.remote_id)
496 link = &(*link)->rb_right;
497 else {
498 int cmp;
499 cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
500 sizeof *port_gid);
501 if (cmp < 0)
502 link = &(*link)->rb_left;
503 else if (cmp > 0)
504 link = &(*link)->rb_right;
505 else
506 return cur_cm_id_priv;
507 }
508 }
509 rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
510 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
511 return NULL;
512}
513
514static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
515 enum ib_cm_sidr_status status)
516{
517 struct ib_cm_sidr_rep_param param;
518
519 memset(&param, 0, sizeof param);
520 param.status = status;
521 ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
522}
523
524struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
525 void *context)
526{
527 struct cm_id_private *cm_id_priv;
528 int ret;
529
530 cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
531 if (!cm_id_priv)
532 return ERR_PTR(-ENOMEM);
533
534 memset(cm_id_priv, 0, sizeof *cm_id_priv);
535 cm_id_priv->id.state = IB_CM_IDLE;
536 cm_id_priv->id.cm_handler = cm_handler;
537 cm_id_priv->id.context = context;
538 ret = cm_alloc_id(cm_id_priv);
539 if (ret)
540 goto error;
541
542 spin_lock_init(&cm_id_priv->lock);
543 init_waitqueue_head(&cm_id_priv->wait);
544 INIT_LIST_HEAD(&cm_id_priv->work_list);
545 atomic_set(&cm_id_priv->work_count, -1);
546 atomic_set(&cm_id_priv->refcount, 1);
547 return &cm_id_priv->id;
548
549error:
550 kfree(cm_id_priv);
551 	return ERR_PTR(ret);
552}
553EXPORT_SYMBOL(ib_create_cm_id);
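/*
 * Illustrative consumer sketch (my_handler, my_ctx and MY_SERVICE_ID
 * are placeholders, not symbols defined in this file):
 *
 *	struct ib_cm_id *id;
 *	int ret;
 *
 *	id = ib_create_cm_id(my_handler, my_ctx);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = ib_cm_listen(id, MY_SERVICE_ID, 0);
 *	if (ret) {
 *		ib_destroy_cm_id(id);
 *		return ret;
 *	}
 *	...
 *	ib_destroy_cm_id(id);
 */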
554
555static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
556{
557 struct cm_work *work;
558
559 if (list_empty(&cm_id_priv->work_list))
560 return NULL;
561
562 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
563 list_del(&work->list);
564 return work;
565}
566
567static void cm_free_work(struct cm_work *work)
568{
569 if (work->mad_recv_wc)
570 ib_free_recv_mad(work->mad_recv_wc);
571 kfree(work);
572}
573
574static inline int cm_convert_to_ms(int iba_time)
575{
576 /* approximate conversion to ms from 4.096us x 2^iba_time */
577 return 1 << max(iba_time - 8, 0);
578}
579
580static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
581{
582 unsigned long flags;
583
584 if (!timewait_info->inserted_remote_id &&
585 !timewait_info->inserted_remote_qp)
586 return;
587
588 spin_lock_irqsave(&cm.lock, flags);
589 if (timewait_info->inserted_remote_id) {
590 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
591 timewait_info->inserted_remote_id = 0;
592 }
593
594 if (timewait_info->inserted_remote_qp) {
595 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
596 timewait_info->inserted_remote_qp = 0;
597 }
598 spin_unlock_irqrestore(&cm.lock, flags);
599}
600
601static struct cm_timewait_info * cm_create_timewait_info(u32 local_id)
602{
603 struct cm_timewait_info *timewait_info;
604
605 timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
606 if (!timewait_info)
607 return ERR_PTR(-ENOMEM);
608 memset(timewait_info, 0, sizeof *timewait_info);
609
610 timewait_info->work.local_id = local_id;
611 INIT_WORK(&timewait_info->work.work, cm_work_handler,
612 &timewait_info->work);
613 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
614 return timewait_info;
615}
616
617static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
618{
619 int wait_time;
620
621 /*
622 * The cm_id could be destroyed by the user before we exit timewait.
623 	 * To protect against this, we look the cm_id up again once the
624 	 * timewait period expires, before notifying the user of the exit.
625 */
626 cm_id_priv->id.state = IB_CM_TIMEWAIT;
627 wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
628 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
629 msecs_to_jiffies(wait_time));
630 cm_id_priv->timewait_info = NULL;
631}
632
633static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
634{
635 cm_id_priv->id.state = IB_CM_IDLE;
636 if (cm_id_priv->timewait_info) {
637 cm_cleanup_timewait(cm_id_priv->timewait_info);
638 kfree(cm_id_priv->timewait_info);
639 cm_id_priv->timewait_info = NULL;
640 }
641}
642
643void ib_destroy_cm_id(struct ib_cm_id *cm_id)
644{
645 struct cm_id_private *cm_id_priv;
646 struct cm_work *work;
647 unsigned long flags;
648
649 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
650retest:
651 spin_lock_irqsave(&cm_id_priv->lock, flags);
652 switch (cm_id->state) {
653 case IB_CM_LISTEN:
654 cm_id->state = IB_CM_IDLE;
655 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
656 spin_lock_irqsave(&cm.lock, flags);
657 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
658 spin_unlock_irqrestore(&cm.lock, flags);
659 break;
660 case IB_CM_SIDR_REQ_SENT:
661 cm_id->state = IB_CM_IDLE;
662 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
663 (unsigned long) cm_id_priv->msg);
664 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
665 break;
666 case IB_CM_SIDR_REQ_RCVD:
667 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
668 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
669 break;
670 case IB_CM_REQ_SENT:
671 case IB_CM_MRA_REQ_RCVD:
672 case IB_CM_REP_SENT:
673 case IB_CM_MRA_REP_RCVD:
674 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
675 (unsigned long) cm_id_priv->msg);
676 /* Fall through */
677 case IB_CM_REQ_RCVD:
678 case IB_CM_MRA_REQ_SENT:
679 case IB_CM_REP_RCVD:
680 case IB_CM_MRA_REP_SENT:
681 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
682 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
683 &cm_id_priv->av.port->cm_dev->ca_guid,
684 sizeof cm_id_priv->av.port->cm_dev->ca_guid,
685 NULL, 0);
686 break;
687 case IB_CM_ESTABLISHED:
688 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
689 ib_send_cm_dreq(cm_id, NULL, 0);
690 goto retest;
691 case IB_CM_DREQ_SENT:
692 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
693 (unsigned long) cm_id_priv->msg);
694 cm_enter_timewait(cm_id_priv);
695 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
696 break;
697 case IB_CM_DREQ_RCVD:
698 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
699 ib_send_cm_drep(cm_id, NULL, 0);
700 break;
701 default:
702 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
703 break;
704 }
705
706 cm_free_id(cm_id->local_id);
707 atomic_dec(&cm_id_priv->refcount);
708 wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
709 while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
710 cm_free_work(work);
711 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
712 kfree(cm_id_priv->private_data);
713 kfree(cm_id_priv);
714}
715EXPORT_SYMBOL(ib_destroy_cm_id);
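/*
 * Teardown ordering note: cm_free_id() removes the id from the lookup
 * table so no new references can be taken, the creation reference is
 * then dropped, and wait_event() blocks until any handler still
 * holding a transient reference releases it through cm_deref_id().
 */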
716
717int ib_cm_listen(struct ib_cm_id *cm_id,
718 u64 service_id,
719 u64 service_mask)
720{
721 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
722 unsigned long flags;
723 int ret = 0;
724
725 service_mask = service_mask ? service_mask : ~0ULL;
726 service_id &= service_mask;
727 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
728 (service_id != IB_CM_ASSIGN_SERVICE_ID))
729 return -EINVAL;
730
731 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
732 BUG_ON(cm_id->state != IB_CM_IDLE);
733
734 cm_id->state = IB_CM_LISTEN;
735
736 spin_lock_irqsave(&cm.lock, flags);
737 if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
738 cm_id->service_id = __cpu_to_be64(cm.listen_service_id++);
739 cm_id->service_mask = ~0ULL;
740 } else {
741 cm_id->service_id = service_id;
742 cm_id->service_mask = service_mask;
743 }
744 cur_cm_id_priv = cm_insert_listen(cm_id_priv);
745 spin_unlock_irqrestore(&cm.lock, flags);
746
747 if (cur_cm_id_priv) {
748 cm_id->state = IB_CM_IDLE;
749 ret = -EBUSY;
750 }
751 return ret;
752}
753EXPORT_SYMBOL(ib_cm_listen);
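/*
 * Example (illustrative): ib_cm_listen(cm_id, 0x2ULL, 0) listens for
 * service ID 2 exactly, since the zero mask is promoted to ~0ULL
 * above; passing IB_CM_ASSIGN_SERVICE_ID instead has the CM assign an
 * unused service ID from its internal counter.
 */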
754
755static u64 cm_form_tid(struct cm_id_private *cm_id_priv,
756 enum cm_msg_sequence msg_seq)
757{
758 u64 hi_tid, low_tid;
759
760 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
761 low_tid = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
762 return cpu_to_be64(hi_tid | low_tid);
763}
764
765static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
766 enum cm_msg_attr_id attr_id, u64 tid)
767{
768 hdr->base_version = IB_MGMT_BASE_VERSION;
769 hdr->mgmt_class = IB_MGMT_CLASS_CM;
770 hdr->class_version = IB_CM_CLASS_VERSION;
771 hdr->method = IB_MGMT_METHOD_SEND;
772 hdr->attr_id = attr_id;
773 hdr->tid = tid;
774}
775
776static void cm_format_req(struct cm_req_msg *req_msg,
777 struct cm_id_private *cm_id_priv,
778 struct ib_cm_req_param *param)
779{
780 cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
781 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
782
783 req_msg->local_comm_id = cm_id_priv->id.local_id;
784 req_msg->service_id = param->service_id;
785 req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
786 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
787 cm_req_set_resp_res(req_msg, param->responder_resources);
788 cm_req_set_init_depth(req_msg, param->initiator_depth);
789 cm_req_set_remote_resp_timeout(req_msg,
790 param->remote_cm_response_timeout);
791 cm_req_set_qp_type(req_msg, param->qp_type);
792 cm_req_set_flow_ctrl(req_msg, param->flow_control);
793 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
794 cm_req_set_local_resp_timeout(req_msg,
795 param->local_cm_response_timeout);
796 cm_req_set_retry_count(req_msg, param->retry_count);
797 req_msg->pkey = param->primary_path->pkey;
798 cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
799 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
800 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
801 cm_req_set_srq(req_msg, param->srq);
802
803 req_msg->primary_local_lid = param->primary_path->slid;
804 req_msg->primary_remote_lid = param->primary_path->dlid;
805 req_msg->primary_local_gid = param->primary_path->sgid;
806 req_msg->primary_remote_gid = param->primary_path->dgid;
807 cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
808 cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
809 req_msg->primary_traffic_class = param->primary_path->traffic_class;
810 req_msg->primary_hop_limit = param->primary_path->hop_limit;
811 cm_req_set_primary_sl(req_msg, param->primary_path->sl);
812 cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
813 cm_req_set_primary_local_ack_timeout(req_msg,
814 min(31, param->primary_path->packet_life_time + 1));
815
816 if (param->alternate_path) {
817 req_msg->alt_local_lid = param->alternate_path->slid;
818 req_msg->alt_remote_lid = param->alternate_path->dlid;
819 req_msg->alt_local_gid = param->alternate_path->sgid;
820 req_msg->alt_remote_gid = param->alternate_path->dgid;
821 cm_req_set_alt_flow_label(req_msg,
822 param->alternate_path->flow_label);
823 cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
824 req_msg->alt_traffic_class = param->alternate_path->traffic_class;
825 req_msg->alt_hop_limit = param->alternate_path->hop_limit;
826 cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
827 cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
828 cm_req_set_alt_local_ack_timeout(req_msg,
829 min(31, param->alternate_path->packet_life_time + 1));
830 }
831
832 if (param->private_data && param->private_data_len)
833 memcpy(req_msg->private_data, param->private_data,
834 param->private_data_len);
835}
836
837static inline int cm_validate_req_param(struct ib_cm_req_param *param)
838{
839 /* peer-to-peer not supported */
840 if (param->peer_to_peer)
841 return -EINVAL;
842
843 if (!param->primary_path)
844 return -EINVAL;
845
846 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
847 return -EINVAL;
848
849 if (param->private_data &&
850 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
851 return -EINVAL;
852
853 if (param->alternate_path &&
854 (param->alternate_path->pkey != param->primary_path->pkey ||
855 param->alternate_path->mtu != param->primary_path->mtu))
856 return -EINVAL;
857
858 return 0;
859}
860
861int ib_send_cm_req(struct ib_cm_id *cm_id,
862 struct ib_cm_req_param *param)
863{
864 struct cm_id_private *cm_id_priv;
865 struct ib_send_wr *bad_send_wr;
866 struct cm_req_msg *req_msg;
867 unsigned long flags;
868 int ret;
869
870 ret = cm_validate_req_param(param);
871 if (ret)
872 return ret;
873
874 /* Verify that we're not in timewait. */
875 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
876 spin_lock_irqsave(&cm_id_priv->lock, flags);
877 if (cm_id->state != IB_CM_IDLE) {
878 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
879 ret = -EINVAL;
880 goto out;
881 }
882 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
883
884 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
885 id.local_id);
886 	if (IS_ERR(cm_id_priv->timewait_info)) {
887 		ret = PTR_ERR(cm_id_priv->timewait_info); goto out; }
888
889 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
890 if (ret)
891 goto error1;
892 if (param->alternate_path) {
893 ret = cm_init_av_by_path(param->alternate_path,
894 &cm_id_priv->alt_av);
895 if (ret)
896 goto error1;
897 }
898 cm_id->service_id = param->service_id;
899 cm_id->service_mask = ~0ULL;
900 cm_id_priv->timeout_ms = cm_convert_to_ms(
901 param->primary_path->packet_life_time) * 2 +
902 cm_convert_to_ms(
903 param->remote_cm_response_timeout);
904 cm_id_priv->max_cm_retries = param->max_cm_retries;
905 cm_id_priv->initiator_depth = param->initiator_depth;
906 cm_id_priv->responder_resources = param->responder_resources;
907 cm_id_priv->retry_count = param->retry_count;
908 cm_id_priv->path_mtu = param->primary_path->mtu;
909
910 ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
911 if (ret)
912 goto error1;
913
914 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
915 cm_format_req(req_msg, cm_id_priv, param);
916 cm_id_priv->tid = req_msg->hdr.tid;
917 cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
918 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
919
920 cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
921 cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
922 cm_id_priv->local_ack_timeout =
923 cm_req_get_primary_local_ack_timeout(req_msg);
924
925 spin_lock_irqsave(&cm_id_priv->lock, flags);
926 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
927 &cm_id_priv->msg->send_wr, &bad_send_wr);
928 if (ret) {
929 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
930 goto error2;
931 }
932 BUG_ON(cm_id->state != IB_CM_IDLE);
933 cm_id->state = IB_CM_REQ_SENT;
934 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
935 return 0;
936
937error2: cm_free_msg(cm_id_priv->msg);
938error1: kfree(cm_id_priv->timewait_info);
939out: return ret;
940}
941EXPORT_SYMBOL(ib_send_cm_req);
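/*
 * Worked example for the timeout computed above: with packet_life_time
 * 14 and remote_cm_response_timeout 16, timeout_ms = 2 * 64 + 256 =
 * 384 ms between REQ retries (see cm_convert_to_ms()).
 */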
942
943static int cm_issue_rej(struct cm_port *port,
944 struct ib_mad_recv_wc *mad_recv_wc,
945 enum ib_cm_rej_reason reason,
946 enum cm_msg_response msg_rejected,
947 void *ari, u8 ari_length)
948{
949 struct ib_mad_send_buf *msg = NULL;
950 struct ib_send_wr *bad_send_wr;
951 struct cm_rej_msg *rej_msg, *rcv_msg;
952 int ret;
953
954 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
955 if (ret)
956 return ret;
957
958 /* We just need common CM header information. Cast to any message. */
959 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
960 rej_msg = (struct cm_rej_msg *) msg->mad;
961
962 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
963 rej_msg->remote_comm_id = rcv_msg->local_comm_id;
964 rej_msg->local_comm_id = rcv_msg->remote_comm_id;
965 cm_rej_set_msg_rejected(rej_msg, msg_rejected);
966 rej_msg->reason = reason;
967
968 if (ari && ari_length) {
969 cm_rej_set_reject_info_len(rej_msg, ari_length);
970 memcpy(rej_msg->ari, ari, ari_length);
971 }
972
973 ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr);
974 if (ret)
975 cm_free_msg(msg);
976
977 return ret;
978}
979
980static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid,
981 u32 local_qpn, u32 remote_qpn)
982{
983 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
984 ((local_ca_guid == remote_ca_guid) &&
985 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
986}
987
988static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
989 struct ib_sa_path_rec *primary_path,
990 struct ib_sa_path_rec *alt_path)
991{
992 memset(primary_path, 0, sizeof *primary_path);
993 primary_path->dgid = req_msg->primary_local_gid;
994 primary_path->sgid = req_msg->primary_remote_gid;
995 primary_path->dlid = req_msg->primary_local_lid;
996 primary_path->slid = req_msg->primary_remote_lid;
997 primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
998 primary_path->hop_limit = req_msg->primary_hop_limit;
999 primary_path->traffic_class = req_msg->primary_traffic_class;
1000 primary_path->reversible = 1;
1001 primary_path->pkey = req_msg->pkey;
1002 primary_path->sl = cm_req_get_primary_sl(req_msg);
1003 primary_path->mtu_selector = IB_SA_EQ;
1004 primary_path->mtu = cm_req_get_path_mtu(req_msg);
1005 primary_path->rate_selector = IB_SA_EQ;
1006 primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1007 primary_path->packet_life_time_selector = IB_SA_EQ;
1008 primary_path->packet_life_time =
1009 cm_req_get_primary_local_ack_timeout(req_msg);
1010 primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1011
1012 if (req_msg->alt_local_lid) {
1013 memset(alt_path, 0, sizeof *alt_path);
1014 alt_path->dgid = req_msg->alt_local_gid;
1015 alt_path->sgid = req_msg->alt_remote_gid;
1016 alt_path->dlid = req_msg->alt_local_lid;
1017 alt_path->slid = req_msg->alt_remote_lid;
1018 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1019 alt_path->hop_limit = req_msg->alt_hop_limit;
1020 alt_path->traffic_class = req_msg->alt_traffic_class;
1021 alt_path->reversible = 1;
1022 alt_path->pkey = req_msg->pkey;
1023 alt_path->sl = cm_req_get_alt_sl(req_msg);
1024 alt_path->mtu_selector = IB_SA_EQ;
1025 alt_path->mtu = cm_req_get_path_mtu(req_msg);
1026 alt_path->rate_selector = IB_SA_EQ;
1027 alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1028 alt_path->packet_life_time_selector = IB_SA_EQ;
1029 alt_path->packet_life_time =
1030 cm_req_get_alt_local_ack_timeout(req_msg);
1031 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1032 }
1033}
1034
1035static void cm_format_req_event(struct cm_work *work,
1036 struct cm_id_private *cm_id_priv,
1037 struct ib_cm_id *listen_id)
1038{
1039 struct cm_req_msg *req_msg;
1040 struct ib_cm_req_event_param *param;
1041
1042 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1043 param = &work->cm_event.param.req_rcvd;
1044 param->listen_id = listen_id;
1045 param->device = cm_id_priv->av.port->mad_agent->device;
1046 param->port = cm_id_priv->av.port->port_num;
1047 param->primary_path = &work->path[0];
1048 if (req_msg->alt_local_lid)
1049 param->alternate_path = &work->path[1];
1050 else
1051 param->alternate_path = NULL;
1052 param->remote_ca_guid = req_msg->local_ca_guid;
1053 param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1054 param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1055 param->qp_type = cm_req_get_qp_type(req_msg);
1056 param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1057 param->responder_resources = cm_req_get_init_depth(req_msg);
1058 param->initiator_depth = cm_req_get_resp_res(req_msg);
1059 param->local_cm_response_timeout =
1060 cm_req_get_remote_resp_timeout(req_msg);
1061 param->flow_control = cm_req_get_flow_ctrl(req_msg);
1062 param->remote_cm_response_timeout =
1063 cm_req_get_local_resp_timeout(req_msg);
1064 param->retry_count = cm_req_get_retry_count(req_msg);
1065 param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1066 param->srq = cm_req_get_srq(req_msg);
1067 work->cm_event.private_data = &req_msg->private_data;
1068}
1069
1070static void cm_process_work(struct cm_id_private *cm_id_priv,
1071 struct cm_work *work)
1072{
1073 unsigned long flags;
1074 int ret;
1075
1076 /* We will typically only have the current event to report. */
1077 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1078 cm_free_work(work);
1079
1080 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1081 spin_lock_irqsave(&cm_id_priv->lock, flags);
1082 work = cm_dequeue_work(cm_id_priv);
1083 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1084 BUG_ON(!work);
1085 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1086 &work->cm_event);
1087 cm_free_work(work);
1088 }
1089 cm_deref_id(cm_id_priv);
1090 if (ret)
1091 ib_destroy_cm_id(&cm_id_priv->id);
1092}
1093
1094static void cm_format_mra(struct cm_mra_msg *mra_msg,
1095 struct cm_id_private *cm_id_priv,
1096 enum cm_msg_response msg_mraed, u8 service_timeout,
1097 const void *private_data, u8 private_data_len)
1098{
1099 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1100 cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1101 mra_msg->local_comm_id = cm_id_priv->id.local_id;
1102 mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1103 cm_mra_set_service_timeout(mra_msg, service_timeout);
1104
1105 if (private_data && private_data_len)
1106 memcpy(mra_msg->private_data, private_data, private_data_len);
1107}
1108
1109static void cm_format_rej(struct cm_rej_msg *rej_msg,
1110 struct cm_id_private *cm_id_priv,
1111 enum ib_cm_rej_reason reason,
1112 void *ari,
1113 u8 ari_length,
1114 const void *private_data,
1115 u8 private_data_len)
1116{
1117 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1118 rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1119
1120 switch(cm_id_priv->id.state) {
1121 case IB_CM_REQ_RCVD:
1122 rej_msg->local_comm_id = 0;
1123 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1124 break;
1125 case IB_CM_MRA_REQ_SENT:
1126 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1127 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1128 break;
1129 case IB_CM_REP_RCVD:
1130 case IB_CM_MRA_REP_SENT:
1131 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1132 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1133 break;
1134 default:
1135 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1136 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1137 break;
1138 }
1139
1140 rej_msg->reason = reason;
1141 if (ari && ari_length) {
1142 cm_rej_set_reject_info_len(rej_msg, ari_length);
1143 memcpy(rej_msg->ari, ari, ari_length);
1144 }
1145
1146 if (private_data && private_data_len)
1147 memcpy(rej_msg->private_data, private_data, private_data_len);
1148}
1149
1150static void cm_dup_req_handler(struct cm_work *work,
1151 struct cm_id_private *cm_id_priv)
1152{
1153 struct ib_mad_send_buf *msg = NULL;
1154 struct ib_send_wr *bad_send_wr;
1155 unsigned long flags;
1156 int ret;
1157
1158 /* Quick state check to discard duplicate REQs. */
1159 if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1160 return;
1161
1162 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1163 if (ret)
1164 return;
1165
1166 spin_lock_irqsave(&cm_id_priv->lock, flags);
1167 switch (cm_id_priv->id.state) {
1168 case IB_CM_MRA_REQ_SENT:
1169 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1170 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1171 cm_id_priv->private_data,
1172 cm_id_priv->private_data_len);
1173 break;
1174 case IB_CM_TIMEWAIT:
1175 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1176 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1177 break;
1178 default:
1179 goto unlock;
1180 }
1181 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1182
1183 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1184 &bad_send_wr);
1185 if (ret)
1186 goto free;
1187 return;
1188
1189unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1190free: cm_free_msg(msg);
1191}
1192
1193static struct cm_id_private * cm_match_req(struct cm_work *work,
1194 struct cm_id_private *cm_id_priv)
1195{
1196 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1197 struct cm_timewait_info *timewait_info;
1198 struct cm_req_msg *req_msg;
1199 unsigned long flags;
1200
1201 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1202
1203 /* Check for duplicate REQ and stale connections. */
1204 spin_lock_irqsave(&cm.lock, flags);
1205 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1206 if (!timewait_info)
1207 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1208
1209 if (timewait_info) {
1210 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1211 timewait_info->work.remote_id);
1212 spin_unlock_irqrestore(&cm.lock, flags);
1213 if (cur_cm_id_priv) {
1214 cm_dup_req_handler(work, cur_cm_id_priv);
1215 cm_deref_id(cur_cm_id_priv);
1216 } else
1217 cm_issue_rej(work->port, work->mad_recv_wc,
1218 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1219 NULL, 0);
1220 goto error;
1221 }
1222
1223 /* Find matching listen request. */
1224 listen_cm_id_priv = cm_find_listen(req_msg->service_id);
1225 if (!listen_cm_id_priv) {
1226 spin_unlock_irqrestore(&cm.lock, flags);
1227 cm_issue_rej(work->port, work->mad_recv_wc,
1228 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1229 NULL, 0);
1230 goto error;
1231 }
1232 atomic_inc(&listen_cm_id_priv->refcount);
1233 atomic_inc(&cm_id_priv->refcount);
1234 cm_id_priv->id.state = IB_CM_REQ_RCVD;
1235 atomic_inc(&cm_id_priv->work_count);
1236 spin_unlock_irqrestore(&cm.lock, flags);
1237 return listen_cm_id_priv;
1238
1239error: cm_cleanup_timewait(cm_id_priv->timewait_info);
1240 return NULL;
1241}
1242
1243static int cm_req_handler(struct cm_work *work)
1244{
1245 struct ib_cm_id *cm_id;
1246 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1247 struct cm_req_msg *req_msg;
1248 int ret;
1249
1250 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1251
1252 cm_id = ib_create_cm_id(NULL, NULL);
1253 if (IS_ERR(cm_id))
1254 return PTR_ERR(cm_id);
1255
1256 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1257 cm_id_priv->id.remote_id = req_msg->local_comm_id;
1258 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1259 &cm_id_priv->av);
1260 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1261 id.local_id);
1262 if (IS_ERR(cm_id_priv->timewait_info)) {
1263 ret = PTR_ERR(cm_id_priv->timewait_info);
1264 goto error1;
1265 }
1266 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1267 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1268 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1269
1270 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1271 if (!listen_cm_id_priv) {
1272 ret = -EINVAL;
1273 goto error2;
1274 }
1275
1276 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1277 cm_id_priv->id.context = listen_cm_id_priv->id.context;
1278 cm_id_priv->id.service_id = req_msg->service_id;
1279 cm_id_priv->id.service_mask = ~0ULL;
1280
1281 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1282 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1283 if (ret)
1284 goto error3;
1285 if (req_msg->alt_local_lid) {
1286 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1287 if (ret)
1288 goto error3;
1289 }
1290 cm_id_priv->tid = req_msg->hdr.tid;
1291 cm_id_priv->timeout_ms = cm_convert_to_ms(
1292 cm_req_get_local_resp_timeout(req_msg));
1293 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1294 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1295 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1296 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1297 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1298 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1299 cm_id_priv->local_ack_timeout =
1300 cm_req_get_primary_local_ack_timeout(req_msg);
1301 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1302 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1303
1304 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1305 cm_process_work(cm_id_priv, work);
1306 cm_deref_id(listen_cm_id_priv);
1307 return 0;
1308
1309error3: atomic_dec(&cm_id_priv->refcount);
1310 cm_deref_id(listen_cm_id_priv);
1311 cm_cleanup_timewait(cm_id_priv->timewait_info);
1312error2: kfree(cm_id_priv->timewait_info);
1313error1: ib_destroy_cm_id(&cm_id_priv->id);
1314 return ret;
1315}
1316
1317static void cm_format_rep(struct cm_rep_msg *rep_msg,
1318 struct cm_id_private *cm_id_priv,
1319 struct ib_cm_rep_param *param)
1320{
1321 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1322 rep_msg->local_comm_id = cm_id_priv->id.local_id;
1323 rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1324 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1325 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1326 rep_msg->resp_resources = param->responder_resources;
1327 rep_msg->initiator_depth = param->initiator_depth;
1328 cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1329 cm_rep_set_failover(rep_msg, param->failover_accepted);
1330 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1331 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1332 cm_rep_set_srq(rep_msg, param->srq);
1333 rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1334
1335 if (param->private_data && param->private_data_len)
1336 memcpy(rep_msg->private_data, param->private_data,
1337 param->private_data_len);
1338}
1339
1340int ib_send_cm_rep(struct ib_cm_id *cm_id,
1341 struct ib_cm_rep_param *param)
1342{
1343 struct cm_id_private *cm_id_priv;
1344 struct ib_mad_send_buf *msg;
1345 struct cm_rep_msg *rep_msg;
1346 struct ib_send_wr *bad_send_wr;
1347 unsigned long flags;
1348 int ret;
1349
1350 if (param->private_data &&
1351 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1352 return -EINVAL;
1353
1354 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1355 spin_lock_irqsave(&cm_id_priv->lock, flags);
1356 if (cm_id->state != IB_CM_REQ_RCVD &&
1357 cm_id->state != IB_CM_MRA_REQ_SENT) {
1358 ret = -EINVAL;
1359 goto out;
1360 }
1361
1362 ret = cm_alloc_msg(cm_id_priv, &msg);
1363 if (ret)
1364 goto out;
1365
1366 rep_msg = (struct cm_rep_msg *) msg->mad;
1367 cm_format_rep(rep_msg, cm_id_priv, param);
1368 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
1369 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1370
1371 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1372 &msg->send_wr, &bad_send_wr);
1373 if (ret) {
1374 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1375 cm_free_msg(msg);
1376 return ret;
1377 }
1378
1379 cm_id->state = IB_CM_REP_SENT;
1380 cm_id_priv->msg = msg;
1381 cm_id_priv->initiator_depth = param->initiator_depth;
1382 cm_id_priv->responder_resources = param->responder_resources;
1383 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1384 cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1385
1386out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1387 return ret;
1388}
1389EXPORT_SYMBOL(ib_send_cm_rep);
1390
1391static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1392 struct cm_id_private *cm_id_priv,
1393 const void *private_data,
1394 u8 private_data_len)
1395{
1396 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1397 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1398 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1399
1400 if (private_data && private_data_len)
1401 memcpy(rtu_msg->private_data, private_data, private_data_len);
1402}
1403
1404int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1405 const void *private_data,
1406 u8 private_data_len)
1407{
1408 struct cm_id_private *cm_id_priv;
1409 struct ib_mad_send_buf *msg;
1410 struct ib_send_wr *bad_send_wr;
1411 unsigned long flags;
1412 void *data;
1413 int ret;
1414
1415 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1416 return -EINVAL;
1417
1418 data = cm_copy_private_data(private_data, private_data_len);
1419 if (IS_ERR(data))
1420 return PTR_ERR(data);
1421
1422 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1423 spin_lock_irqsave(&cm_id_priv->lock, flags);
1424 if (cm_id->state != IB_CM_REP_RCVD &&
1425 cm_id->state != IB_CM_MRA_REP_SENT) {
1426 ret = -EINVAL;
1427 goto error;
1428 }
1429
1430 ret = cm_alloc_msg(cm_id_priv, &msg);
1431 if (ret)
1432 goto error;
1433
1434 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1435 private_data, private_data_len);
1436
1437 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1438 &msg->send_wr, &bad_send_wr);
1439 if (ret) {
1440 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1441 cm_free_msg(msg);
1442 kfree(data);
1443 return ret;
1444 }
1445
1446 cm_id->state = IB_CM_ESTABLISHED;
1447 cm_set_private_data(cm_id_priv, data, private_data_len);
1448 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1449 return 0;
1450
1451error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1452 kfree(data);
1453 return ret;
1454}
1455EXPORT_SYMBOL(ib_send_cm_rtu);
1456
1457static void cm_format_rep_event(struct cm_work *work)
1458{
1459 struct cm_rep_msg *rep_msg;
1460 struct ib_cm_rep_event_param *param;
1461
1462 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1463 param = &work->cm_event.param.rep_rcvd;
1464 param->remote_ca_guid = rep_msg->local_ca_guid;
1465 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1466 param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1467 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1468 param->responder_resources = rep_msg->initiator_depth;
1469 param->initiator_depth = rep_msg->resp_resources;
1470 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1471 param->failover_accepted = cm_rep_get_failover(rep_msg);
1472 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1473 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1474 param->srq = cm_rep_get_srq(rep_msg);
1475 work->cm_event.private_data = &rep_msg->private_data;
1476}
1477
1478static void cm_dup_rep_handler(struct cm_work *work)
1479{
1480 struct cm_id_private *cm_id_priv;
1481 struct cm_rep_msg *rep_msg;
1482 struct ib_mad_send_buf *msg = NULL;
1483 struct ib_send_wr *bad_send_wr;
1484 unsigned long flags;
1485 int ret;
1486
1487 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1488 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1489 rep_msg->local_comm_id);
1490 if (!cm_id_priv)
1491 return;
1492
1493 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1494 if (ret)
1495 goto deref;
1496
1497 spin_lock_irqsave(&cm_id_priv->lock, flags);
1498 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1499 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1500 cm_id_priv->private_data,
1501 cm_id_priv->private_data_len);
1502 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1503 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1504 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1505 cm_id_priv->private_data,
1506 cm_id_priv->private_data_len);
1507 else
1508 goto unlock;
1509 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1510
1511 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1512 &bad_send_wr);
1513 if (ret)
1514 goto free;
1515 goto deref;
1516
1517unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1518free: cm_free_msg(msg);
1519deref: cm_deref_id(cm_id_priv);
1520}
1521
1522static int cm_rep_handler(struct cm_work *work)
1523{
1524 struct cm_id_private *cm_id_priv;
1525 struct cm_rep_msg *rep_msg;
1526 unsigned long flags;
1527 int ret;
1528
1529 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1530 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1531 if (!cm_id_priv) {
1532 cm_dup_rep_handler(work);
1533 return -EINVAL;
1534 }
1535
1536 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1537 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1538 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1539
1540 spin_lock_irqsave(&cm.lock, flags);
1541 /* Check for duplicate REP. */
1542 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1543 spin_unlock_irqrestore(&cm.lock, flags);
1544 ret = -EINVAL;
1545 goto error;
1546 }
1547 /* Check for a stale connection. */
1548 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1549 spin_unlock_irqrestore(&cm.lock, flags);
1550 cm_issue_rej(work->port, work->mad_recv_wc,
1551 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1552 NULL, 0);
1553 ret = -EINVAL;
1554 goto error;
1555 }
1556 spin_unlock_irqrestore(&cm.lock, flags);
1557
1558 cm_format_rep_event(work);
1559
1560 spin_lock_irqsave(&cm_id_priv->lock, flags);
1561 switch (cm_id_priv->id.state) {
1562 case IB_CM_REQ_SENT:
1563 case IB_CM_MRA_REQ_RCVD:
1564 break;
1565 default:
1566 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1567 ret = -EINVAL;
1568 goto error;
1569 }
1570 cm_id_priv->id.state = IB_CM_REP_RCVD;
1571 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1572 cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1573 cm_id_priv->initiator_depth = rep_msg->resp_resources;
1574 cm_id_priv->responder_resources = rep_msg->initiator_depth;
1575 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1576 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1577
1578 /* todo: handle peer_to_peer */
1579
1580 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1581 (unsigned long) cm_id_priv->msg);
1582 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1583 if (!ret)
1584 list_add_tail(&work->list, &cm_id_priv->work_list);
1585 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1586
1587 if (ret)
1588 cm_process_work(cm_id_priv, work);
1589 else
1590 cm_deref_id(cm_id_priv);
1591 return 0;
1592
1593error: cm_cleanup_timewait(cm_id_priv->timewait_info);
1594 cm_deref_id(cm_id_priv);
1595 return ret;
1596}
1597
1598static int cm_establish_handler(struct cm_work *work)
1599{
1600 struct cm_id_private *cm_id_priv;
1601 unsigned long flags;
1602 int ret;
1603
1604 /* See comment in ib_cm_establish about lookup. */
1605 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1606 if (!cm_id_priv)
1607 return -EINVAL;
1608
1609 spin_lock_irqsave(&cm_id_priv->lock, flags);
1610 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1611 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1612 goto out;
1613 }
1614
1615 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1616 (unsigned long) cm_id_priv->msg);
1617 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1618 if (!ret)
1619 list_add_tail(&work->list, &cm_id_priv->work_list);
1620 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1621
1622 if (ret)
1623 cm_process_work(cm_id_priv, work);
1624 else
1625 cm_deref_id(cm_id_priv);
1626 return 0;
1627out:
1628 cm_deref_id(cm_id_priv);
1629 return -EINVAL;
1630}
1631
1632static int cm_rtu_handler(struct cm_work *work)
1633{
1634 struct cm_id_private *cm_id_priv;
1635 struct cm_rtu_msg *rtu_msg;
1636 unsigned long flags;
1637 int ret;
1638
1639 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1640 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1641 rtu_msg->local_comm_id);
1642 if (!cm_id_priv)
1643 return -EINVAL;
1644
1645 work->cm_event.private_data = &rtu_msg->private_data;
1646
1647 spin_lock_irqsave(&cm_id_priv->lock, flags);
1648 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1649 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1650 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1651 goto out;
1652 }
1653 cm_id_priv->id.state = IB_CM_ESTABLISHED;
1654
1655 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1656 (unsigned long) cm_id_priv->msg);
1657 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1658 if (!ret)
1659 list_add_tail(&work->list, &cm_id_priv->work_list);
1660 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1661
1662 if (ret)
1663 cm_process_work(cm_id_priv, work);
1664 else
1665 cm_deref_id(cm_id_priv);
1666 return 0;
1667out:
1668 cm_deref_id(cm_id_priv);
1669 return -EINVAL;
1670}
1671
1672static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1673 struct cm_id_private *cm_id_priv,
1674 const void *private_data,
1675 u8 private_data_len)
1676{
1677 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1678 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1679 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1680 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1681 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1682
1683 if (private_data && private_data_len)
1684 memcpy(dreq_msg->private_data, private_data, private_data_len);
1685}
1686
1687int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1688 const void *private_data,
1689 u8 private_data_len)
1690{
1691 struct cm_id_private *cm_id_priv;
1692 struct ib_mad_send_buf *msg;
1693 struct ib_send_wr *bad_send_wr;
1694 unsigned long flags;
1695 int ret;
1696
1697 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1698 return -EINVAL;
1699
1700 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1701 spin_lock_irqsave(&cm_id_priv->lock, flags);
1702 if (cm_id->state != IB_CM_ESTABLISHED) {
1703 ret = -EINVAL;
1704 goto out;
1705 }
1706
1707 ret = cm_alloc_msg(cm_id_priv, &msg);
1708 if (ret) {
1709 cm_enter_timewait(cm_id_priv);
1710 goto out;
1711 }
1712
1713 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1714 private_data, private_data_len);
1715 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
1716 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1717
1718 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1719 &msg->send_wr, &bad_send_wr);
1720 if (ret) {
1721 cm_enter_timewait(cm_id_priv);
1722 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1723 cm_free_msg(msg);
1724 return ret;
1725 }
1726
1727 cm_id->state = IB_CM_DREQ_SENT;
1728 cm_id_priv->msg = msg;
1729out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1730 return ret;
1731}
1732EXPORT_SYMBOL(ib_send_cm_dreq);
1733
1734static void cm_format_drep(struct cm_drep_msg *drep_msg,
1735 struct cm_id_private *cm_id_priv,
1736 const void *private_data,
1737 u8 private_data_len)
1738{
1739 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1740 drep_msg->local_comm_id = cm_id_priv->id.local_id;
1741 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1742
1743 if (private_data && private_data_len)
1744 memcpy(drep_msg->private_data, private_data, private_data_len);
1745}
1746
1747int ib_send_cm_drep(struct ib_cm_id *cm_id,
1748 const void *private_data,
1749 u8 private_data_len)
1750{
1751 struct cm_id_private *cm_id_priv;
1752 struct ib_mad_send_buf *msg;
1753 struct ib_send_wr *bad_send_wr;
1754 unsigned long flags;
1755 void *data;
1756 int ret;
1757
1758 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1759 return -EINVAL;
1760
1761 data = cm_copy_private_data(private_data, private_data_len);
1762 if (IS_ERR(data))
1763 return PTR_ERR(data);
1764
1765 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1766 spin_lock_irqsave(&cm_id_priv->lock, flags);
1767 if (cm_id->state != IB_CM_DREQ_RCVD) {
1768 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1769 kfree(data);
1770 return -EINVAL;
1771 }
1772
1773 cm_set_private_data(cm_id_priv, data, private_data_len);
1774 cm_enter_timewait(cm_id_priv);
1775
1776 ret = cm_alloc_msg(cm_id_priv, &msg);
1777 if (ret)
1778 goto out;
1779
1780 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1781 private_data, private_data_len);
1782
1783 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1784 &bad_send_wr);
1785 if (ret) {
1786 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1787 cm_free_msg(msg);
1788 return ret;
1789 }
1790
1791out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1792 return ret;
1793}
1794EXPORT_SYMBOL(ib_send_cm_drep);
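/*
 * Illustrative disconnect flow: the side initiating teardown calls
 * ib_send_cm_dreq() from IB_CM_ESTABLISHED and moves to DREQ_SENT;
 * the passive side's handler, now in IB_CM_DREQ_RCVD, answers with
 * ib_send_cm_drep(), and both ends enter timewait (see
 * cm_dreq_handler() and cm_drep_handler() below).
 */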
1795
1796static int cm_dreq_handler(struct cm_work *work)
1797{
1798 struct cm_id_private *cm_id_priv;
1799 struct cm_dreq_msg *dreq_msg;
1800 struct ib_mad_send_buf *msg = NULL;
1801 struct ib_send_wr *bad_send_wr;
1802 unsigned long flags;
1803 int ret;
1804
1805 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1806 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1807 dreq_msg->local_comm_id);
1808 if (!cm_id_priv)
1809 return -EINVAL;
1810
1811 work->cm_event.private_data = &dreq_msg->private_data;
1812
1813 spin_lock_irqsave(&cm_id_priv->lock, flags);
1814 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1815 goto unlock;
1816
1817 switch (cm_id_priv->id.state) {
1818 case IB_CM_REP_SENT:
1819 case IB_CM_DREQ_SENT:
1820 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1821 (unsigned long) cm_id_priv->msg);
1822 break;
1823 case IB_CM_ESTABLISHED:
1824 case IB_CM_MRA_REP_RCVD:
1825 break;
1826 case IB_CM_TIMEWAIT:
1827 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1828 goto unlock;
1829
1830 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1831 cm_id_priv->private_data,
1832 cm_id_priv->private_data_len);
1833 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1834
1835 if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1836 &msg->send_wr, &bad_send_wr))
1837 cm_free_msg(msg);
1838 goto deref;
1839 default:
1840 goto unlock;
1841 }
1842 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1843 cm_id_priv->tid = dreq_msg->hdr.tid;
1844 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1845 if (!ret)
1846 list_add_tail(&work->list, &cm_id_priv->work_list);
1847 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1848
1849 if (ret)
1850 cm_process_work(cm_id_priv, work);
1851 else
1852 cm_deref_id(cm_id_priv);
1853 return 0;
1854
1855unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1856deref: cm_deref_id(cm_id_priv);
1857 return -EINVAL;
1858}
1859
1860static int cm_drep_handler(struct cm_work *work)
1861{
1862 struct cm_id_private *cm_id_priv;
1863 struct cm_drep_msg *drep_msg;
1864 unsigned long flags;
1865 int ret;
1866
1867 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1868 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1869 drep_msg->local_comm_id);
1870 if (!cm_id_priv)
1871 return -EINVAL;
1872
1873 work->cm_event.private_data = &drep_msg->private_data;
1874
1875 spin_lock_irqsave(&cm_id_priv->lock, flags);
1876 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1877 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1878 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1879 goto out;
1880 }
1881 cm_enter_timewait(cm_id_priv);
1882
1883 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1884 (unsigned long) cm_id_priv->msg);
1885 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1886 if (!ret)
1887 list_add_tail(&work->list, &cm_id_priv->work_list);
1888 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1889
1890 if (ret)
1891 cm_process_work(cm_id_priv, work);
1892 else
1893 cm_deref_id(cm_id_priv);
1894 return 0;
1895out:
1896 cm_deref_id(cm_id_priv);
1897 return -EINVAL;
1898}
1899
1900int ib_send_cm_rej(struct ib_cm_id *cm_id,
1901 enum ib_cm_rej_reason reason,
1902 void *ari,
1903 u8 ari_length,
1904 const void *private_data,
1905 u8 private_data_len)
1906{
1907 struct cm_id_private *cm_id_priv;
1908 struct ib_mad_send_buf *msg;
1909 struct ib_send_wr *bad_send_wr;
1910 unsigned long flags;
1911 int ret;
1912
1913 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
1914 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
1915 return -EINVAL;
1916
1917 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1918
1919 spin_lock_irqsave(&cm_id_priv->lock, flags);
1920 switch (cm_id->state) {
1921 case IB_CM_REQ_SENT:
1922 case IB_CM_MRA_REQ_RCVD:
1923 case IB_CM_REQ_RCVD:
1924 case IB_CM_MRA_REQ_SENT:
1925 case IB_CM_REP_RCVD:
1926 case IB_CM_MRA_REP_SENT:
1927 ret = cm_alloc_msg(cm_id_priv, &msg);
1928 if (!ret)
1929 cm_format_rej((struct cm_rej_msg *) msg->mad,
1930 cm_id_priv, reason, ari, ari_length,
1931 private_data, private_data_len);
1932
1933 cm_reset_to_idle(cm_id_priv);
1934 break;
1935 case IB_CM_REP_SENT:
1936 case IB_CM_MRA_REP_RCVD:
1937 ret = cm_alloc_msg(cm_id_priv, &msg);
1938 if (!ret)
1939 cm_format_rej((struct cm_rej_msg *) msg->mad,
1940 cm_id_priv, reason, ari, ari_length,
1941 private_data, private_data_len);
1942
1943 cm_enter_timewait(cm_id_priv);
1944 break;
1945 default:
1946 ret = -EINVAL;
1947 goto out;
1948 }
1949
1950 if (ret)
1951 goto out;
1952
1953 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1954 &msg->send_wr, &bad_send_wr);
1955 if (ret)
1956 cm_free_msg(msg);
1957
1958out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1959 return ret;
1960}
1961EXPORT_SYMBOL(ib_send_cm_rej);
1962
1963static void cm_format_rej_event(struct cm_work *work)
1964{
1965 struct cm_rej_msg *rej_msg;
1966 struct ib_cm_rej_event_param *param;
1967
1968 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
1969 param = &work->cm_event.param.rej_rcvd;
1970 param->ari = rej_msg->ari;
1971 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
1972 param->reason = rej_msg->reason;
1973 work->cm_event.private_data = &rej_msg->private_data;
1974}
1975
1976static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
1977{
1978 struct cm_timewait_info *timewait_info;
1979 struct cm_id_private *cm_id_priv;
1980 unsigned long flags;
1981 u32 remote_id;
1982
1983 remote_id = rej_msg->local_comm_id;
1984
1985 if (rej_msg->reason == IB_CM_REJ_TIMEOUT) {
1986 spin_lock_irqsave(&cm.lock, flags);
1987 		timewait_info = cm_find_remote_id(*((u64 *) rej_msg->ari),
1988 remote_id);
1989 if (!timewait_info) {
1990 spin_unlock_irqrestore(&cm.lock, flags);
1991 return NULL;
1992 }
1993 cm_id_priv = idr_find(&cm.local_id_table,
1994 (int) timewait_info->work.local_id);
1995 if (cm_id_priv) {
1996 if (cm_id_priv->id.remote_id == remote_id)
1997 atomic_inc(&cm_id_priv->refcount);
1998 else
1999 cm_id_priv = NULL;
2000 }
2001 spin_unlock_irqrestore(&cm.lock, flags);
2002 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2003 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2004 else
2005 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2006
2007 return cm_id_priv;
2008}
2009
2010static int cm_rej_handler(struct cm_work *work)
2011{
2012 struct cm_id_private *cm_id_priv;
2013 struct cm_rej_msg *rej_msg;
2014 unsigned long flags;
2015 int ret;
2016
2017 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2018 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2019 if (!cm_id_priv)
2020 return -EINVAL;
2021
2022 cm_format_rej_event(work);
2023
2024 spin_lock_irqsave(&cm_id_priv->lock, flags);
2025 switch (cm_id_priv->id.state) {
2026 case IB_CM_REQ_SENT:
2027 case IB_CM_MRA_REQ_RCVD:
2028 case IB_CM_REP_SENT:
2029 case IB_CM_MRA_REP_RCVD:
2030 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2031 (unsigned long) cm_id_priv->msg);
2032 /* fall through */
2033 case IB_CM_REQ_RCVD:
2034 case IB_CM_MRA_REQ_SENT:
2035 if (rej_msg->reason == IB_CM_REJ_STALE_CONN)
2036 cm_enter_timewait(cm_id_priv);
2037 else
2038 cm_reset_to_idle(cm_id_priv);
2039 break;
2040 case IB_CM_DREQ_SENT:
2041 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2042 (unsigned long) cm_id_priv->msg);
2043 /* fall through */
2044 case IB_CM_REP_RCVD:
2045 case IB_CM_MRA_REP_SENT:
2046 case IB_CM_ESTABLISHED:
2047 cm_enter_timewait(cm_id_priv);
2048 break;
2049 default:
2050 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2051 ret = -EINVAL;
2052 goto out;
2053 }
2054
2055 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2056 if (!ret)
2057 list_add_tail(&work->list, &cm_id_priv->work_list);
2058 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2059
2060 if (ret)
2061 cm_process_work(cm_id_priv, work);
2062 else
2063 cm_deref_id(cm_id_priv);
2064 return 0;
2065out:
2066 cm_deref_id(cm_id_priv);
2067 return -EINVAL;
2068}
2069
2070int ib_send_cm_mra(struct ib_cm_id *cm_id,
2071 u8 service_timeout,
2072 const void *private_data,
2073 u8 private_data_len)
2074{
2075 struct cm_id_private *cm_id_priv;
2076 struct ib_mad_send_buf *msg;
2077 struct ib_send_wr *bad_send_wr;
2078 void *data;
2079 unsigned long flags;
2080 int ret;
2081
2082 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2083 return -EINVAL;
2084
2085 data = cm_copy_private_data(private_data, private_data_len);
2086 if (IS_ERR(data))
2087 return PTR_ERR(data);
2088
2089 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2090
2091 spin_lock_irqsave(&cm_id_priv->lock, flags);
2092 switch(cm_id_priv->id.state) {
2093 case IB_CM_REQ_RCVD:
2094 ret = cm_alloc_msg(cm_id_priv, &msg);
2095 if (ret)
2096 goto error1;
2097
2098 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2099 CM_MSG_RESPONSE_REQ, service_timeout,
2100 private_data, private_data_len);
2101 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2102 &msg->send_wr, &bad_send_wr);
2103 if (ret)
2104 goto error2;
2105 cm_id->state = IB_CM_MRA_REQ_SENT;
2106 break;
2107 case IB_CM_REP_RCVD:
2108 ret = cm_alloc_msg(cm_id_priv, &msg);
2109 if (ret)
2110 goto error1;
2111
2112 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2113 CM_MSG_RESPONSE_REP, service_timeout,
2114 private_data, private_data_len);
2115 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2116 &msg->send_wr, &bad_send_wr);
2117 if (ret)
2118 goto error2;
2119 cm_id->state = IB_CM_MRA_REP_SENT;
2120 break;
2121 case IB_CM_ESTABLISHED:
2122 ret = cm_alloc_msg(cm_id_priv, &msg);
2123 if (ret)
2124 goto error1;
2125
2126 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2127 CM_MSG_RESPONSE_OTHER, service_timeout,
2128 private_data, private_data_len);
2129 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2130 &msg->send_wr, &bad_send_wr);
2131 if (ret)
2132 goto error2;
2133 cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2134 break;
2135 default:
2136 ret = -EINVAL;
2137 goto error1;
2138 }
2139 cm_id_priv->service_timeout = service_timeout;
2140 cm_set_private_data(cm_id_priv, data, private_data_len);
2141 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2142 return 0;
2143
2144error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2145 kfree(data);
2146 return ret;
2147
2148error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2149 kfree(data);
2150 cm_free_msg(msg);
2151 return ret;
2152}
2153EXPORT_SYMBOL(ib_send_cm_mra);
2154
2155static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2156{
2157 switch (cm_mra_get_msg_mraed(mra_msg)) {
2158 case CM_MSG_RESPONSE_REQ:
2159 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2160 case CM_MSG_RESPONSE_REP:
2161 case CM_MSG_RESPONSE_OTHER:
2162 return cm_acquire_id(mra_msg->remote_comm_id,
2163 mra_msg->local_comm_id);
2164 default:
2165 return NULL;
2166 }
2167}
2168
2169static int cm_mra_handler(struct cm_work *work)
2170{
2171 struct cm_id_private *cm_id_priv;
2172 struct cm_mra_msg *mra_msg;
2173 unsigned long flags;
2174 int timeout, ret;
2175
2176 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2177 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2178 if (!cm_id_priv)
2179 return -EINVAL;
2180
2181 work->cm_event.private_data = &mra_msg->private_data;
2182 work->cm_event.param.mra_rcvd.service_timeout =
2183 cm_mra_get_service_timeout(mra_msg);
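	/* wait the peer's advertised service timeout plus one packet life time */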
2184 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2185 cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2186
2187 spin_lock_irqsave(&cm_id_priv->lock, flags);
2188 switch (cm_id_priv->id.state) {
2189 case IB_CM_REQ_SENT:
2190 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2191 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2192 (unsigned long) cm_id_priv->msg, timeout))
2193 goto out;
2194 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2195 break;
2196 case IB_CM_REP_SENT:
2197 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2198 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2199 (unsigned long) cm_id_priv->msg, timeout))
2200 goto out;
2201 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2202 break;
2203 case IB_CM_ESTABLISHED:
2204 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2205 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2206 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2207 (unsigned long) cm_id_priv->msg, timeout))
2208 goto out;
2209 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2210 break;
2211 default:
2212 goto out;
2213 }
2214
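	/*
	 * Re-stamp the outstanding MAD with the new state so that a send
	 * completion error is matched against the right state.
	 */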
2215 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2216 cm_id_priv->id.state;
2217 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2218 if (!ret)
2219 list_add_tail(&work->list, &cm_id_priv->work_list);
2220 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2221
2222 if (ret)
2223 cm_process_work(cm_id_priv, work);
2224 else
2225 cm_deref_id(cm_id_priv);
2226 return 0;
2227out:
2228 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2229 cm_deref_id(cm_id_priv);
2230 return -EINVAL;
2231}
2232
2233static void cm_format_lap(struct cm_lap_msg *lap_msg,
2234 struct cm_id_private *cm_id_priv,
2235 struct ib_sa_path_rec *alternate_path,
2236 const void *private_data,
2237 u8 private_data_len)
2238{
2239 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2240 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2241 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2242 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2243 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2244 /* todo: need remote CM response timeout */
2245 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2246 lap_msg->alt_local_lid = alternate_path->slid;
2247 lap_msg->alt_remote_lid = alternate_path->dlid;
2248 lap_msg->alt_local_gid = alternate_path->sgid;
2249 lap_msg->alt_remote_gid = alternate_path->dgid;
2250 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2251 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2252 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2253 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2254 cm_lap_set_sl(lap_msg, alternate_path->sl);
2255 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2256 cm_lap_set_local_ack_timeout(lap_msg,
2257 min(31, alternate_path->packet_life_time + 1));
2258
2259 if (private_data && private_data_len)
2260 memcpy(lap_msg->private_data, private_data, private_data_len);
2261}
2262
2263int ib_send_cm_lap(struct ib_cm_id *cm_id,
2264 struct ib_sa_path_rec *alternate_path,
2265 const void *private_data,
2266 u8 private_data_len)
2267{
2268 struct cm_id_private *cm_id_priv;
2269 struct ib_mad_send_buf *msg;
2270 struct ib_send_wr *bad_send_wr;
2271 unsigned long flags;
2272 int ret;
2273
2274 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2275 return -EINVAL;
2276
2277 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2278 spin_lock_irqsave(&cm_id_priv->lock, flags);
2279 if (cm_id->state != IB_CM_ESTABLISHED ||
2280 cm_id->lap_state != IB_CM_LAP_IDLE) {
2281 ret = -EINVAL;
2282 goto out;
2283 }
2284
2285 ret = cm_alloc_msg(cm_id_priv, &msg);
2286 if (ret)
2287 goto out;
2288
2289 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2290 alternate_path, private_data, private_data_len);
2291 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
2292 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2293
2294 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2295 &msg->send_wr, &bad_send_wr);
2296 if (ret) {
2297 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2298 cm_free_msg(msg);
2299 return ret;
2300 }
2301
2302 cm_id->lap_state = IB_CM_LAP_SENT;
2303 cm_id_priv->msg = msg;
2304
2305out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2306 return ret;
2307}
2308EXPORT_SYMBOL(ib_send_cm_lap);
2309
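/*
 * Build the alternate path record from the receiver's point of view:
 * the sender's "local" LID/GID in the LAP message is our remote end,
 * so the local and remote fields are deliberately swapped below.
 */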
2310static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2311 struct cm_lap_msg *lap_msg)
2312{
2313 memset(path, 0, sizeof *path);
2314 path->dgid = lap_msg->alt_local_gid;
2315 path->sgid = lap_msg->alt_remote_gid;
2316 path->dlid = lap_msg->alt_local_lid;
2317 path->slid = lap_msg->alt_remote_lid;
2318 path->flow_label = cm_lap_get_flow_label(lap_msg);
2319 path->hop_limit = lap_msg->alt_hop_limit;
2320 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2321 path->reversible = 1;
2322 /* pkey is same as in REQ */
2323 path->sl = cm_lap_get_sl(lap_msg);
2324 path->mtu_selector = IB_SA_EQ;
2325 /* mtu is same as in REQ */
2326 path->rate_selector = IB_SA_EQ;
2327 path->rate = cm_lap_get_packet_rate(lap_msg);
2328 path->packet_life_time_selector = IB_SA_EQ;
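	/* the LAP carried packet life time + 1 as the ACK timeout; undo the +1 */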
2329 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2330 path->packet_life_time -= (path->packet_life_time > 0);
2331}
2332
2333static int cm_lap_handler(struct cm_work *work)
2334{
2335 struct cm_id_private *cm_id_priv;
2336 struct cm_lap_msg *lap_msg;
2337 struct ib_cm_lap_event_param *param;
2338 struct ib_mad_send_buf *msg = NULL;
2339 struct ib_send_wr *bad_send_wr;
2340 unsigned long flags;
2341 int ret;
2342
2343 /* todo: verify LAP request and send reject APR if invalid. */
2344 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2345 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2346 lap_msg->local_comm_id);
2347 if (!cm_id_priv)
2348 return -EINVAL;
2349
2350 param = &work->cm_event.param.lap_rcvd;
2351 param->alternate_path = &work->path[0];
2352 cm_format_path_from_lap(param->alternate_path, lap_msg);
2353 work->cm_event.private_data = &lap_msg->private_data;
2354
2355 spin_lock_irqsave(&cm_id_priv->lock, flags);
2356 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2357 goto unlock;
2358
2359 switch (cm_id_priv->id.lap_state) {
2360 case IB_CM_LAP_IDLE:
2361 break;
2362 case IB_CM_MRA_LAP_SENT:
2363 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2364 goto unlock;
2365
2366 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2367 CM_MSG_RESPONSE_OTHER,
2368 cm_id_priv->service_timeout,
2369 cm_id_priv->private_data,
2370 cm_id_priv->private_data_len);
2371 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2372
2373 if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2374 &msg->send_wr, &bad_send_wr))
2375 cm_free_msg(msg);
2376 goto deref;
2377 default:
2378 goto unlock;
2379 }
2380
2381 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2382 cm_id_priv->tid = lap_msg->hdr.tid;
2383 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2384 if (!ret)
2385 list_add_tail(&work->list, &cm_id_priv->work_list);
2386 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2387
2388 if (ret)
2389 cm_process_work(cm_id_priv, work);
2390 else
2391 cm_deref_id(cm_id_priv);
2392 return 0;
2393
2394unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2395deref: cm_deref_id(cm_id_priv);
2396 return -EINVAL;
2397}
2398
2399static void cm_format_apr(struct cm_apr_msg *apr_msg,
2400 struct cm_id_private *cm_id_priv,
2401 enum ib_cm_apr_status status,
2402 void *info,
2403 u8 info_length,
2404 const void *private_data,
2405 u8 private_data_len)
2406{
2407 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2408 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2409 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2410 apr_msg->ap_status = (u8) status;
2411
2412 if (info && info_length) {
2413 apr_msg->info_length = info_length;
2414 memcpy(apr_msg->info, info, info_length);
2415 }
2416
2417 if (private_data && private_data_len)
2418 memcpy(apr_msg->private_data, private_data, private_data_len);
2419}
2420
2421int ib_send_cm_apr(struct ib_cm_id *cm_id,
2422 enum ib_cm_apr_status status,
2423 void *info,
2424 u8 info_length,
2425 const void *private_data,
2426 u8 private_data_len)
2427{
2428 struct cm_id_private *cm_id_priv;
2429 struct ib_mad_send_buf *msg;
2430 struct ib_send_wr *bad_send_wr;
2431 unsigned long flags;
2432 int ret;
2433
2434 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2435 (info && info_length > IB_CM_APR_INFO_LENGTH))
2436 return -EINVAL;
2437
2438 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2439 spin_lock_irqsave(&cm_id_priv->lock, flags);
2440 if (cm_id->state != IB_CM_ESTABLISHED ||
2441 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2442 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2443 ret = -EINVAL;
2444 goto out;
2445 }
2446
2447 ret = cm_alloc_msg(cm_id_priv, &msg);
2448 if (ret)
2449 goto out;
2450
2451 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2452 info, info_length, private_data, private_data_len);
2453 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2454 &msg->send_wr, &bad_send_wr);
2455 if (ret) {
2456 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2457 cm_free_msg(msg);
2458 return ret;
2459 }
2460
2461 cm_id->lap_state = IB_CM_LAP_IDLE;
2462out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2463 return ret;
2464}
2465EXPORT_SYMBOL(ib_send_cm_apr);
2466
2467static int cm_apr_handler(struct cm_work *work)
2468{
2469 struct cm_id_private *cm_id_priv;
2470 struct cm_apr_msg *apr_msg;
2471 unsigned long flags;
2472 int ret;
2473
2474 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2475 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2476 apr_msg->local_comm_id);
2477 if (!cm_id_priv)
2478 return -EINVAL; /* Unmatched reply. */
2479
2480 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2481 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2482 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2483 work->cm_event.private_data = &apr_msg->private_data;
2484
2485 spin_lock_irqsave(&cm_id_priv->lock, flags);
2486 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2487 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2488 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2489 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2490 goto out;
2491 }
2492 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2493 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2494 (unsigned long) cm_id_priv->msg);
2495 cm_id_priv->msg = NULL;
2496
2497 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2498 if (!ret)
2499 list_add_tail(&work->list, &cm_id_priv->work_list);
2500 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2501
2502 if (ret)
2503 cm_process_work(cm_id_priv, work);
2504 else
2505 cm_deref_id(cm_id_priv);
2506 return 0;
2507out:
2508 cm_deref_id(cm_id_priv);
2509 return -EINVAL;
2510}
2511
2512static int cm_timewait_handler(struct cm_work *work)
2513{
2514 struct cm_timewait_info *timewait_info;
2515 struct cm_id_private *cm_id_priv;
2516 unsigned long flags;
2517 int ret;
2518
2519 timewait_info = (struct cm_timewait_info *)work;
2520 cm_cleanup_timewait(timewait_info);
2521
2522 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2523 timewait_info->work.remote_id);
2524 if (!cm_id_priv)
2525 return -EINVAL;
2526
2527 spin_lock_irqsave(&cm_id_priv->lock, flags);
2528 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2529 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2530 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2531 goto out;
2532 }
2533 cm_id_priv->id.state = IB_CM_IDLE;
2534 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2535 if (!ret)
2536 list_add_tail(&work->list, &cm_id_priv->work_list);
2537 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2538
2539 if (ret)
2540 cm_process_work(cm_id_priv, work);
2541 else
2542 cm_deref_id(cm_id_priv);
2543 return 0;
2544out:
2545 cm_deref_id(cm_id_priv);
2546 return -EINVAL;
2547}
2548
2549static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2550 struct cm_id_private *cm_id_priv,
2551 struct ib_cm_sidr_req_param *param)
2552{
2553 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2554 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2555 sidr_req_msg->request_id = cm_id_priv->id.local_id;
2556 sidr_req_msg->pkey = param->pkey;
2557 sidr_req_msg->service_id = param->service_id;
2558
2559 if (param->private_data && param->private_data_len)
2560 memcpy(sidr_req_msg->private_data, param->private_data,
2561 param->private_data_len);
2562}
2563
2564int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2565 struct ib_cm_sidr_req_param *param)
2566{
2567 struct cm_id_private *cm_id_priv;
2568 struct ib_mad_send_buf *msg;
2569 struct ib_send_wr *bad_send_wr;
2570 unsigned long flags;
2571 int ret;
2572
2573 if (!param->path || (param->private_data &&
2574 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2575 return -EINVAL;
2576
2577 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2578 ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2579 if (ret)
2580 goto out;
2581
2582 cm_id->service_id = param->service_id;
2583 cm_id->service_mask = ~0ULL;
2584 cm_id_priv->timeout_ms = param->timeout_ms;
2585 cm_id_priv->max_cm_retries = param->max_cm_retries;
2586 ret = cm_alloc_msg(cm_id_priv, &msg);
2587 if (ret)
2588 goto out;
2589
2590 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2591 param);
2592 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
2593 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2594
2595 spin_lock_irqsave(&cm_id_priv->lock, flags);
2596 if (cm_id->state == IB_CM_IDLE)
2597 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2598 &msg->send_wr, &bad_send_wr);
2599 else
2600 ret = -EINVAL;
2601
2602 if (ret) {
2603 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2604 cm_free_msg(msg);
2605 goto out;
2606 }
2607 cm_id->state = IB_CM_SIDR_REQ_SENT;
2608 cm_id_priv->msg = msg;
2609 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2610out:
2611 return ret;
2612}
2613EXPORT_SYMBOL(ib_send_cm_sidr_req);
2614
2615static void cm_format_sidr_req_event(struct cm_work *work,
2616 struct ib_cm_id *listen_id)
2617{
2618 struct cm_sidr_req_msg *sidr_req_msg;
2619 struct ib_cm_sidr_req_event_param *param;
2620
2621 sidr_req_msg = (struct cm_sidr_req_msg *)
2622 work->mad_recv_wc->recv_buf.mad;
2623 param = &work->cm_event.param.sidr_req_rcvd;
2624 param->pkey = sidr_req_msg->pkey;
2625 param->listen_id = listen_id;
2626 param->device = work->port->mad_agent->device;
2627 param->port = work->port->port_num;
2628 work->cm_event.private_data = &sidr_req_msg->private_data;
2629}
2630
2631static int cm_sidr_req_handler(struct cm_work *work)
2632{
2633 struct ib_cm_id *cm_id;
2634 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2635 struct cm_sidr_req_msg *sidr_req_msg;
2636 struct ib_wc *wc;
2637 unsigned long flags;
2638
2639 cm_id = ib_create_cm_id(NULL, NULL);
2640 if (IS_ERR(cm_id))
2641 return PTR_ERR(cm_id);
2642 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2643
2644 /* Record SGID/SLID and request ID for lookup. */
2645 sidr_req_msg = (struct cm_sidr_req_msg *)
2646 work->mad_recv_wc->recv_buf.mad;
2647 wc = work->mad_recv_wc->wc;
2648 cm_id_priv->av.dgid.global.subnet_prefix = wc->slid;
2649 cm_id_priv->av.dgid.global.interface_id = 0;
2650 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2651 &cm_id_priv->av);
2652 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2653 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2654 cm_id_priv->tid = sidr_req_msg->hdr.tid;
2655 atomic_inc(&cm_id_priv->work_count);
2656
2657 spin_lock_irqsave(&cm.lock, flags);
2658 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2659 if (cur_cm_id_priv) {
2660 spin_unlock_irqrestore(&cm.lock, flags);
2661 goto out; /* Duplicate message. */
2662 }
2663 cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
2664 if (!cur_cm_id_priv) {
2665 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2666 spin_unlock_irqrestore(&cm.lock, flags);
2667 /* todo: reply with no match */
2668 goto out; /* No match. */
2669 }
2670 atomic_inc(&cur_cm_id_priv->refcount);
2671 spin_unlock_irqrestore(&cm.lock, flags);
2672
2673 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2674 cm_id_priv->id.context = cur_cm_id_priv->id.context;
2675 cm_id_priv->id.service_id = sidr_req_msg->service_id;
2676 cm_id_priv->id.service_mask = ~0ULL;
2677
2678 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2679 cm_process_work(cm_id_priv, work);
2680 cm_deref_id(cur_cm_id_priv);
2681 return 0;
2682out:
2683 ib_destroy_cm_id(&cm_id_priv->id);
2684 return -EINVAL;
2685}
2686
2687static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2688 struct cm_id_private *cm_id_priv,
2689 struct ib_cm_sidr_rep_param *param)
2690{
2691 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2692 cm_id_priv->tid);
2693 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2694 sidr_rep_msg->status = param->status;
2695 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2696 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2697 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2698
2699 if (param->info && param->info_length)
2700 memcpy(sidr_rep_msg->info, param->info, param->info_length);
2701
2702 if (param->private_data && param->private_data_len)
2703 memcpy(sidr_rep_msg->private_data, param->private_data,
2704 param->private_data_len);
2705}
2706
2707int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2708 struct ib_cm_sidr_rep_param *param)
2709{
2710 struct cm_id_private *cm_id_priv;
2711 struct ib_mad_send_buf *msg;
2712 struct ib_send_wr *bad_send_wr;
2713 unsigned long flags;
2714 int ret;
2715
2716 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2717 (param->private_data &&
2718 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2719 return -EINVAL;
2720
2721 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2722 spin_lock_irqsave(&cm_id_priv->lock, flags);
2723 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2724 ret = -EINVAL;
2725 goto error;
2726 }
2727
2728 ret = cm_alloc_msg(cm_id_priv, &msg);
2729 if (ret)
2730 goto error;
2731
2732 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2733 param);
2734 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2735 &msg->send_wr, &bad_send_wr);
2736 if (ret) {
2737 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2738 cm_free_msg(msg);
2739 return ret;
2740 }
2741 cm_id->state = IB_CM_IDLE;
2742 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2743
2744 spin_lock_irqsave(&cm.lock, flags);
2745 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2746 spin_unlock_irqrestore(&cm.lock, flags);
2747 return 0;
2748
2749error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2750 return ret;
2751}
2752EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2753
2754static void cm_format_sidr_rep_event(struct cm_work *work)
2755{
2756 struct cm_sidr_rep_msg *sidr_rep_msg;
2757 struct ib_cm_sidr_rep_event_param *param;
2758
2759 sidr_rep_msg = (struct cm_sidr_rep_msg *)
2760 work->mad_recv_wc->recv_buf.mad;
2761 param = &work->cm_event.param.sidr_rep_rcvd;
2762 param->status = sidr_rep_msg->status;
2763 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2764 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2765 param->info = &sidr_rep_msg->info;
2766 param->info_len = sidr_rep_msg->info_length;
2767 work->cm_event.private_data = &sidr_rep_msg->private_data;
2768}
2769
2770static int cm_sidr_rep_handler(struct cm_work *work)
2771{
2772 struct cm_sidr_rep_msg *sidr_rep_msg;
2773 struct cm_id_private *cm_id_priv;
2774 unsigned long flags;
2775
2776 sidr_rep_msg = (struct cm_sidr_rep_msg *)
2777 work->mad_recv_wc->recv_buf.mad;
2778 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2779 if (!cm_id_priv)
2780 return -EINVAL; /* Unmatched reply. */
2781
2782 spin_lock_irqsave(&cm_id_priv->lock, flags);
2783 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2784 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2785 goto out;
2786 }
2787 cm_id_priv->id.state = IB_CM_IDLE;
2788 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2789 (unsigned long) cm_id_priv->msg);
2790 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2791
2792 cm_format_sidr_rep_event(work);
2793 cm_process_work(cm_id_priv, work);
2794 return 0;
2795out:
2796 cm_deref_id(cm_id_priv);
2797 return -EINVAL;
2798}
2799
2800static void cm_process_send_error(struct ib_mad_send_buf *msg,
2801 enum ib_wc_status wc_status)
2802{
2803 struct cm_id_private *cm_id_priv;
2804 struct ib_cm_event cm_event;
2805 enum ib_cm_state state;
2806 unsigned long flags;
2807 int ret;
2808
2809 memset(&cm_event, 0, sizeof cm_event);
2810 cm_id_priv = msg->context[0];
2811
2812 /* Discard old sends or ones without a response. */
2813 spin_lock_irqsave(&cm_id_priv->lock, flags);
2814 state = (enum ib_cm_state) (unsigned long) msg->context[1];
2815 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2816 goto discard;
2817
2818 switch (state) {
2819 case IB_CM_REQ_SENT:
2820 case IB_CM_MRA_REQ_RCVD:
2821 cm_reset_to_idle(cm_id_priv);
2822 cm_event.event = IB_CM_REQ_ERROR;
2823 break;
2824 case IB_CM_REP_SENT:
2825 case IB_CM_MRA_REP_RCVD:
2826 cm_reset_to_idle(cm_id_priv);
2827 cm_event.event = IB_CM_REP_ERROR;
2828 break;
2829 case IB_CM_DREQ_SENT:
2830 cm_enter_timewait(cm_id_priv);
2831 cm_event.event = IB_CM_DREQ_ERROR;
2832 break;
2833 case IB_CM_SIDR_REQ_SENT:
2834 cm_id_priv->id.state = IB_CM_IDLE;
2835 cm_event.event = IB_CM_SIDR_REQ_ERROR;
2836 break;
2837 default:
2838 goto discard;
2839 }
2840 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2841 cm_event.param.send_status = wc_status;
2842
2843 /* No other events can occur on the cm_id at this point. */
2844 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2845 cm_free_msg(msg);
2846 if (ret)
2847 ib_destroy_cm_id(&cm_id_priv->id);
2848 return;
2849discard:
2850 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2851 cm_free_msg(msg);
2852}
2853
2854static void cm_send_handler(struct ib_mad_agent *mad_agent,
2855 struct ib_mad_send_wc *mad_send_wc)
2856{
2857 struct ib_mad_send_buf *msg;
2858
2859 msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;
2860
2861 switch (mad_send_wc->status) {
2862 case IB_WC_SUCCESS:
2863 case IB_WC_WR_FLUSH_ERR:
2864 cm_free_msg(msg);
2865 break;
2866 default:
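		/* context[0] holds the owning cm_id, context[1] the state at send time */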
2867 if (msg->context[0] && msg->context[1])
2868 cm_process_send_error(msg, mad_send_wc->status);
2869 else
2870 cm_free_msg(msg);
2871 break;
2872 }
2873}
2874
2875static void cm_work_handler(void *data)
2876{
2877 struct cm_work *work = data;
2878 int ret;
2879
2880 switch (work->cm_event.event) {
2881 case IB_CM_REQ_RECEIVED:
2882 ret = cm_req_handler(work);
2883 break;
2884 case IB_CM_MRA_RECEIVED:
2885 ret = cm_mra_handler(work);
2886 break;
2887 case IB_CM_REJ_RECEIVED:
2888 ret = cm_rej_handler(work);
2889 break;
2890 case IB_CM_REP_RECEIVED:
2891 ret = cm_rep_handler(work);
2892 break;
2893 case IB_CM_RTU_RECEIVED:
2894 ret = cm_rtu_handler(work);
2895 break;
2896 case IB_CM_USER_ESTABLISHED:
2897 ret = cm_establish_handler(work);
2898 break;
2899 case IB_CM_DREQ_RECEIVED:
2900 ret = cm_dreq_handler(work);
2901 break;
2902 case IB_CM_DREP_RECEIVED:
2903 ret = cm_drep_handler(work);
2904 break;
2905 case IB_CM_SIDR_REQ_RECEIVED:
2906 ret = cm_sidr_req_handler(work);
2907 break;
2908 case IB_CM_SIDR_REP_RECEIVED:
2909 ret = cm_sidr_rep_handler(work);
2910 break;
2911 case IB_CM_LAP_RECEIVED:
2912 ret = cm_lap_handler(work);
2913 break;
2914 case IB_CM_APR_RECEIVED:
2915 ret = cm_apr_handler(work);
2916 break;
2917 case IB_CM_TIMEWAIT_EXIT:
2918 ret = cm_timewait_handler(work);
2919 break;
2920 default:
2921 ret = -EINVAL;
2922 break;
2923 }
2924 if (ret)
2925 cm_free_work(work);
2926}
2927
2928int ib_cm_establish(struct ib_cm_id *cm_id)
2929{
2930 struct cm_id_private *cm_id_priv;
2931 struct cm_work *work;
2932 unsigned long flags;
2933 int ret = 0;
2934
2935 work = kmalloc(sizeof *work, GFP_ATOMIC);
2936 if (!work)
2937 return -ENOMEM;
2938
2939 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2940 spin_lock_irqsave(&cm_id_priv->lock, flags);
2941 switch (cm_id->state)
2942 {
2943 case IB_CM_REP_SENT:
2944 case IB_CM_MRA_REP_RCVD:
2945 cm_id->state = IB_CM_ESTABLISHED;
2946 break;
2947 case IB_CM_ESTABLISHED:
2948 ret = -EISCONN;
2949 break;
2950 default:
2951 ret = -EINVAL;
2952 break;
2953 }
2954 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2955
2956 if (ret) {
2957 kfree(work);
2958 goto out;
2959 }
2960
2961 /*
2962 * The CM worker thread may try to destroy the cm_id before it
2963 * can execute this work item. To prevent potential deadlock,
2964 * we need to find the cm_id once we're in the context of the
2965 * worker thread, rather than holding a reference on it.
2966 */
2967 INIT_WORK(&work->work, cm_work_handler, work);
2968 work->local_id = cm_id->local_id;
2969 work->remote_id = cm_id->remote_id;
2970 work->mad_recv_wc = NULL;
2971 work->cm_event.event = IB_CM_USER_ESTABLISHED;
2972 queue_work(cm.wq, &work->work);
2973out:
2974 return ret;
2975}
2976EXPORT_SYMBOL(ib_cm_establish);
2977
2978static void cm_recv_handler(struct ib_mad_agent *mad_agent,
2979 struct ib_mad_recv_wc *mad_recv_wc)
2980{
2981 struct cm_work *work;
2982 enum ib_cm_event_type event;
2983 int paths = 0;
2984
2985 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
2986 case CM_REQ_ATTR_ID:
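		/* a REQ always has a primary path; alt_local_lid != 0 adds an alternate */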
2987 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
2988 alt_local_lid != 0);
2989 event = IB_CM_REQ_RECEIVED;
2990 break;
2991 case CM_MRA_ATTR_ID:
2992 event = IB_CM_MRA_RECEIVED;
2993 break;
2994 case CM_REJ_ATTR_ID:
2995 event = IB_CM_REJ_RECEIVED;
2996 break;
2997 case CM_REP_ATTR_ID:
2998 event = IB_CM_REP_RECEIVED;
2999 break;
3000 case CM_RTU_ATTR_ID:
3001 event = IB_CM_RTU_RECEIVED;
3002 break;
3003 case CM_DREQ_ATTR_ID:
3004 event = IB_CM_DREQ_RECEIVED;
3005 break;
3006 case CM_DREP_ATTR_ID:
3007 event = IB_CM_DREP_RECEIVED;
3008 break;
3009 case CM_SIDR_REQ_ATTR_ID:
3010 event = IB_CM_SIDR_REQ_RECEIVED;
3011 break;
3012 case CM_SIDR_REP_ATTR_ID:
3013 event = IB_CM_SIDR_REP_RECEIVED;
3014 break;
3015 case CM_LAP_ATTR_ID:
3016 paths = 1;
3017 event = IB_CM_LAP_RECEIVED;
3018 break;
3019 case CM_APR_ATTR_ID:
3020 event = IB_CM_APR_RECEIVED;
3021 break;
3022 default:
3023 ib_free_recv_mad(mad_recv_wc);
3024 return;
3025 }
3026
3027 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3028 GFP_KERNEL);
3029 if (!work) {
3030 ib_free_recv_mad(mad_recv_wc);
3031 return;
3032 }
3033
3034 INIT_WORK(&work->work, cm_work_handler, work);
3035 work->cm_event.event = event;
3036 work->mad_recv_wc = mad_recv_wc;
3037 work->port = (struct cm_port *)mad_agent->context;
3038 queue_work(cm.wq, &work->work);
3039}
3040
3041static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3042 struct ib_qp_attr *qp_attr,
3043 int *qp_attr_mask)
3044{
3045 unsigned long flags;
3046 int ret;
3047
3048 spin_lock_irqsave(&cm_id_priv->lock, flags);
3049 switch (cm_id_priv->id.state) {
3050 case IB_CM_REQ_SENT:
3051 case IB_CM_MRA_REQ_RCVD:
3052 case IB_CM_REQ_RCVD:
3053 case IB_CM_MRA_REQ_SENT:
3054 case IB_CM_REP_RCVD:
3055 case IB_CM_MRA_REP_SENT:
3056 case IB_CM_REP_SENT:
3057 case IB_CM_MRA_REP_RCVD:
3058 case IB_CM_ESTABLISHED:
3059 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3060 IB_QP_PKEY_INDEX | IB_QP_PORT;
3061 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
3062 if (cm_id_priv->responder_resources)
3063 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE |
3064 IB_ACCESS_REMOTE_READ;
3065 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3066 qp_attr->port_num = cm_id_priv->av.port->port_num;
3067 ret = 0;
3068 break;
3069 default:
3070 ret = -EINVAL;
3071 break;
3072 }
3073 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3074 return ret;
3075}
3076
3077static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3078 struct ib_qp_attr *qp_attr,
3079 int *qp_attr_mask)
3080{
3081 unsigned long flags;
3082 int ret;
3083
3084 spin_lock_irqsave(&cm_id_priv->lock, flags);
3085 switch (cm_id_priv->id.state) {
3086 case IB_CM_REQ_RCVD:
3087 case IB_CM_MRA_REQ_SENT:
3088 case IB_CM_REP_RCVD:
3089 case IB_CM_MRA_REP_SENT:
3090 case IB_CM_REP_SENT:
3091 case IB_CM_MRA_REP_RCVD:
3092 case IB_CM_ESTABLISHED:
3093 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3094 IB_QP_DEST_QPN | IB_QP_RQ_PSN |
3095 IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
3096 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3097 qp_attr->path_mtu = cm_id_priv->path_mtu;
3098 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3099 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3100 qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources;
3101 qp_attr->min_rnr_timer = 0;
3102 if (cm_id_priv->alt_av.ah_attr.dlid) {
3103 *qp_attr_mask |= IB_QP_ALT_PATH;
3104 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3105 }
3106 ret = 0;
3107 break;
3108 default:
3109 ret = -EINVAL;
3110 break;
3111 }
3112 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3113 return ret;
3114}
3115
3116static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3117 struct ib_qp_attr *qp_attr,
3118 int *qp_attr_mask)
3119{
3120 unsigned long flags;
3121 int ret;
3122
3123 spin_lock_irqsave(&cm_id_priv->lock, flags);
3124 switch (cm_id_priv->id.state) {
3125 case IB_CM_REP_RCVD:
3126 case IB_CM_MRA_REP_SENT:
3127 case IB_CM_REP_SENT:
3128 case IB_CM_MRA_REP_RCVD:
3129 case IB_CM_ESTABLISHED:
3130 *qp_attr_mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3131 IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
3132 IB_QP_MAX_QP_RD_ATOMIC;
3133 qp_attr->timeout = cm_id_priv->local_ack_timeout;
3134 qp_attr->retry_cnt = cm_id_priv->retry_count;
3135 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3136 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3137 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3138 if (cm_id_priv->alt_av.ah_attr.dlid) {
3139 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3140 qp_attr->path_mig_state = IB_MIG_REARM;
3141 }
3142 ret = 0;
3143 break;
3144 default:
3145 ret = -EINVAL;
3146 break;
3147 }
3148 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3149 return ret;
3150}
3151
3152int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3153 struct ib_qp_attr *qp_attr,
3154 int *qp_attr_mask)
3155{
3156 struct cm_id_private *cm_id_priv;
3157 int ret;
3158
3159 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3160 switch (qp_attr->qp_state) {
3161 case IB_QPS_INIT:
3162 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3163 break;
3164 case IB_QPS_RTR:
3165 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3166 break;
3167 case IB_QPS_RTS:
3168 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3169 break;
3170 default:
3171 ret = -EINVAL;
3172 break;
3173 }
3174 return ret;
3175}
3176EXPORT_SYMBOL(ib_cm_init_qp_attr);
3177
3178static u64 cm_get_ca_guid(struct ib_device *device)
3179{
3180 struct ib_device_attr *device_attr;
3181 u64 guid;
3182 int ret;
3183
3184 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
3185 if (!device_attr)
3186 return 0;
3187
3188 ret = ib_query_device(device, device_attr);
3189 guid = ret ? 0 : device_attr->node_guid;
3190 kfree(device_attr);
3191 return guid;
3192}
3193
3194static void cm_add_one(struct ib_device *device)
3195{
3196 struct cm_device *cm_dev;
3197 struct cm_port *port;
3198 struct ib_mad_reg_req reg_req = {
3199 .mgmt_class = IB_MGMT_CLASS_CM,
3200 .mgmt_class_version = IB_CM_CLASS_VERSION
3201 };
3202 struct ib_port_modify port_modify = {
3203 .set_port_cap_mask = IB_PORT_CM_SUP
3204 };
3205 unsigned long flags;
3206 int ret;
3207 u8 i;
3208
3209 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3210 device->phys_port_cnt, GFP_KERNEL);
3211 if (!cm_dev)
3212 return;
3213
3214 cm_dev->device = device;
3215 cm_dev->ca_guid = cm_get_ca_guid(device);
3216 if (!cm_dev->ca_guid)
3217 goto error1;
3218
3219 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3220 for (i = 1; i <= device->phys_port_cnt; i++) {
3221 port = &cm_dev->port[i-1];
3222 port->cm_dev = cm_dev;
3223 port->port_num = i;
3224 port->mad_agent = ib_register_mad_agent(device, i,
3225 IB_QPT_GSI,
3226 &reg_req,
3227 0,
3228 cm_send_handler,
3229 cm_recv_handler,
3230 port);
3231 if (IS_ERR(port->mad_agent))
3232 goto error2;
3233
3234 ret = ib_modify_port(device, i, 0, &port_modify);
3235 if (ret)
3236 goto error3;
3237 }
3238 ib_set_client_data(device, &cm_client, cm_dev);
3239
3240 write_lock_irqsave(&cm.device_lock, flags);
3241 list_add_tail(&cm_dev->list, &cm.device_list);
3242 write_unlock_irqrestore(&cm.device_lock, flags);
3243 return;
3244
3245error3:
3246 ib_unregister_mad_agent(port->mad_agent);
3247error2:
3248 port_modify.set_port_cap_mask = 0;
3249 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3250 while (--i) {
3251 port = &cm_dev->port[i-1];
3252 ib_modify_port(device, port->port_num, 0, &port_modify);
3253 ib_unregister_mad_agent(port->mad_agent);
3254 }
3255error1:
3256 kfree(cm_dev);
3257}
3258
3259static void cm_remove_one(struct ib_device *device)
3260{
3261 struct cm_device *cm_dev;
3262 struct cm_port *port;
3263 struct ib_port_modify port_modify = {
3264 .clr_port_cap_mask = IB_PORT_CM_SUP
3265 };
3266 unsigned long flags;
3267 int i;
3268
3269 cm_dev = ib_get_client_data(device, &cm_client);
3270 if (!cm_dev)
3271 return;
3272
3273 write_lock_irqsave(&cm.device_lock, flags);
3274 list_del(&cm_dev->list);
3275 write_unlock_irqrestore(&cm.device_lock, flags);
3276
3277 for (i = 1; i <= device->phys_port_cnt; i++) {
3278 port = &cm_dev->port[i-1];
3279 ib_modify_port(device, port->port_num, 0, &port_modify);
3280 ib_unregister_mad_agent(port->mad_agent);
3281 }
3282 kfree(cm_dev);
3283}
3284
3285static int __init ib_cm_init(void)
3286{
3287 int ret;
3288
3289 memset(&cm, 0, sizeof cm);
3290 INIT_LIST_HEAD(&cm.device_list);
3291 rwlock_init(&cm.device_lock);
3292 spin_lock_init(&cm.lock);
3293 cm.listen_service_table = RB_ROOT;
3294 cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3295 cm.remote_id_table = RB_ROOT;
3296 cm.remote_qp_table = RB_ROOT;
3297 cm.remote_sidr_table = RB_ROOT;
3298 idr_init(&cm.local_id_table);
3299 idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3300
3301 cm.wq = create_workqueue("ib_cm");
3302 if (!cm.wq)
3303 return -ENOMEM;
3304
3305 ret = ib_register_client(&cm_client);
3306 if (ret)
3307 goto error;
3308
3309 return 0;
3310error:
3311 destroy_workqueue(cm.wq);
3312 return ret;
3313}
3314
3315static void __exit ib_cm_cleanup(void)
3316{
3317 flush_workqueue(cm.wq);
3318 destroy_workqueue(cm.wq);
3319 ib_unregister_client(&cm_client);
3320}
3321
3322module_init(ib_cm_init);
3323module_exit(ib_cm_cleanup);
3324
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
new file mode 100644
index 000000000000..15a309a77b2b
--- /dev/null
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -0,0 +1,819 @@
1/*
2 * Copyright (c) 2004 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34#if !defined(CM_MSGS_H)
35#define CM_MSGS_H
36
37#include <ib_mad.h>
38
39/*
40 * Parameters to routines below should be in network-byte order, and values
41 * are returned in network-byte order.
42 */
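/*
 * A minimal usage sketch (hypothetical caller): values cross this
 * boundary already byte-swapped, e.g.
 *
 *	cm_req_set_local_qpn(req_msg, cpu_to_be32(qpn));
 *	qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
 */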
43
44#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
45
46enum cm_msg_attr_id {
47 CM_REQ_ATTR_ID = __constant_htons(0x0010),
48 CM_MRA_ATTR_ID = __constant_htons(0x0011),
49 CM_REJ_ATTR_ID = __constant_htons(0x0012),
50 CM_REP_ATTR_ID = __constant_htons(0x0013),
51 CM_RTU_ATTR_ID = __constant_htons(0x0014),
52 CM_DREQ_ATTR_ID = __constant_htons(0x0015),
53 CM_DREP_ATTR_ID = __constant_htons(0x0016),
54 CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017),
55 CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018),
56 CM_LAP_ATTR_ID = __constant_htons(0x0019),
57 CM_APR_ATTR_ID = __constant_htons(0x001A)
58};
59
60enum cm_msg_sequence {
61 CM_MSG_SEQUENCE_REQ,
62 CM_MSG_SEQUENCE_LAP,
63 CM_MSG_SEQUENCE_DREQ,
64 CM_MSG_SEQUENCE_SIDR
65};
66
67struct cm_req_msg {
68 struct ib_mad_hdr hdr;
69
70 u32 local_comm_id;
71 u32 rsvd4;
72 u64 service_id;
73 u64 local_ca_guid;
74 u32 rsvd24;
75 u32 local_qkey;
76 /* local QPN:24, responder resources:8 */
77 u32 offset32;
78 /* local EECN:24, initiator depth:8 */
79 u32 offset36;
80 /*
81 * remote EECN:24, remote CM response timeout:5,
82 * transport service type:2, end-to-end flow control:1
83 */
84 u32 offset40;
85 /* starting PSN:24, local CM response timeout:5, retry count:3 */
86 u32 offset44;
87 u16 pkey;
88 /* path MTU:4, RDC exists:1, RNR retry count:3. */
89 u8 offset50;
90 /* max CM Retries:4, SRQ:1, rsvd:3 */
91 u8 offset51;
92
93 u16 primary_local_lid;
94 u16 primary_remote_lid;
95 union ib_gid primary_local_gid;
96 union ib_gid primary_remote_gid;
97 /* flow label:20, rsvd:6, packet rate:6 */
98 u32 primary_offset88;
99 u8 primary_traffic_class;
100 u8 primary_hop_limit;
101 /* SL:4, subnet local:1, rsvd:3 */
102 u8 primary_offset94;
103 /* local ACK timeout:5, rsvd:3 */
104 u8 primary_offset95;
105
106 u16 alt_local_lid;
107 u16 alt_remote_lid;
108 union ib_gid alt_local_gid;
109 union ib_gid alt_remote_gid;
110 /* flow label:20, rsvd:6, packet rate:6 */
111 u32 alt_offset132;
112 u8 alt_traffic_class;
113 u8 alt_hop_limit;
114 /* SL:4, subnet local:1, rsvd:3 */
115 u8 alt_offset138;
116 /* local ACK timeout:5, rsvd:3 */
117 u8 alt_offset139;
118
119 u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
120
121} __attribute__ ((packed));
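/*
 * The struct above mirrors the CM REQ wire format byte for byte
 * (hence "packed"); each offsetNN field packs several spec fields
 * and is only manipulated through the accessors that follow.
 */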
122
123static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
124{
125 return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
126}
127
128static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn)
129{
130 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
131 (be32_to_cpu(req_msg->offset32) &
132 0x000000FF));
133}
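/*
 * Worked example (assumed values): with QPN 0x000102 and responder
 * resources 8, offset32 holds host value 0x00010208 -- the QPN in
 * the top 24 bits and resp_res in the low byte, stored big-endian
 * as wire bytes 00 01 02 08.
 */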
134
135static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
136{
137 return (u8) be32_to_cpu(req_msg->offset32);
138}
139
140static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
141{
142 req_msg->offset32 = cpu_to_be32(resp_res |
143 (be32_to_cpu(req_msg->offset32) &
144 0xFFFFFF00));
145}
146
147static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
148{
149 return (u8) be32_to_cpu(req_msg->offset36);
150}
151
152static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
153 u8 init_depth)
154{
155 req_msg->offset36 = cpu_to_be32(init_depth |
156 (be32_to_cpu(req_msg->offset36) &
157 0xFFFFFF00));
158}
159
160static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
161{
162 return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
163}
164
165static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
166 u8 resp_timeout)
167{
168 req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
169 (be32_to_cpu(req_msg->offset40) &
170 0xFFFFFF07));
171}
172
173static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
174{
175 u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
176	switch (transport_type) {
177 case 0: return IB_QPT_RC;
178 case 1: return IB_QPT_UC;
179 default: return 0;
180 }
181}
182
183static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
184 enum ib_qp_type qp_type)
185{
186	switch (qp_type) {
187	case IB_QPT_UC:
188		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
189			req_msg->offset40) & 0xFFFFFFF9) | 0x2);
190		break;
191	default:
192		req_msg->offset40 = cpu_to_be32(
193			be32_to_cpu(req_msg->offset40) & 0xFFFFFFF9);
194		break;
195 }
196}
197
198static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
199{
200 return be32_to_cpu(req_msg->offset40) & 0x1;
201}
202
203static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
204 u8 flow_ctrl)
205{
206 req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
207 (be32_to_cpu(req_msg->offset40) &
208 0xFFFFFFFE));
209}
210
211static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
212{
213 return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
214}
215
216static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
217 u32 starting_psn)
218{
219 req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
220 (be32_to_cpu(req_msg->offset44) & 0x000000FF));
221}
222
223static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
224{
225 return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
226}
227
228static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
229 u8 resp_timeout)
230{
231 req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
232 (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
233}
234
235static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
236{
237 return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
238}
239
240static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
241 u8 retry_count)
242{
243 req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
244 (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
245}
246
247static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
248{
249 return req_msg->offset50 >> 4;
250}
251
252static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
253{
254 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
255}
256
257static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
258{
259 return req_msg->offset50 & 0x7;
260}
261
262static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
263 u8 rnr_retry_count)
264{
265 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
266 (rnr_retry_count & 0x7));
267}
268
269static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
270{
271 return req_msg->offset51 >> 4;
272}
273
274static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
275 u8 retries)
276{
277 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
278}
279
280static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
281{
282 return (req_msg->offset51 & 0x8) >> 3;
283}
284
285static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
286{
287 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
288 ((srq & 0x1) << 3));
289}
290
291static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
292{
293 return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12));
294}
295
296static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
297 u32 flow_label)
298{
299 req_msg->primary_offset88 = cpu_to_be32(
300 (be32_to_cpu(req_msg->primary_offset88) &
301 0x00000FFF) |
302 (be32_to_cpu(flow_label) << 12));
303}
304
305static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
306{
307 return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
308}
309
310static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
311 u8 rate)
312{
313 req_msg->primary_offset88 = cpu_to_be32(
314 (be32_to_cpu(req_msg->primary_offset88) &
315 0xFFFFFFC0) | (rate & 0x3F));
316}
317
318static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
319{
320 return (u8) (req_msg->primary_offset94 >> 4);
321}
322
323static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
324{
325 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
326 (sl << 4));
327}
328
329static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
330{
331 return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
332}
333
334static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
335 u8 subnet_local)
336{
337 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
338 ((subnet_local & 0x1) << 3));
339}
340
341static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
342{
343 return (u8) (req_msg->primary_offset95 >> 3);
344}
345
346static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
347 u8 local_ack_timeout)
348{
349 req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
350 (local_ack_timeout << 3));
351}
352
353static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
354{
355 return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12));
356}
357
358static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
359 u32 flow_label)
360{
361 req_msg->alt_offset132 = cpu_to_be32(
362 (be32_to_cpu(req_msg->alt_offset132) &
363 0x00000FFF) |
364 (be32_to_cpu(flow_label) << 12));
365}
366
367static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
368{
369 return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
370}
371
372static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
373 u8 rate)
374{
375 req_msg->alt_offset132 = cpu_to_be32(
376 (be32_to_cpu(req_msg->alt_offset132) &
377 0xFFFFFFC0) | (rate & 0x3F));
378}
379
380static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
381{
382 return (u8) (req_msg->alt_offset138 >> 4);
383}
384
385static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
386{
387 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
388 (sl << 4));
389}
390
391static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
392{
393 return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
394}
395
396static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
397 u8 subnet_local)
398{
399 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
400 ((subnet_local & 0x1) << 3));
401}
402
403static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
404{
405 return (u8) (req_msg->alt_offset139 >> 3);
406}
407
408static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
409 u8 local_ack_timeout)
410{
411 req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
412 (local_ack_timeout << 3));
413}
414
415/* Message REJected or MRAed */
416enum cm_msg_response {
417 CM_MSG_RESPONSE_REQ = 0x0,
418 CM_MSG_RESPONSE_REP = 0x1,
419 CM_MSG_RESPONSE_OTHER = 0x2
420};
421
422struct cm_mra_msg {
423 struct ib_mad_hdr hdr;
424
425 u32 local_comm_id;
426 u32 remote_comm_id;
427 /* message MRAed:2, rsvd:6 */
428 u8 offset8;
429 /* service timeout:5, rsvd:3 */
430 u8 offset9;
431
432 u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
433
434} __attribute__ ((packed));
435
436static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
437{
438 return (u8) (mra_msg->offset8 >> 6);
439}
440
441static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
442{
443 mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
444}
445
446static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
447{
448 return (u8) (mra_msg->offset9 >> 3);
449}
450
451static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
452 u8 service_timeout)
453{
454 mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
455 (service_timeout << 3));
456}
457
458struct cm_rej_msg {
459 struct ib_mad_hdr hdr;
460
461 u32 local_comm_id;
462 u32 remote_comm_id;
463 /* message REJected:2, rsvd:6 */
464 u8 offset8;
465 /* reject info length:7, rsvd:1. */
466 u8 offset9;
467 u16 reason;
468 u8 ari[IB_CM_REJ_ARI_LENGTH];
469
470 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
471
472} __attribute__ ((packed));
473
474static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
475{
476 return (u8) (rej_msg->offset8 >> 6);
477}
478
479static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
480{
481 rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
482}
483
484static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
485{
486 return (u8) (rej_msg->offset9 >> 1);
487}
488
489static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
490 u8 len)
491{
492 rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
493}
494
495struct cm_rep_msg {
496 struct ib_mad_hdr hdr;
497
498 u32 local_comm_id;
499 u32 remote_comm_id;
500 u32 local_qkey;
501 /* local QPN:24, rsvd:8 */
502 u32 offset12;
503 /* local EECN:24, rsvd:8 */
504 u32 offset16;
505 /* starting PSN:24 rsvd:8 */
506 u32 offset20;
507 u8 resp_resources;
508 u8 initiator_depth;
509 /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
510 u8 offset26;
511 /* RNR retry count:3, SRQ:1, rsvd:5 */
512 u8 offset27;
513 u64 local_ca_guid;
514
515 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
516
517} __attribute__ ((packed));
518
519static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
520{
521 return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
522}
523
524static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn)
525{
526 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
527 (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
528}
529
530static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
531{
532 return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
533}
534
535static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
536 u32 starting_psn)
537{
538 rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
539 (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
540}
541
542static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
543{
544 return (u8) (rep_msg->offset26 >> 3);
545}
546
547static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
548 u8 target_ack_delay)
549{
550 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
551 (target_ack_delay << 3));
552}
553
554static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
555{
556 return (u8) ((rep_msg->offset26 & 0x06) >> 1);
557}
558
559static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
560{
561 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
562 ((failover & 0x3) << 1));
563}
564
565static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
566{
567 return (u8) (rep_msg->offset26 & 0x01);
568}
569
570static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
571 u8 flow_ctrl)
572{
573 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
574 (flow_ctrl & 0x1));
575}
576
577static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
578{
579 return (u8) (rep_msg->offset27 >> 5);
580}
581
582static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
583 u8 rnr_retry_count)
584{
585 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
586 (rnr_retry_count << 5));
587}
588
589static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
590{
591 return (u8) ((rep_msg->offset27 >> 4) & 0x1);
592}
593
594static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
595{
596 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
597 ((srq & 0x1) << 4));
598}
599
600struct cm_rtu_msg {
601 struct ib_mad_hdr hdr;
602
603 u32 local_comm_id;
604 u32 remote_comm_id;
605
606 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
607
608} __attribute__ ((packed));
609
610struct cm_dreq_msg {
611 struct ib_mad_hdr hdr;
612
613 u32 local_comm_id;
614 u32 remote_comm_id;
615 /* remote QPN/EECN:24, rsvd:8 */
616 u32 offset8;
617
618 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
619
620} __attribute__ ((packed));
621
622static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
623{
624 return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
625}
626
627static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
628{
629 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
630 (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
631}
632
633struct cm_drep_msg {
634 struct ib_mad_hdr hdr;
635
636 u32 local_comm_id;
637 u32 remote_comm_id;
638
639 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
640
641} __attribute__ ((packed));
642
643struct cm_lap_msg {
644 struct ib_mad_hdr hdr;
645
646 u32 local_comm_id;
647 u32 remote_comm_id;
648
649 u32 rsvd8;
650 /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
651 u32 offset12;
652 u32 rsvd16;
653
654 u16 alt_local_lid;
655 u16 alt_remote_lid;
656 union ib_gid alt_local_gid;
657 union ib_gid alt_remote_gid;
658 /* flow label:20, rsvd:4, traffic class:8 */
659 u32 offset56;
660 u8 alt_hop_limit;
661 /* rsvd:2, packet rate:6 */
662	u8 offset61;
663	/* SL:4, subnet local:1, rsvd:3 */
664	u8 offset62;
665	/* local ACK timeout:5, rsvd:3 */
666	u8 offset63;
667
668 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
669} __attribute__ ((packed));
670
671static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
672{
673 return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
674}
675
676static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn)
677{
678 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
679 (be32_to_cpu(lap_msg->offset12) &
680 0x000000FF));
681}
682
683static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
684{
685 return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
686}
687
688static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
689 u8 resp_timeout)
690{
691 lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
692 (be32_to_cpu(lap_msg->offset12) &
693 0xFFFFFF07));
694}
695
696static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
697{
698 return be32_to_cpu(lap_msg->offset56) >> 12;
699}
700
701static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
702 u32 flow_label)
703{
704 lap_msg->offset56 = cpu_to_be32((flow_label << 12) |
705 (be32_to_cpu(lap_msg->offset56) &
706 0x00000FFF));
707}
708
709static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
710{
711 return (u8) be32_to_cpu(lap_msg->offset56);
712}
713
714static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
715 u8 traffic_class)
716{
717 lap_msg->offset56 = cpu_to_be32(traffic_class |
718 (be32_to_cpu(lap_msg->offset56) &
719 0xFFFFFF00));
720}
721
722static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
723{
724 return lap_msg->offset61 & 0x3F;
725}
726
727static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
728 u8 packet_rate)
729{
730 lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
731}
732
733static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
734{
735 return lap_msg->offset62 >> 4;
736}
737
738static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
739{
740 lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
741}
742
743static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
744{
745 return (lap_msg->offset62 >> 3) & 0x1;
746}
747
748static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
749 u8 subnet_local)
750{
751 lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
752 (lap_msg->offset62 & 0xF7);
753}
754static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
755{
756 return lap_msg->offset63 >> 3;
757}
758
759static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
760 u8 local_ack_timeout)
761{
762 lap_msg->offset63 = (local_ack_timeout << 3) |
763 (lap_msg->offset63 & 0x07);
764}
765
766struct cm_apr_msg {
767 struct ib_mad_hdr hdr;
768
769 u32 local_comm_id;
770 u32 remote_comm_id;
771
772 u8 info_length;
773 u8 ap_status;
774 u8 info[IB_CM_APR_INFO_LENGTH];
775
776 u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
777} __attribute__ ((packed));
778
779struct cm_sidr_req_msg {
780 struct ib_mad_hdr hdr;
781
782 u32 request_id;
783 u16 pkey;
784 u16 rsvd;
785 u64 service_id;
786
787 u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
788} __attribute__ ((packed));
789
790struct cm_sidr_rep_msg {
791 struct ib_mad_hdr hdr;
792
793 u32 request_id;
794 u8 status;
795 u8 info_length;
796 u16 rsvd;
797 /* QPN:24, rsvd:8 */
798 u32 offset8;
799 u64 service_id;
800 u32 qkey;
801 u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
802
803 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
804} __attribute__ ((packed));
805
806static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
807{
808 return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
809}
810
811static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
812 u32 qpn)
813{
814 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
815 (be32_to_cpu(sidr_rep_msg->offset8) &
816 0x000000FF));
817}
818
819#endif /* CM_MSGS_H */
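
Every offsetN helper in this header follows the same pattern: several protocol fields share one big-endian 32-bit word, and each accessor shifts its field into place while masking to preserve the neighbouring bits. A minimal userspace sketch of that pattern (illustrative only: htonl/ntohl stand in for cpu_to_be32/be32_to_cpu, and unlike the CM code, which keeps QPNs in network order end to end, the QPN here is a plain host-order value):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl/ntohl in place of cpu_to_be32/be32_to_cpu */

/* Pack a 24-bit QPN into the high bits of a big-endian word while
 * preserving the low reserved byte, mirroring the "QPN:24, rsvd:8"
 * layout of offset8 above. */
static uint32_t set_qpn(uint32_t word_be, uint32_t qpn)
{
	return htonl((qpn << 8) | (ntohl(word_be) & 0x000000FF));
}

static uint32_t get_qpn(uint32_t word_be)
{
	return ntohl(word_be) >> 8;
}

int main(void)
{
	uint32_t word = htonl(0x000000AB);	/* reserved byte pre-set */

	word = set_qpn(word, 0x123456);
	printf("qpn=0x%06x rsvd=0x%02x\n",
	       (unsigned) get_qpn(word), (unsigned) (ntohl(word) & 0xFF));
	return 0;
}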
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 328feae2a5be..7763b31abba7 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +30,7 @@
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE. 31 * SOFTWARE.
31 * 32 *
32 * $Id: fmr_pool.c 1349 2004-12-16 21:09:43Z roland $ 33 * $Id: fmr_pool.c 2730 2005-06-28 16:43:03Z sean.hefty $
33 */ 34 */
34 35
35#include <linux/errno.h> 36#include <linux/errno.h>
@@ -329,7 +330,7 @@ EXPORT_SYMBOL(ib_create_fmr_pool);
329 * 330 *
330 * Destroy an FMR pool and free all associated resources. 331 * Destroy an FMR pool and free all associated resources.
331 */ 332 */
332int ib_destroy_fmr_pool(struct ib_fmr_pool *pool) 333void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
333{ 334{
334 struct ib_pool_fmr *fmr; 335 struct ib_pool_fmr *fmr;
335 struct ib_pool_fmr *tmp; 336 struct ib_pool_fmr *tmp;
@@ -352,8 +353,6 @@ int ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
352 353
353 kfree(pool->cache_bucket); 354 kfree(pool->cache_bucket);
354 kfree(pool); 355 kfree(pool);
355
356 return 0;
357} 356}
358EXPORT_SYMBOL(ib_destroy_fmr_pool); 357EXPORT_SYMBOL(ib_destroy_fmr_pool);
359 358
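
The fmr_pool.c hunk changes ib_destroy_fmr_pool() from int to void: teardown cannot fail, so the fixed zero return only invited dead error handling in callers. A standalone sketch of the caller-visible effect (all names hypothetical, not the fmr_pool API):

#include <stdlib.h>

/* Hypothetical caller of a destroy routine whose int return was dropped:
 * teardown cannot fail, so there is no status to check or propagate. */
struct pool { void *cache; };

static void destroy_pool(struct pool *p)	/* previously: int destroy_pool() */
{
	free(p->cache);
	free(p);	/* nothing in here can fail, hence the void return */
}

int main(void)
{
	struct pool *p = malloc(sizeof(*p));

	if (p) {
		p->cache = malloc(16);
		destroy_pool(p);	/* no more "if (destroy_pool(p)) ..." */
	}
	return 0;
}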
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 23628c622a50..b97e210ce9c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,12 +31,12 @@
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE. 32 * SOFTWARE.
31 * 33 *
32 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $ 34 * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
33 */ 35 */
34
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
36 37
37#include "mad_priv.h" 38#include "mad_priv.h"
39#include "mad_rmpp.h"
38#include "smi.h" 40#include "smi.h"
39#include "agent.h" 41#include "agent.h"
40 42
@@ -45,6 +47,7 @@ MODULE_AUTHOR("Sean Hefty");
45 47
46 48
47kmem_cache_t *ib_mad_cache; 49kmem_cache_t *ib_mad_cache;
50
48static struct list_head ib_mad_port_list; 51static struct list_head ib_mad_port_list;
49static u32 ib_mad_client_id = 0; 52static u32 ib_mad_client_id = 0;
50 53
@@ -58,16 +61,12 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
58static void remove_mad_reg_req(struct ib_mad_agent_private *priv); 61static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
59static struct ib_mad_agent_private *find_mad_agent( 62static struct ib_mad_agent_private *find_mad_agent(
60 struct ib_mad_port_private *port_priv, 63 struct ib_mad_port_private *port_priv,
61 struct ib_mad *mad, int solicited); 64 struct ib_mad *mad);
62static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 65static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
63 struct ib_mad_private *mad); 66 struct ib_mad_private *mad);
64static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); 67static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
65static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
66 struct ib_mad_send_wc *mad_send_wc);
67static void timeout_sends(void *data); 68static void timeout_sends(void *data);
68static void cancel_sends(void *data);
69static void local_completions(void *data); 69static void local_completions(void *data);
70static int solicited_mad(struct ib_mad *mad);
71static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 70static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
72 struct ib_mad_agent_private *agent_priv, 71 struct ib_mad_agent_private *agent_priv,
73 u8 mgmt_class); 72 u8 mgmt_class);
@@ -197,8 +196,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
197 if (qpn == -1) 196 if (qpn == -1)
198 goto error1; 197 goto error1;
199 198
200 if (rmpp_version) 199 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
201 goto error1; /* XXX: until RMPP implemented */ 200 goto error1;
202 201
203 /* Validate MAD registration request if supplied */ 202 /* Validate MAD registration request if supplied */
204 if (mad_reg_req) { 203 if (mad_reg_req) {
@@ -261,22 +260,29 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
261 ret = ERR_PTR(-ENOMEM); 260 ret = ERR_PTR(-ENOMEM);
262 goto error1; 261 goto error1;
263 } 262 }
263 memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
264
265 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
266 IB_ACCESS_LOCAL_WRITE);
267 if (IS_ERR(mad_agent_priv->agent.mr)) {
268 ret = ERR_PTR(-ENOMEM);
269 goto error2;
270 }
264 271
265 if (mad_reg_req) { 272 if (mad_reg_req) {
266 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL); 273 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
267 if (!reg_req) { 274 if (!reg_req) {
268 ret = ERR_PTR(-ENOMEM); 275 ret = ERR_PTR(-ENOMEM);
269 goto error2; 276 goto error3;
270 } 277 }
271 /* Make a copy of the MAD registration request */ 278 /* Make a copy of the MAD registration request */
272 memcpy(reg_req, mad_reg_req, sizeof *reg_req); 279 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
273 } 280 }
274 281
275 /* Now, fill in the various structures */ 282 /* Now, fill in the various structures */
276 memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
277 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; 283 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
278 mad_agent_priv->reg_req = reg_req; 284 mad_agent_priv->reg_req = reg_req;
279 mad_agent_priv->rmpp_version = rmpp_version; 285 mad_agent_priv->agent.rmpp_version = rmpp_version;
280 mad_agent_priv->agent.device = device; 286 mad_agent_priv->agent.device = device;
281 mad_agent_priv->agent.recv_handler = recv_handler; 287 mad_agent_priv->agent.recv_handler = recv_handler;
282 mad_agent_priv->agent.send_handler = send_handler; 288 mad_agent_priv->agent.send_handler = send_handler;
@@ -301,7 +307,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
301 if (method) { 307 if (method) {
302 if (method_in_use(&method, 308 if (method_in_use(&method,
303 mad_reg_req)) 309 mad_reg_req))
304 goto error3; 310 goto error4;
305 } 311 }
306 } 312 }
307 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, 313 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -317,14 +323,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
317 if (is_vendor_method_in_use( 323 if (is_vendor_method_in_use(
318 vendor_class, 324 vendor_class,
319 mad_reg_req)) 325 mad_reg_req))
320 goto error3; 326 goto error4;
321 } 327 }
322 } 328 }
323 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); 329 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
324 } 330 }
325 if (ret2) { 331 if (ret2) {
326 ret = ERR_PTR(ret2); 332 ret = ERR_PTR(ret2);
327 goto error3; 333 goto error4;
328 } 334 }
329 } 335 }
330 336
@@ -335,22 +341,24 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
335 spin_lock_init(&mad_agent_priv->lock); 341 spin_lock_init(&mad_agent_priv->lock);
336 INIT_LIST_HEAD(&mad_agent_priv->send_list); 342 INIT_LIST_HEAD(&mad_agent_priv->send_list);
337 INIT_LIST_HEAD(&mad_agent_priv->wait_list); 343 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
344 INIT_LIST_HEAD(&mad_agent_priv->done_list);
345 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
338 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); 346 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
339 INIT_LIST_HEAD(&mad_agent_priv->local_list); 347 INIT_LIST_HEAD(&mad_agent_priv->local_list);
340 INIT_WORK(&mad_agent_priv->local_work, local_completions, 348 INIT_WORK(&mad_agent_priv->local_work, local_completions,
341 mad_agent_priv); 349 mad_agent_priv);
342 INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
343 INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
344 atomic_set(&mad_agent_priv->refcount, 1); 350 atomic_set(&mad_agent_priv->refcount, 1);
345 init_waitqueue_head(&mad_agent_priv->wait); 351 init_waitqueue_head(&mad_agent_priv->wait);
346 352
347 return &mad_agent_priv->agent; 353 return &mad_agent_priv->agent;
348 354
349error3: 355error4:
350 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 356 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
351 kfree(reg_req); 357 kfree(reg_req);
352error2: 358error3:
353 kfree(mad_agent_priv); 359 ib_dereg_mr(mad_agent_priv->agent.mr);
360error2:
361 kfree(mad_agent_priv);
354error1: 362error1:
355 return ret; 363 return ret;
356} 364}
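
The reworked error path keeps the usual kernel goto-ladder shape: each label releases only what was successfully acquired before the failing step, in reverse order of acquisition, so the MR is deregistered before the structure holding it is freed. A self-contained sketch of the same shape, with plain malloc/free standing in for the kernel verbs (names illustrative):

#include <stdlib.h>

struct agent { void *mr; };	/* stands in for ib_mad_agent_private */

/* Same unwind shape as error1..error4 above: a jump to any label frees
 * exactly what was live when that step failed, so every failure path
 * is leak-free. Illustrative only. */
static struct agent *setup(void)
{
	struct agent *a;
	void *req;

	a = malloc(sizeof(*a));
	if (!a)
		goto err_nothing;

	a->mr = malloc(64);		/* stands in for ib_get_dma_mr() */
	if (!a->mr)
		goto err_agent;

	req = malloc(32);		/* stands in for the reg_req copy */
	if (!req)
		goto err_mr;
	free(req);			/* not kept in this sketch */
	return a;

err_mr:
	free(a->mr);			/* last acquired, first released */
err_agent:
	free(a);
err_nothing:
	return NULL;
}

int main(void)
{
	struct agent *a = setup();

	if (a) {
		free(a->mr);
		free(a);
	}
	return 0;
}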
@@ -487,18 +495,16 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
487 * MADs, preventing us from queuing additional work 495 * MADs, preventing us from queuing additional work
488 */ 496 */
489 cancel_mads(mad_agent_priv); 497 cancel_mads(mad_agent_priv);
490
491 port_priv = mad_agent_priv->qp_info->port_priv; 498 port_priv = mad_agent_priv->qp_info->port_priv;
492
493 cancel_delayed_work(&mad_agent_priv->timed_work); 499 cancel_delayed_work(&mad_agent_priv->timed_work);
494 flush_workqueue(port_priv->wq);
495 500
496 spin_lock_irqsave(&port_priv->reg_lock, flags); 501 spin_lock_irqsave(&port_priv->reg_lock, flags);
497 remove_mad_reg_req(mad_agent_priv); 502 remove_mad_reg_req(mad_agent_priv);
498 list_del(&mad_agent_priv->agent_list); 503 list_del(&mad_agent_priv->agent_list);
499 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 504 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
500 505
501 /* XXX: Cleanup pending RMPP receives for this agent */ 506 flush_workqueue(port_priv->wq);
507 ib_cancel_rmpp_recvs(mad_agent_priv);
502 508
503 atomic_dec(&mad_agent_priv->refcount); 509 atomic_dec(&mad_agent_priv->refcount);
504 wait_event(mad_agent_priv->wait, 510 wait_event(mad_agent_priv->wait,
@@ -506,6 +512,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
506 512
507 if (mad_agent_priv->reg_req) 513 if (mad_agent_priv->reg_req)
508 kfree(mad_agent_priv->reg_req); 514 kfree(mad_agent_priv->reg_req);
515 ib_dereg_mr(mad_agent_priv->agent.mr);
509 kfree(mad_agent_priv); 516 kfree(mad_agent_priv);
510} 517}
511 518
@@ -551,6 +558,13 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
551} 558}
552EXPORT_SYMBOL(ib_unregister_mad_agent); 559EXPORT_SYMBOL(ib_unregister_mad_agent);
553 560
561static inline int response_mad(struct ib_mad *mad)
562{
563 /* Trap represses are responses although response bit is reset */
564 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
565 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
566}
567
554static void dequeue_mad(struct ib_mad_list_head *mad_list) 568static void dequeue_mad(struct ib_mad_list_head *mad_list)
555{ 569{
556 struct ib_mad_queue *mad_queue; 570 struct ib_mad_queue *mad_queue;
@@ -643,7 +657,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
643 struct ib_smp *smp, 657 struct ib_smp *smp,
644 struct ib_send_wr *send_wr) 658 struct ib_send_wr *send_wr)
645{ 659{
646 int ret, solicited; 660 int ret;
647 unsigned long flags; 661 unsigned long flags;
648 struct ib_mad_local_private *local; 662 struct ib_mad_local_private *local;
649 struct ib_mad_private *mad_priv; 663 struct ib_mad_private *mad_priv;
@@ -689,11 +703,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
689 switch (ret) 703 switch (ret)
690 { 704 {
691 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: 705 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
692 /* 706 if (response_mad(&mad_priv->mad.mad) &&
693 * See if response is solicited and
694 * there is a recv handler
695 */
696 if (solicited_mad(&mad_priv->mad.mad) &&
697 mad_agent_priv->agent.recv_handler) { 707 mad_agent_priv->agent.recv_handler) {
698 local->mad_priv = mad_priv; 708 local->mad_priv = mad_priv;
699 local->recv_mad_agent = mad_agent_priv; 709 local->recv_mad_agent = mad_agent_priv;
@@ -710,15 +720,13 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
710 break; 720 break;
711 case IB_MAD_RESULT_SUCCESS: 721 case IB_MAD_RESULT_SUCCESS:
712 /* Treat like an incoming receive MAD */ 722 /* Treat like an incoming receive MAD */
713 solicited = solicited_mad(&mad_priv->mad.mad);
714 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, 723 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
715 mad_agent_priv->agent.port_num); 724 mad_agent_priv->agent.port_num);
716 if (port_priv) { 725 if (port_priv) {
717 mad_priv->mad.mad.mad_hdr.tid = 726 mad_priv->mad.mad.mad_hdr.tid =
718 ((struct ib_mad *)smp)->mad_hdr.tid; 727 ((struct ib_mad *)smp)->mad_hdr.tid;
719 recv_mad_agent = find_mad_agent(port_priv, 728 recv_mad_agent = find_mad_agent(port_priv,
720 &mad_priv->mad.mad, 729 &mad_priv->mad.mad);
721 solicited);
722 } 730 }
723 if (!port_priv || !recv_mad_agent) { 731 if (!port_priv || !recv_mad_agent) {
724 kmem_cache_free(ib_mad_cache, mad_priv); 732 kmem_cache_free(ib_mad_cache, mad_priv);
@@ -750,43 +758,133 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
750 list_add_tail(&local->completion_list, &mad_agent_priv->local_list); 758 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
751 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 759 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
752 queue_work(mad_agent_priv->qp_info->port_priv->wq, 760 queue_work(mad_agent_priv->qp_info->port_priv->wq,
753 &mad_agent_priv->local_work); 761 &mad_agent_priv->local_work);
754 ret = 1; 762 ret = 1;
755out: 763out:
756 return ret; 764 return ret;
757} 765}
758 766
759static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv, 767static int get_buf_length(int hdr_len, int data_len)
760 struct ib_mad_send_wr_private *mad_send_wr) 768{
769 int seg_size, pad;
770
771 seg_size = sizeof(struct ib_mad) - hdr_len;
772 if (data_len && seg_size) {
773 pad = seg_size - data_len % seg_size;
774 if (pad == seg_size)
775 pad = 0;
776 } else
777 pad = seg_size;
778 return hdr_len + data_len + pad;
779}
780
781struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
782 u32 remote_qpn, u16 pkey_index,
783 struct ib_ah *ah, int rmpp_active,
784 int hdr_len, int data_len,
785 unsigned int __nocast gfp_mask)
786{
787 struct ib_mad_agent_private *mad_agent_priv;
788 struct ib_mad_send_buf *send_buf;
789 int buf_size;
790 void *buf;
791
792 mad_agent_priv = container_of(mad_agent,
793 struct ib_mad_agent_private, agent);
794 buf_size = get_buf_length(hdr_len, data_len);
795
796 if ((!mad_agent->rmpp_version &&
797 (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
798 (!rmpp_active && buf_size > sizeof(struct ib_mad)))
799 return ERR_PTR(-EINVAL);
800
801 buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
802 if (!buf)
803 return ERR_PTR(-ENOMEM);
804 memset(buf, 0, sizeof *send_buf + buf_size);
805
806 send_buf = buf + buf_size;
807 send_buf->mad = buf;
808
809 send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
810 buf, buf_size, DMA_TO_DEVICE);
811 pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
812 send_buf->sge.length = buf_size;
813 send_buf->sge.lkey = mad_agent->mr->lkey;
814
815 send_buf->send_wr.wr_id = (unsigned long) send_buf;
816 send_buf->send_wr.sg_list = &send_buf->sge;
817 send_buf->send_wr.num_sge = 1;
818 send_buf->send_wr.opcode = IB_WR_SEND;
819 send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
820 send_buf->send_wr.wr.ud.ah = ah;
821 send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
822 send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
823 send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
824 send_buf->send_wr.wr.ud.pkey_index = pkey_index;
825
826 if (rmpp_active) {
827 struct ib_rmpp_mad *rmpp_mad;
828 rmpp_mad = (struct ib_rmpp_mad *)send_buf->mad;
829 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
830 offsetof(struct ib_rmpp_mad, data) + data_len);
831 rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
832 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
833 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
834 IB_MGMT_RMPP_FLAG_ACTIVE);
835 }
836
837 send_buf->mad_agent = mad_agent;
838 atomic_inc(&mad_agent_priv->refcount);
839 return send_buf;
840}
841EXPORT_SYMBOL(ib_create_send_mad);
842
843void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
844{
845 struct ib_mad_agent_private *mad_agent_priv;
846
847 mad_agent_priv = container_of(send_buf->mad_agent,
848 struct ib_mad_agent_private, agent);
849
850 dma_unmap_single(send_buf->mad_agent->device->dma_device,
851 pci_unmap_addr(send_buf, mapping),
852 send_buf->sge.length, DMA_TO_DEVICE);
853 kfree(send_buf->mad);
854
855 if (atomic_dec_and_test(&mad_agent_priv->refcount))
856 wake_up(&mad_agent_priv->wait);
857}
858EXPORT_SYMBOL(ib_free_send_mad);
859
860int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
761{ 861{
762 struct ib_mad_qp_info *qp_info; 862 struct ib_mad_qp_info *qp_info;
763 struct ib_send_wr *bad_send_wr; 863 struct ib_send_wr *bad_send_wr;
864 struct list_head *list;
764 unsigned long flags; 865 unsigned long flags;
765 int ret; 866 int ret;
766 867
767 /* Replace user's WR ID with our own to find WR upon completion */ 868 /* Set WR ID to find mad_send_wr upon completion */
768 qp_info = mad_agent_priv->qp_info; 869 qp_info = mad_send_wr->mad_agent_priv->qp_info;
769 mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
770 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; 870 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
771 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; 871 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
772 872
773 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 873 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
774 if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) { 874 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
775 list_add_tail(&mad_send_wr->mad_list.list, 875 ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
776 &qp_info->send_queue.list);
777 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
778 ret = ib_post_send(mad_agent_priv->agent.qp,
779 &mad_send_wr->send_wr, &bad_send_wr); 876 &mad_send_wr->send_wr, &bad_send_wr);
780 if (ret) { 877 list = &qp_info->send_queue.list;
781 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
782 dequeue_mad(&mad_send_wr->mad_list);
783 }
784 } else { 878 } else {
785 list_add_tail(&mad_send_wr->mad_list.list,
786 &qp_info->overflow_list);
787 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
788 ret = 0; 879 ret = 0;
880 list = &qp_info->overflow_list;
789 } 881 }
882
883 if (!ret) {
884 qp_info->send_queue.count++;
885 list_add_tail(&mad_send_wr->mad_list.list, list);
886 }
887 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
790 return ret; 888 return ret;
791} 889}
792 890
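
get_buf_length() rounds the payload up to a whole number of per-segment data areas: each 256-byte MAD carries sizeof(struct ib_mad) - hdr_len bytes of data, and the final segment is padded out. A standalone rendering of the same arithmetic with made-up header/data sizes:

#include <stdio.h>

#define MAD_SIZE 256	/* sizeof(struct ib_mad): one MAD on the wire */

/* The same arithmetic as get_buf_length() above, standalone. */
static int buf_length(int hdr_len, int data_len)
{
	int seg_size = MAD_SIZE - hdr_len;	/* data room per segment */
	int pad;

	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		if (pad == seg_size)		/* exact multiple: no pad */
			pad = 0;
	} else
		pad = seg_size;
	return hdr_len + data_len + pad;
}

int main(void)
{
	/* A made-up 36-byte header leaves 220 data bytes per segment;
	 * 500 bytes of data is 2 full segments + 60, so 160 pad bytes. */
	printf("%d\n", buf_length(36, 500));	/* 36 + 500 + 160 = 696 */
	return 0;
}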
@@ -860,18 +958,19 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent,
860 ret = -ENOMEM; 958 ret = -ENOMEM;
861 goto error2; 959 goto error2;
862 } 960 }
961 memset(mad_send_wr, 0, sizeof *mad_send_wr);
863 962
864 mad_send_wr->send_wr = *send_wr; 963 mad_send_wr->send_wr = *send_wr;
865 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; 964 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
866 memcpy(mad_send_wr->sg_list, send_wr->sg_list, 965 memcpy(mad_send_wr->sg_list, send_wr->sg_list,
867 sizeof *send_wr->sg_list * send_wr->num_sge); 966 sizeof *send_wr->sg_list * send_wr->num_sge);
868 mad_send_wr->send_wr.next = NULL; 967 mad_send_wr->wr_id = send_wr->wr_id;
869 mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid; 968 mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
870 mad_send_wr->agent = mad_agent; 969 mad_send_wr->mad_agent_priv = mad_agent_priv;
871 /* Timeout will be updated after send completes */ 970 /* Timeout will be updated after send completes */
872 mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr. 971 mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
873 ud.timeout_ms); 972 ud.timeout_ms);
874 mad_send_wr->retry = 0; 973 mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
875 /* One reference for each work request to QP + response */ 974 /* One reference for each work request to QP + response */
876 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); 975 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
877 mad_send_wr->status = IB_WC_SUCCESS; 976 mad_send_wr->status = IB_WC_SUCCESS;
@@ -883,8 +982,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent,
883 &mad_agent_priv->send_list); 982 &mad_agent_priv->send_list);
884 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 983 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
885 984
886 ret = ib_send_mad(mad_agent_priv, mad_send_wr); 985 if (mad_agent_priv->agent.rmpp_version) {
887 if (ret) { 986 ret = ib_send_rmpp_mad(mad_send_wr);
987 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
988 ret = ib_send_mad(mad_send_wr);
989 } else
990 ret = ib_send_mad(mad_send_wr);
991 if (ret < 0) {
888 /* Fail send request */ 992 /* Fail send request */
889 spin_lock_irqsave(&mad_agent_priv->lock, flags); 993 spin_lock_irqsave(&mad_agent_priv->lock, flags);
890 list_del(&mad_send_wr->agent_list); 994 list_del(&mad_send_wr->agent_list);
@@ -910,41 +1014,28 @@ EXPORT_SYMBOL(ib_post_send_mad);
910 */ 1014 */
911void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) 1015void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
912{ 1016{
913 struct ib_mad_recv_buf *entry; 1017 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
914 struct ib_mad_private_header *mad_priv_hdr; 1018 struct ib_mad_private_header *mad_priv_hdr;
915 struct ib_mad_private *priv; 1019 struct ib_mad_private *priv;
1020 struct list_head free_list;
916 1021
917 mad_priv_hdr = container_of(mad_recv_wc, 1022 INIT_LIST_HEAD(&free_list);
918 struct ib_mad_private_header, 1023 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
919 recv_wc);
920 priv = container_of(mad_priv_hdr, struct ib_mad_private, header);
921 1024
922 /* 1025 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
923 * Walk receive buffer list associated with this WC 1026 &free_list, list) {
924 * No need to remove them from list of receive buffers 1027 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
925 */ 1028 recv_buf);
926 list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
927 /* Free previous receive buffer */
928 kmem_cache_free(ib_mad_cache, priv);
929 mad_priv_hdr = container_of(mad_recv_wc, 1029 mad_priv_hdr = container_of(mad_recv_wc,
930 struct ib_mad_private_header, 1030 struct ib_mad_private_header,
931 recv_wc); 1031 recv_wc);
932 priv = container_of(mad_priv_hdr, struct ib_mad_private, 1032 priv = container_of(mad_priv_hdr, struct ib_mad_private,
933 header); 1033 header);
1034 kmem_cache_free(ib_mad_cache, priv);
934 } 1035 }
935
936 /* Free last buffer */
937 kmem_cache_free(ib_mad_cache, priv);
938} 1036}
939EXPORT_SYMBOL(ib_free_recv_mad); 1037EXPORT_SYMBOL(ib_free_recv_mad);
940 1038
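
ib_free_recv_mad() now splices the RMPP segment list onto a private list and walks it with list_for_each_entry_safe(), which caches the successor before the current container is freed. A minimal plain-C sketch of why the _safe idiom matters when freeing during traversal (illustrative, not the kernel list API):

#include <stdlib.h>

struct buf { struct buf *next; };

/* Minimal analogue of list_for_each_entry_safe(): remember the successor
 * before freeing the current node, otherwise the next iteration step
 * would read memory that was just freed. */
static void free_all(struct buf *head)
{
	struct buf *cur = head, *next;

	while (cur) {
		next = cur->next;	/* the "safe" part */
		free(cur);
		cur = next;
	}
}

int main(void)
{
	struct buf *a = malloc(sizeof(*a));
	struct buf *b = malloc(sizeof(*b));

	if (a && b) {
		a->next = b;
		b->next = NULL;
		free_all(a);
	} else {
		free(a);
		free(b);
	}
	return 0;
}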
941void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
942 void *buf)
943{
944 printk(KERN_ERR PFX "ib_coalesce_recv_mad() not implemented yet\n");
945}
946EXPORT_SYMBOL(ib_coalesce_recv_mad);
947
948struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, 1039struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
949 u8 rmpp_version, 1040 u8 rmpp_version,
950 ib_mad_send_handler send_handler, 1041 ib_mad_send_handler send_handler,
@@ -1338,42 +1429,15 @@ out:
1338 return; 1429 return;
1339} 1430}
1340 1431
1341static int response_mad(struct ib_mad *mad)
1342{
1343 /* Trap represses are responses although response bit is reset */
1344 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
1345 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
1346}
1347
1348static int solicited_mad(struct ib_mad *mad)
1349{
1350 /* CM MADs are never solicited */
1351 if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CM) {
1352 return 0;
1353 }
1354
1355 /* XXX: Determine whether MAD is using RMPP */
1356
1357 /* Not using RMPP */
1358 /* Is this MAD a response to a previous MAD ? */
1359 return response_mad(mad);
1360}
1361
1362static struct ib_mad_agent_private * 1432static struct ib_mad_agent_private *
1363find_mad_agent(struct ib_mad_port_private *port_priv, 1433find_mad_agent(struct ib_mad_port_private *port_priv,
1364 struct ib_mad *mad, 1434 struct ib_mad *mad)
1365 int solicited)
1366{ 1435{
1367 struct ib_mad_agent_private *mad_agent = NULL; 1436 struct ib_mad_agent_private *mad_agent = NULL;
1368 unsigned long flags; 1437 unsigned long flags;
1369 1438
1370 spin_lock_irqsave(&port_priv->reg_lock, flags); 1439 spin_lock_irqsave(&port_priv->reg_lock, flags);
1371 1440 if (response_mad(mad)) {
1372 /*
1373 * Whether MAD was solicited determines type of routing to
1374 * MAD client.
1375 */
1376 if (solicited) {
1377 u32 hi_tid; 1441 u32 hi_tid;
1378 struct ib_mad_agent_private *entry; 1442 struct ib_mad_agent_private *entry;
1379 1443
@@ -1477,21 +1541,20 @@ out:
1477 return valid; 1541 return valid;
1478} 1542}
1479 1543
1480/* 1544static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1481 * Return start of fully reassembled MAD, or NULL, if MAD isn't assembled yet 1545 struct ib_mad_hdr *mad_hdr)
1482 */
1483static struct ib_mad_private *
1484reassemble_recv(struct ib_mad_agent_private *mad_agent_priv,
1485 struct ib_mad_private *recv)
1486{ 1546{
1487 /* Until we have RMPP, all receives are reassembled!... */ 1547 struct ib_rmpp_mad *rmpp_mad;
1488 INIT_LIST_HEAD(&recv->header.recv_wc.recv_buf.list); 1548
1489 return recv; 1549 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1550 return !mad_agent_priv->agent.rmpp_version ||
1551 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1552 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1553 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1490} 1554}
1491 1555
1492static struct ib_mad_send_wr_private* 1556struct ib_mad_send_wr_private*
1493find_send_req(struct ib_mad_agent_private *mad_agent_priv, 1557ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
1494 u64 tid)
1495{ 1558{
1496 struct ib_mad_send_wr_private *mad_send_wr; 1559 struct ib_mad_send_wr_private *mad_send_wr;
1497 1560
@@ -1507,7 +1570,9 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv,
1507 */ 1570 */
1508 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 1571 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1509 agent_list) { 1572 agent_list) {
1510 if (mad_send_wr->tid == tid && mad_send_wr->timeout) { 1573 if (is_data_mad(mad_agent_priv,
1574 mad_send_wr->send_wr.wr.ud.mad_hdr) &&
1575 mad_send_wr->tid == tid && mad_send_wr->timeout) {
1511 /* Verify request has not been canceled */ 1576 /* Verify request has not been canceled */
1512 return (mad_send_wr->status == IB_WC_SUCCESS) ? 1577 return (mad_send_wr->status == IB_WC_SUCCESS) ?
1513 mad_send_wr : NULL; 1578 mad_send_wr : NULL;
@@ -1516,43 +1581,55 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv,
1516 return NULL; 1581 return NULL;
1517} 1582}
1518 1583
1584void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1585{
1586 mad_send_wr->timeout = 0;
1587 if (mad_send_wr->refcount == 1) {
1588 list_del(&mad_send_wr->agent_list);
1589 list_add_tail(&mad_send_wr->agent_list,
1590 &mad_send_wr->mad_agent_priv->done_list);
1591 }
1592}
1593
1519static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, 1594static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1520 struct ib_mad_private *recv, 1595 struct ib_mad_recv_wc *mad_recv_wc)
1521 int solicited)
1522{ 1596{
1523 struct ib_mad_send_wr_private *mad_send_wr; 1597 struct ib_mad_send_wr_private *mad_send_wr;
1524 struct ib_mad_send_wc mad_send_wc; 1598 struct ib_mad_send_wc mad_send_wc;
1525 unsigned long flags; 1599 unsigned long flags;
1526 1600 u64 tid;
1527 /* Fully reassemble receive before processing */ 1601
1528 recv = reassemble_recv(mad_agent_priv, recv); 1602 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1529 if (!recv) { 1603 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1530 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1604 if (mad_agent_priv->agent.rmpp_version) {
1531 wake_up(&mad_agent_priv->wait); 1605 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1532 return; 1606 mad_recv_wc);
1607 if (!mad_recv_wc) {
1608 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1609 wake_up(&mad_agent_priv->wait);
1610 return;
1611 }
1533 } 1612 }
1534 1613
1535 /* Complete corresponding request */ 1614 /* Complete corresponding request */
1536 if (solicited) { 1615 if (response_mad(mad_recv_wc->recv_buf.mad)) {
1616 tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
1537 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1617 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1538 mad_send_wr = find_send_req(mad_agent_priv, 1618 mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
1539 recv->mad.mad.mad_hdr.tid);
1540 if (!mad_send_wr) { 1619 if (!mad_send_wr) {
1541 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1620 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1542 ib_free_recv_mad(&recv->header.recv_wc); 1621 ib_free_recv_mad(mad_recv_wc);
1543 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1622 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1544 wake_up(&mad_agent_priv->wait); 1623 wake_up(&mad_agent_priv->wait);
1545 return; 1624 return;
1546 } 1625 }
1547 /* Timeout = 0 means that we won't wait for a response */ 1626 ib_mark_mad_done(mad_send_wr);
1548 mad_send_wr->timeout = 0;
1549 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1627 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1550 1628
1551 /* Defined behavior is to complete response before request */ 1629 /* Defined behavior is to complete response before request */
1552 recv->header.recv_wc.wc->wr_id = mad_send_wr->wr_id; 1630 mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
1553 mad_agent_priv->agent.recv_handler( 1631 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1554 &mad_agent_priv->agent, 1632 mad_recv_wc);
1555 &recv->header.recv_wc);
1556 atomic_dec(&mad_agent_priv->refcount); 1633 atomic_dec(&mad_agent_priv->refcount);
1557 1634
1558 mad_send_wc.status = IB_WC_SUCCESS; 1635 mad_send_wc.status = IB_WC_SUCCESS;
@@ -1560,9 +1637,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1560 mad_send_wc.wr_id = mad_send_wr->wr_id; 1637 mad_send_wc.wr_id = mad_send_wr->wr_id;
1561 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 1638 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1562 } else { 1639 } else {
1563 mad_agent_priv->agent.recv_handler( 1640 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1564 &mad_agent_priv->agent, 1641 mad_recv_wc);
1565 &recv->header.recv_wc);
1566 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1642 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1567 wake_up(&mad_agent_priv->wait); 1643 wake_up(&mad_agent_priv->wait);
1568 } 1644 }
@@ -1576,7 +1652,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1576 struct ib_mad_private *recv, *response; 1652 struct ib_mad_private *recv, *response;
1577 struct ib_mad_list_head *mad_list; 1653 struct ib_mad_list_head *mad_list;
1578 struct ib_mad_agent_private *mad_agent; 1654 struct ib_mad_agent_private *mad_agent;
1579 int solicited;
1580 1655
1581 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); 1656 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1582 if (!response) 1657 if (!response)
@@ -1662,11 +1737,9 @@ local:
1662 } 1737 }
1663 } 1738 }
1664 1739
1665 /* Determine corresponding MAD agent for incoming receive MAD */ 1740 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1666 solicited = solicited_mad(&recv->mad.mad);
1667 mad_agent = find_mad_agent(port_priv, &recv->mad.mad, solicited);
1668 if (mad_agent) { 1741 if (mad_agent) {
1669 ib_mad_complete_recv(mad_agent, recv, solicited); 1742 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1670 /* 1743 /*
1671 * recv is freed up in error cases in ib_mad_complete_recv 1744 * recv is freed up in error cases in ib_mad_complete_recv
1672 * or via recv_handler in ib_mad_complete_recv() 1745 * or via recv_handler in ib_mad_complete_recv()
@@ -1710,26 +1783,31 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1710 } 1783 }
1711} 1784}
1712 1785
1713static void wait_for_response(struct ib_mad_agent_private *mad_agent_priv, 1786static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1714 struct ib_mad_send_wr_private *mad_send_wr )
1715{ 1787{
1788 struct ib_mad_agent_private *mad_agent_priv;
1716 struct ib_mad_send_wr_private *temp_mad_send_wr; 1789 struct ib_mad_send_wr_private *temp_mad_send_wr;
1717 struct list_head *list_item; 1790 struct list_head *list_item;
1718 unsigned long delay; 1791 unsigned long delay;
1719 1792
1793 mad_agent_priv = mad_send_wr->mad_agent_priv;
1720 list_del(&mad_send_wr->agent_list); 1794 list_del(&mad_send_wr->agent_list);
1721 1795
1722 delay = mad_send_wr->timeout; 1796 delay = mad_send_wr->timeout;
1723 mad_send_wr->timeout += jiffies; 1797 mad_send_wr->timeout += jiffies;
1724 1798
1725 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 1799 if (delay) {
1726 temp_mad_send_wr = list_entry(list_item, 1800 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1727 struct ib_mad_send_wr_private, 1801 temp_mad_send_wr = list_entry(list_item,
1728 agent_list); 1802 struct ib_mad_send_wr_private,
1729 if (time_after(mad_send_wr->timeout, 1803 agent_list);
1730 temp_mad_send_wr->timeout)) 1804 if (time_after(mad_send_wr->timeout,
1731 break; 1805 temp_mad_send_wr->timeout))
1806 break;
1807 }
1732 } 1808 }
1809 else
1810 list_item = &mad_agent_priv->wait_list;
1733 list_add(&mad_send_wr->agent_list, list_item); 1811 list_add(&mad_send_wr->agent_list, list_item);
1734 1812
1735 /* Reschedule a work item if we have a shorter timeout */ 1813 /* Reschedule a work item if we have a shorter timeout */
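
wait_for_response() turns the relative timeout into an absolute deadline and scans the wait list from the tail, since a freshly armed deadline is usually the latest. The kernel compares with time_after() to survive jiffies wrap; this standalone sketch ignores wraparound and uses illustrative values:

#include <stdio.h>

/* Find where a new absolute deadline belongs in a list kept sorted in
 * ascending order, scanning from the tail: the common case (newest
 * deadline is the largest) costs O(1). */
static int insert_pos(const unsigned long *deadline, int n, unsigned long d)
{
	int i;

	for (i = n; i > 0; i--)
		if (deadline[i - 1] <= d)	/* first entry not after d */
			break;
	return i;
}

int main(void)
{
	unsigned long wait_list[] = { 100, 250, 400 };

	printf("%d\n", insert_pos(wait_list, 3, 500));	/* 3: append at tail */
	printf("%d\n", insert_pos(wait_list, 3, 180));	/* 1: between 100, 250 */
	return 0;
}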
@@ -1740,19 +1818,32 @@ static void wait_for_response(struct ib_mad_agent_private *mad_agent_priv,
1740 } 1818 }
1741} 1819}
1742 1820
1821void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
1822 int timeout_ms)
1823{
1824 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
1825 wait_for_response(mad_send_wr);
1826}
1827
1743/* 1828/*
1744 * Process a send work completion 1829 * Process a send work completion
1745 */ 1830 */
1746static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 1831void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1747 struct ib_mad_send_wc *mad_send_wc) 1832 struct ib_mad_send_wc *mad_send_wc)
1748{ 1833{
1749 struct ib_mad_agent_private *mad_agent_priv; 1834 struct ib_mad_agent_private *mad_agent_priv;
1750 unsigned long flags; 1835 unsigned long flags;
1836 int ret;
1751 1837
1752 mad_agent_priv = container_of(mad_send_wr->agent, 1838 mad_agent_priv = mad_send_wr->mad_agent_priv;
1753 struct ib_mad_agent_private, agent);
1754
1755 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1839 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1840 if (mad_agent_priv->agent.rmpp_version) {
1841 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
1842 if (ret == IB_RMPP_RESULT_CONSUMED)
1843 goto done;
1844 } else
1845 ret = IB_RMPP_RESULT_UNHANDLED;
1846
1756 if (mad_send_wc->status != IB_WC_SUCCESS && 1847 if (mad_send_wc->status != IB_WC_SUCCESS &&
1757 mad_send_wr->status == IB_WC_SUCCESS) { 1848 mad_send_wr->status == IB_WC_SUCCESS) {
1758 mad_send_wr->status = mad_send_wc->status; 1849 mad_send_wr->status = mad_send_wc->status;
@@ -1762,10 +1853,9 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1762 if (--mad_send_wr->refcount > 0) { 1853 if (--mad_send_wr->refcount > 0) {
1763 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 1854 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
1764 mad_send_wr->status == IB_WC_SUCCESS) { 1855 mad_send_wr->status == IB_WC_SUCCESS) {
1765 wait_for_response(mad_agent_priv, mad_send_wr); 1856 wait_for_response(mad_send_wr);
1766 } 1857 }
1767 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1858 goto done;
1768 return;
1769 } 1859 }
1770 1860
1771 /* Remove send from MAD agent and notify client of completion */ 1861 /* Remove send from MAD agent and notify client of completion */
@@ -1775,14 +1865,18 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1775 1865
1776 if (mad_send_wr->status != IB_WC_SUCCESS ) 1866 if (mad_send_wr->status != IB_WC_SUCCESS )
1777 mad_send_wc->status = mad_send_wr->status; 1867 mad_send_wc->status = mad_send_wr->status;
1778 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 1868 if (ret != IB_RMPP_RESULT_INTERNAL)
1779 mad_send_wc); 1869 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
1870 mad_send_wc);
1780 1871
1781 /* Release reference on agent taken when sending */ 1872 /* Release reference on agent taken when sending */
1782 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1873 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1783 wake_up(&mad_agent_priv->wait); 1874 wake_up(&mad_agent_priv->wait);
1784 1875
1785 kfree(mad_send_wr); 1876 kfree(mad_send_wr);
1877 return;
1878done:
1879 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1786} 1880}
1787 1881
1788static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, 1882static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
@@ -1961,6 +2055,8 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
1961 2055
1962 /* Empty wait list to prevent receives from finding a request */ 2056 /* Empty wait list to prevent receives from finding a request */
1963 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2057 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2058 /* Empty local completion list as well */
2059 list_splice_init(&mad_agent_priv->local_list, &cancel_list);
1964 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2060 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1965 2061
1966 /* Report all cancelled requests */ 2062 /* Report all cancelled requests */
@@ -1980,8 +2076,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
1980} 2076}
1981 2077
1982static struct ib_mad_send_wr_private* 2078static struct ib_mad_send_wr_private*
1983find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, 2079find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
1984 u64 wr_id)
1985{ 2080{
1986 struct ib_mad_send_wr_private *mad_send_wr; 2081 struct ib_mad_send_wr_private *mad_send_wr;
1987 2082
@@ -1993,79 +2088,50 @@ find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
1993 2088
1994 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2089 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1995 agent_list) { 2090 agent_list) {
1996 if (mad_send_wr->wr_id == wr_id) 2091 if (is_data_mad(mad_agent_priv,
2092 mad_send_wr->send_wr.wr.ud.mad_hdr) &&
2093 mad_send_wr->wr_id == wr_id)
1997 return mad_send_wr; 2094 return mad_send_wr;
1998 } 2095 }
1999 return NULL; 2096 return NULL;
2000} 2097}
2001 2098
2002void cancel_sends(void *data) 2099int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
2003{
2004 struct ib_mad_agent_private *mad_agent_priv;
2005 struct ib_mad_send_wr_private *mad_send_wr;
2006 struct ib_mad_send_wc mad_send_wc;
2007 unsigned long flags;
2008
2009 mad_agent_priv = data;
2010
2011 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2012 mad_send_wc.vendor_err = 0;
2013
2014 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2015 while (!list_empty(&mad_agent_priv->canceled_list)) {
2016 mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
2017 struct ib_mad_send_wr_private,
2018 agent_list);
2019
2020 list_del(&mad_send_wr->agent_list);
2021 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2022
2023 mad_send_wc.wr_id = mad_send_wr->wr_id;
2024 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2025 &mad_send_wc);
2026
2027 kfree(mad_send_wr);
2028 if (atomic_dec_and_test(&mad_agent_priv->refcount))
2029 wake_up(&mad_agent_priv->wait);
2030 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2031 }
2032 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2033}
2034
2035void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2036 u64 wr_id)
2037{ 2100{
2038 struct ib_mad_agent_private *mad_agent_priv; 2101 struct ib_mad_agent_private *mad_agent_priv;
2039 struct ib_mad_send_wr_private *mad_send_wr; 2102 struct ib_mad_send_wr_private *mad_send_wr;
2040 unsigned long flags; 2103 unsigned long flags;
2104 int active;
2041 2105
2042 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2106 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2043 agent); 2107 agent);
2044 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2108 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2045 mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id); 2109 mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
2046 if (!mad_send_wr) { 2110 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2047 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2111 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2048 goto out; 2112 return -EINVAL;
2049 } 2113 }
2050 2114
2051 if (mad_send_wr->status == IB_WC_SUCCESS) 2115 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2052 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2116 if (!timeout_ms) {
2053
2054 if (mad_send_wr->refcount != 0) {
2055 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2117 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2056 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2118 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2057 goto out;
2058 } 2119 }
2059 2120
2060 list_del(&mad_send_wr->agent_list); 2121 mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
2061 list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list); 2122 if (active)
2062 adjust_timeout(mad_agent_priv); 2123 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2124 else
2125 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2126
2063 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2127 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2128 return 0;
2129}
2130EXPORT_SYMBOL(ib_modify_mad);
2064 2131
2065 queue_work(mad_agent_priv->qp_info->port_priv->wq, 2132void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id)
2066 &mad_agent_priv->canceled_work); 2133{
2067out: 2134 ib_modify_mad(mad_agent, wr_id, 0);
2068 return;
2069} 2135}
2070EXPORT_SYMBOL(ib_cancel_mad); 2136EXPORT_SYMBOL(ib_cancel_mad);
2071 2137
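
ib_cancel_mad() is now just ib_modify_mad() with a zero timeout: the request is flagged IB_WC_WR_FLUSH_ERR and reported through the ordinary completion path instead of a dedicated cancel worker. A standalone model of that relationship (names illustrative, not kernel API):

#include <stdio.h>

/* Cancel is modify-with-zero-timeout, which flags the request so the
 * regular completion path reports it as flushed. */
struct pending { unsigned int timeout_ms; int flushed; };

static void modify_mad(struct pending *p, unsigned int timeout_ms)
{
	if (!timeout_ms)
		p->flushed = 1;		/* completes as IB_WC_WR_FLUSH_ERR */
	p->timeout_ms = timeout_ms;
}

static void cancel_mad(struct pending *p)
{
	modify_mad(p, 0);		/* all ib_cancel_mad() now does */
}

int main(void)
{
	struct pending p = { .timeout_ms = 1000 };

	cancel_mad(&p);
	printf("timeout=%u flushed=%d\n", p.timeout_ms, p.flushed);
	return 0;
}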
@@ -2075,6 +2141,7 @@ static void local_completions(void *data)
2075 struct ib_mad_local_private *local; 2141 struct ib_mad_local_private *local;
2076 struct ib_mad_agent_private *recv_mad_agent; 2142 struct ib_mad_agent_private *recv_mad_agent;
2077 unsigned long flags; 2143 unsigned long flags;
2144 int recv = 0;
2078 struct ib_wc wc; 2145 struct ib_wc wc;
2079 struct ib_mad_send_wc mad_send_wc; 2146 struct ib_mad_send_wc mad_send_wc;
2080 2147
@@ -2090,10 +2157,10 @@ static void local_completions(void *data)
2090 recv_mad_agent = local->recv_mad_agent; 2157 recv_mad_agent = local->recv_mad_agent;
2091 if (!recv_mad_agent) { 2158 if (!recv_mad_agent) {
2092 printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); 2159 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2093 kmem_cache_free(ib_mad_cache, local->mad_priv);
2094 goto local_send_completion; 2160 goto local_send_completion;
2095 } 2161 }
2096 2162
2163 recv = 1;
2097 /* 2164 /*
2098 * Defined behavior is to complete response 2165 * Defined behavior is to complete response
2099 * before request 2166 * before request
@@ -2105,7 +2172,9 @@ static void local_completions(void *data)
2105 local->mad_priv->header.recv_wc.wc = &wc; 2172 local->mad_priv->header.recv_wc.wc = &wc;
2106 local->mad_priv->header.recv_wc.mad_len = 2173 local->mad_priv->header.recv_wc.mad_len =
2107 sizeof(struct ib_mad); 2174 sizeof(struct ib_mad);
2108 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list); 2175 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2176 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2177 &local->mad_priv->header.recv_wc.rmpp_list);
2109 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2178 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2110 local->mad_priv->header.recv_wc.recv_buf.mad = 2179 local->mad_priv->header.recv_wc.recv_buf.mad =
2111 &local->mad_priv->mad.mad; 2180 &local->mad_priv->mad.mad;
@@ -2136,11 +2205,47 @@ local_send_completion:
2136 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2205 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2137 list_del(&local->completion_list); 2206 list_del(&local->completion_list);
2138 atomic_dec(&mad_agent_priv->refcount); 2207 atomic_dec(&mad_agent_priv->refcount);
2208 if (!recv)
2209 kmem_cache_free(ib_mad_cache, local->mad_priv);
2139 kfree(local); 2210 kfree(local);
2140 } 2211 }
2141 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2212 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2142} 2213}
2143 2214
2215static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2216{
2217 int ret;
2218
2219 if (!mad_send_wr->retries--)
2220 return -ETIMEDOUT;
2221
2222 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr.
2223 wr.ud.timeout_ms);
2224
2225 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2226 ret = ib_retry_rmpp(mad_send_wr);
2227 switch (ret) {
2228 case IB_RMPP_RESULT_UNHANDLED:
2229 ret = ib_send_mad(mad_send_wr);
2230 break;
2231 case IB_RMPP_RESULT_CONSUMED:
2232 ret = 0;
2233 break;
2234 default:
2235 ret = -ECOMM;
2236 break;
2237 }
2238 } else
2239 ret = ib_send_mad(mad_send_wr);
2240
2241 if (!ret) {
2242 mad_send_wr->refcount++;
2243 list_add_tail(&mad_send_wr->agent_list,
2244 &mad_send_wr->mad_agent_priv->send_list);
2245 }
2246 return ret;
2247}
2248
2144static void timeout_sends(void *data) 2249static void timeout_sends(void *data)
2145{ 2250{
2146 struct ib_mad_agent_private *mad_agent_priv; 2251 struct ib_mad_agent_private *mad_agent_priv;
@@ -2149,8 +2254,6 @@ static void timeout_sends(void *data)
2149 unsigned long flags, delay; 2254 unsigned long flags, delay;
2150 2255
2151 mad_agent_priv = (struct ib_mad_agent_private *)data; 2256 mad_agent_priv = (struct ib_mad_agent_private *)data;
2152
2153 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2154 mad_send_wc.vendor_err = 0; 2257 mad_send_wc.vendor_err = 0;
2155 2258
2156 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2259 spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2170,8 +2273,16 @@ static void timeout_sends(void *data)
2170 } 2273 }
2171 2274
2172 list_del(&mad_send_wr->agent_list); 2275 list_del(&mad_send_wr->agent_list);
2276 if (mad_send_wr->status == IB_WC_SUCCESS &&
2277 !retry_send(mad_send_wr))
2278 continue;
2279
2173 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2280 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2174 2281
2282 if (mad_send_wr->status == IB_WC_SUCCESS)
2283 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2284 else
2285 mad_send_wc.status = mad_send_wr->status;
2175 mad_send_wc.wr_id = mad_send_wr->wr_id; 2286 mad_send_wc.wr_id = mad_send_wr->wr_id;
2176 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2287 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2177 &mad_send_wc); 2288 &mad_send_wc);
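
Taken together, retry_send() and the reworked timeout_sends() mean a deadline expiry first consumes a retry and re-posts the request; only once retries are exhausted does the client see IB_WC_RESP_TIMEOUT_ERR. A standalone model of the retry accounting (names and values illustrative):

#include <stdio.h>

/* Each deadline expiry consumes one retry and re-arms; only when
 * retries hit zero does the owner see a timeout. */
struct req { int retries; };

static int retry_send(struct req *r)
{
	if (!r->retries--)
		return -1;	/* -ETIMEDOUT in the real code */
	/* here the kernel re-arms the timeout and re-posts the send */
	return 0;
}

int main(void)
{
	struct req r = { .retries = 2 };
	int resends = 0;

	while (retry_send(&r) == 0)
		resends++;
	printf("gave up after %d resends\n", resends);	/* prints 2 */
	return 0;
}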
@@ -2447,14 +2558,6 @@ static int ib_mad_port_open(struct ib_device *device,
2447 unsigned long flags; 2558 unsigned long flags;
2448 char name[sizeof "ib_mad123"]; 2559 char name[sizeof "ib_mad123"];
2449 2560
2450 /* First, check if port already open at MAD layer */
2451 port_priv = ib_get_mad_port(device, port_num);
2452 if (port_priv) {
2453 printk(KERN_DEBUG PFX "%s port %d already open\n",
2454 device->name, port_num);
2455 return 0;
2456 }
2457
2458 /* Create new device info */ 2561 /* Create new device info */
2459 port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL); 2562 port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
2460 if (!port_priv) { 2563 if (!port_priv) {
@@ -2579,7 +2682,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
2579 2682
2580static void ib_mad_init_device(struct ib_device *device) 2683static void ib_mad_init_device(struct ib_device *device)
2581{ 2684{
2582 int ret, num_ports, cur_port, i, ret2; 2685 int num_ports, cur_port, i;
2583 2686
2584 if (device->node_type == IB_NODE_SWITCH) { 2687 if (device->node_type == IB_NODE_SWITCH) {
2585 num_ports = 1; 2688 num_ports = 1;
@@ -2589,47 +2692,37 @@ static void ib_mad_init_device(struct ib_device *device)
2589 cur_port = 1; 2692 cur_port = 1;
2590 } 2693 }
2591 for (i = 0; i < num_ports; i++, cur_port++) { 2694 for (i = 0; i < num_ports; i++, cur_port++) {
2592 ret = ib_mad_port_open(device, cur_port); 2695 if (ib_mad_port_open(device, cur_port)) {
2593 if (ret) {
2594 printk(KERN_ERR PFX "Couldn't open %s port %d\n", 2696 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2595 device->name, cur_port); 2697 device->name, cur_port);
2596 goto error_device_open; 2698 goto error_device_open;
2597 } 2699 }
2598 ret = ib_agent_port_open(device, cur_port); 2700 if (ib_agent_port_open(device, cur_port)) {
2599 if (ret) {
2600 printk(KERN_ERR PFX "Couldn't open %s port %d " 2701 printk(KERN_ERR PFX "Couldn't open %s port %d "
2601 "for agents\n", 2702 "for agents\n",
2602 device->name, cur_port); 2703 device->name, cur_port);
2603 goto error_device_open; 2704 goto error_device_open;
2604 } 2705 }
2605 } 2706 }
2606 2707 return;
2607 goto error_device_query;
2608 2708
2609error_device_open: 2709error_device_open:
2610 while (i > 0) { 2710 while (i > 0) {
2611 cur_port--; 2711 cur_port--;
2612 ret2 = ib_agent_port_close(device, cur_port); 2712 if (ib_agent_port_close(device, cur_port))
2613 if (ret2) {
2614 printk(KERN_ERR PFX "Couldn't close %s port %d " 2713 printk(KERN_ERR PFX "Couldn't close %s port %d "
2615 "for agents\n", 2714 "for agents\n",
2616 device->name, cur_port); 2715 device->name, cur_port);
2617 } 2716 if (ib_mad_port_close(device, cur_port))
2618 ret2 = ib_mad_port_close(device, cur_port);
2619 if (ret2) {
2620 printk(KERN_ERR PFX "Couldn't close %s port %d\n", 2717 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2621 device->name, cur_port); 2718 device->name, cur_port);
2622 }
2623 i--; 2719 i--;
2624 } 2720 }
2625
2626error_device_query:
2627 return;
2628} 2721}
2629 2722
2630static void ib_mad_remove_device(struct ib_device *device) 2723static void ib_mad_remove_device(struct ib_device *device)
2631{ 2724{
2632 int ret = 0, i, num_ports, cur_port, ret2; 2725 int i, num_ports, cur_port;
2633 2726
2634 if (device->node_type == IB_NODE_SWITCH) { 2727 if (device->node_type == IB_NODE_SWITCH) {
2635 num_ports = 1; 2728 num_ports = 1;
@@ -2639,21 +2732,13 @@ static void ib_mad_remove_device(struct ib_device *device)
2639 cur_port = 1; 2732 cur_port = 1;
2640 } 2733 }
2641 for (i = 0; i < num_ports; i++, cur_port++) { 2734 for (i = 0; i < num_ports; i++, cur_port++) {
2642 ret2 = ib_agent_port_close(device, cur_port); 2735 if (ib_agent_port_close(device, cur_port))
2643 if (ret2) {
2644 printk(KERN_ERR PFX "Couldn't close %s port %d " 2736 printk(KERN_ERR PFX "Couldn't close %s port %d "
2645 "for agents\n", 2737 "for agents\n",
2646 device->name, cur_port); 2738 device->name, cur_port);
2647 if (!ret) 2739 if (ib_mad_port_close(device, cur_port))
2648 ret = ret2;
2649 }
2650 ret2 = ib_mad_port_close(device, cur_port);
2651 if (ret2) {
2652 printk(KERN_ERR PFX "Couldn't close %s port %d\n", 2740 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2653 device->name, cur_port); 2741 device->name, cur_port);
2654 if (!ret)
2655 ret = ret2;
2656 }
2657 } 2742 }
2658} 2743}
2659 2744
@@ -2709,3 +2794,4 @@ static void __exit ib_mad_cleanup_module(void)
2709 2794
2710module_init(ib_mad_init_module); 2795module_init(ib_mad_init_module);
2711module_exit(ib_mad_cleanup_module); 2796module_exit(ib_mad_cleanup_module);
2797
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 008cbcb94b15..568da10b05ab 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +31,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: mad_priv.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: mad_priv.h 2730 2005-06-28 16:43:03Z sean.hefty $
  */

 #ifndef __IB_MAD_PRIV_H__
@@ -92,16 +94,15 @@ struct ib_mad_agent_private {
 	spinlock_t lock;
 	struct list_head send_list;
 	struct list_head wait_list;
+	struct list_head done_list;
 	struct work_struct timed_work;
 	unsigned long timeout;
 	struct list_head local_list;
 	struct work_struct local_work;
-	struct list_head canceled_list;
-	struct work_struct canceled_work;
+	struct list_head rmpp_list;

 	atomic_t refcount;
 	wait_queue_head_t wait;
-	u8 rmpp_version;
 };

 struct ib_mad_snoop_private {
@@ -116,15 +117,24 @@ struct ib_mad_snoop_private {
 struct ib_mad_send_wr_private {
 	struct ib_mad_list_head mad_list;
 	struct list_head agent_list;
-	struct ib_mad_agent *agent;
+	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	u64 wr_id;			/* client WR ID */
 	u64 tid;
 	unsigned long timeout;
+	int retries;
 	int retry;
 	int refcount;
 	enum ib_wc_status status;
+
+	/* RMPP control */
+	int last_ack;
+	int seg_num;
+	int newwin;
+	int total_seg;
+	int data_offset;
+	int pad;
 };

 struct ib_mad_local_private {
@@ -197,4 +207,17 @@ struct ib_mad_port_private {

 extern kmem_cache_t *ib_mad_cache;

+int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
+
+struct ib_mad_send_wr_private *
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid);
+
+void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
+			     struct ib_mad_send_wc *mad_send_wc);
+
+void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
+
+void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
+			  int timeout_ms);
+
 #endif /* __IB_MAD_PRIV_H__ */
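
The new RMPP fields in ib_mad_send_wr_private (last_ack, seg_num, newwin, total_seg) implement a sliding send window: a segment may only be transmitted while seg_num stays within the receiver-granted newwin, and each incoming ACK both raises last_ack and opens a new window. A toy, runnable model of that invariant (not kernel code; the struct and values here are illustrative):

#include <stdio.h>

struct send_state {
	int last_ack;	/* highest segment the receiver has ACKed */
	int seg_num;	/* next segment to transmit (1-based) */
	int newwin;	/* highest segment the receiver currently allows */
	int total_seg;	/* segments in the whole MAD */
};

static void transmit_window(struct send_state *s)
{
	while (s->seg_num <= s->newwin && s->seg_num <= s->total_seg) {
		printf("send segment %d\n", s->seg_num);
		s->seg_num++;
	}
}

int main(void)
{
	struct send_state s = { .last_ack = 0, .seg_num = 1,
				.newwin = 1, .total_seg = 5 };

	transmit_window(&s);			/* segment 1 only */
	s.last_ack = 1; s.newwin = 3;		/* ACK(1) opens window to 3 */
	transmit_window(&s);			/* segments 2 and 3 */
	s.last_ack = 3; s.newwin = 5;
	transmit_window(&s);			/* segments 4 and 5 */
	return 0;
}
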
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
new file mode 100644
index 000000000000..8f1eb80e421f
--- /dev/null
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -0,0 +1,765 @@
1/*
2 * Copyright (c) 2005 Intel Inc. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
34 */
35
36#include <linux/dma-mapping.h>
37
38#include "mad_priv.h"
39#include "mad_rmpp.h"
40
41enum rmpp_state {
42 RMPP_STATE_ACTIVE,
43 RMPP_STATE_TIMEOUT,
44 RMPP_STATE_COMPLETE
45};
46
47struct mad_rmpp_recv {
48 struct ib_mad_agent_private *agent;
49 struct list_head list;
50 struct work_struct timeout_work;
51 struct work_struct cleanup_work;
52 wait_queue_head_t wait;
53 enum rmpp_state state;
54 spinlock_t lock;
55 atomic_t refcount;
56
57 struct ib_ah *ah;
58 struct ib_mad_recv_wc *rmpp_wc;
59 struct ib_mad_recv_buf *cur_seg_buf;
60 int last_ack;
61 int seg_num;
62 int newwin;
63
64 u64 tid;
65 u32 src_qp;
66 u16 slid;
67 u8 mgmt_class;
68 u8 class_version;
69 u8 method;
70};
71
72static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
73{
74 atomic_dec(&rmpp_recv->refcount);
75 wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
76 ib_destroy_ah(rmpp_recv->ah);
77 kfree(rmpp_recv);
78}
79
80void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
81{
82 struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
83 unsigned long flags;
84
85 spin_lock_irqsave(&agent->lock, flags);
86 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
87 cancel_delayed_work(&rmpp_recv->timeout_work);
88 cancel_delayed_work(&rmpp_recv->cleanup_work);
89 }
90 spin_unlock_irqrestore(&agent->lock, flags);
91
92 flush_workqueue(agent->qp_info->port_priv->wq);
93
94 list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
95 &agent->rmpp_list, list) {
96 list_del(&rmpp_recv->list);
97 if (rmpp_recv->state != RMPP_STATE_COMPLETE)
98 ib_free_recv_mad(rmpp_recv->rmpp_wc);
99 destroy_rmpp_recv(rmpp_recv);
100 }
101}
102
103static void recv_timeout_handler(void *data)
104{
105 struct mad_rmpp_recv *rmpp_recv = data;
106 struct ib_mad_recv_wc *rmpp_wc;
107 unsigned long flags;
108
109 spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
110 if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
111 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
112 return;
113 }
114 rmpp_recv->state = RMPP_STATE_TIMEOUT;
115 list_del(&rmpp_recv->list);
116 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
117
118 /* TODO: send abort. */
119 rmpp_wc = rmpp_recv->rmpp_wc;
120 destroy_rmpp_recv(rmpp_recv);
121 ib_free_recv_mad(rmpp_wc);
122}
123
124static void recv_cleanup_handler(void *data)
125{
126 struct mad_rmpp_recv *rmpp_recv = data;
127 unsigned long flags;
128
129 spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
130 list_del(&rmpp_recv->list);
131 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
132 destroy_rmpp_recv(rmpp_recv);
133}
134
135static struct mad_rmpp_recv *
136create_rmpp_recv(struct ib_mad_agent_private *agent,
137 struct ib_mad_recv_wc *mad_recv_wc)
138{
139 struct mad_rmpp_recv *rmpp_recv;
140 struct ib_mad_hdr *mad_hdr;
141
142 rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
143 if (!rmpp_recv)
144 return NULL;
145
146 rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
147 mad_recv_wc->wc,
148 mad_recv_wc->recv_buf.grh,
149 agent->agent.port_num);
150 if (IS_ERR(rmpp_recv->ah))
151 goto error;
152
153 rmpp_recv->agent = agent;
154 init_waitqueue_head(&rmpp_recv->wait);
155 INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
156 INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
157 spin_lock_init(&rmpp_recv->lock);
158 rmpp_recv->state = RMPP_STATE_ACTIVE;
159 atomic_set(&rmpp_recv->refcount, 1);
160
161 rmpp_recv->rmpp_wc = mad_recv_wc;
162 rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
163 rmpp_recv->newwin = 1;
164 rmpp_recv->seg_num = 1;
165 rmpp_recv->last_ack = 0;
166
167 mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
168 rmpp_recv->tid = mad_hdr->tid;
169 rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
170 rmpp_recv->slid = mad_recv_wc->wc->slid;
171 rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
172 rmpp_recv->class_version = mad_hdr->class_version;
173 rmpp_recv->method = mad_hdr->method;
174 return rmpp_recv;
175
176error: kfree(rmpp_recv);
177 return NULL;
178}
179
180static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
181{
182 if (atomic_dec_and_test(&rmpp_recv->refcount))
183 wake_up(&rmpp_recv->wait);
184}
185
186static struct mad_rmpp_recv *
187find_rmpp_recv(struct ib_mad_agent_private *agent,
188 struct ib_mad_recv_wc *mad_recv_wc)
189{
190 struct mad_rmpp_recv *rmpp_recv;
191 struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
192
193 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
194 if (rmpp_recv->tid == mad_hdr->tid &&
195 rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
196 rmpp_recv->slid == mad_recv_wc->wc->slid &&
197 rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
198 rmpp_recv->class_version == mad_hdr->class_version &&
199 rmpp_recv->method == mad_hdr->method)
200 return rmpp_recv;
201 }
202 return NULL;
203}
204
205static struct mad_rmpp_recv *
206acquire_rmpp_recv(struct ib_mad_agent_private *agent,
207 struct ib_mad_recv_wc *mad_recv_wc)
208{
209 struct mad_rmpp_recv *rmpp_recv;
210 unsigned long flags;
211
212 spin_lock_irqsave(&agent->lock, flags);
213 rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
214 if (rmpp_recv)
215 atomic_inc(&rmpp_recv->refcount);
216 spin_unlock_irqrestore(&agent->lock, flags);
217 return rmpp_recv;
218}
219
220static struct mad_rmpp_recv *
221insert_rmpp_recv(struct ib_mad_agent_private *agent,
222 struct mad_rmpp_recv *rmpp_recv)
223{
224 struct mad_rmpp_recv *cur_rmpp_recv;
225
226 cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
227 if (!cur_rmpp_recv)
228 list_add_tail(&rmpp_recv->list, &agent->rmpp_list);
229
230 return cur_rmpp_recv;
231}
232
233static int data_offset(u8 mgmt_class)
234{
235 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
236 return offsetof(struct ib_sa_mad, data);
237 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
238 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
239 return offsetof(struct ib_vendor_mad, data);
240 else
241 return offsetof(struct ib_rmpp_mad, data);
242}
243
244static void format_ack(struct ib_rmpp_mad *ack,
245 struct ib_rmpp_mad *data,
246 struct mad_rmpp_recv *rmpp_recv)
247{
248 unsigned long flags;
249
250 memcpy(&ack->mad_hdr, &data->mad_hdr,
251 data_offset(data->mad_hdr.mgmt_class));
252
253 ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
254 ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
255 ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
256
257 spin_lock_irqsave(&rmpp_recv->lock, flags);
258 rmpp_recv->last_ack = rmpp_recv->seg_num;
259 ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
260 ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
261 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
262}
263
264static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
265 struct ib_mad_recv_wc *recv_wc)
266{
267 struct ib_mad_send_buf *msg;
268 struct ib_send_wr *bad_send_wr;
269 int hdr_len, ret;
270
271 hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
272 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
273 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
274 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
275 GFP_KERNEL);
276 if (!msg)
277 return;
278
279 format_ack((struct ib_rmpp_mad *) msg->mad,
280 (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
281 ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
282 &bad_send_wr);
283 if (ret)
284 ib_free_send_mad(msg);
285}
286
287static inline int get_last_flag(struct ib_mad_recv_buf *seg)
288{
289 struct ib_rmpp_mad *rmpp_mad;
290
291 rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
292 return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
293}
294
295static inline int get_seg_num(struct ib_mad_recv_buf *seg)
296{
297 struct ib_rmpp_mad *rmpp_mad;
298
299 rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
300 return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
301}
302
303static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
304 struct ib_mad_recv_buf *seg)
305{
306 if (seg->list.next == rmpp_list)
307 return NULL;
308
309 return container_of(seg->list.next, struct ib_mad_recv_buf, list);
310}
311
312static inline int window_size(struct ib_mad_agent_private *agent)
313{
314 return max(agent->qp_info->recv_queue.max_active >> 3, 1);
315}
316
317static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
318 int seg_num)
319{
320 struct ib_mad_recv_buf *seg_buf;
321 int cur_seg_num;
322
323 list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
324 cur_seg_num = get_seg_num(seg_buf);
325 if (seg_num > cur_seg_num)
326 return seg_buf;
327 if (seg_num == cur_seg_num)
328 break;
329 }
330 return NULL;
331}
332
333static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
334 struct ib_mad_recv_buf *new_buf)
335{
336 struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;
337
338 while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
339 rmpp_recv->cur_seg_buf = new_buf;
340 rmpp_recv->seg_num++;
341 new_buf = get_next_seg(rmpp_list, new_buf);
342 }
343}
344
345static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
346{
347 struct ib_rmpp_mad *rmpp_mad;
348 int hdr_size, data_size, pad;
349
350 rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
351
352 hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
353 data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
354 pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
355 if (pad > data_size || pad < 0)
356 pad = 0;
357
358 return hdr_size + rmpp_recv->seg_num * data_size - pad;
359}
360
361static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
362{
363 struct ib_mad_recv_wc *rmpp_wc;
364
365 ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
366 if (rmpp_recv->seg_num > 1)
367 cancel_delayed_work(&rmpp_recv->timeout_work);
368
369 rmpp_wc = rmpp_recv->rmpp_wc;
370 rmpp_wc->mad_len = get_mad_len(rmpp_recv);
371 /* 10 seconds until we can find the packet lifetime */
372 queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
373 &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
374 return rmpp_wc;
375}
376
377void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf)
378{
379 struct ib_mad_recv_buf *seg_buf;
380 struct ib_rmpp_mad *rmpp_mad;
381 void *data;
382 int size, len, offset;
383 u8 flags;
384
385 len = mad_recv_wc->mad_len;
386 if (len <= sizeof(struct ib_mad)) {
387 memcpy(buf, mad_recv_wc->recv_buf.mad, len);
388 return;
389 }
390
391 offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
392
393 list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
394 rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
395 flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);
396
397 if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
398 data = rmpp_mad;
399 size = sizeof(*rmpp_mad);
400 } else {
401 data = (void *) rmpp_mad + offset;
402 if (flags & IB_MGMT_RMPP_FLAG_LAST)
403 size = len;
404 else
405 size = sizeof(*rmpp_mad) - offset;
406 }
407
408 memcpy(buf, data, size);
409 len -= size;
410 buf += size;
411 }
412}
413EXPORT_SYMBOL(ib_coalesce_recv_mad);
414
415static struct ib_mad_recv_wc *
416continue_rmpp(struct ib_mad_agent_private *agent,
417 struct ib_mad_recv_wc *mad_recv_wc)
418{
419 struct mad_rmpp_recv *rmpp_recv;
420 struct ib_mad_recv_buf *prev_buf;
421 struct ib_mad_recv_wc *done_wc;
422 int seg_num;
423 unsigned long flags;
424
425 rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
426 if (!rmpp_recv)
427 goto drop1;
428
429 seg_num = get_seg_num(&mad_recv_wc->recv_buf);
430
431 spin_lock_irqsave(&rmpp_recv->lock, flags);
432 if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
433 (seg_num > rmpp_recv->newwin))
434 goto drop3;
435
436 if ((seg_num <= rmpp_recv->last_ack) ||
437 (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
438 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
439 ack_recv(rmpp_recv, mad_recv_wc);
440 goto drop2;
441 }
442
443 prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
444 if (!prev_buf)
445 goto drop3;
446
447 done_wc = NULL;
448 list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
449 if (rmpp_recv->cur_seg_buf == prev_buf) {
450 update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
451 if (get_last_flag(rmpp_recv->cur_seg_buf)) {
452 rmpp_recv->state = RMPP_STATE_COMPLETE;
453 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
454 done_wc = complete_rmpp(rmpp_recv);
455 goto out;
456 } else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
457 rmpp_recv->newwin += window_size(agent);
458 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
459 ack_recv(rmpp_recv, mad_recv_wc);
460 goto out;
461 }
462 }
463 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
464out:
465 deref_rmpp_recv(rmpp_recv);
466 return done_wc;
467
468drop3: spin_unlock_irqrestore(&rmpp_recv->lock, flags);
469drop2: deref_rmpp_recv(rmpp_recv);
470drop1: ib_free_recv_mad(mad_recv_wc);
471 return NULL;
472}
473
474static struct ib_mad_recv_wc *
475start_rmpp(struct ib_mad_agent_private *agent,
476 struct ib_mad_recv_wc *mad_recv_wc)
477{
478 struct mad_rmpp_recv *rmpp_recv;
479 unsigned long flags;
480
481 rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
482 if (!rmpp_recv) {
483 ib_free_recv_mad(mad_recv_wc);
484 return NULL;
485 }
486
487 spin_lock_irqsave(&agent->lock, flags);
488 if (insert_rmpp_recv(agent, rmpp_recv)) {
489 spin_unlock_irqrestore(&agent->lock, flags);
490 /* duplicate first MAD */
491 destroy_rmpp_recv(rmpp_recv);
492 return continue_rmpp(agent, mad_recv_wc);
493 }
494 atomic_inc(&rmpp_recv->refcount);
495
496 if (get_last_flag(&mad_recv_wc->recv_buf)) {
497 rmpp_recv->state = RMPP_STATE_COMPLETE;
498 spin_unlock_irqrestore(&agent->lock, flags);
499 complete_rmpp(rmpp_recv);
500 } else {
501 spin_unlock_irqrestore(&agent->lock, flags);
502 /* 40 seconds until we can find the packet lifetimes */
503 queue_delayed_work(agent->qp_info->port_priv->wq,
504 &rmpp_recv->timeout_work,
505 msecs_to_jiffies(40000));
506 rmpp_recv->newwin += window_size(agent);
507 ack_recv(rmpp_recv, mad_recv_wc);
508 mad_recv_wc = NULL;
509 }
510 deref_rmpp_recv(rmpp_recv);
511 return mad_recv_wc;
512}
513
514static inline u64 get_seg_addr(struct ib_mad_send_wr_private *mad_send_wr)
515{
516 return mad_send_wr->sg_list[0].addr + mad_send_wr->data_offset +
517 (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset) *
518 (mad_send_wr->seg_num - 1);
519}
520
521static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
522{
523 struct ib_rmpp_mad *rmpp_mad;
524 int timeout;
525
526 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
527 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
528 rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
529
530 if (mad_send_wr->seg_num == 1) {
531 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
532 rmpp_mad->rmpp_hdr.paylen_newwin =
533 cpu_to_be32(mad_send_wr->total_seg *
534 (sizeof(struct ib_rmpp_mad) -
535 offsetof(struct ib_rmpp_mad, data)));
536 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
537 } else {
538 mad_send_wr->send_wr.num_sge = 2;
539 mad_send_wr->sg_list[0].length = mad_send_wr->data_offset;
540 mad_send_wr->sg_list[1].addr = get_seg_addr(mad_send_wr);
541 mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
542 mad_send_wr->data_offset;
543 mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
544 }
545
546 if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
547 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
548 rmpp_mad->rmpp_hdr.paylen_newwin =
549 cpu_to_be32(sizeof(struct ib_rmpp_mad) -
550 offsetof(struct ib_rmpp_mad, data) -
551 mad_send_wr->pad);
552 }
553
554 /* 2 seconds for an ACK until we can find the packet lifetime */
555 timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
556 if (!timeout || timeout > 2000)
557 mad_send_wr->timeout = msecs_to_jiffies(2000);
558 mad_send_wr->seg_num++;
559 return ib_send_mad(mad_send_wr);
560}
561
562static void process_rmpp_ack(struct ib_mad_agent_private *agent,
563 struct ib_mad_recv_wc *mad_recv_wc)
564{
565 struct ib_mad_send_wr_private *mad_send_wr;
566 struct ib_rmpp_mad *rmpp_mad;
567 unsigned long flags;
568 int seg_num, newwin, ret;
569
570 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
571 if (rmpp_mad->rmpp_hdr.rmpp_status)
572 return;
573
574 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
575 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
576
577 spin_lock_irqsave(&agent->lock, flags);
578 mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
579 if (!mad_send_wr)
580 goto out; /* Unmatched ACK */
581
582 if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
583 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
584 goto out; /* Send is already done */
585
586 if (seg_num > mad_send_wr->total_seg)
587 goto out; /* Bad ACK */
588
589 if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
590 goto out; /* Old ACK */
591
592 if (seg_num > mad_send_wr->last_ack) {
593 mad_send_wr->last_ack = seg_num;
594 mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
595 }
596 mad_send_wr->newwin = newwin;
597 if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
598 /* If no response is expected, the ACK completes the send */
599 if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
600 struct ib_mad_send_wc wc;
601
602 ib_mark_mad_done(mad_send_wr);
603 spin_unlock_irqrestore(&agent->lock, flags);
604
605 wc.status = IB_WC_SUCCESS;
606 wc.vendor_err = 0;
607 wc.wr_id = mad_send_wr->wr_id;
608 ib_mad_complete_send_wr(mad_send_wr, &wc);
609 return;
610 }
611 if (mad_send_wr->refcount == 1)
612 ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
613 send_wr.wr.ud.timeout_ms);
614 } else if (mad_send_wr->refcount == 1 &&
615 mad_send_wr->seg_num < mad_send_wr->newwin &&
616 mad_send_wr->seg_num <= mad_send_wr->total_seg) {
617 /* Send failure will just result in a timeout/retry */
618 ret = send_next_seg(mad_send_wr);
619 if (ret)
620 goto out;
621
622 mad_send_wr->refcount++;
623 list_del(&mad_send_wr->agent_list);
624 list_add_tail(&mad_send_wr->agent_list,
625 &mad_send_wr->mad_agent_priv->send_list);
626 }
627out:
628 spin_unlock_irqrestore(&agent->lock, flags);
629}
630
631struct ib_mad_recv_wc *
632ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
633 struct ib_mad_recv_wc *mad_recv_wc)
634{
635 struct ib_rmpp_mad *rmpp_mad;
636
637 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
638 if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
639 return mad_recv_wc;
640
641 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
642 goto out;
643
644 switch (rmpp_mad->rmpp_hdr.rmpp_type) {
645 case IB_MGMT_RMPP_TYPE_DATA:
646 if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
647 return start_rmpp(agent, mad_recv_wc);
648 else
649 return continue_rmpp(agent, mad_recv_wc);
650 case IB_MGMT_RMPP_TYPE_ACK:
651 process_rmpp_ack(agent, mad_recv_wc);
652 break;
653 case IB_MGMT_RMPP_TYPE_STOP:
654 case IB_MGMT_RMPP_TYPE_ABORT:
655 /* TODO: process_rmpp_nack(agent, mad_recv_wc); */
656 break;
657 default:
658 break;
659 }
660out:
661 ib_free_recv_mad(mad_recv_wc);
662 return NULL;
663}
664
665int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
666{
667 struct ib_rmpp_mad *rmpp_mad;
668 int i, total_len, ret;
669
670 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
671 if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
672 IB_MGMT_RMPP_FLAG_ACTIVE))
673 return IB_RMPP_RESULT_UNHANDLED;
674
675 if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
676 return IB_RMPP_RESULT_INTERNAL;
677
678 if (mad_send_wr->send_wr.num_sge > 1)
679 return -EINVAL; /* TODO: support num_sge > 1 */
680
681 mad_send_wr->seg_num = 1;
682 mad_send_wr->newwin = 1;
683 mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);
684
685 total_len = 0;
686 for (i = 0; i < mad_send_wr->send_wr.num_sge; i++)
687 total_len += mad_send_wr->send_wr.sg_list[i].length;
688
689 mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
690 (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
691 mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
692 be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
693
694 /* We need to wait for the final ACK even if there isn't a response */
695 mad_send_wr->refcount += (mad_send_wr->timeout == 0);
696 ret = send_next_seg(mad_send_wr);
697 if (!ret)
698 return IB_RMPP_RESULT_CONSUMED;
699 return ret;
700}
701
702int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
703 struct ib_mad_send_wc *mad_send_wc)
704{
705 struct ib_rmpp_mad *rmpp_mad;
706 struct ib_mad_send_buf *msg;
707 int ret;
708
709 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
710 if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
711 IB_MGMT_RMPP_FLAG_ACTIVE))
712 return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
713
714 if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
715 msg = (struct ib_mad_send_buf *) (unsigned long)
716 mad_send_wc->wr_id;
717 ib_free_send_mad(msg);
718 return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
719 }
720
721 if (mad_send_wc->status != IB_WC_SUCCESS ||
722 mad_send_wr->status != IB_WC_SUCCESS)
723 return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */
724
725 if (!mad_send_wr->timeout)
726 return IB_RMPP_RESULT_PROCESSED; /* Response received */
727
728 if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
729 mad_send_wr->timeout =
730 msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
731 return IB_RMPP_RESULT_PROCESSED; /* Send done */
732 }
733
734 if (mad_send_wr->seg_num > mad_send_wr->newwin ||
735 mad_send_wr->seg_num > mad_send_wr->total_seg)
736 return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */
737
738 ret = send_next_seg(mad_send_wr);
739 if (ret) {
740 mad_send_wc->status = IB_WC_GENERAL_ERR;
741 return IB_RMPP_RESULT_PROCESSED;
742 }
743 return IB_RMPP_RESULT_CONSUMED;
744}
745
746int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
747{
748 struct ib_rmpp_mad *rmpp_mad;
749 int ret;
750
751 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
752 if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
753 IB_MGMT_RMPP_FLAG_ACTIVE))
754 return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
755
756 if (mad_send_wr->last_ack == mad_send_wr->total_seg)
757 return IB_RMPP_RESULT_PROCESSED;
758
759 mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
760 ret = send_next_seg(mad_send_wr);
761 if (ret)
762 return IB_RMPP_RESULT_PROCESSED;
763
764 return IB_RMPP_RESULT_CONSUMED;
765}
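
A note on the arithmetic in ib_send_rmpp_mad() and get_mad_len() above: every segment carries (MAD size - class header) payload bytes, the final segment's paylen_newwin encodes how much of it is unused pad, and the reassembled length is header + seg_num * data_size - pad. A standalone check of that bookkeeping, assuming the conventional 256-byte MAD and a 56-byte SA class header (the constants are illustrative, not read from the tree, and the segment count is written in the equivalent ceiling form):

#include <stdio.h>

enum { MAD_SIZE = 256, SA_HDR = 56 };	/* assumed sizes, see note above */

int main(void)
{
	int payload   = 950;			/* class payload in bytes */
	int data_size = MAD_SIZE - SA_HDR;	/* 200 payload bytes/segment */
	int total_seg = (payload + data_size - 1) / data_size;
	int pad       = total_seg * data_size - payload;
	int mad_len   = SA_HDR + total_seg * data_size - pad;

	/* prints: 5 segments, 50 pad bytes, mad_len 1006 */
	printf("%d segments, %d pad bytes, mad_len %d\n",
	       total_seg, pad, mad_len);
	return 0;
}
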
diff --git a/drivers/infiniband/core/mad_rmpp.h b/drivers/infiniband/core/mad_rmpp.h
new file mode 100644
index 000000000000..c4924dfb8e75
--- /dev/null
+++ b/drivers/infiniband/core/mad_rmpp.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2005 Intel Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mad_rmpp.h 1921 2005-02-25 22:58:44Z sean.hefty $
33 */
34
35#ifndef __MAD_RMPP_H__
36#define __MAD_RMPP_H__
37
38enum {
39 IB_RMPP_RESULT_PROCESSED,
40 IB_RMPP_RESULT_CONSUMED,
41 IB_RMPP_RESULT_INTERNAL,
42 IB_RMPP_RESULT_UNHANDLED
43};
44
45int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr);
46
47struct ib_mad_recv_wc *
48ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
49 struct ib_mad_recv_wc *mad_recv_wc);
50
51int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
52 struct ib_mad_send_wc *mad_send_wc);
53
54void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent);
55
56int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr);
57
58#endif /* __MAD_RMPP_H__ */
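
The four result codes above form the contract between the MAD layer and the RMPP code: UNHANDLED means the MAD was not RMPP-active and the caller proceeds down its normal path, CONSUMED means the RMPP layer took ownership (e.g. it will clock out further segments as ACKs arrive), PROCESSED means the work is done and the caller completes as usual, and INTERNAL marks RMPP control MADs generated inside the layer. A hedged sketch of a dispatching caller (placeholder functions, not the actual mad.c call sites):

#include <stdio.h>

enum rmpp_result { PROCESSED, CONSUMED, INTERNAL, UNHANDLED };

static enum rmpp_result rmpp_layer(int is_rmpp)
{
	return is_rmpp ? CONSUMED : UNHANDLED;	/* placeholder decision */
}

static void send_path(int is_rmpp)
{
	switch (rmpp_layer(is_rmpp)) {
	case UNHANDLED:
		printf("send as a plain single-MAD request\n");
		break;
	case CONSUMED:
		printf("RMPP layer owns the send; wait for ACK-driven segments\n");
		break;
	default:	/* PROCESSED or INTERNAL */
		printf("already handled or internal control MAD\n");
		break;
	}
}

int main(void)
{
	send_path(0);
	send_path(1);
	return 0;
}
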
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 5a08e81fa827..795184931c83 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +30,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: sa_query.c 1389 2004-12-27 22:56:47Z roland $
+ * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
  */

 #include <linux/module.h>
@@ -50,26 +51,6 @@ MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("InfiniBand subnet administration query support");
 MODULE_LICENSE("Dual BSD/GPL");

-/*
- * These two structures must be packed because they have 64-bit fields
- * that are only 32-bit aligned. 64-bit architectures will lay them
- * out wrong otherwise. (And unfortunately they are sent on the wire
- * so we can't change the layout)
- */
-struct ib_sa_hdr {
-	u64			sm_key;
-	u16			attr_offset;
-	u16			reserved;
-	ib_sa_comp_mask		comp_mask;
-} __attribute__ ((packed));
-
-struct ib_sa_mad {
-	struct ib_mad_hdr	mad_hdr;
-	struct ib_rmpp_hdr	rmpp_hdr;
-	struct ib_sa_hdr	sa_hdr;
-	u8			data[200];
-} __attribute__ ((packed));
-
 struct ib_sa_sm_ah {
 	struct ib_ah *ah;
 	struct kref   ref;
@@ -77,7 +58,6 @@ struct ib_sa_sm_ah {

 struct ib_sa_port {
 	struct ib_mad_agent *agent;
-	struct ib_mr        *mr;
 	struct ib_sa_sm_ah  *sm_ah;
 	struct work_struct   update_task;
 	spinlock_t           ah_lock;
@@ -100,6 +80,12 @@ struct ib_sa_query {
 	int id;
 };

+struct ib_sa_service_query {
+	void (*callback)(int, struct ib_sa_service_rec *, void *);
+	void *context;
+	struct ib_sa_query sa_query;
+};
+
 struct ib_sa_path_query {
 	void (*callback)(int, struct ib_sa_path_rec *, void *);
 	void *context;
@@ -341,6 +327,54 @@ static const struct ib_field mcmember_rec_table[] = {
 	  .size_bits    = 23 },
 };

+#define SERVICE_REC_FIELD(field) \
+	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
+	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field, \
+	.field_name          = "sa_service_rec:" #field
+
+static const struct ib_field service_rec_table[] = {
+	{ SERVICE_REC_FIELD(id),
+	  .offset_words = 0,
+	  .offset_bits  = 0,
+	  .size_bits    = 64 },
+	{ SERVICE_REC_FIELD(gid),
+	  .offset_words = 2,
+	  .offset_bits  = 0,
+	  .size_bits    = 128 },
+	{ SERVICE_REC_FIELD(pkey),
+	  .offset_words = 6,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ SERVICE_REC_FIELD(lease),
+	  .offset_words = 7,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ SERVICE_REC_FIELD(key),
+	  .offset_words = 8,
+	  .offset_bits  = 0,
+	  .size_bits    = 128 },
+	{ SERVICE_REC_FIELD(name),
+	  .offset_words = 12,
+	  .offset_bits  = 0,
+	  .size_bits    = 64*8 },
+	{ SERVICE_REC_FIELD(data8),
+	  .offset_words = 28,
+	  .offset_bits  = 0,
+	  .size_bits    = 16*8 },
+	{ SERVICE_REC_FIELD(data16),
+	  .offset_words = 32,
+	  .offset_bits  = 0,
+	  .size_bits    = 8*16 },
+	{ SERVICE_REC_FIELD(data32),
+	  .offset_words = 36,
+	  .offset_bits  = 0,
+	  .size_bits    = 4*32 },
+	{ SERVICE_REC_FIELD(data64),
+	  .offset_words = 40,
+	  .offset_bits  = 0,
+	  .size_bits    = 2*64 },
+};
+
 static void free_sm_ah(struct kref *kref)
 {
 	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -463,7 +497,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms)
 			.mad_hdr     = &query->mad->mad_hdr,
 			.remote_qpn  = 1,
 			.remote_qkey = IB_QP1_QKEY,
-			.timeout_ms  = timeout_ms
+			.timeout_ms  = timeout_ms,
 			}
 		}
 	};
@@ -492,7 +526,7 @@ retry:
 					sizeof (struct ib_sa_mad),
 					DMA_TO_DEVICE);
 	gather_list.length = sizeof (struct ib_sa_mad);
-	gather_list.lkey   = port->mr->lkey;
+	gather_list.lkey   = port->agent->mr->lkey;
 	pci_unmap_addr_set(query, mapping, gather_list.addr);

 	ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
@@ -566,7 +600,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 		       struct ib_sa_path_rec *rec,
 		       ib_sa_comp_mask comp_mask,
-		       int timeout_ms, int gfp_mask,
+		       int timeout_ms, unsigned int __nocast gfp_mask,
 		       void (*callback)(int status,
 					struct ib_sa_path_rec *resp,
 					void *context),
@@ -616,6 +650,114 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 }
 EXPORT_SYMBOL(ib_sa_path_rec_get);

+static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
+				       int status,
+				       struct ib_sa_mad *mad)
+{
+	struct ib_sa_service_query *query =
+		container_of(sa_query, struct ib_sa_service_query, sa_query);
+
+	if (mad) {
+		struct ib_sa_service_rec rec;
+
+		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
+			  mad->data, &rec);
+		query->callback(status, &rec, query->context);
+	} else
+		query->callback(status, NULL, query->context);
+}
+
+static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
+{
+	kfree(sa_query->mad);
+	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
+}
+
+/**
+ * ib_sa_service_rec_query - Start Service Record operation
+ * @device:device to send request on
+ * @port_num: port number to send request on
+ * @method:SA method - should be get, set, or delete
+ * @rec:Service Record to send in request
+ * @comp_mask:component mask to send in request
+ * @timeout_ms:time to wait for response
+ * @gfp_mask:GFP mask to use for internal allocations
+ * @callback:function called when request completes, times out or is
+ * canceled
+ * @context:opaque user context passed to callback
+ * @sa_query:request context, used to cancel request
+ *
+ * Send a Service Record set/get/delete to the SA to register,
+ * unregister or query a service record.
+ * The callback function will be called when the request completes (or
+ * fails); status is 0 for a successful response, -EINTR if the query
+ * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
+ * occurred sending the query. The resp parameter of the callback is
+ * only valid if status is 0.
+ *
+ * If the return value of ib_sa_service_rec_query() is negative, it is an
+ * error code. Otherwise it is a request ID that can be used to cancel
+ * the query.
+ */
+int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
+			    struct ib_sa_service_rec *rec,
+			    ib_sa_comp_mask comp_mask,
+			    int timeout_ms, unsigned int __nocast gfp_mask,
+			    void (*callback)(int status,
+					     struct ib_sa_service_rec *resp,
+					     void *context),
+			    void *context,
+			    struct ib_sa_query **sa_query)
+{
+	struct ib_sa_service_query *query;
+	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+	struct ib_sa_port *port = &sa_dev->port[port_num - sa_dev->start_port];
+	struct ib_mad_agent *agent = port->agent;
+	int ret;
+
+	if (method != IB_MGMT_METHOD_GET &&
+	    method != IB_MGMT_METHOD_SET &&
+	    method != IB_SA_METHOD_DELETE)
+		return -EINVAL;
+
+	query = kmalloc(sizeof *query, gfp_mask);
+	if (!query)
+		return -ENOMEM;
+	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
+	if (!query->sa_query.mad) {
+		kfree(query);
+		return -ENOMEM;
+	}
+
+	query->callback = callback;
+	query->context  = context;
+
+	init_mad(query->sa_query.mad, agent);
+
+	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
+	query->sa_query.release  = ib_sa_service_rec_release;
+	query->sa_query.port     = port;
+	query->sa_query.mad->mad_hdr.method = method;
+	query->sa_query.mad->mad_hdr.attr_id =
+		cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+
+	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
+		rec, query->sa_query.mad->data);
+
+	*sa_query = &query->sa_query;
+
+	ret = send_mad(&query->sa_query, timeout_ms);
+	if (ret < 0) {
+		*sa_query = NULL;
+		kfree(query->sa_query.mad);
+		kfree(query);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ib_sa_service_rec_query);
+
 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
 					int status,
 					struct ib_sa_mad *mad)
@@ -643,7 +785,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 			     u8 method,
 			     struct ib_sa_mcmember_rec *rec,
 			     ib_sa_comp_mask comp_mask,
-			     int timeout_ms, int gfp_mask,
+			     int timeout_ms, unsigned int __nocast gfp_mask,
 			     void (*callback)(int status,
 					      struct ib_sa_mcmember_rec *resp,
 					      void *context),
@@ -780,7 +922,6 @@ static void ib_sa_add_one(struct ib_device *device)
 	sa_dev->end_port   = e;

 	for (i = 0; i <= e - s; ++i) {
-		sa_dev->port[i].mr     = NULL;
 		sa_dev->port[i].sm_ah  = NULL;
 		sa_dev->port[i].port_num = i + s;
 		spin_lock_init(&sa_dev->port[i].ah_lock);
@@ -792,13 +933,6 @@ static void ib_sa_add_one(struct ib_device *device)
 		if (IS_ERR(sa_dev->port[i].agent))
 			goto err;

-		sa_dev->port[i].mr = ib_get_dma_mr(sa_dev->port[i].agent->qp->pd,
-						   IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(sa_dev->port[i].mr)) {
-			ib_unregister_mad_agent(sa_dev->port[i].agent);
-			goto err;
-		}
-
 		INIT_WORK(&sa_dev->port[i].update_task,
 			  update_sm_ah, &sa_dev->port[i]);
 	}
@@ -822,10 +956,8 @@ static void ib_sa_add_one(struct ib_device *device)
 	return;

 err:
-	while (--i >= 0) {
-		ib_dereg_mr(sa_dev->port[i].mr);
+	while (--i >= 0)
 		ib_unregister_mad_agent(sa_dev->port[i].agent);
-	}

 	kfree(sa_dev);

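
For reference, a minimal sketch of how an in-kernel client might use the new ib_sa_service_rec_query() entry point added above. The include paths and the my_* names are placeholders (assumptions, not part of the patch); the call signature and callback shape are taken from the diff itself:

#include <linux/kernel.h>
#include <ib_sa.h>	/* assumed path; the SA header lives in-tree here */

static struct ib_sa_query *my_sa_query;		/* hypothetical */

static void my_service_cb(int status, struct ib_sa_service_rec *resp,
			  void *context)
{
	/* status: 0 on success, -EINTR, -ETIMEDOUT, or -EIO */
	if (!status && resp)
		printk(KERN_INFO "service record resolved\n");
}

static int my_register_service(struct ib_device *device, u8 port_num,
			       struct ib_sa_service_rec *rec,
			       ib_sa_comp_mask mask)
{
	int id;

	/* IB_MGMT_METHOD_SET registers the record with the SA;
	 * IB_SA_METHOD_DELETE would unregister it */
	id = ib_sa_service_rec_query(device, port_num, IB_MGMT_METHOD_SET,
				     rec, mask, 1000 /* ms */, GFP_KERNEL,
				     my_service_cb, NULL, &my_sa_query);
	return id < 0 ? id : 0;	/* non-negative id can cancel the query */
}
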
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
new file mode 100644
index 000000000000..546ec61c407f
--- /dev/null
+++ b/drivers/infiniband/core/ucm.c
@@ -0,0 +1,1393 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
33 */
34#include <linux/init.h>
35#include <linux/fs.h>
36#include <linux/module.h>
37#include <linux/device.h>
38#include <linux/err.h>
39#include <linux/poll.h>
40#include <linux/file.h>
41#include <linux/mount.h>
42#include <linux/cdev.h>
43
44#include <asm/uaccess.h>
45
46#include "ucm.h"
47
48MODULE_AUTHOR("Libor Michalek");
49MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
50MODULE_LICENSE("Dual BSD/GPL");
51
52enum {
53 IB_UCM_MAJOR = 231,
54 IB_UCM_MINOR = 255
55};
56
57#define IB_UCM_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_MINOR)
58
59static struct semaphore ctx_id_mutex;
60static struct idr ctx_id_table;
61static int ctx_id_rover = 0;
62
63static struct ib_ucm_context *ib_ucm_ctx_get(int id)
64{
65 struct ib_ucm_context *ctx;
66
67 down(&ctx_id_mutex);
68 ctx = idr_find(&ctx_id_table, id);
69 if (ctx)
70 ctx->ref++;
71 up(&ctx_id_mutex);
72
73 return ctx;
74}
75
76static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
77{
78 struct ib_ucm_event *uevent;
79
80 down(&ctx_id_mutex);
81
82 ctx->ref--;
83 if (!ctx->ref)
84 idr_remove(&ctx_id_table, ctx->id);
85
86 up(&ctx_id_mutex);
87
88 if (ctx->ref)
89 return;
90
91 down(&ctx->file->mutex);
92
93 list_del(&ctx->file_list);
94 while (!list_empty(&ctx->events)) {
95
96 uevent = list_entry(ctx->events.next,
97 struct ib_ucm_event, ctx_list);
98 list_del(&uevent->file_list);
99 list_del(&uevent->ctx_list);
100
101 /* clear incoming connections. */
102 if (uevent->cm_id)
103 ib_destroy_cm_id(uevent->cm_id);
104
105 kfree(uevent);
106 }
107
108 up(&ctx->file->mutex);
109
110 printk(KERN_ERR "UCM: Destroyed CM ID <%d>\n", ctx->id);
111
112 ib_destroy_cm_id(ctx->cm_id);
113 kfree(ctx);
114}
115
116static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
117{
118 struct ib_ucm_context *ctx;
119 int result;
120
121 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
122 if (!ctx)
123 return NULL;
124
125 ctx->ref = 1; /* user reference */
126 ctx->file = file;
127
128 INIT_LIST_HEAD(&ctx->events);
129 init_MUTEX(&ctx->mutex);
130
131 list_add_tail(&ctx->file_list, &file->ctxs);
132
133 ctx_id_rover = (ctx_id_rover + 1) & INT_MAX;
134retry:
135 result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
136 if (!result)
137 goto error;
138
139 down(&ctx_id_mutex);
140 result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id);
141 up(&ctx_id_mutex);
142
143 if (result == -EAGAIN)
144 goto retry;
145 if (result)
146 goto error;
147
148 printk(KERN_ERR "UCM: Allocated CM ID <%d>\n", ctx->id);
149
150 return ctx;
151error:
152 list_del(&ctx->file_list);
153 kfree(ctx);
154
155 return NULL;
156}
157/*
158 * Event portion of the API, handle CM events
159 * and allow event polling.
160 */
161static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
162 struct ib_sa_path_rec *kpath)
163{
164 if (!kpath || !upath)
165 return;
166
167 memcpy(upath->dgid, kpath->dgid.raw, sizeof(union ib_gid));
168 memcpy(upath->sgid, kpath->sgid.raw, sizeof(union ib_gid));
169
170 upath->dlid = kpath->dlid;
171 upath->slid = kpath->slid;
172 upath->raw_traffic = kpath->raw_traffic;
173 upath->flow_label = kpath->flow_label;
174 upath->hop_limit = kpath->hop_limit;
175 upath->traffic_class = kpath->traffic_class;
176 upath->reversible = kpath->reversible;
177 upath->numb_path = kpath->numb_path;
178 upath->pkey = kpath->pkey;
179 upath->sl = kpath->sl;
180 upath->mtu_selector = kpath->mtu_selector;
181 upath->mtu = kpath->mtu;
182 upath->rate_selector = kpath->rate_selector;
183 upath->rate = kpath->rate;
184 upath->packet_life_time = kpath->packet_life_time;
185 upath->preference = kpath->preference;
186
187 upath->packet_life_time_selector =
188 kpath->packet_life_time_selector;
189}
190
191static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
192 struct ib_cm_req_event_param *kreq)
193{
194 ureq->listen_id = (long)kreq->listen_id->context;
195
196 ureq->remote_ca_guid = kreq->remote_ca_guid;
197 ureq->remote_qkey = kreq->remote_qkey;
198 ureq->remote_qpn = kreq->remote_qpn;
199 ureq->qp_type = kreq->qp_type;
200 ureq->starting_psn = kreq->starting_psn;
201 ureq->responder_resources = kreq->responder_resources;
202 ureq->initiator_depth = kreq->initiator_depth;
203 ureq->local_cm_response_timeout = kreq->local_cm_response_timeout;
204 ureq->flow_control = kreq->flow_control;
205 ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
206 ureq->retry_count = kreq->retry_count;
207 ureq->rnr_retry_count = kreq->rnr_retry_count;
208 ureq->srq = kreq->srq;
209
210 ib_ucm_event_path_get(&ureq->primary_path, kreq->primary_path);
211 ib_ucm_event_path_get(&ureq->alternate_path, kreq->alternate_path);
212}
213
214static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
215 struct ib_cm_rep_event_param *krep)
216{
217 urep->remote_ca_guid = krep->remote_ca_guid;
218 urep->remote_qkey = krep->remote_qkey;
219 urep->remote_qpn = krep->remote_qpn;
220 urep->starting_psn = krep->starting_psn;
221 urep->responder_resources = krep->responder_resources;
222 urep->initiator_depth = krep->initiator_depth;
223 urep->target_ack_delay = krep->target_ack_delay;
224 urep->failover_accepted = krep->failover_accepted;
225 urep->flow_control = krep->flow_control;
226 urep->rnr_retry_count = krep->rnr_retry_count;
227 urep->srq = krep->srq;
228}
229
230static void ib_ucm_event_rej_get(struct ib_ucm_rej_event_resp *urej,
231 struct ib_cm_rej_event_param *krej)
232{
233 urej->reason = krej->reason;
234}
235
236static void ib_ucm_event_mra_get(struct ib_ucm_mra_event_resp *umra,
237 struct ib_cm_mra_event_param *kmra)
238{
239 umra->timeout = kmra->service_timeout;
240}
241
242static void ib_ucm_event_lap_get(struct ib_ucm_lap_event_resp *ulap,
243 struct ib_cm_lap_event_param *klap)
244{
245 ib_ucm_event_path_get(&ulap->path, klap->alternate_path);
246}
247
248static void ib_ucm_event_apr_get(struct ib_ucm_apr_event_resp *uapr,
249 struct ib_cm_apr_event_param *kapr)
250{
251 uapr->status = kapr->ap_status;
252}
253
254static void ib_ucm_event_sidr_req_get(struct ib_ucm_sidr_req_event_resp *ureq,
255 struct ib_cm_sidr_req_event_param *kreq)
256{
257 ureq->listen_id = (long)kreq->listen_id->context;
258 ureq->pkey = kreq->pkey;
259}
260
261static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
262 struct ib_cm_sidr_rep_event_param *krep)
263{
264 urep->status = krep->status;
265 urep->qkey = krep->qkey;
266 urep->qpn = krep->qpn;
267};
268
269static int ib_ucm_event_process(struct ib_cm_event *evt,
270 struct ib_ucm_event *uvt)
271{
272 void *info = NULL;
273 int result;
274
275 switch (evt->event) {
276 case IB_CM_REQ_RECEIVED:
277 ib_ucm_event_req_get(&uvt->resp.u.req_resp,
278 &evt->param.req_rcvd);
279 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
280 uvt->resp.present |= (evt->param.req_rcvd.primary_path ?
281 IB_UCM_PRES_PRIMARY : 0);
282 uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
283 IB_UCM_PRES_ALTERNATE : 0);
284 break;
285 case IB_CM_REP_RECEIVED:
286 ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
287 &evt->param.rep_rcvd);
288 uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
289
290 break;
291 case IB_CM_RTU_RECEIVED:
292 uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
293 uvt->resp.u.send_status = evt->param.send_status;
294
295 break;
296 case IB_CM_DREQ_RECEIVED:
297 uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
298 uvt->resp.u.send_status = evt->param.send_status;
299
300 break;
301 case IB_CM_DREP_RECEIVED:
302 uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
303 uvt->resp.u.send_status = evt->param.send_status;
304
305 break;
306 case IB_CM_MRA_RECEIVED:
307 ib_ucm_event_mra_get(&uvt->resp.u.mra_resp,
308 &evt->param.mra_rcvd);
309 uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
310
311 break;
312 case IB_CM_REJ_RECEIVED:
313 ib_ucm_event_rej_get(&uvt->resp.u.rej_resp,
314 &evt->param.rej_rcvd);
315 uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
316 uvt->info_len = evt->param.rej_rcvd.ari_length;
317 info = evt->param.rej_rcvd.ari;
318
319 break;
320 case IB_CM_LAP_RECEIVED:
321 ib_ucm_event_lap_get(&uvt->resp.u.lap_resp,
322 &evt->param.lap_rcvd);
323 uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
324 uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ?
325 IB_UCM_PRES_ALTERNATE : 0);
326 break;
327 case IB_CM_APR_RECEIVED:
328 ib_ucm_event_apr_get(&uvt->resp.u.apr_resp,
329 &evt->param.apr_rcvd);
330 uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
331 uvt->info_len = evt->param.apr_rcvd.info_len;
332 info = evt->param.apr_rcvd.apr_info;
333
334 break;
335 case IB_CM_SIDR_REQ_RECEIVED:
336 ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp,
337 &evt->param.sidr_req_rcvd);
338 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
339
340 break;
341 case IB_CM_SIDR_REP_RECEIVED:
342 ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
343 &evt->param.sidr_rep_rcvd);
344 uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
345 uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
346 info = evt->param.sidr_rep_rcvd.info;
347
348 break;
349 default:
350 uvt->resp.u.send_status = evt->param.send_status;
351
352 break;
353 }
354
355 if (uvt->data_len && evt->private_data) {
356
357 uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
358 if (!uvt->data) {
359 result = -ENOMEM;
360 goto error;
361 }
362
363 memcpy(uvt->data, evt->private_data, uvt->data_len);
364 uvt->resp.present |= IB_UCM_PRES_DATA;
365 }
366
367 if (uvt->info_len && info) {
368
369 uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
370 if (!uvt->info) {
371 result = -ENOMEM;
372 goto error;
373 }
374
375 memcpy(uvt->info, info, uvt->info_len);
376 uvt->resp.present |= IB_UCM_PRES_INFO;
377 }
378
379 return 0;
380error:
381 if (uvt->info)
382 kfree(uvt->info);
383 if (uvt->data)
384 kfree(uvt->data);
385 return result;
386}
387
388static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
389 struct ib_cm_event *event)
390{
391 struct ib_ucm_event *uevent;
392 struct ib_ucm_context *ctx;
393 int result = 0;
394 int id;
395 /*
396 * lookup correct context based on event type.
397 */
398 switch (event->event) {
399 case IB_CM_REQ_RECEIVED:
400 id = (long)event->param.req_rcvd.listen_id->context;
401 break;
402 case IB_CM_SIDR_REQ_RECEIVED:
403 id = (long)event->param.sidr_req_rcvd.listen_id->context;
404 break;
405 default:
406 id = (long)cm_id->context;
407 break;
408 }
409
410 printk(KERN_ERR "UCM: Event. CM ID <%d> event <%d>\n",
411 id, event->event);
412
413 ctx = ib_ucm_ctx_get(id);
414 if (!ctx)
415 return -ENOENT;
416
417 if (event->event == IB_CM_REQ_RECEIVED ||
418 event->event == IB_CM_SIDR_REQ_RECEIVED)
419 id = IB_UCM_CM_ID_INVALID;
420
421 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
422 if (!uevent) {
423 result = -ENOMEM;
424 goto done;
425 }
426
427 memset(uevent, 0, sizeof(*uevent));
428
429 uevent->resp.id = id;
430 uevent->resp.event = event->event;
431
432 result = ib_ucm_event_process(event, uevent);
433 if (result)
434 goto done;
435
436 uevent->ctx = ctx;
437 uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED ||
438 event->event == IB_CM_SIDR_REQ_RECEIVED ) ?
439 cm_id : NULL);
440
441 down(&ctx->file->mutex);
442
443 list_add_tail(&uevent->file_list, &ctx->file->events);
444 list_add_tail(&uevent->ctx_list, &ctx->events);
445
446 wake_up_interruptible(&ctx->file->poll_wait);
447
448 up(&ctx->file->mutex);
449done:
450 ctx->error = result;
451 ib_ucm_ctx_put(ctx); /* func reference */
452 return result;
453}
454
455static ssize_t ib_ucm_event(struct ib_ucm_file *file,
456 const char __user *inbuf,
457 int in_len, int out_len)
458{
459 struct ib_ucm_context *ctx;
460 struct ib_ucm_event_get cmd;
461 struct ib_ucm_event *uevent = NULL;
462 int result = 0;
463 DEFINE_WAIT(wait);
464
465 if (out_len < sizeof(struct ib_ucm_event_resp))
466 return -ENOSPC;
467
468 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
469 return -EFAULT;
470 /*
471 * wait
472 */
473 down(&file->mutex);
474
475 while (list_empty(&file->events)) {
476
477 if (file->filp->f_flags & O_NONBLOCK) {
478 result = -EAGAIN;
479 break;
480 }
481
482 if (signal_pending(current)) {
483 result = -ERESTARTSYS;
484 break;
485 }
486
487 prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
488
489 up(&file->mutex);
490 schedule();
491 down(&file->mutex);
492
493 finish_wait(&file->poll_wait, &wait);
494 }
495
496 if (result)
497 goto done;
498
499 uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
500
501 if (!uevent->cm_id)
502 goto user;
503
504 ctx = ib_ucm_ctx_alloc(file);
505 if (!ctx) {
506 result = -ENOMEM;
507 goto done;
508 }
509
510 ctx->cm_id = uevent->cm_id;
511 ctx->cm_id->cm_handler = ib_ucm_event_handler;
512 ctx->cm_id->context = (void *)(unsigned long)ctx->id;
513
514 uevent->resp.id = ctx->id;
515
516user:
517 if (copy_to_user((void __user *)(unsigned long)cmd.response,
518 &uevent->resp, sizeof(uevent->resp))) {
519 result = -EFAULT;
520 goto done;
521 }
522
523 if (uevent->data) {
524
525 if (cmd.data_len < uevent->data_len) {
526 result = -ENOMEM;
527 goto done;
528 }
529
530 if (copy_to_user((void __user *)(unsigned long)cmd.data,
531 uevent->data, uevent->data_len)) {
532 result = -EFAULT;
533 goto done;
534 }
535 }
536
537 if (uevent->info) {
538
539 if (cmd.info_len < uevent->info_len) {
540 result = -ENOMEM;
541 goto done;
542 }
543
544 if (copy_to_user((void __user *)(unsigned long)cmd.info,
545 uevent->info, uevent->info_len)) {
546 result = -EFAULT;
547 goto done;
548 }
549 }
550
551 list_del(&uevent->file_list);
552 list_del(&uevent->ctx_list);
553
554 if (uevent->data)
555 kfree(uevent->data);
556 if (uevent->info)
557 kfree(uevent->info);
558 kfree(uevent);
559done:
560 up(&file->mutex);
561 return result;
562}
563
564
565static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
566 const char __user *inbuf,
567 int in_len, int out_len)
568{
569 struct ib_ucm_create_id cmd;
570 struct ib_ucm_create_id_resp resp;
571 struct ib_ucm_context *ctx;
572 int result;
573
574 if (out_len < sizeof(resp))
575 return -ENOSPC;
576
577 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
578 return -EFAULT;
579
580 ctx = ib_ucm_ctx_alloc(file);
581 if (!ctx)
582 return -ENOMEM;
583
584 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler,
585 (void *)(unsigned long)ctx->id);
586 if (!ctx->cm_id) {
587 result = -ENOMEM;
588 goto err_cm;
589 }
590
591 resp.id = ctx->id;
592 if (copy_to_user((void __user *)(unsigned long)cmd.response,
593 &resp, sizeof(resp))) {
594 result = -EFAULT;
595 goto err_ret;
596 }
597
598 return 0;
599err_ret:
600 ib_destroy_cm_id(ctx->cm_id);
601err_cm:
602 ib_ucm_ctx_put(ctx); /* user reference */
603
604 return result;
605}
606
607static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
608 const char __user *inbuf,
609 int in_len, int out_len)
610{
611 struct ib_ucm_destroy_id cmd;
612 struct ib_ucm_context *ctx;
613
614 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
615 return -EFAULT;
616
617 ctx = ib_ucm_ctx_get(cmd.id);
618 if (!ctx)
619 return -ENOENT;
620
621 ib_ucm_ctx_put(ctx); /* user reference */
622 ib_ucm_ctx_put(ctx); /* func reference */
623
624 return 0;
625}
626
627static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
628 const char __user *inbuf,
629 int in_len, int out_len)
630{
631 struct ib_ucm_attr_id_resp resp;
632 struct ib_ucm_attr_id cmd;
633 struct ib_ucm_context *ctx;
634 int result = 0;
635
636 if (out_len < sizeof(resp))
637 return -ENOSPC;
638
639 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
640 return -EFAULT;
641
642 ctx = ib_ucm_ctx_get(cmd.id);
643 if (!ctx)
644 return -ENOENT;
645
646 down(&ctx->file->mutex);
647 if (ctx->file != file) {
648 result = -EINVAL;
649 goto done;
650 }
651
652 resp.service_id = ctx->cm_id->service_id;
653 resp.service_mask = ctx->cm_id->service_mask;
654 resp.local_id = ctx->cm_id->local_id;
655 resp.remote_id = ctx->cm_id->remote_id;
656
657 if (copy_to_user((void __user *)(unsigned long)cmd.response,
658 &resp, sizeof(resp)))
659 result = -EFAULT;
660
661done:
662 up(&ctx->file->mutex);
663 ib_ucm_ctx_put(ctx); /* func reference */
664 return result;
665}
666
667static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
668 const char __user *inbuf,
669 int in_len, int out_len)
670{
671 struct ib_ucm_listen cmd;
672 struct ib_ucm_context *ctx;
673 int result;
674
675 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
676 return -EFAULT;
677
678 ctx = ib_ucm_ctx_get(cmd.id);
679 if (!ctx)
680 return -ENOENT;
681
682 down(&ctx->file->mutex);
683 if (ctx->file != file)
684 result = -EINVAL;
685 else
686 result = ib_cm_listen(ctx->cm_id, cmd.service_id,
687 cmd.service_mask);
688
689 up(&ctx->file->mutex);
690 ib_ucm_ctx_put(ctx); /* func reference */
691 return result;
692}
693
694static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
695 const char __user *inbuf,
696 int in_len, int out_len)
697{
698 struct ib_ucm_establish cmd;
699 struct ib_ucm_context *ctx;
700 int result;
701
702 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
703 return -EFAULT;
704
705 ctx = ib_ucm_ctx_get(cmd.id);
706 if (!ctx)
707 return -ENOENT;
708
709 down(&ctx->file->mutex);
710 if (ctx->file != file)
711 result = -EINVAL;
712 else
713 result = ib_cm_establish(ctx->cm_id);
714
715 up(&ctx->file->mutex);
716 ib_ucm_ctx_put(ctx); /* func reference */
717 return result;
718}
719
720static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
721{
722 void *data;
723
724 *dest = NULL;
725
726 if (!len)
727 return 0;
728
729 data = kmalloc(len, GFP_KERNEL);
730 if (!data)
731 return -ENOMEM;
732
733 if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
734 kfree(data);
735 return -EFAULT;
736 }
737
738 *dest = data;
739 return 0;
740}
741
742static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
743{
744 struct ib_ucm_path_rec ucm_path;
745 struct ib_sa_path_rec *sa_path;
746
747 *path = NULL;
748
749 if (!src)
750 return 0;
751
752 sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL);
753 if (!sa_path)
754 return -ENOMEM;
755
756 if (copy_from_user(&ucm_path, (void __user *)(unsigned long)src,
757 sizeof(ucm_path))) {
758
759 kfree(sa_path);
760 return -EFAULT;
761 }
762
763 memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid));
764 memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid));
765
766 sa_path->dlid = ucm_path.dlid;
767 sa_path->slid = ucm_path.slid;
768 sa_path->raw_traffic = ucm_path.raw_traffic;
769 sa_path->flow_label = ucm_path.flow_label;
770 sa_path->hop_limit = ucm_path.hop_limit;
771 sa_path->traffic_class = ucm_path.traffic_class;
772 sa_path->reversible = ucm_path.reversible;
773 sa_path->numb_path = ucm_path.numb_path;
774 sa_path->pkey = ucm_path.pkey;
775 sa_path->sl = ucm_path.sl;
776 sa_path->mtu_selector = ucm_path.mtu_selector;
777 sa_path->mtu = ucm_path.mtu;
778 sa_path->rate_selector = ucm_path.rate_selector;
779 sa_path->rate = ucm_path.rate;
780 sa_path->packet_life_time = ucm_path.packet_life_time;
781 sa_path->preference = ucm_path.preference;
782
783 sa_path->packet_life_time_selector =
784 ucm_path.packet_life_time_selector;
785
786 *path = sa_path;
787 return 0;
788}
789
790static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
791 const char __user *inbuf,
792 int in_len, int out_len)
793{
794 struct ib_cm_req_param param;
795 struct ib_ucm_context *ctx;
796 struct ib_ucm_req cmd;
797 int result;
798
799 param.private_data = NULL;
800 param.primary_path = NULL;
801 param.alternate_path = NULL;
802
803 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
804 return -EFAULT;
805
806 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
807 if (result)
808 goto done;
809
810 result = ib_ucm_path_get(&param.primary_path, cmd.primary_path);
811 if (result)
812 goto done;
813
814 result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path);
815 if (result)
816 goto done;
817
818 param.private_data_len = cmd.len;
819 param.service_id = cmd.sid;
820 param.qp_num = cmd.qpn;
821 param.qp_type = cmd.qp_type;
822 param.starting_psn = cmd.psn;
823 param.peer_to_peer = cmd.peer_to_peer;
824 param.responder_resources = cmd.responder_resources;
825 param.initiator_depth = cmd.initiator_depth;
826 param.remote_cm_response_timeout = cmd.remote_cm_response_timeout;
827 param.flow_control = cmd.flow_control;
828 param.local_cm_response_timeout = cmd.local_cm_response_timeout;
829 param.retry_count = cmd.retry_count;
830 param.rnr_retry_count = cmd.rnr_retry_count;
831 param.max_cm_retries = cmd.max_cm_retries;
832 param.srq = cmd.srq;
833
834 ctx = ib_ucm_ctx_get(cmd.id);
835 if (!ctx) {
836 result = -ENOENT;
837 goto done;
838 }
839
840 down(&ctx->file->mutex);
841 if (ctx->file != file)
842 result = -EINVAL;
843 else
844 result = ib_send_cm_req(ctx->cm_id, &param);
845
846 up(&ctx->file->mutex);
847 ib_ucm_ctx_put(ctx); /* func reference */
848done:
849 if (param.private_data)
850 kfree(param.private_data);
851 if (param.primary_path)
852 kfree(param.primary_path);
853 if (param.alternate_path)
854 kfree(param.alternate_path);
855
856 return result;
857}
858
859static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
860 const char __user *inbuf,
861 int in_len, int out_len)
862{
863 struct ib_cm_rep_param param;
864 struct ib_ucm_context *ctx;
865 struct ib_ucm_rep cmd;
866 int result;
867
868 param.private_data = NULL;
869
870 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
871 return -EFAULT;
872
873 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
874 if (result)
875 return result;
876
877 param.qp_num = cmd.qpn;
878 param.starting_psn = cmd.psn;
879 param.private_data_len = cmd.len;
880 param.responder_resources = cmd.responder_resources;
881 param.initiator_depth = cmd.initiator_depth;
882 param.target_ack_delay = cmd.target_ack_delay;
883 param.failover_accepted = cmd.failover_accepted;
884 param.flow_control = cmd.flow_control;
885 param.rnr_retry_count = cmd.rnr_retry_count;
886 param.srq = cmd.srq;
887
888 ctx = ib_ucm_ctx_get(cmd.id);
889 if (!ctx) {
890 result = -ENOENT;
891 goto done;
892 }
893
894 down(&ctx->file->mutex);
895 if (ctx->file != file)
896 result = -EINVAL;
897 else
898 result = ib_send_cm_rep(ctx->cm_id, &param);
899
900 up(&ctx->file->mutex);
901 ib_ucm_ctx_put(ctx); /* func reference */
902done:
903 if (param.private_data)
904 kfree(param.private_data);
905
906 return result;
907}
908
909static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
910 const char __user *inbuf, int in_len,
911 int (*func)(struct ib_cm_id *cm_id,
912 const void *private_data,
913 u8 private_data_len))
914{
915 struct ib_ucm_private_data cmd;
916 struct ib_ucm_context *ctx;
917 const void *private_data = NULL;
918 int result;
919
920 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
921 return -EFAULT;
922
923 result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len);
924 if (result)
925 return result;
926
927 ctx = ib_ucm_ctx_get(cmd.id);
928 if (!ctx) {
929 result = -ENOENT;
930 goto done;
931 }
932
933 down(&ctx->file->mutex);
934 if (ctx->file != file)
935 result = -EINVAL;
936 else
937 result = func(ctx->cm_id, private_data, cmd.len);
938
939 up(&ctx->file->mutex);
940 ib_ucm_ctx_put(ctx); /* func reference */
941done:
942 if (private_data)
943 kfree(private_data);
944
945 return result;
946}
947
948static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file,
949 const char __user *inbuf,
950 int in_len, int out_len)
951{
952 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu);
953}
954
955static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file,
956 const char __user *inbuf,
957 int in_len, int out_len)
958{
959 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq);
960}
961
962static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file,
963 const char __user *inbuf,
964 int in_len, int out_len)
965{
966 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep);
967}
968
969static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
970 const char __user *inbuf, int in_len,
971 int (*func)(struct ib_cm_id *cm_id,
972 int status,
973 const void *info,
974 u8 info_len,
975 const void *data,
976 u8 data_len))
977{
978 struct ib_ucm_context *ctx;
979 struct ib_ucm_info cmd;
980 const void *data = NULL;
981 const void *info = NULL;
982 int result;
983
984 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
985 return -EFAULT;
986
987 result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len);
988 if (result)
989 goto done;
990
991 result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len);
992 if (result)
993 goto done;
994
995 ctx = ib_ucm_ctx_get(cmd.id);
996 if (!ctx) {
997 result = -ENOENT;
998 goto done;
999 }
1000
1001 down(&ctx->file->mutex);
1002 if (ctx->file != file)
1003 result = -EINVAL;
1004 else
1005 result = func(ctx->cm_id, cmd.status,
1006 info, cmd.info_len,
1007 data, cmd.data_len);
1008
1009 up(&ctx->file->mutex);
1010 ib_ucm_ctx_put(ctx); /* func reference */
1011done:
1012 if (data)
1013 kfree(data);
1014 if (info)
1015 kfree(info);
1016
1017 return result;
1018}
1019
1020static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file,
1021 const char __user *inbuf,
1022 int in_len, int out_len)
1023{
1024 return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
1025}
1026
1027static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
1028 const char __user *inbuf,
1029 int in_len, int out_len)
1030{
1031 return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
1032}
1033
1034static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
1035 const char __user *inbuf,
1036 int in_len, int out_len)
1037{
1038 struct ib_ucm_context *ctx;
1039 struct ib_ucm_mra cmd;
1040 const void *data = NULL;
1041 int result;
1042
1043 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1044 return -EFAULT;
1045
1046 result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
1047 if (result)
1048 return result;
1049
1050 ctx = ib_ucm_ctx_get(cmd.id);
1051 if (!ctx) {
1052 result = -ENOENT;
1053 goto done;
1054 }
1055
1056 down(&ctx->file->mutex);
1057 if (ctx->file != file)
1058 result = -EINVAL;
1059 else
1060 result = ib_send_cm_mra(ctx->cm_id, cmd.timeout,
1061 data, cmd.len);
1062
1063 up(&ctx->file->mutex);
1064 ib_ucm_ctx_put(ctx); /* func reference */
1065done:
1066 if (data)
1067 kfree(data);
1068
1069 return result;
1070}
1071
1072static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
1073 const char __user *inbuf,
1074 int in_len, int out_len)
1075{
1076 struct ib_ucm_context *ctx;
1077 struct ib_sa_path_rec *path = NULL;
1078 struct ib_ucm_lap cmd;
1079 const void *data = NULL;
1080 int result;
1081
1082 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1083 return -EFAULT;
1084
1085 result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
1086 if (result)
1087 goto done;
1088
1089 result = ib_ucm_path_get(&path, cmd.path);
1090 if (result)
1091 goto done;
1092
1093 ctx = ib_ucm_ctx_get(cmd.id);
1094 if (!ctx) {
1095 result = -ENOENT;
1096 goto done;
1097 }
1098
1099 down(&ctx->file->mutex);
1100 if (ctx->file != file)
1101 result = -EINVAL;
1102 else
1103 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
1104
1105 up(&ctx->file->mutex);
1106 ib_ucm_ctx_put(ctx); /* func reference */
1107done:
1108 if (data)
1109 kfree(data);
1110 if (path)
1111 kfree(path);
1112
1113 return result;
1114}
1115
1116static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
1117 const char __user *inbuf,
1118 int in_len, int out_len)
1119{
1120 struct ib_cm_sidr_req_param param;
1121 struct ib_ucm_context *ctx;
1122 struct ib_ucm_sidr_req cmd;
1123 int result;
1124
1125 param.private_data = NULL;
1126 param.path = NULL;
1127
1128 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1129 return -EFAULT;
1130
1131 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
1132 if (result)
1133 goto done;
1134
1135 result = ib_ucm_path_get(&param.path, cmd.path);
1136 if (result)
1137 goto done;
1138
1139 param.private_data_len = cmd.len;
1140 param.service_id = cmd.sid;
1141 param.timeout_ms = cmd.timeout;
1142 param.max_cm_retries = cmd.max_cm_retries;
1143 param.pkey = cmd.pkey;
1144
1145 ctx = ib_ucm_ctx_get(cmd.id);
1146 if (!ctx) {
1147 result = -ENOENT;
1148 goto done;
1149 }
1150
1151 down(&ctx->file->mutex);
1152 if (ctx->file != file)
1153 result = -EINVAL;
1154 else
1155 result = ib_send_cm_sidr_req(ctx->cm_id, &param);
1156
1157 up(&ctx->file->mutex);
1158 ib_ucm_ctx_put(ctx); /* func reference */
1159done:
1160 if (param.private_data)
1161 kfree(param.private_data);
1162 if (param.path)
1163 kfree(param.path);
1164
1165 return result;
1166}
1167
1168static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
1169 const char __user *inbuf,
1170 int in_len, int out_len)
1171{
1172 struct ib_cm_sidr_rep_param param;
1173 struct ib_ucm_sidr_rep cmd;
1174 struct ib_ucm_context *ctx;
1175 int result;
1176
1177 param.info = NULL;
1178
1179 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1180 return -EFAULT;
1181
1182 result = ib_ucm_alloc_data(&param.private_data,
1183 cmd.data, cmd.data_len);
1184 if (result)
1185 goto done;
1186
1187 result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len);
1188 if (result)
1189 goto done;
1190
1191 param.qp_num = cmd.qpn;
1192 param.qkey = cmd.qkey;
1193 param.status = cmd.status;
1194 param.info_length = cmd.info_len;
1195 param.private_data_len = cmd.data_len;
1196
1197 ctx = ib_ucm_ctx_get(cmd.id);
1198 if (!ctx) {
1199 result = -ENOENT;
1200 goto done;
1201 }
1202
1203 down(&ctx->file->mutex);
1204 if (ctx->file != file)
1205 result = -EINVAL;
1206 else
1207 result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
1208
1209 up(&ctx->file->mutex);
1210 ib_ucm_ctx_put(ctx); /* func reference */
1211done:
1212 if (param.private_data)
1213 kfree(param.private_data);
1214 if (param.info)
1215 kfree(param.info);
1216
1217 return result;
1218}
1219
1220static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
1221 const char __user *inbuf,
1222 int in_len, int out_len) = {
1223 [IB_USER_CM_CMD_CREATE_ID] = ib_ucm_create_id,
1224 [IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id,
1225 [IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id,
1226 [IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
1227 [IB_USER_CM_CMD_ESTABLISH] = ib_ucm_establish,
1228 [IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
1229 [IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
1230 [IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,
1231 [IB_USER_CM_CMD_SEND_DREQ] = ib_ucm_send_dreq,
1232 [IB_USER_CM_CMD_SEND_DREP] = ib_ucm_send_drep,
1233 [IB_USER_CM_CMD_SEND_REJ] = ib_ucm_send_rej,
1234 [IB_USER_CM_CMD_SEND_MRA] = ib_ucm_send_mra,
1235 [IB_USER_CM_CMD_SEND_LAP] = ib_ucm_send_lap,
1236 [IB_USER_CM_CMD_SEND_APR] = ib_ucm_send_apr,
1237 [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
1238 [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
1239 [IB_USER_CM_CMD_EVENT] = ib_ucm_event,
1240};
1241
1242static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1243 size_t len, loff_t *pos)
1244{
1245 struct ib_ucm_file *file = filp->private_data;
1246 struct ib_ucm_cmd_hdr hdr;
1247 ssize_t result;
1248
1249 if (len < sizeof(hdr))
1250 return -EINVAL;
1251
1252 if (copy_from_user(&hdr, buf, sizeof(hdr)))
1253 return -EFAULT;
1254
1255 printk(KERN_ERR "UCM: Write. cmd <%d> in <%d> out <%d> len <%Zu>\n",
1256 hdr.cmd, hdr.in, hdr.out, len);
1257
1258 if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
1259 return -EINVAL;
1260
1261 if (hdr.in + sizeof(hdr) > len)
1262 return -EINVAL;
1263
1264 result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr),
1265 hdr.in, hdr.out);
1266 if (!result)
1267 result = len;
1268
1269 return result;
1270}
1271
1272static unsigned int ib_ucm_poll(struct file *filp,
1273 struct poll_table_struct *wait)
1274{
1275 struct ib_ucm_file *file = filp->private_data;
1276 unsigned int mask = 0;
1277
1278 poll_wait(filp, &file->poll_wait, wait);
1279
1280 if (!list_empty(&file->events))
1281 mask = POLLIN | POLLRDNORM;
1282
1283 return mask;
1284}
1285
1286static int ib_ucm_open(struct inode *inode, struct file *filp)
1287{
1288 struct ib_ucm_file *file;
1289
1290 file = kmalloc(sizeof(*file), GFP_KERNEL);
1291 if (!file)
1292 return -ENOMEM;
1293
1294 INIT_LIST_HEAD(&file->events);
1295 INIT_LIST_HEAD(&file->ctxs);
1296 init_waitqueue_head(&file->poll_wait);
1297
1298 init_MUTEX(&file->mutex);
1299
1300 filp->private_data = file;
1301 file->filp = filp;
1302
1303 printk(KERN_ERR "UCM: Created struct\n");
1304
1305 return 0;
1306}
1307
1308static int ib_ucm_close(struct inode *inode, struct file *filp)
1309{
1310 struct ib_ucm_file *file = filp->private_data;
1311 struct ib_ucm_context *ctx;
1312
1313 down(&file->mutex);
1314
1315 while (!list_empty(&file->ctxs)) {
1316
1317 ctx = list_entry(file->ctxs.next,
1318 struct ib_ucm_context, file_list);
1319
1320 up(&ctx->file->mutex);
1321 ib_ucm_ctx_put(ctx); /* user reference */
1322 down(&file->mutex);
1323 }
1324
1325 up(&file->mutex);
1326
1327 kfree(file);
1328
1329 printk(KERN_ERR "UCM: Deleted struct\n");
1330 return 0;
1331}
1332
1333static struct file_operations ib_ucm_fops = {
1334 .owner = THIS_MODULE,
1335 .open = ib_ucm_open,
1336 .release = ib_ucm_close,
1337 .write = ib_ucm_write,
1338 .poll = ib_ucm_poll,
1339};
1340
1341
1342static struct class *ib_ucm_class;
1343static struct cdev ib_ucm_cdev;
1344
1345static int __init ib_ucm_init(void)
1346{
1347 int result;
1348
1349 result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm");
1350 if (result) {
1351 printk(KERN_ERR "UCM: Error <%d> registering dev\n", result);
1352 goto err_chr;
1353 }
1354
1355 cdev_init(&ib_ucm_cdev, &ib_ucm_fops);
1356
1357 result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1);
1358 if (result) {
1359 printk(KERN_ERR "UCM: Error <%d> adding cdev\n", result);
1360 goto err_cdev;
1361 }
1362
1363 ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm");
1364 if (IS_ERR(ib_ucm_class)) {
1365 result = PTR_ERR(ib_ucm_class);
1366 printk(KERN_ERR "UCM: Error <%d> creating class\n", result);
1367 goto err_class;
1368 }
1369
1370 class_device_create(ib_ucm_class, IB_UCM_DEV, NULL, "ucm");
1371
1372 idr_init(&ctx_id_table);
1373 init_MUTEX(&ctx_id_mutex);
1374
1375 return 0;
1376err_class:
1377 cdev_del(&ib_ucm_cdev);
1378err_cdev:
1379 unregister_chrdev_region(IB_UCM_DEV, 1);
1380err_chr:
1381 return result;
1382}
1383
1384static void __exit ib_ucm_cleanup(void)
1385{
1386 class_device_destroy(ib_ucm_class, IB_UCM_DEV);
1387 class_destroy(ib_ucm_class);
1388 cdev_del(&ib_ucm_cdev);
1389 unregister_chrdev_region(IB_UCM_DEV, 1);
1390}
1391
1392module_init(ib_ucm_init);
1393module_exit(ib_ucm_cleanup);
diff --git a/drivers/infiniband/core/ucm.h b/drivers/infiniband/core/ucm.h
new file mode 100644
index 000000000000..6d36606151b2
--- /dev/null
+++ b/drivers/infiniband/core/ucm.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ucm.h 2208 2005-04-22 23:24:31Z libor $
33 */
34
35#ifndef UCM_H
36#define UCM_H
37
38#include <linux/fs.h>
39#include <linux/device.h>
40#include <linux/cdev.h>
41#include <linux/idr.h>
42
43#include <ib_cm.h>
44#include <ib_user_cm.h>
45
46#define IB_UCM_CM_ID_INVALID 0xffffffff
47
48struct ib_ucm_file {
49 struct semaphore mutex;
50 struct file *filp;
51	/*
52	 * per-file bookkeeping: active contexts and pending events
53	 */
54 struct list_head ctxs; /* list of active connections */
55 struct list_head events; /* list of pending events */
56 wait_queue_head_t poll_wait;
57};
58
59struct ib_ucm_context {
60 int id;
61 int ref;
62 int error;
63
64 struct ib_ucm_file *file;
65 struct ib_cm_id *cm_id;
66 struct semaphore mutex;
67
68 struct list_head events; /* list of pending events. */
69 struct list_head file_list; /* member in file ctx list */
70};
71
72struct ib_ucm_event {
73 struct ib_ucm_context *ctx;
74 struct list_head file_list; /* member in file event list */
75 struct list_head ctx_list; /* member in ctx event list */
76
77 struct ib_ucm_event_resp resp;
78 void *data;
79 void *info;
80 int data_len;
81 int info_len;
82 /*
83	 * new connection identifiers need to be saved until
84 * userspace can get a handle on them.
85 */
86 struct ib_cm_id *cm_id;
87};
88
89#endif /* UCM_H */
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 9d912d6877ff..2e38792df533 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +31,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: user_mad.c 1389 2004-12-27 22:56:47Z roland $
+ * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $
  */
 
 #include <linux/module.h>
@@ -94,10 +96,12 @@ struct ib_umad_file {
 };
 
 struct ib_umad_packet {
-	struct ib_user_mad mad;
 	struct ib_ah      *ah;
+	struct ib_mad_send_buf *msg;
 	struct list_head   list;
+	int                length;
 	DECLARE_PCI_UNMAP_ADDR(mapping)
+	struct ib_user_mad mad;
 };
 
 static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
@@ -114,10 +118,10 @@ static int queue_packet(struct ib_umad_file *file,
 	int ret = 1;
 
 	down_read(&file->agent_mutex);
-	for (packet->mad.id = 0;
-	     packet->mad.id < IB_UMAD_MAX_AGENTS;
-	     packet->mad.id++)
-		if (agent == file->agent[packet->mad.id]) {
+	for (packet->mad.hdr.id = 0;
+	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
+	     packet->mad.hdr.id++)
+		if (agent == file->agent[packet->mad.hdr.id]) {
 			spin_lock_irq(&file->recv_lock);
 			list_add_tail(&packet->list, &file->recv_list);
 			spin_unlock_irq(&file->recv_lock);
@@ -135,22 +139,30 @@ static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
 	struct ib_umad_file *file = agent->context;
-	struct ib_umad_packet *packet =
+	struct ib_umad_packet *timeout, *packet =
 		(void *) (unsigned long) send_wc->wr_id;
 
-	dma_unmap_single(agent->device->dma_device,
-			 pci_unmap_addr(packet, mapping),
-			 sizeof packet->mad.data,
-			 DMA_TO_DEVICE);
-	ib_destroy_ah(packet->ah);
+	ib_destroy_ah(packet->msg->send_wr.wr.ud.ah);
+	ib_free_send_mad(packet->msg);
 
 	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
-		packet->mad.status = ETIMEDOUT;
+		timeout = kmalloc(sizeof *timeout + sizeof (struct ib_mad_hdr),
+				  GFP_KERNEL);
+		if (!timeout)
+			goto out;
 
-		if (!queue_packet(file, agent, packet))
-			return;
-	}
+		memset(timeout, 0, sizeof *timeout + sizeof (struct ib_mad_hdr));
 
+		timeout->length = sizeof (struct ib_mad_hdr);
+		timeout->mad.hdr.id = packet->mad.hdr.id;
+		timeout->mad.hdr.status = ETIMEDOUT;
+		memcpy(timeout->mad.data, packet->mad.data,
+		       sizeof (struct ib_mad_hdr));
+
+		if (!queue_packet(file, agent, timeout))
+			return;
+	}
+out:
 	kfree(packet);
 }
 
@@ -159,30 +171,35 @@ static void recv_handler(struct ib_mad_agent *agent,
 {
 	struct ib_umad_file *file = agent->context;
 	struct ib_umad_packet *packet;
+	int length;
 
 	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
 		goto out;
 
-	packet = kmalloc(sizeof *packet, GFP_KERNEL);
+	length = mad_recv_wc->mad_len;
+	packet = kmalloc(sizeof *packet + length, GFP_KERNEL);
 	if (!packet)
 		goto out;
 
-	memset(packet, 0, sizeof *packet);
+	memset(packet, 0, sizeof *packet + length);
+	packet->length = length;
+
+	ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);
 
-	memcpy(packet->mad.data, mad_recv_wc->recv_buf.mad, sizeof packet->mad.data);
-	packet->mad.status = 0;
-	packet->mad.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
-	packet->mad.lid = cpu_to_be16(mad_recv_wc->wc->slid);
-	packet->mad.sl = mad_recv_wc->wc->sl;
-	packet->mad.path_bits = mad_recv_wc->wc->dlid_path_bits;
-	packet->mad.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
-	if (packet->mad.grh_present) {
+	packet->mad.hdr.status = 0;
+	packet->mad.hdr.length = length + sizeof (struct ib_user_mad);
+	packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
+	packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid);
+	packet->mad.hdr.sl = mad_recv_wc->wc->sl;
+	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
+	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
+	if (packet->mad.hdr.grh_present) {
 		/* XXX parse GRH */
-		packet->mad.gid_index = 0;
-		packet->mad.hop_limit = 0;
-		packet->mad.traffic_class = 0;
-		memset(packet->mad.gid, 0, 16);
-		packet->mad.flow_label = 0;
+		packet->mad.hdr.gid_index = 0;
+		packet->mad.hdr.hop_limit = 0;
+		packet->mad.hdr.traffic_class = 0;
+		memset(packet->mad.hdr.gid, 0, 16);
+		packet->mad.hdr.flow_label = 0;
 	}
 
 	if (queue_packet(file, agent, packet))
@@ -199,7 +216,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 	struct ib_umad_packet *packet;
 	ssize_t ret;
 
-	if (count < sizeof (struct ib_user_mad))
+	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
 		return -EINVAL;
 
 	spin_lock_irq(&file->recv_lock);
@@ -222,12 +239,25 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 
 	spin_unlock_irq(&file->recv_lock);
 
-	if (copy_to_user(buf, &packet->mad, sizeof packet->mad))
+	if (count < packet->length + sizeof (struct ib_user_mad)) {
+		/* Return length needed (and first RMPP segment) if too small */
+		if (copy_to_user(buf, &packet->mad,
+				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
+			ret = -EFAULT;
+		else
+			ret = -ENOSPC;
+	} else if (copy_to_user(buf, &packet->mad,
+				packet->length + sizeof (struct ib_user_mad)))
 		ret = -EFAULT;
 	else
-		ret = sizeof packet->mad;
-
-	kfree(packet);
+		ret = packet->length + sizeof (struct ib_user_mad);
+	if (ret < 0) {
+		/* Requeue packet */
+		spin_lock_irq(&file->recv_lock);
+		list_add(&packet->list, &file->recv_list);
+		spin_unlock_irq(&file->recv_lock);
+	} else
+		kfree(packet);
 	return ret;
 }
 
@@ -238,69 +268,57 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	struct ib_umad_packet *packet;
 	struct ib_mad_agent *agent;
 	struct ib_ah_attr ah_attr;
-	struct ib_sge gather_list;
-	struct ib_send_wr *bad_wr, wr = {
-		.opcode      = IB_WR_SEND,
-		.sg_list     = &gather_list,
-		.num_sge     = 1,
-		.send_flags  = IB_SEND_SIGNALED,
-	};
+	struct ib_send_wr *bad_wr;
+	struct ib_rmpp_mad *rmpp_mad;
 	u8 method;
 	u64 *tid;
-	int ret;
+	int ret, length, hdr_len, data_len, rmpp_hdr_size;
+	int rmpp_active = 0;
 
 	if (count < sizeof (struct ib_user_mad))
 		return -EINVAL;
 
-	packet = kmalloc(sizeof *packet, GFP_KERNEL);
+	length = count - sizeof (struct ib_user_mad);
+	packet = kmalloc(sizeof *packet + sizeof(struct ib_mad_hdr) +
+			 sizeof(struct ib_rmpp_hdr), GFP_KERNEL);
 	if (!packet)
 		return -ENOMEM;
 
-	if (copy_from_user(&packet->mad, buf, sizeof packet->mad)) {
-		kfree(packet);
-		return -EFAULT;
+	if (copy_from_user(&packet->mad, buf,
			   sizeof (struct ib_user_mad) +
+			   sizeof(struct ib_mad_hdr) +
+			   sizeof(struct ib_rmpp_hdr))) {
+		ret = -EFAULT;
+		goto err;
 	}
 
-	if (packet->mad.id < 0 || packet->mad.id >= IB_UMAD_MAX_AGENTS) {
+	if (packet->mad.hdr.id < 0 ||
+	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
 		ret = -EINVAL;
 		goto err;
 	}
 
+	packet->length = length;
+
 	down_read(&file->agent_mutex);
 
-	agent = file->agent[packet->mad.id];
+	agent = file->agent[packet->mad.hdr.id];
 	if (!agent) {
 		ret = -EINVAL;
 		goto err_up;
 	}
 
-	/*
-	 * If userspace is generating a request that will generate a
-	 * response, we need to make sure the high-order part of the
-	 * transaction ID matches the agent being used to send the
-	 * MAD.
-	 */
-	method = ((struct ib_mad_hdr *) packet->mad.data)->method;
-
-	if (!(method & IB_MGMT_METHOD_RESP) &&
-	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
-	    method != IB_MGMT_METHOD_SEND) {
-		tid = &((struct ib_mad_hdr *) packet->mad.data)->tid;
-		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
-				   (be64_to_cpup(tid) & 0xffffffff));
-	}
-
 	memset(&ah_attr, 0, sizeof ah_attr);
-	ah_attr.dlid = be16_to_cpu(packet->mad.lid);
-	ah_attr.sl = packet->mad.sl;
-	ah_attr.src_path_bits = packet->mad.path_bits;
+	ah_attr.dlid = be16_to_cpu(packet->mad.hdr.lid);
+	ah_attr.sl = packet->mad.hdr.sl;
+	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
 	ah_attr.port_num = file->port->port_num;
-	if (packet->mad.grh_present) {
+	if (packet->mad.hdr.grh_present) {
 		ah_attr.ah_flags = IB_AH_GRH;
-		memcpy(ah_attr.grh.dgid.raw, packet->mad.gid, 16);
-		ah_attr.grh.flow_label = packet->mad.flow_label;
-		ah_attr.grh.hop_limit = packet->mad.hop_limit;
-		ah_attr.grh.traffic_class = packet->mad.traffic_class;
+		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
+		ah_attr.grh.flow_label = packet->mad.hdr.flow_label;
+		ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
+		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
 	}
 
 	packet->ah = ib_create_ah(agent->qp->pd, &ah_attr);
@@ -309,34 +327,104 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err_up;
 	}
 
-	gather_list.addr = dma_map_single(agent->device->dma_device,
-					  packet->mad.data,
-					  sizeof packet->mad.data,
-					  DMA_TO_DEVICE);
-	gather_list.length = sizeof packet->mad.data;
-	gather_list.lkey = file->mr[packet->mad.id]->lkey;
-	pci_unmap_addr_set(packet, mapping, gather_list.addr);
+	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
+	if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
+		/* RMPP active */
+		if (!agent->rmpp_version) {
+			ret = -EINVAL;
+			goto err_ah;
+		}
+		/* Validate that management class can support RMPP */
+		if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
+			hdr_len = offsetof(struct ib_sa_mad, data);
+			data_len = length;
+		} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+			   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
+			hdr_len = offsetof(struct ib_vendor_mad, data);
+			data_len = length - hdr_len;
+		} else {
+			ret = -EINVAL;
+			goto err_ah;
+		}
+		rmpp_active = 1;
+	} else {
+		if (length > sizeof(struct ib_mad)) {
+			ret = -EINVAL;
+			goto err_ah;
+		}
+		hdr_len = offsetof(struct ib_mad, data);
+		data_len = length - hdr_len;
+	}
+
+	packet->msg = ib_create_send_mad(agent,
+					 be32_to_cpu(packet->mad.hdr.qpn),
+					 0, packet->ah, rmpp_active,
+					 hdr_len, data_len,
+					 GFP_KERNEL);
+	if (IS_ERR(packet->msg)) {
+		ret = PTR_ERR(packet->msg);
+		goto err_ah;
+	}
 
-	wr.wr.ud.mad_hdr = (struct ib_mad_hdr *) packet->mad.data;
-	wr.wr.ud.ah = packet->ah;
-	wr.wr.ud.remote_qpn = be32_to_cpu(packet->mad.qpn);
-	wr.wr.ud.remote_qkey = be32_to_cpu(packet->mad.qkey);
-	wr.wr.ud.timeout_ms = packet->mad.timeout_ms;
+	packet->msg->send_wr.wr.ud.timeout_ms = packet->mad.hdr.timeout_ms;
+	packet->msg->send_wr.wr.ud.retries = packet->mad.hdr.retries;
 
-	wr.wr_id = (unsigned long) packet;
+	/* Override send WR WRID initialized in ib_create_send_mad */
+	packet->msg->send_wr.wr_id = (unsigned long) packet;
 
-	ret = ib_post_send_mad(agent, &wr, &bad_wr);
-	if (ret) {
-		dma_unmap_single(agent->device->dma_device,
-				 pci_unmap_addr(packet, mapping),
-				 sizeof packet->mad.data,
-				 DMA_TO_DEVICE);
-		goto err_up;
+	if (!rmpp_active) {
+		/* Copy message from user into send buffer */
+		if (copy_from_user(packet->msg->mad,
+				   buf + sizeof(struct ib_user_mad), length)) {
+			ret = -EFAULT;
+			goto err_msg;
+		}
+	} else {
+		rmpp_hdr_size = sizeof(struct ib_mad_hdr) +
+				sizeof(struct ib_rmpp_hdr);
+
+		/* Only copy MAD headers (RMPP header in place) */
+		memcpy(packet->msg->mad, packet->mad.data,
+		       sizeof(struct ib_mad_hdr));
+
+		/* Now, copy rest of message from user into send buffer */
+		if (copy_from_user(((struct ib_rmpp_mad *) packet->msg->mad)->data,
+				   buf + sizeof (struct ib_user_mad) + rmpp_hdr_size,
+				   length - rmpp_hdr_size)) {
+			ret = -EFAULT;
+			goto err_msg;
+		}
+	}
+
+	/*
+	 * If userspace is generating a request that will generate a
+	 * response, we need to make sure the high-order part of the
+	 * transaction ID matches the agent being used to send the
+	 * MAD.
+	 */
+	method = packet->msg->mad->mad_hdr.method;
+
+	if (!(method & IB_MGMT_METHOD_RESP) &&
+	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
+	    method != IB_MGMT_METHOD_SEND) {
+		tid = &packet->msg->mad->mad_hdr.tid;
+		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
+				   (be64_to_cpup(tid) & 0xffffffff));
 	}
 
+	ret = ib_post_send_mad(agent, &packet->msg->send_wr, &bad_wr);
+	if (ret)
+		goto err_msg;
+
 	up_read(&file->agent_mutex);
 
-	return sizeof packet->mad;
+	return sizeof (struct ib_user_mad_hdr) + packet->length;
+
+err_msg:
+	ib_free_send_mad(packet->msg);
+
+err_ah:
+	ib_destroy_ah(packet->ah);
 
 err_up:
 	up_read(&file->agent_mutex);
@@ -399,7 +487,8 @@ found:
 	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
-				      0, send_handler, recv_handler, file);
+				      ureq.rmpp_version,
+				      send_handler, recv_handler, file);
 	if (IS_ERR(agent)) {
 		ret = PTR_ERR(agent);
 		goto out;
@@ -460,8 +549,8 @@ out:
 	return ret;
 }
 
-static long ib_umad_ioctl(struct file *filp,
-			  unsigned int cmd, unsigned long arg)
+static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
+			  unsigned long arg)
 {
 	switch (cmd) {
 	case IB_USER_MAD_REGISTER_AGENT:
@@ -517,14 +606,14 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 }
 
 static struct file_operations umad_fops = {
 	.owner          = THIS_MODULE,
 	.read           = ib_umad_read,
 	.write          = ib_umad_write,
 	.poll           = ib_umad_poll,
 	.unlocked_ioctl = ib_umad_ioctl,
 	.compat_ioctl   = ib_umad_ioctl,
 	.open           = ib_umad_open,
 	.release        = ib_umad_close
 };
 
 static int ib_umad_sm_open(struct inode *inode, struct file *filp)
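The reworked read path above changes the userspace contract: a buffer that is too small now receives the header plus the first segment along with an ENOSPC error, and the packet is requeued, so a client can size a second read from the length field the kernel filled in. A hedged sketch, assuming the ib_user_mad.h ABI where struct ib_user_mad is an ib_user_mad_hdr (whose length field recv_handler() sets above) followed by the MAD data; the include path is illustrative:

	/* Hypothetical userspace sketch: read one (possibly RMPP) MAD. */
	#include <errno.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <ib_user_mad.h>	/* assumed location of the ABI header */

	static struct ib_user_mad *read_mad(int fd)
	{
		size_t len = sizeof (struct ib_user_mad) + 256; /* one 256-byte MAD */
		struct ib_user_mad *umad = malloc(len);
		ssize_t n;

		if (!umad)
			return NULL;
		n = read(fd, umad, len);
		if (n < 0 && errno == ENOSPC) {
			/* The header and first segment were still copied in and
			 * the packet was requeued; hdr.length is the full size. */
			struct ib_user_mad *big;

			len = umad->hdr.length;
			big = realloc(umad, len);
			if (!big) {
				free(umad);
				return NULL;
			}
			umad = big;
			n = read(fd, umad, len);
		}
		if (n < 0) {
			free(umad);
			return NULL;
		}
		return umad;
	}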
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 2516f9646515..506fdf1f2a26 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -41,6 +41,7 @@
 #include <linux/err.h>
 
 #include <ib_verbs.h>
+#include <ib_cache.h>
 
 /* Protection domains */
 
@@ -88,6 +89,40 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 }
 EXPORT_SYMBOL(ib_create_ah);
 
+struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
+				   struct ib_grh *grh, u8 port_num)
+{
+	struct ib_ah_attr ah_attr;
+	u32 flow_class;
+	u16 gid_index;
+	int ret;
+
+	memset(&ah_attr, 0, sizeof ah_attr);
+	ah_attr.dlid = wc->slid;
+	ah_attr.sl = wc->sl;
+	ah_attr.src_path_bits = wc->dlid_path_bits;
+	ah_attr.port_num = port_num;
+
+	if (wc->wc_flags & IB_WC_GRH) {
+		ah_attr.ah_flags = IB_AH_GRH;
+		ah_attr.grh.dgid = grh->dgid;
+
+		ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
+					 &gid_index);
+		if (ret)
+			return ERR_PTR(ret);
+
+		ah_attr.grh.sgid_index = (u8) gid_index;
+		flow_class = be32_to_cpu(grh->version_tclass_flow);
+		ah_attr.grh.flow_label = flow_class & 0xFFFFF;
+		ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
+		ah_attr.grh.hop_limit = grh->hop_limit;
+	}
+
+	return ib_create_ah(pd, &ah_attr);
+}
+EXPORT_SYMBOL(ib_create_ah_from_wc);
+
 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 {
 	return ah->device->modify_ah ?
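The new helper above packages a common pattern: answering a datagram on the reverse of the path it arrived on, resolving the local GID index from the cache when a GRH is present. A hedged kernel-side sketch of how a consumer might use it; send_reply() and the elided send logic are illustrative:

	/* Hypothetical sketch: build a reply address handle from a completion. */
	static int send_reply(struct ib_pd *pd, struct ib_wc *wc,
			      struct ib_grh *grh, u8 port_num)
	{
		struct ib_ah *ah;

		ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
		if (IS_ERR(ah))
			return PTR_ERR(ah);

		/* ... post the reply send with @ah, then ib_destroy_ah(ah)
		 * once the send completes ... */
		return 0;
	}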
diff --git a/drivers/infiniband/include/ib_cm.h b/drivers/infiniband/include/ib_cm.h
new file mode 100644
index 000000000000..e5d74a730a70
--- /dev/null
+++ b/drivers/infiniband/include/ib_cm.h
@@ -0,0 +1,568 @@
1/*
2 * Copyright (c) 2004 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: ib_cm.h 2730 2005-06-28 16:43:03Z sean.hefty $
36 */
37#if !defined(IB_CM_H)
38#define IB_CM_H
39
40#include <ib_mad.h>
41#include <ib_sa.h>
42
43enum ib_cm_state {
44 IB_CM_IDLE,
45 IB_CM_LISTEN,
46 IB_CM_REQ_SENT,
47 IB_CM_REQ_RCVD,
48 IB_CM_MRA_REQ_SENT,
49 IB_CM_MRA_REQ_RCVD,
50 IB_CM_REP_SENT,
51 IB_CM_REP_RCVD,
52 IB_CM_MRA_REP_SENT,
53 IB_CM_MRA_REP_RCVD,
54 IB_CM_ESTABLISHED,
55 IB_CM_DREQ_SENT,
56 IB_CM_DREQ_RCVD,
57 IB_CM_TIMEWAIT,
58 IB_CM_SIDR_REQ_SENT,
59 IB_CM_SIDR_REQ_RCVD
60};
61
62enum ib_cm_lap_state {
63 IB_CM_LAP_IDLE,
64 IB_CM_LAP_SENT,
65 IB_CM_LAP_RCVD,
66 IB_CM_MRA_LAP_SENT,
67 IB_CM_MRA_LAP_RCVD,
68};
69
70enum ib_cm_event_type {
71 IB_CM_REQ_ERROR,
72 IB_CM_REQ_RECEIVED,
73 IB_CM_REP_ERROR,
74 IB_CM_REP_RECEIVED,
75 IB_CM_RTU_RECEIVED,
76 IB_CM_USER_ESTABLISHED,
77 IB_CM_DREQ_ERROR,
78 IB_CM_DREQ_RECEIVED,
79 IB_CM_DREP_RECEIVED,
80 IB_CM_TIMEWAIT_EXIT,
81 IB_CM_MRA_RECEIVED,
82 IB_CM_REJ_RECEIVED,
83 IB_CM_LAP_ERROR,
84 IB_CM_LAP_RECEIVED,
85 IB_CM_APR_RECEIVED,
86 IB_CM_SIDR_REQ_ERROR,
87 IB_CM_SIDR_REQ_RECEIVED,
88 IB_CM_SIDR_REP_RECEIVED
89};
90
91enum ib_cm_data_size {
92 IB_CM_REQ_PRIVATE_DATA_SIZE = 92,
93 IB_CM_MRA_PRIVATE_DATA_SIZE = 222,
94 IB_CM_REJ_PRIVATE_DATA_SIZE = 148,
95 IB_CM_REP_PRIVATE_DATA_SIZE = 196,
96 IB_CM_RTU_PRIVATE_DATA_SIZE = 224,
97 IB_CM_DREQ_PRIVATE_DATA_SIZE = 220,
98 IB_CM_DREP_PRIVATE_DATA_SIZE = 224,
99 IB_CM_REJ_ARI_LENGTH = 72,
100 IB_CM_LAP_PRIVATE_DATA_SIZE = 168,
101 IB_CM_APR_PRIVATE_DATA_SIZE = 148,
102 IB_CM_APR_INFO_LENGTH = 72,
103 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
104 IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
105 IB_CM_SIDR_REP_INFO_LENGTH = 72
106};
107
108struct ib_cm_id;
109
110struct ib_cm_req_event_param {
111 struct ib_cm_id *listen_id;
112 struct ib_device *device;
113 u8 port;
114
115 struct ib_sa_path_rec *primary_path;
116 struct ib_sa_path_rec *alternate_path;
117
118 u64 remote_ca_guid;
119 u32 remote_qkey;
120 u32 remote_qpn;
121 enum ib_qp_type qp_type;
122
123 u32 starting_psn;
124 u8 responder_resources;
125 u8 initiator_depth;
126 unsigned int local_cm_response_timeout:5;
127 unsigned int flow_control:1;
128 unsigned int remote_cm_response_timeout:5;
129 unsigned int retry_count:3;
130 unsigned int rnr_retry_count:3;
131 unsigned int srq:1;
132};
133
134struct ib_cm_rep_event_param {
135 u64 remote_ca_guid;
136 u32 remote_qkey;
137 u32 remote_qpn;
138 u32 starting_psn;
139 u8 responder_resources;
140 u8 initiator_depth;
141 unsigned int target_ack_delay:5;
142 unsigned int failover_accepted:2;
143 unsigned int flow_control:1;
144 unsigned int rnr_retry_count:3;
145 unsigned int srq:1;
146};
147
148enum ib_cm_rej_reason {
149 IB_CM_REJ_NO_QP = __constant_htons(1),
150 IB_CM_REJ_NO_EEC = __constant_htons(2),
151 IB_CM_REJ_NO_RESOURCES = __constant_htons(3),
152 IB_CM_REJ_TIMEOUT = __constant_htons(4),
153 IB_CM_REJ_UNSUPPORTED = __constant_htons(5),
154 IB_CM_REJ_INVALID_COMM_ID = __constant_htons(6),
155 IB_CM_REJ_INVALID_COMM_INSTANCE = __constant_htons(7),
156 IB_CM_REJ_INVALID_SERVICE_ID = __constant_htons(8),
157 IB_CM_REJ_INVALID_TRANSPORT_TYPE = __constant_htons(9),
158 IB_CM_REJ_STALE_CONN = __constant_htons(10),
159 IB_CM_REJ_RDC_NOT_EXIST = __constant_htons(11),
160 IB_CM_REJ_INVALID_GID = __constant_htons(12),
161 IB_CM_REJ_INVALID_LID = __constant_htons(13),
162 IB_CM_REJ_INVALID_SL = __constant_htons(14),
163 IB_CM_REJ_INVALID_TRAFFIC_CLASS = __constant_htons(15),
164 IB_CM_REJ_INVALID_HOP_LIMIT = __constant_htons(16),
165 IB_CM_REJ_INVALID_PACKET_RATE = __constant_htons(17),
166 IB_CM_REJ_INVALID_ALT_GID = __constant_htons(18),
167 IB_CM_REJ_INVALID_ALT_LID = __constant_htons(19),
168 IB_CM_REJ_INVALID_ALT_SL = __constant_htons(20),
169 IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = __constant_htons(21),
170 IB_CM_REJ_INVALID_ALT_HOP_LIMIT = __constant_htons(22),
171 IB_CM_REJ_INVALID_ALT_PACKET_RATE = __constant_htons(23),
172 IB_CM_REJ_PORT_REDIRECT = __constant_htons(24),
173 IB_CM_REJ_INVALID_MTU = __constant_htons(26),
174 IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = __constant_htons(27),
175 IB_CM_REJ_CONSUMER_DEFINED = __constant_htons(28),
176 IB_CM_REJ_INVALID_RNR_RETRY = __constant_htons(29),
177 IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = __constant_htons(30),
178 IB_CM_REJ_INVALID_CLASS_VERSION = __constant_htons(31),
179 IB_CM_REJ_INVALID_FLOW_LABEL = __constant_htons(32),
180 IB_CM_REJ_INVALID_ALT_FLOW_LABEL = __constant_htons(33)
181};
182
183struct ib_cm_rej_event_param {
184 enum ib_cm_rej_reason reason;
185 void *ari;
186 u8 ari_length;
187};
188
189struct ib_cm_mra_event_param {
190 u8 service_timeout;
191};
192
193struct ib_cm_lap_event_param {
194 struct ib_sa_path_rec *alternate_path;
195};
196
197enum ib_cm_apr_status {
198 IB_CM_APR_SUCCESS,
199 IB_CM_APR_INVALID_COMM_ID,
200 IB_CM_APR_UNSUPPORTED,
201 IB_CM_APR_REJECT,
202 IB_CM_APR_REDIRECT,
203 IB_CM_APR_IS_CURRENT,
204 IB_CM_APR_INVALID_QPN_EECN,
205 IB_CM_APR_INVALID_LID,
206 IB_CM_APR_INVALID_GID,
207 IB_CM_APR_INVALID_FLOW_LABEL,
208 IB_CM_APR_INVALID_TCLASS,
209 IB_CM_APR_INVALID_HOP_LIMIT,
210 IB_CM_APR_INVALID_PACKET_RATE,
211 IB_CM_APR_INVALID_SL
212};
213
214struct ib_cm_apr_event_param {
215 enum ib_cm_apr_status ap_status;
216 void *apr_info;
217 u8 info_len;
218};
219
220struct ib_cm_sidr_req_event_param {
221 struct ib_cm_id *listen_id;
222 struct ib_device *device;
223 u8 port;
224
225 u16 pkey;
226};
227
228enum ib_cm_sidr_status {
229 IB_SIDR_SUCCESS,
230 IB_SIDR_UNSUPPORTED,
231 IB_SIDR_REJECT,
232 IB_SIDR_NO_QP,
233 IB_SIDR_REDIRECT,
234 IB_SIDR_UNSUPPORTED_VERSION
235};
236
237struct ib_cm_sidr_rep_event_param {
238 enum ib_cm_sidr_status status;
239 u32 qkey;
240 u32 qpn;
241 void *info;
242 u8 info_len;
243
244};
245
246struct ib_cm_event {
247 enum ib_cm_event_type event;
248 union {
249 struct ib_cm_req_event_param req_rcvd;
250 struct ib_cm_rep_event_param rep_rcvd;
251 /* No data for RTU received events. */
252 struct ib_cm_rej_event_param rej_rcvd;
253 struct ib_cm_mra_event_param mra_rcvd;
254 struct ib_cm_lap_event_param lap_rcvd;
255 struct ib_cm_apr_event_param apr_rcvd;
256 /* No data for DREQ/DREP received events. */
257 struct ib_cm_sidr_req_event_param sidr_req_rcvd;
258 struct ib_cm_sidr_rep_event_param sidr_rep_rcvd;
259 enum ib_wc_status send_status;
260 } param;
261
262 void *private_data;
263};
264
265/**
266 * ib_cm_handler - User-defined callback to process communication events.
267 * @cm_id: Communication identifier associated with the reported event.
268 * @event: Information about the communication event.
269 *
270 * IB_CM_REQ_RECEIVED and IB_CM_SIDR_REQ_RECEIVED communication events
271 * generated as a result of listen requests result in the allocation of a
272 * new @cm_id. The new @cm_id is returned to the user through this callback.
273 * Clients are responsible for destroying the new @cm_id. For peer-to-peer
274 * IB_CM_REQ_RECEIVED and all other events, the returned @cm_id corresponds
275 * to a user's existing communication identifier.
276 *
277 * Users may not call ib_destroy_cm_id while in the context of this callback;
278 * however, returning a non-zero value instructs the communication manager to
279 * destroy the @cm_id after the callback completes.
280 */
281typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
282 struct ib_cm_event *event);
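To make the ownership rules above concrete, a hedged sketch of a handler; my_accept() is a stand-in for the user's REP logic, not part of this API:

	/* Hypothetical sketch of an ib_cm_handler implementation. */
	static int my_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
	{
		switch (event->event) {
		case IB_CM_REQ_RECEIVED:
			/* @cm_id is newly allocated for this connection; the
			 * client now owns it and typically answers with a REP. */
			return my_accept(cm_id, event->param.req_rcvd.primary_path);
		case IB_CM_REJ_RECEIVED:
			/* ib_destroy_cm_id() cannot be called from here, so a
			 * non-zero return asks the CM to destroy @cm_id for us. */
			return 1;
		default:
			return 0;
		}
	}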
283
284struct ib_cm_id {
285 ib_cm_handler cm_handler;
286 void *context;
287 u64 service_id;
288 u64 service_mask;
289 enum ib_cm_state state; /* internal CM/debug use */
290 enum ib_cm_lap_state lap_state; /* internal CM/debug use */
291 u32 local_id;
292 u32 remote_id;
293};
294
295/**
296 * ib_create_cm_id - Allocate a communication identifier.
297 * @cm_handler: Callback invoked to notify the user of CM events.
298 * @context: User specified context associated with the communication
299 * identifier.
300 *
301 * Communication identifiers are used to track connection states, service
302 * ID resolution requests, and listen requests.
303 */
304struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
305 void *context);
306
307/**
308 * ib_destroy_cm_id - Destroy a connection identifier.
309 * @cm_id: Connection identifier to destroy.
310 *
311 * This call blocks until the connection identifier is destroyed.
312 */
313void ib_destroy_cm_id(struct ib_cm_id *cm_id);
314
315#define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL)
316#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL)
317
318/**
319 * ib_cm_listen - Initiates listening on the specified service ID for
320 * connection and service ID resolution requests.
321 * @cm_id: Connection identifier associated with the listen request.
322 * @service_id: Service identifier matched against incoming connection
323 * and service ID resolution requests. The service ID should be specified in
324 * network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
325 * assign a service ID to the caller.
326 * @service_mask: Mask applied to service ID used to listen across a
327 * range of service IDs. If set to 0, the service ID is matched
328 * exactly. This parameter is ignored if %service_id is set to
329 * IB_CM_ASSIGN_SERVICE_ID.
330 */
331int ib_cm_listen(struct ib_cm_id *cm_id,
332 u64 service_id,
333 u64 service_mask);
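A hedged usage sketch tying ib_create_cm_id() and ib_cm_listen() together; my_cm_handler, my_ctx, and the service ID value are illustrative:

	/* Hypothetical sketch: listen for requests on one exact service ID. */
	static struct ib_cm_id *start_listen(void *my_ctx)
	{
		struct ib_cm_id *cm_id;

		cm_id = ib_create_cm_id(my_cm_handler, my_ctx);
		if (!cm_id)
			return NULL;

		/* Service IDs are in network byte order; mask 0 = exact match. */
		if (ib_cm_listen(cm_id, cpu_to_be64(0x1000ULL), 0)) {
			ib_destroy_cm_id(cm_id);
			return NULL;
		}
		return cm_id;
	}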
334
335struct ib_cm_req_param {
336 struct ib_sa_path_rec *primary_path;
337 struct ib_sa_path_rec *alternate_path;
338 u64 service_id;
339 u32 qp_num;
340 enum ib_qp_type qp_type;
341 u32 starting_psn;
342 const void *private_data;
343 u8 private_data_len;
344 u8 peer_to_peer;
345 u8 responder_resources;
346 u8 initiator_depth;
347 u8 remote_cm_response_timeout;
348 u8 flow_control;
349 u8 local_cm_response_timeout;
350 u8 retry_count;
351 u8 rnr_retry_count;
352 u8 max_cm_retries;
353 u8 srq;
354};
355
356/**
357 * ib_send_cm_req - Sends a connection request to the remote node.
358 * @cm_id: Connection identifier that will be associated with the
359 * connection request.
360 * @param: Connection request information needed to establish the
361 * connection.
362 */
363int ib_send_cm_req(struct ib_cm_id *cm_id,
364 struct ib_cm_req_param *param);
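For the active side, a hedged sketch of a minimal parameter block; path is an ib_sa_path_rec resolved elsewhere (for example through the SA query service), the QP, PSN, and tuning values are illustrative, and unlisted fields default to zero:

	/* Hypothetical sketch: connect a QP via ib_send_cm_req(). */
	static int connect_qp(struct ib_cm_id *cm_id, struct ib_sa_path_rec *path,
			      struct ib_qp *qp, u32 psn)
	{
		struct ib_cm_req_param req = {
			.primary_path               = path,
			.service_id                 = cpu_to_be64(0x1000ULL),
			.qp_num                     = qp->qp_num,
			.qp_type                    = IB_QPT_RC,
			.starting_psn               = psn,
			.responder_resources        = 4,
			.initiator_depth            = 4,
			.remote_cm_response_timeout = 20,
			.local_cm_response_timeout  = 20,
			.retry_count                = 7,
			.rnr_retry_count            = 7,
			.max_cm_retries             = 15,
		};

		return ib_send_cm_req(cm_id, &req);
	}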

struct ib_cm_rep_param {
	u32		qp_num;
	u32		starting_psn;
	const void	*private_data;
	u8		private_data_len;
	u8		responder_resources;
	u8		initiator_depth;
	u8		target_ack_delay;
	u8		failover_accepted;
	u8		flow_control;
	u8		rnr_retry_count;
	u8		srq;
};

/**
 * ib_send_cm_rep - Sends a connection reply in response to a connection
 *   request.
 * @cm_id: Connection identifier that will be associated with the
 *   connection request.
 * @param: Connection reply information needed to establish the
 *   connection.
 */
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param);

/**
 * ib_send_cm_rtu - Sends a connection ready to use message in response
 *   to a connection reply message.
 * @cm_id: Connection identifier associated with the connection request.
 * @private_data: Optional user-defined private data sent with the
 *   ready to use message.
 * @private_data_len: Size of the private data buffer, in bytes.
 */
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len);

/**
 * ib_send_cm_dreq - Sends a disconnection request for an existing
 *   connection.
 * @cm_id: Connection identifier associated with the connection being
 *   released.
 * @private_data: Optional user-defined private data sent with the
 *   disconnection request message.
 * @private_data_len: Size of the private data buffer, in bytes.
 */
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len);

/**
 * ib_send_cm_drep - Sends a disconnection reply to a disconnection request.
 * @cm_id: Connection identifier associated with the connection being
 *   released.
 * @private_data: Optional user-defined private data sent with the
 *   disconnection reply message.
 * @private_data_len: Size of the private data buffer, in bytes.
 *
 * If the cm_id is in the correct state, the CM will transition the connection
 * to the timewait state, even if an error occurs sending the DREP message.
 */
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len);

/**
 * ib_cm_establish - Forces a connection state to established.
 * @cm_id: Connection identifier to transition to established.
 *
 * This routine should be invoked by users who receive messages on a
 * connected QP before an RTU has been received.
 */
int ib_cm_establish(struct ib_cm_id *cm_id);

/**
 * ib_send_cm_rej - Sends a connection rejection message to the
 *   remote node.
 * @cm_id: Connection identifier associated with the connection being
 *   rejected.
 * @reason: Reason for the connection request rejection.
 * @ari: Optional additional rejection information.
 * @ari_length: Size of the additional rejection information, in bytes.
 * @private_data: Optional user-defined private data sent with the
 *   rejection message.
 * @private_data_len: Size of the private data buffer, in bytes.
 */
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len);

/**
 * ib_send_cm_mra - Sends a message receipt acknowledgement to a connection
 *   message.
 * @cm_id: Connection identifier associated with the connection message.
 * @service_timeout: The maximum time required for the sender to reply to
 *   the connection message.
 * @private_data: Optional user-defined private data sent with the
 *   message receipt acknowledgement.
 * @private_data_len: Size of the private data buffer, in bytes.
 */
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len);

/**
 * ib_send_cm_lap - Sends a load alternate path request.
 * @cm_id: Connection identifier associated with the load alternate path
 *   message.
 * @alternate_path: A path record that identifies the alternate path to
 *   load.
 * @private_data: Optional user-defined private data sent with the
 *   load alternate path message.
 * @private_data_len: Size of the private data buffer, in bytes.
 */
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len);

/**
 * ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
 *   to a specified QP state.
 * @cm_id: Communication identifier associated with the QP attributes to
 *   initialize.
 * @qp_attr: On input, specifies the desired QP state.  On output, the
 *   mandatory and desired optional attributes will be set in order to
 *   modify the QP to the specified state.
 * @qp_attr_mask: The QP attribute mask that may be used to transition the
 *   QP to the specified state.
 *
 * Users must set the @qp_attr->qp_state to the desired QP state.  This call
 * will set all required attributes for the given transition, along with
 * known optional attributes.  Users may override the attributes returned from
 * this call before calling ib_modify_qp.
 */
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask);
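A sketch of how this pairs with ib_modify_qp() from ib_verbs.h; the caller sets only the desired state and lets the CM fill in the rest:

/* Sketch: transition a connected QP to RTR using CM-derived attributes. */
static int example_qp_to_rtr(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;		/* desired state on input */
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	/* Optional attributes may be overridden here before the modify. */
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}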

/**
 * ib_send_cm_apr - Sends an alternate path response message in response to
 *   a load alternate path request.
 * @cm_id: Connection identifier associated with the alternate path response.
 * @status: Reply status sent with the alternate path response.
 * @info: Optional additional information sent with the alternate path
 *   response.
 * @info_length: Size of the additional information, in bytes.
 * @private_data: Optional user-defined private data sent with the
 *   alternate path response message.
 * @private_data_len: Size of the private data buffer, in bytes.
 */
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len);

struct ib_cm_sidr_req_param {
	struct ib_sa_path_rec	*path;
	u64			service_id;
	int			timeout_ms;
	const void		*private_data;
	u8			private_data_len;
	u8			max_cm_retries;
	u16			pkey;
};

/**
 * ib_send_cm_sidr_req - Sends a service ID resolution request to the
 *   remote node.
 * @cm_id: Communication identifier that will be associated with the
 *   service ID resolution request.
 * @param: Service ID resolution request information.
 */
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param);
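Service ID resolution is aimed at datagram (UD) consumers that only need the remote QPN and Q_Key; the reply is delivered through the cm_id's event handler. A sketch with a placeholder service ID:

/* Sketch: issue a SIDR request; the REP arrives asynchronously. */
static int example_sidr(struct ib_cm_id *cm_id, struct ib_sa_path_rec *path)
{
	struct ib_cm_sidr_req_param param;

	memset(&param, 0, sizeof param);
	param.path	     = path;
	param.service_id     = MY_SERVICE_ID;	/* placeholder */
	param.timeout_ms     = 1000;
	param.max_cm_retries = 3;
	param.pkey	     = 0xffff;		/* default partition key */

	return ib_send_cm_sidr_req(cm_id, &param);
}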

struct ib_cm_sidr_rep_param {
	u32			qp_num;
	u32			qkey;
	enum ib_cm_sidr_status	status;
	const void		*info;
	u8			info_length;
	const void		*private_data;
	u8			private_data_len;
};

/**
 * ib_send_cm_sidr_rep - Sends a service ID resolution reply to the
 *   remote node.
 * @cm_id: Communication identifier associated with the received service ID
 *   resolution request.
 * @param: Service ID resolution reply information.
 */
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param);

#endif /* IB_CM_H */
diff --git a/drivers/infiniband/include/ib_fmr_pool.h b/drivers/infiniband/include/ib_fmr_pool.h
index e8769657cbbb..6c9e24d6e144 100644
--- a/drivers/infiniband/include/ib_fmr_pool.h
+++ b/drivers/infiniband/include/ib_fmr_pool.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +30,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: ib_fmr_pool.h 1349 2004-12-16 21:09:43Z roland $
+ * $Id: ib_fmr_pool.h 2730 2005-06-28 16:43:03Z sean.hefty $
  */

 #if !defined(IB_FMR_POOL_H)
@@ -78,7 +79,7 @@ struct ib_pool_fmr {
 struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
				        struct ib_fmr_pool_param *params);

-int ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
+void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);

 int ib_flush_fmr_pool(struct ib_fmr_pool *pool);

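The hunk above changes ib_destroy_fmr_pool() to return void, so teardown paths have nothing to propagate. A lifecycle sketch under the post-patch API; the ib_fmr_pool_param fields are not shown in this diff, so they are left to the caller:

/* Sketch: create, flush, and destroy an FMR pool. */
static int example_fmr_pool(struct ib_pd *pd, struct ib_fmr_pool_param *params)
{
	struct ib_fmr_pool *pool;

	pool = ib_create_fmr_pool(pd, params);	/* params filled by caller */
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	ib_flush_fmr_pool(pool);	/* force unmapping of dirty FMRs */
	ib_destroy_fmr_pool(pool);	/* returns void after this patch */
	return 0;
}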
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
index 4a6bf6763a97..491b6f25b3b8 100644
--- a/drivers/infiniband/include/ib_mad.h
+++ b/drivers/infiniband/include/ib_mad.h
@@ -33,12 +33,14 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: ib_mad.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $
  */

 #if !defined( IB_MAD_H )
 #define IB_MAD_H

+#include <linux/pci.h>
+
 #include <ib_verbs.h>

 /* Management base version */
@@ -56,6 +58,8 @@
 #define IB_MGMT_CLASS_VENDOR_RANGE2_START	0x30
 #define IB_MGMT_CLASS_VENDOR_RANGE2_END		0x4F

+#define IB_OPENIB_OUI				(0x001405)
+
 /* Management methods */
 #define IB_MGMT_METHOD_GET			0x01
 #define IB_MGMT_METHOD_SET			0x02
@@ -70,18 +74,37 @@

 #define IB_MGMT_MAX_METHODS			128

+/* RMPP information */
+#define IB_MGMT_RMPP_VERSION			1
+
+#define IB_MGMT_RMPP_TYPE_DATA			1
+#define IB_MGMT_RMPP_TYPE_ACK			2
+#define IB_MGMT_RMPP_TYPE_STOP			3
+#define IB_MGMT_RMPP_TYPE_ABORT			4
+
+#define IB_MGMT_RMPP_FLAG_ACTIVE		1
+#define IB_MGMT_RMPP_FLAG_FIRST			(1<<1)
+#define IB_MGMT_RMPP_FLAG_LAST			(1<<2)
+
+#define IB_MGMT_RMPP_NO_RESPTIME		0x1F
+
+#define IB_MGMT_RMPP_STATUS_SUCCESS		0
+#define IB_MGMT_RMPP_STATUS_RESX		1
+#define IB_MGMT_RMPP_STATUS_T2L			118
+#define IB_MGMT_RMPP_STATUS_BAD_LEN		119
+#define IB_MGMT_RMPP_STATUS_BAD_SEG		120
+#define IB_MGMT_RMPP_STATUS_BADT		121
+#define IB_MGMT_RMPP_STATUS_W2S			122
+#define IB_MGMT_RMPP_STATUS_S2B			123
+#define IB_MGMT_RMPP_STATUS_BAD_STATUS		124
+#define IB_MGMT_RMPP_STATUS_UNV			125
+#define IB_MGMT_RMPP_STATUS_TMR			126
+#define IB_MGMT_RMPP_STATUS_UNSPEC		127
+
 #define IB_QP0		0
 #define IB_QP1		__constant_htonl(1)
 #define IB_QP1_QKEY	0x80010000
-
-struct ib_grh {
-	u32		version_tclass_flow;
-	u16		paylen;
-	u8		next_hdr;
-	u8		hop_limit;
-	union ib_gid	sgid;
-	union ib_gid	dgid;
-} __attribute__ ((packed));
+#define IB_QP_SET_QKEY	0x80000000

 struct ib_mad_hdr {
 	u8	base_version;
@@ -94,7 +117,7 @@ struct ib_mad_hdr {
 	u16	attr_id;
 	u16	resv;
 	u32	attr_mod;
-} __attribute__ ((packed));
+};

 struct ib_rmpp_hdr {
 	u8	rmpp_version;
@@ -103,17 +126,41 @@ struct ib_rmpp_hdr {
 	u8	rmpp_status;
 	u32	seg_num;
 	u32	paylen_newwin;
+};
+
+typedef u64 __bitwise ib_sa_comp_mask;
+
+#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
+
+/*
+ * ib_sa_hdr and ib_sa_mad structures must be packed because they have
+ * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
+ * lay them out wrong otherwise.  (And unfortunately they are sent on
+ * the wire so we can't change the layout)
+ */
+struct ib_sa_hdr {
+	u64			sm_key;
+	u16			attr_offset;
+	u16			reserved;
+	ib_sa_comp_mask		comp_mask;
 } __attribute__ ((packed));

 struct ib_mad {
 	struct ib_mad_hdr	mad_hdr;
 	u8			data[232];
-} __attribute__ ((packed));
+};

 struct ib_rmpp_mad {
 	struct ib_mad_hdr	mad_hdr;
 	struct ib_rmpp_hdr	rmpp_hdr;
 	u8			data[220];
+};
+
+struct ib_sa_mad {
+	struct ib_mad_hdr	mad_hdr;
+	struct ib_rmpp_hdr	rmpp_hdr;
+	struct ib_sa_hdr	sa_hdr;
+	u8			data[200];
 } __attribute__ ((packed));

 struct ib_vendor_mad {
@@ -122,7 +169,70 @@ struct ib_vendor_mad {
 	u8			reserved;
 	u8			oui[3];
 	u8			data[216];
-} __attribute__ ((packed));
+};
+
+/**
+ * ib_mad_send_buf - MAD data buffer and work request for sends.
+ * @mad: References an allocated MAD data buffer.  The size of the data
+ *   buffer is specified in the @send_wr.length field.
+ * @mapping: DMA mapping information.
+ * @mad_agent: MAD agent that allocated the buffer.
+ * @context: User-controlled context fields.
+ * @send_wr: An initialized work request structure used when sending the MAD.
+ *   The wr_id field of the work request is initialized to reference this
+ *   data structure.
+ * @sge: A scatter-gather list referenced by the work request.
+ *
+ * Users are responsible for initializing the MAD buffer itself, with the
+ * exception of specifying the payload length field in any RMPP MAD.
+ */
+struct ib_mad_send_buf {
+	struct ib_mad		*mad;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
+	struct ib_mad_agent	*mad_agent;
+	void			*context[2];
+	struct ib_send_wr	send_wr;
+	struct ib_sge		sge;
+};
+
+/**
+ * ib_get_rmpp_resptime - Returns the RMPP response time.
+ * @rmpp_hdr: An RMPP header.
+ */
+static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
+{
+	return rmpp_hdr->rmpp_rtime_flags >> 3;
+}
+
+/**
+ * ib_get_rmpp_flags - Returns the RMPP flags.
+ * @rmpp_hdr: An RMPP header.
+ */
+static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
+{
+	return rmpp_hdr->rmpp_rtime_flags & 0x7;
+}
+
+/**
+ * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
+ * @rmpp_hdr: An RMPP header.
+ * @rtime: The response time to set.
+ */
+static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
+{
+	rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
+}
+
+/**
+ * ib_set_rmpp_flags - Sets the flags in an RMPP header.
+ * @rmpp_hdr: An RMPP header.
+ * @flags: The flags to set.
+ */
+static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
+{
+	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
+				     (flags & 0x7);
+}

 struct ib_mad_agent;
 struct ib_mad_send_wc;
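The helpers above pack the response time into the upper five bits of rmpp_rtime_flags and the flags into the lower three. A sketch of labeling a single-segment RMPP data MAD; rmpp_version is visible in this diff, while rmpp_type is assumed from the full ib_rmpp_hdr definition:

/* Sketch: mark a lone RMPP segment as active, first, and last. */
static void example_mark_rmpp(struct ib_rmpp_mad *rmpp_mad)
{
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type	= IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_resptime(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_NO_RESPTIME);
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
			  IB_MGMT_RMPP_FLAG_ACTIVE |
			  IB_MGMT_RMPP_FLAG_FIRST |
			  IB_MGMT_RMPP_FLAG_LAST);
}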
@@ -168,6 +278,7 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
  * ib_mad_agent - Used to track MAD registration with the access layer.
  * @device: Reference to device registration is on.
  * @qp: Reference to QP used for sending and receiving MADs.
+ * @mr: Memory region for system memory usable for DMA.
  * @recv_handler: Callback handler for a received MAD.
  * @send_handler: Callback handler for a sent MAD.
  * @snoop_handler: Callback handler for snooped sent MADs.
@@ -176,16 +287,19 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
  *   Unsolicited MADs sent by this client will have the upper 32-bits
  *   of their TID set to this value.
  * @port_num: Port number on which QP is registered
+ * @rmpp_version: If set, indicates the RMPP version used by this agent.
  */
 struct ib_mad_agent {
 	struct ib_device	*device;
 	struct ib_qp		*qp;
+	struct ib_mr		*mr;
 	ib_mad_recv_handler	recv_handler;
 	ib_mad_send_handler	send_handler;
 	ib_mad_snoop_handler	snoop_handler;
 	void			*context;
 	u32			hi_tid;
 	u8			port_num;
+	u8			rmpp_version;
 };

 /**
@@ -219,6 +333,7 @@ struct ib_mad_recv_buf {
  * ib_mad_recv_wc - received MAD information.
  * @wc: Completion information for the received data.
  * @recv_buf: Specifies the location of the received data buffer(s).
+ * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
  * @mad_len: The length of the received MAD, without duplicated headers.
  *
  * For received response, the wr_id field of the wc is set to the wr_id
@@ -227,6 +342,7 @@ struct ib_mad_recv_buf {
 struct ib_mad_recv_wc {
 	struct ib_wc		*wc;
 	struct ib_mad_recv_buf	recv_buf;
+	struct list_head	rmpp_list;
 	int			mad_len;
 };

@@ -322,6 +438,16 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
  * @bad_send_wr: Specifies the MAD on which an error was encountered.
  *
  * Sent MADs are not guaranteed to complete in the order that they were posted.
+ *
+ * If the MAD requires RMPP, the data buffer should contain a single copy
+ * of the common MAD, RMPP, and class specific headers, followed by the class
+ * defined data.  If the class defined data would not divide evenly into
+ * RMPP segments, then space must be allocated at the end of the referenced
+ * buffer for any required padding.  To indicate the amount of class defined
+ * data being transferred, the paylen_newwin field in the RMPP header should
+ * be set to the size of the class specific header plus the amount of class
+ * defined data being transferred.  The paylen_newwin field should be
+ * specified in network-byte order.
  */
 int ib_post_send_mad(struct ib_mad_agent *mad_agent,
 		     struct ib_send_wr *send_wr,
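The paylen_newwin rule above is the part callers get wrong most often. A sketch using the ib_create_send_mad() helper declared later in this header; the QPN, sizes, and the ERR_PTR error convention are assumptions consistent with that helper's documentation:

/* Sketch: allocate and post an RMPP send on QP1 (GSI). */
static int example_rmpp_send(struct ib_mad_agent *agent, struct ib_ah *ah,
			     int class_hdr_len, int data_len)
{
	struct ib_mad_send_buf *buf;
	struct ib_rmpp_mad *rmpp_mad;
	struct ib_send_wr *bad_wr;
	int hdr_len = sizeof(struct ib_mad_hdr) +
		      sizeof(struct ib_rmpp_hdr) + class_hdr_len;

	buf = ib_create_send_mad(agent, 1, 0, ah, 1 /* rmpp_active */,
				 hdr_len, data_len, GFP_KERNEL);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ...fill the common MAD and class headers, then the payload... */
	rmpp_mad = (struct ib_rmpp_mad *) buf->mad;
	rmpp_mad->rmpp_hdr.paylen_newwin =
		cpu_to_be32(class_hdr_len + data_len);	/* network byte order */

	return ib_post_send_mad(agent, &buf->send_wr, &bad_wr);
}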
@@ -334,15 +460,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent,
  * referenced buffer should be at least the size of the mad_len specified
  * by @mad_recv_wc.
  *
- * This call copies a chain of received RMPP MADs into a single data buffer,
+ * This call copies a chain of received MAD segments into a single data buffer,
  * removing duplicated headers.
  */
-void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
-			  void *buf);
+void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);

 /**
- * ib_free_recv_mad - Returns data buffers used to receive a MAD to the
- *   access layer.
+ * ib_free_recv_mad - Returns data buffers used to receive a MAD.
  * @mad_recv_wc: Work completion information for a received MAD.
  *
  * Clients receiving MADs through their ib_mad_recv_handler must call this
@@ -358,8 +482,18 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
  * MADs will be returned to the user through the corresponding
  * ib_mad_send_handler.
  */
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
-		   u64 wr_id);
+void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id);
+
+/**
+ * ib_modify_mad - Modifies an outstanding send MAD operation.
+ * @mad_agent: Specifies the registration associated with sent MAD.
+ * @wr_id: Indicates the work request identifier of the MAD to modify.
+ * @timeout_ms: New timeout value for sent MAD.
+ *
+ * This call will reset the timeout value for a sent MAD to the specified
+ * value.
+ */
+int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms);

 /**
  * ib_redirect_mad_qp - Registers a QP for MAD services.
@@ -401,4 +535,43 @@ struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
 		      struct ib_wc *wc);

+/**
+ * ib_create_send_mad - Allocate and initialize a data buffer and work request
+ *   for sending a MAD.
+ * @mad_agent: Specifies the registered MAD service to associate with the MAD.
+ * @remote_qpn: Specifies the QPN of the receiving node.
+ * @pkey_index: Specifies which PKey the MAD will be sent using.  This field
+ *   is valid only if the remote_qpn is QP 1.
+ * @ah: References the address handle used to transfer to the remote node.
+ * @rmpp_active: Indicates if the send will enable RMPP.
+ * @hdr_len: Indicates the size of the data header of the MAD.  This length
+ *   should include the common MAD header, RMPP header, plus any class
+ *   specific header.
+ * @data_len: Indicates the size of any user-transferred data.  The call will
+ *   automatically adjust the allocated buffer size to account for any
+ *   additional padding that may be necessary.
+ * @gfp_mask: GFP mask used for the memory allocation.
+ *
+ * This is a helper routine that may be used to allocate a MAD.  Users are
+ * not required to allocate outbound MADs using this call.  The returned
+ * MAD send buffer will reference a data buffer usable for sending a MAD, along
+ * with an initialized work request structure.  Users may modify the returned
+ * MAD data buffer or work request before posting the send.
+ *
+ * The returned data buffer will be cleared.  Users are responsible for
+ * initializing the common MAD and any class specific headers.  If @rmpp_active
+ * is set, the RMPP header will be initialized for sending.
+ */
+struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
+					   u32 remote_qpn, u16 pkey_index,
+					   struct ib_ah *ah, int rmpp_active,
+					   int hdr_len, int data_len,
+					   unsigned int __nocast gfp_mask);
+
+/**
+ * ib_free_send_mad - Returns data buffers used to send a MAD.
+ * @send_buf: Previously allocated send data buffer.
+ */
+void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
+
 #endif /* IB_MAD_H */
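A receive-side sketch tying ib_coalesce_recv_mad() and ib_free_recv_mad() together; process_mad_data() is a placeholder for client logic, and the handler signature matches ib_mad_recv_handler:

/* Sketch: flatten a possibly multi-segment MAD, consume it, release it. */
static void example_recv_handler(struct ib_mad_agent *mad_agent,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	void *buf = kmalloc(mad_recv_wc->mad_len, GFP_KERNEL);

	if (buf) {
		ib_coalesce_recv_mad(mad_recv_wc, buf);
		process_mad_data(buf, mad_recv_wc->mad_len);	/* placeholder */
		kfree(buf);
	}
	ib_free_recv_mad(mad_recv_wc);	/* required for every received MAD */
}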
diff --git a/drivers/infiniband/include/ib_sa.h b/drivers/infiniband/include/ib_sa.h
index 00222285eb9a..6d999f7b5d93 100644
--- a/drivers/infiniband/include/ib_sa.h
+++ b/drivers/infiniband/include/ib_sa.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +30,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: ib_sa.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: ib_sa.h 2811 2005-07-06 18:11:43Z halr $
  */

 #ifndef IB_SA_H
@@ -41,9 +42,11 @@
 #include <ib_mad.h>

 enum {
 	IB_SA_CLASS_VERSION		= 2,	/* IB spec version 1.1/1.2 */

-	IB_SA_METHOD_DELETE		= 0x15
+	IB_SA_METHOD_GET_TABLE		= 0x12,
+	IB_SA_METHOD_GET_TABLE_RESP	= 0x92,
+	IB_SA_METHOD_DELETE		= 0x15
 };

 enum ib_sa_selector {
@@ -87,10 +90,6 @@ static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate)
 	}
 }

-typedef u64 __bitwise ib_sa_comp_mask;
-
-#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
-
 /*
  * Structures for SA records are named "struct ib_sa_xxx_rec."  No
  * attempt is made to pack structures to match the physical layout of
@@ -195,6 +194,61 @@ struct ib_sa_mcmember_rec {
 	int			proxy_join;
 };

+/* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */
+#define IB_SA_SERVICE_REC_SERVICE_ID			IB_SA_COMP_MASK( 0)
+#define IB_SA_SERVICE_REC_SERVICE_GID			IB_SA_COMP_MASK( 1)
+#define IB_SA_SERVICE_REC_SERVICE_PKEY			IB_SA_COMP_MASK( 2)
+/* reserved:						 3 */
+#define IB_SA_SERVICE_REC_SERVICE_LEASE			IB_SA_COMP_MASK( 4)
+#define IB_SA_SERVICE_REC_SERVICE_KEY			IB_SA_COMP_MASK( 5)
+#define IB_SA_SERVICE_REC_SERVICE_NAME			IB_SA_COMP_MASK( 6)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_0		IB_SA_COMP_MASK( 7)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_1		IB_SA_COMP_MASK( 8)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_2		IB_SA_COMP_MASK( 9)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_3		IB_SA_COMP_MASK(10)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_4		IB_SA_COMP_MASK(11)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_5		IB_SA_COMP_MASK(12)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_6		IB_SA_COMP_MASK(13)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_7		IB_SA_COMP_MASK(14)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_8		IB_SA_COMP_MASK(15)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_9		IB_SA_COMP_MASK(16)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_10		IB_SA_COMP_MASK(17)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_11		IB_SA_COMP_MASK(18)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_12		IB_SA_COMP_MASK(19)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_13		IB_SA_COMP_MASK(20)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_14		IB_SA_COMP_MASK(21)
+#define IB_SA_SERVICE_REC_SERVICE_DATA8_15		IB_SA_COMP_MASK(22)
+#define IB_SA_SERVICE_REC_SERVICE_DATA16_0		IB_SA_COMP_MASK(23)
+#define IB_SA_SERVICE_REC_SERVICE_DATA16_1		IB_SA_COMP_MASK(24)
+#define IB_SA_SERVICE_REC_SERVICE_DATA16_2		IB_SA_COMP_MASK(25)
+#define IB_SA_SERVICE_REC_SERVICE_DATA16_3		IB_SA_COMP_MASK(26)
+#define IB_SA_SERVICE_REC_SERVICE_DATA16_4		IB_SA_COMP_MASK(27)
+#define IB_SA_SERVICE_REC_SERVICE_DATA16_5		IB_SA_COMP_MASK(28)
+#define IB_SA_SERVICE_REC_SERVICE_DATA16_6		IB_SA_COMP_MASK(29)
+#define IB_SA_SERVICE_REC_SERVICE_DATA16_7		IB_SA_COMP_MASK(30)
+#define IB_SA_SERVICE_REC_SERVICE_DATA32_0		IB_SA_COMP_MASK(31)
+#define IB_SA_SERVICE_REC_SERVICE_DATA32_1		IB_SA_COMP_MASK(32)
+#define IB_SA_SERVICE_REC_SERVICE_DATA32_2		IB_SA_COMP_MASK(33)
+#define IB_SA_SERVICE_REC_SERVICE_DATA32_3		IB_SA_COMP_MASK(34)
+#define IB_SA_SERVICE_REC_SERVICE_DATA64_0		IB_SA_COMP_MASK(35)
+#define IB_SA_SERVICE_REC_SERVICE_DATA64_1		IB_SA_COMP_MASK(36)
+
+#define IB_DEFAULT_SERVICE_LEASE	0xFFFFFFFF
+
+struct ib_sa_service_rec {
+	u64		id;
+	union ib_gid	gid;
+	u16		pkey;
+	/* reserved */
+	u32		lease;
+	u8		key[16];
+	u8		name[64];
+	u8		data8[16];
+	u16		data16[8];
+	u32		data32[4];
+	u64		data64[2];
+};
+
 struct ib_sa_query;

 void ib_sa_cancel_query(int id, struct ib_sa_query *query);
@@ -202,7 +256,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query);
 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 		       struct ib_sa_path_rec *rec,
 		       ib_sa_comp_mask comp_mask,
-		       int timeout_ms, int gfp_mask,
+		       int timeout_ms, unsigned int __nocast gfp_mask,
 		       void (*callback)(int status,
 					struct ib_sa_path_rec *resp,
 					void *context),
@@ -213,13 +267,24 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 			     u8 method,
 			     struct ib_sa_mcmember_rec *rec,
 			     ib_sa_comp_mask comp_mask,
-			     int timeout_ms, int gfp_mask,
+			     int timeout_ms, unsigned int __nocast gfp_mask,
 			     void (*callback)(int status,
 					      struct ib_sa_mcmember_rec *resp,
 					      void *context),
 			     void *context,
 			     struct ib_sa_query **query);

+int ib_sa_service_rec_query(struct ib_device *device, u8 port_num,
+			    u8 method,
+			    struct ib_sa_service_rec *rec,
+			    ib_sa_comp_mask comp_mask,
+			    int timeout_ms, unsigned int __nocast gfp_mask,
+			    void (*callback)(int status,
+					     struct ib_sa_service_rec *resp,
+					     void *context),
+			    void *context,
+			    struct ib_sa_query **sa_query);
+
 /**
  * ib_sa_mcmember_rec_set - Start an MCMember set query
  * @device:device to send query on
@@ -248,7 +313,7 @@ static inline int
 ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num,
 		       struct ib_sa_mcmember_rec *rec,
 		       ib_sa_comp_mask comp_mask,
-		       int timeout_ms, int gfp_mask,
+		       int timeout_ms, unsigned int __nocast gfp_mask,
 		       void (*callback)(int status,
 					struct ib_sa_mcmember_rec *resp,
 					void *context),
@@ -290,7 +355,7 @@ static inline int
 ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num,
 			  struct ib_sa_mcmember_rec *rec,
 			  ib_sa_comp_mask comp_mask,
-			  int timeout_ms, int gfp_mask,
+			  int timeout_ms, unsigned int __nocast gfp_mask,
 			  void (*callback)(int status,
 					   struct ib_sa_mcmember_rec *resp,
 					   void *context),
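ib_sa_service_rec_query() follows the same asynchronous pattern as the path and MCMember queries above. A sketch of registering a service record; the callback body and the choice of component mask bits are illustrative:

/* Sketch: ServiceRecord set; completion arrives via the callback. */
static void example_service_cb(int status, struct ib_sa_service_rec *resp,
			       void *context)
{
	/* status < 0 on failure; resp is valid only on success. */
}

static int example_register_service(struct ib_device *device, u8 port_num,
				    struct ib_sa_service_rec *rec)
{
	struct ib_sa_query *query;

	rec->lease = IB_DEFAULT_SERVICE_LEASE;	/* indefinite lease */
	return ib_sa_service_rec_query(device, port_num, IB_MGMT_METHOD_SET,
				       rec,
				       IB_SA_SERVICE_REC_SERVICE_ID |
				       IB_SA_SERVICE_REC_SERVICE_GID |
				       IB_SA_SERVICE_REC_SERVICE_PKEY |
				       IB_SA_SERVICE_REC_SERVICE_LEASE,
				       1000, GFP_KERNEL,
				       example_service_cb, NULL, &query);
}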
diff --git a/drivers/infiniband/include/ib_user_cm.h b/drivers/infiniband/include/ib_user_cm.h
new file mode 100644
index 000000000000..500b1af6ff77
--- /dev/null
+++ b/drivers/infiniband/include/ib_user_cm.h
@@ -0,0 +1,328 @@
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_user_cm.h 2576 2005-06-09 17:00:30Z libor $
 */

#ifndef IB_USER_CM_H
#define IB_USER_CM_H

#include <linux/types.h>

#define IB_USER_CM_ABI_VERSION 1

enum {
	IB_USER_CM_CMD_CREATE_ID,
	IB_USER_CM_CMD_DESTROY_ID,
	IB_USER_CM_CMD_ATTR_ID,

	IB_USER_CM_CMD_LISTEN,
	IB_USER_CM_CMD_ESTABLISH,

	IB_USER_CM_CMD_SEND_REQ,
	IB_USER_CM_CMD_SEND_REP,
	IB_USER_CM_CMD_SEND_RTU,
	IB_USER_CM_CMD_SEND_DREQ,
	IB_USER_CM_CMD_SEND_DREP,
	IB_USER_CM_CMD_SEND_REJ,
	IB_USER_CM_CMD_SEND_MRA,
	IB_USER_CM_CMD_SEND_LAP,
	IB_USER_CM_CMD_SEND_APR,
	IB_USER_CM_CMD_SEND_SIDR_REQ,
	IB_USER_CM_CMD_SEND_SIDR_REP,

	IB_USER_CM_CMD_EVENT,
};
/*
 * command ABI structures.
 */
struct ib_ucm_cmd_hdr {
	__u32 cmd;
	__u16 in;
	__u16 out;
};

struct ib_ucm_create_id {
	__u64 response;
};

struct ib_ucm_create_id_resp {
	__u32 id;
};

struct ib_ucm_destroy_id {
	__u32 id;
};

struct ib_ucm_attr_id {
	__u64 response;
	__u32 id;
};

struct ib_ucm_attr_id_resp {
	__u64 service_id;
	__u64 service_mask;
	__u32 local_id;
	__u32 remote_id;
};

struct ib_ucm_listen {
	__u64 service_id;
	__u64 service_mask;
	__u32 id;
};

struct ib_ucm_establish {
	__u32 id;
};

struct ib_ucm_private_data {
	__u64 data;
	__u32 id;
	__u8  len;
	__u8  reserved[3];
};

struct ib_ucm_path_rec {
	__u8  dgid[16];
	__u8  sgid[16];
	__u16 dlid;
	__u16 slid;
	__u32 raw_traffic;
	__u32 flow_label;
	__u32 reversible;
	__u32 mtu;
	__u16 pkey;
	__u8  hop_limit;
	__u8  traffic_class;
	__u8  numb_path;
	__u8  sl;
	__u8  mtu_selector;
	__u8  rate_selector;
	__u8  rate;
	__u8  packet_life_time_selector;
	__u8  packet_life_time;
	__u8  preference;
};

struct ib_ucm_req {
	__u32 id;
	__u32 qpn;
	__u32 qp_type;
	__u32 psn;
	__u64 sid;
	__u64 data;
	__u64 primary_path;
	__u64 alternate_path;
	__u8  len;
	__u8  peer_to_peer;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  remote_cm_response_timeout;
	__u8  flow_control;
	__u8  local_cm_response_timeout;
	__u8  retry_count;
	__u8  rnr_retry_count;
	__u8  max_cm_retries;
	__u8  srq;
	__u8  reserved[1];
};

struct ib_ucm_rep {
	__u64 data;
	__u32 id;
	__u32 qpn;
	__u32 psn;
	__u8  len;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  target_ack_delay;
	__u8  failover_accepted;
	__u8  flow_control;
	__u8  rnr_retry_count;
	__u8  srq;
};

struct ib_ucm_info {
	__u32 id;
	__u32 status;
	__u64 info;
	__u64 data;
	__u8  info_len;
	__u8  data_len;
	__u8  reserved[2];
};

struct ib_ucm_mra {
	__u64 data;
	__u32 id;
	__u8  len;
	__u8  timeout;
	__u8  reserved[2];
};

struct ib_ucm_lap {
	__u64 path;
	__u64 data;
	__u32 id;
	__u8  len;
	__u8  reserved[3];
};

struct ib_ucm_sidr_req {
	__u32 id;
	__u32 timeout;
	__u64 sid;
	__u64 data;
	__u64 path;
	__u16 pkey;
	__u8  len;
	__u8  max_cm_retries;
};

struct ib_ucm_sidr_rep {
	__u32 id;
	__u32 qpn;
	__u32 qkey;
	__u32 status;
	__u64 info;
	__u64 data;
	__u8  info_len;
	__u8  data_len;
	__u8  reserved[2];
};
/*
 * event notification ABI structures.
 */
struct ib_ucm_event_get {
	__u64 response;
	__u64 data;
	__u64 info;
	__u8  data_len;
	__u8  info_len;
	__u8  reserved[2];
};

struct ib_ucm_req_event_resp {
	__u32 listen_id;
	/* device */
	/* port */
	struct ib_ucm_path_rec primary_path;
	struct ib_ucm_path_rec alternate_path;
	__u64 remote_ca_guid;
	__u32 remote_qkey;
	__u32 remote_qpn;
	__u32 qp_type;
	__u32 starting_psn;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  local_cm_response_timeout;
	__u8  flow_control;
	__u8  remote_cm_response_timeout;
	__u8  retry_count;
	__u8  rnr_retry_count;
	__u8  srq;
};

struct ib_ucm_rep_event_resp {
	__u64 remote_ca_guid;
	__u32 remote_qkey;
	__u32 remote_qpn;
	__u32 starting_psn;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  target_ack_delay;
	__u8  failover_accepted;
	__u8  flow_control;
	__u8  rnr_retry_count;
	__u8  srq;
	__u8  reserved[1];
};

struct ib_ucm_rej_event_resp {
	__u32 reason;
	/* ari in ib_ucm_event_get info field. */
};

struct ib_ucm_mra_event_resp {
	__u8  timeout;
	__u8  reserved[3];
};

struct ib_ucm_lap_event_resp {
	struct ib_ucm_path_rec path;
};

struct ib_ucm_apr_event_resp {
	__u32 status;
	/* apr info in ib_ucm_event_get info field. */
};

struct ib_ucm_sidr_req_event_resp {
	__u32 listen_id;
	/* device */
	/* port */
	__u16 pkey;
	__u8  reserved[2];
};

struct ib_ucm_sidr_rep_event_resp {
	__u32 status;
	__u32 qkey;
	__u32 qpn;
	/* info in ib_ucm_event_get info field. */
};

#define IB_UCM_PRES_DATA      0x01
#define IB_UCM_PRES_INFO      0x02
#define IB_UCM_PRES_PRIMARY   0x04
#define IB_UCM_PRES_ALTERNATE 0x08

struct ib_ucm_event_resp {
	__u32 id;
	__u32 event;
	__u32 present;
	union {
		struct ib_ucm_req_event_resp req_resp;
		struct ib_ucm_rep_event_resp rep_resp;
		struct ib_ucm_rej_event_resp rej_resp;
		struct ib_ucm_mra_event_resp mra_resp;
		struct ib_ucm_lap_event_resp lap_resp;
		struct ib_ucm_apr_event_resp apr_resp;

		struct ib_ucm_sidr_req_event_resp sidr_req_resp;
		struct ib_ucm_sidr_rep_event_resp sidr_rep_resp;

		__u32 send_status;
	} u;
};

#endif /* IB_USER_CM_H */
diff --git a/drivers/infiniband/include/ib_user_mad.h b/drivers/infiniband/include/ib_user_mad.h
index 06ad4a6075fa..a9a56b50aacc 100644
--- a/drivers/infiniband/include/ib_user_mad.h
+++ b/drivers/infiniband/include/ib_user_mad.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +30,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: ib_user_mad.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: ib_user_mad.h 2814 2005-07-06 19:14:09Z halr $
  */

 #ifndef IB_USER_MAD_H
@@ -42,7 +43,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define IB_USER_MAD_ABI_VERSION 2
+#define IB_USER_MAD_ABI_VERSION 5

 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -51,13 +52,13 @@
  */

 /**
- * ib_user_mad - MAD packet
- * @data - Contents of MAD
+ * ib_user_mad_hdr - MAD packet header
  * @id - ID of agent MAD received with/to be sent with
  * @status - 0 on successful receive, ETIMEDOUT if no response
  *   received (transaction ID in data[] will be set to TID of original
  *   request) (ignored on send)
  * @timeout_ms - Milliseconds to wait for response (unset on receive)
+ * @retries - Number of automatic retries to attempt
  * @qpn - Remote QP number received from/to be sent to
  * @qkey - Remote Q_Key to be sent with (unset on receive)
  * @lid - Remote lid received from/to be sent to
@@ -72,11 +73,12 @@
  *
  * All multi-byte quantities are stored in network (big endian) byte order.
  */
-struct ib_user_mad {
-	__u8	data[256];
+struct ib_user_mad_hdr {
 	__u32	id;
 	__u32	status;
 	__u32	timeout_ms;
+	__u32	retries;
+	__u32	length;
 	__u32	qpn;
 	__u32	qkey;
 	__u16	lid;
@@ -91,6 +93,17 @@ struct ib_user_mad {
 };

 /**
+ * ib_user_mad - MAD packet
+ * @hdr - MAD packet header
+ * @data - Contents of MAD
+ *
+ */
+struct ib_user_mad {
+	struct ib_user_mad_hdr hdr;
+	__u8	data[0];
+};
+
+/**
  * ib_user_mad_reg_req - MAD registration request
  * @id - Set by the kernel; used to identify agent in future requests.
  * @qpn - Queue pair number; must be 0 or 1.
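With the header split out and data[] now a flexible array, userspace sizes its buffer explicitly instead of relying on a fixed 256-byte layout. A userspace sketch; the umad device path is an assumption about the local system:

/* Userspace sketch: read one MAD (header plus 256 data bytes). */
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int example_read_mad(void)
{
	struct ib_user_mad *mad;
	int fd, len = sizeof *mad + 256;

	mad = malloc(len);
	if (!mad)
		return -1;

	fd = open("/dev/infiniband/umad0", O_RDONLY);	/* path may vary */
	if (fd < 0) {
		free(mad);
		return -1;
	}
	if (read(fd, mad, len) < 0) {
		close(fd);
		free(mad);
		return -1;
	}
	/* mad->hdr describes the packet; mad->data holds the MAD itself. */
	close(fd);
	free(mad);
	return 0;
}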
@@ -103,6 +116,8 @@ struct ib_user_mad {
  *   management class to receive.
  * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
  *   in the range from 0x30 to 0x4f. Otherwise not used.
+ * @rmpp_version: If set, indicates the RMPP version used.
+ *
  */
 struct ib_user_mad_reg_req {
 	__u32	id;
@@ -111,6 +126,7 @@ struct ib_user_mad_reg_req {
 	__u8	mgmt_class;
 	__u8	mgmt_class_version;
 	__u8	oui[3];
+	__u8	rmpp_version;
 };

 #define IB_IOCTL_MAGIC		0x1b
diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h
index e5bd9a10c201..5d24edaa66e6 100644
--- a/drivers/infiniband/include/ib_verbs.h
+++ b/drivers/infiniband/include/ib_verbs.h
@@ -289,6 +289,15 @@ struct ib_global_route {
 	u8	traffic_class;
 };

+struct ib_grh {
+	u32		version_tclass_flow;
+	u16		paylen;
+	u8		next_hdr;
+	u8		hop_limit;
+	union ib_gid	sgid;
+	union ib_gid	dgid;
+};
+
 enum {
 	IB_MULTICAST_QPN = 0xffffff
 };
@@ -566,6 +575,7 @@ struct ib_send_wr {
 			u32	remote_qpn;
 			u32	remote_qkey;
 			int	timeout_ms; /* valid for MADs only */
+			int	retries;    /* valid for MADs only */
 			u16	pkey_index; /* valid for GSI only */
 			u8	port_num;   /* valid for DR SMPs on switch only */
 		} ud;
@@ -990,6 +1000,21 @@ int ib_dealloc_pd(struct ib_pd *pd);
 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

 /**
+ * ib_create_ah_from_wc - Creates an address handle associated with the
+ *   sender of the specified work completion.
+ * @pd: The protection domain associated with the address handle.
+ * @wc: Work completion information associated with a received message.
+ * @grh: References the received global route header.  This parameter is
+ *   ignored unless the work completion indicates that the GRH is valid.
+ * @port_num: The outbound port number to associate with the address.
+ *
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
+				   struct ib_grh *grh, u8 port_num);
+
+/**
  * ib_modify_ah - Modifies the address vector associated with an address
  *   handle.
  * @ah: The address handle to modify.
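A typical consumer is a UD responder replying to the sender of a received datagram. A sketch; it assumes the usual UD convention that the first 40 bytes of the posted receive buffer hold the GRH:

/* Sketch: build an address handle aimed back at a datagram's sender. */
static struct ib_ah *example_reply_ah(struct ib_pd *pd, struct ib_wc *wc,
				      void *recv_buf, u8 port_num)
{
	struct ib_grh *grh = recv_buf;	/* ignored unless wc marks GRH valid */

	return ib_create_ah_from_wc(pd, wc, grh, port_num);
}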
diff --git a/drivers/isdn/hisax/avm_a1.c b/drivers/isdn/hisax/avm_a1.c
index 8f028d42fd2f..9a8b02557ff9 100644
--- a/drivers/isdn/hisax/avm_a1.c
+++ b/drivers/isdn/hisax/avm_a1.c
@@ -135,7 +135,7 @@ avm_a1_interrupt(int intno, void *dev_id, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }

-inline static void
+static inline void
 release_ioregs(struct IsdnCardState *cs, int mask)
 {
 	release_region(cs->hw.avm.cfg_reg, 8);
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index c542e6fb2bde..fbaab4352902 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1900,6 +1900,7 @@ static struct pci_device_id hisax_pci_tbl[] __initdata = {
 	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_R685, PCI_ANY_ID, PCI_ANY_ID},
 	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_R753, PCI_ANY_ID, PCI_ANY_ID},
 	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO, PCI_ANY_ID, PCI_ANY_ID},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_OLITEC, PCI_ANY_ID, PCI_ANY_ID},
 #endif
 #ifdef CONFIG_HISAX_QUADRO
 	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_ANY_ID, PCI_ANY_ID},
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c
index 352b45ac5347..60b04c6d9e7d 100644
--- a/drivers/isdn/hisax/gazel.c
+++ b/drivers/isdn/hisax/gazel.c
@@ -546,8 +546,9 @@ setup_gazelpci(struct IsdnCardState *cs)

 	found = 0;
 	seekcard = PCI_DEVICE_ID_PLX_R685;
-	for (nbseek = 0; nbseek < 3; nbseek++) {
-		if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX, seekcard, dev_tel))) {
+	for (nbseek = 0; nbseek < 4; nbseek++) {
+		if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX,
+					       seekcard, dev_tel))) {
 			if (pci_enable_device(dev_tel))
 				return 1;
 			pci_irq = dev_tel->irq;
@@ -565,6 +566,9 @@ setup_gazelpci(struct IsdnCardState *cs)
 			case PCI_DEVICE_ID_PLX_R753:
 				seekcard = PCI_DEVICE_ID_PLX_DJINN_ITOO;
 				break;
+			case PCI_DEVICE_ID_PLX_DJINN_ITOO:
+				seekcard = PCI_DEVICE_ID_PLX_OLITEC;
+				break;
 			}
 		}
 	}
@@ -605,6 +609,7 @@ setup_gazelpci(struct IsdnCardState *cs)
 			break;
 		case PCI_DEVICE_ID_PLX_R753:
 		case PCI_DEVICE_ID_PLX_DJINN_ITOO:
+		case PCI_DEVICE_ID_PLX_OLITEC:
 			printk(KERN_INFO "Gazel: Card PCI R753 found\n");
 			cs->subtyp = R753;
 			test_and_set_bit(HW_IPAC, &cs->HW_Flags);
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 1615c1a76ab8..6d0431725555 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -213,7 +213,7 @@ sethdraddr(struct Layer2 *l2, u_char * header, int rsp)
 	}
 }

-inline static void
+static inline void
 enqueue_super(struct PStack *st,
 	      struct sk_buff *skb)
 {
diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c
index a6d2abdb478a..e96845cdd4f6 100644
--- a/drivers/isdn/hisax/l3dss1.c
+++ b/drivers/isdn/hisax/l3dss1.c
@@ -353,7 +353,7 @@ l3dss1_parse_facility(struct PStack *st, struct l3_process *pc,
 	{	l3dss1_dummy_invoke(st, cr, id, ident, p, nlen);
 		return;
 	}
-#if HISAX_DE_AOC
+#ifdef HISAX_DE_AOC
 	{

 #define FOO1(s,a,b) \
@@ -977,7 +977,7 @@ l3dss1_release_cmpl(struct l3_process *pc, u_char pr, void *arg)
 	dss1_release_l3_process(pc);
 }

-#if EXT_BEARER_CAPS
+#ifdef EXT_BEARER_CAPS

 static u_char *
 EncodeASyncParams(u_char * p, u_char si2)
@@ -1369,7 +1369,7 @@ l3dss1_setup_req(struct l3_process *pc, u_char pr,
 				*p++ = *sub++ & 0x7f;
 			}
 		}
-#if EXT_BEARER_CAPS
+#ifdef EXT_BEARER_CAPS
 	if ((pc->para.setup.si2 >= 160) && (pc->para.setup.si2 <= 175)) {	// sync. bit rate adaption, V.110/X.30

 		*p++ = IE_LLC;
@@ -1609,7 +1609,7 @@ l3dss1_setup(struct l3_process *pc, u_char pr, void *arg)
 		case 0x08:	/* Unrestricted digital information */
 			pc->para.setup.si1 = 7;
 /* JIM, 05.11.97 I wanna set service indicator 2 */
-#if EXT_BEARER_CAPS
+#ifdef EXT_BEARER_CAPS
 			pc->para.setup.si2 = DecodeSI2(skb);
 #endif
 			break;
diff --git a/drivers/isdn/hisax/teles3.c b/drivers/isdn/hisax/teles3.c
index adeaad62d35c..a3eaf4d65707 100644
--- a/drivers/isdn/hisax/teles3.c
+++ b/drivers/isdn/hisax/teles3.c
@@ -143,7 +143,7 @@ teles3_interrupt(int intno, void *dev_id, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }

-inline static void
+static inline void
 release_ioregs(struct IsdnCardState *cs, int mask)
 {
 	if (mask & 1)
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 91691a6c004e..65ab64c43b3e 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -4,7 +4,7 @@ menu "Macintosh device drivers"

 config ADB
 	bool "Apple Desktop Bus (ADB) support"
-	depends on MAC || PPC_PMAC
+	depends on MAC || (PPC_PMAC && PPC32)
 	help
 	  Apple Desktop Bus (ADB) support is for support of devices which
 	  are connected to an ADB port.  ADB devices tend to have 4 pins.
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 0c2ed99a3832..70bca955e0de 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -108,7 +108,7 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
 {
 	unsigned char *page;

-#if INJECT_FAULTS_1
+#ifdef INJECT_FAULTS_1
 	page = NULL;
 #else
 	page = kmalloc(PAGE_SIZE, GFP_NOIO);
@@ -843,7 +843,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, int in_sync)

 	BUG_ON(!file && !bitmap->offset);

-#if INJECT_FAULTS_3
+#ifdef INJECT_FAULTS_3
 	outofdate = 1;
 #else
 	outofdate = bitmap->flags & BITMAP_STALE;
@@ -1187,7 +1187,7 @@ static int bitmap_start_daemon(struct bitmap *bitmap, mdk_thread_t **ptr,

 	spin_unlock_irqrestore(&bitmap->lock, flags);

-#if INJECT_FATAL_FAULT_2
+#ifdef INJECT_FATAL_FAULT_2
 	daemon = NULL;
 #else
 	sprintf(namebuf, "%%s_%s", name);
@@ -1552,7 +1552,7 @@ int bitmap_create(mddev_t *mddev)

 	bitmap->syncchunk = ~0UL;

-#if INJECT_FATAL_FAULT_1
+#ifdef INJECT_FATAL_FAULT_1
 	bitmap->bp = NULL;
 #else
 	bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4a0c57db2b67..6580e0fa4a47 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -284,7 +284,7 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
 	return NULL;
 }

-inline static sector_t calc_dev_sboffset(struct block_device *bdev)
+static inline sector_t calc_dev_sboffset(struct block_device *bdev)
 {
 	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
 	return MD_NEW_SIZE_BLOCKS(size);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5f253ee536bb..d3a64a04a6d8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1468,6 +1468,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
 	mddev->size = mddev->array_size;
+	mddev->resync_max_sectors = sectors;
 	return 0;
 }

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 93a9726cc2d6..4698d5f79575 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1931,6 +1931,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
 	mddev->size = sectors /2;
+	mddev->resync_max_sectors = sectors;
 	return 0;
 }

diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index f62ea1a73d0d..f5ee16805111 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -2095,6 +2095,7 @@ static int raid6_resize(mddev_t *mddev, sector_t sectors)
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
 	mddev->size = sectors /2;
+	mddev->resync_max_sectors = sectors;
 	return 0;
 }

diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index d847c62bd837..e83256d0fd14 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -187,8 +187,8 @@ config DVB_BCM3510
 	  An ATSC 8VSB/16VSB and QAM64/256 tuner module. Say Y when you want to
 	  support this frontend.

-config DVB_LGDT3302
-	tristate "LGDT3302 based (DViCO FusionHDTV3 Gold)"
+config DVB_LGDT330X
+	tristate "LGDT3302 or LGDT3303 based (DViCO FusionHDTV Gold)"
 	depends on DVB_CORE
 	help
 	  An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index de5e240cba7f..ad8658ffd60a 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -30,4 +30,4 @@ obj-$(CONFIG_DVB_OR51211) += or51211.o
 obj-$(CONFIG_DVB_OR51132) += or51132.o
 obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
 obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
-obj-$(CONFIG_DVB_LGDT3302) += lgdt3302.o
+obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index 5afeaa9b43b4..5264310c070e 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -82,13 +82,14 @@ struct dvb_pll_desc dvb_pll_lg_z201 = {
 	.name  = "LG z201",
 	.min   = 174000000,
 	.max   = 862000000,
-	.count = 5,
+	.count = 6,
 	.entries = {
 		{          0, 36166667, 166666, 0xbc, 0x03 },
-		{  443250000, 36166667, 166666, 0xbc, 0x01 },
-		{  542000000, 36166667, 166666, 0xbc, 0x02 },
-		{  830000000, 36166667, 166666, 0xf4, 0x02 },
-		{  999999999, 36166667, 166666, 0xfc, 0x02 },
+		{  157500000, 36166667, 166666, 0xbc, 0x01 },
+		{  443250000, 36166667, 166666, 0xbc, 0x02 },
+		{  542000000, 36166667, 166666, 0xbc, 0x04 },
+		{  830000000, 36166667, 166666, 0xf4, 0x04 },
+		{  999999999, 36166667, 166666, 0xfc, 0x04 },
 	},
 };
 EXPORT_SYMBOL(dvb_pll_lg_z201);
diff --git a/drivers/media/dvb/frontends/lgdt3302.c b/drivers/media/dvb/frontends/lgdt330x.c
index c85a2a99df42..e94dee50eecd 100644
--- a/drivers/media/dvb/frontends/lgdt3302.c
+++ b/drivers/media/dvb/frontends/lgdt330x.c
@@ -1,5 +1,5 @@
 /*
- *    Support for LGDT3302 (DViCO FustionHDTV 3 Gold) - VSB/QAM
+ *    Support for LGDT3302 & LGDT3303 (DViCO FusionHDTV Gold) - VSB/QAM
  *
  *    Copyright (C) 2005 Wilson Michaels <wilsonmichaels@earthlink.net>
  *
@@ -25,10 +25,11 @@
 /*
  * NOTES ABOUT THIS DRIVER
  *
- * This driver supports DViCO FusionHDTV 3 Gold under Linux.
+ * This driver supports DViCO FusionHDTV Gold under Linux.
  *
  * TODO:
  * BER and signal strength always return 0.
+ * Include support for LGDT3303
  *
  */
 
@@ -41,24 +42,24 @@
 
 #include "dvb_frontend.h"
 #include "dvb-pll.h"
-#include "lgdt3302_priv.h"
-#include "lgdt3302.h"
+#include "lgdt330x_priv.h"
+#include "lgdt330x.h"
 
 static int debug = 0;
 module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug,"Turn on/off lgdt3302 frontend debugging (default:off).");
+MODULE_PARM_DESC(debug,"Turn on/off lgdt330x frontend debugging (default:off).");
 #define dprintk(args...) \
 do { \
-if (debug) printk(KERN_DEBUG "lgdt3302: " args); \
+if (debug) printk(KERN_DEBUG "lgdt330x: " args); \
 } while (0)
 
-struct lgdt3302_state
+struct lgdt330x_state
 {
 	struct i2c_adapter* i2c;
 	struct dvb_frontend_ops ops;
 
 	/* Configuration settings */
-	const struct lgdt3302_config* config;
+	const struct lgdt330x_config* config;
 
 	struct dvb_frontend frontend;
 
@@ -69,45 +70,33 @@ struct lgdt3302_state
 	u32 current_frequency;
 };
 
-static int i2c_writebytes (struct lgdt3302_state* state,
+static int i2c_writebytes (struct lgdt330x_state* state,
 			   u8 addr, /* demod_address or pll_address */
 			   u8 *buf, /* data bytes to send */
 			   int len  /* number of bytes to send */ )
 {
-	if (addr == state->config->pll_address) {
-		struct i2c_msg msg =
-			{ .addr = addr, .flags = 0, .buf = buf, .len = len };
-		int err;
+	u8 tmp[] = { buf[0], buf[1] };
+	struct i2c_msg msg =
+		{ .addr = addr, .flags = 0, .buf = tmp, .len = 2 };
+	int err;
+	int i;
 
+	for (i=1; i<len; i++) {
+		tmp[1] = buf[i];
 		if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
-			printk(KERN_WARNING "lgdt3302: %s error (addr %02x <- %02x, err == %i)\n", __FUNCTION__, addr, buf[0], err);
+			printk(KERN_WARNING "lgdt330x: %s error (addr %02x <- %02x, err == %i)\n", __FUNCTION__, addr, buf[0], err);
 			if (err < 0)
 				return err;
 			else
 				return -EREMOTEIO;
 		}
-	} else {
-		u8 tmp[] = { buf[0], buf[1] };
-		struct i2c_msg msg =
-			{ .addr = addr, .flags = 0, .buf = tmp, .len = 2 };
-		int err;
-		int i;
-
-		for (i=1; i<len; i++) {
-			tmp[1] = buf[i];
-			if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
-				printk(KERN_WARNING "lgdt3302: %s error (addr %02x <- %02x, err == %i)\n", __FUNCTION__, addr, buf[0], err);
-				if (err < 0)
-					return err;
-				else
-					return -EREMOTEIO;
-			}
-			tmp[0]++;
-		}
+		tmp[0]++;
 	}
 	return 0;
 }
-static int i2c_readbytes (struct lgdt3302_state* state,
+
+#if 0
+static int i2c_readbytes (struct lgdt330x_state* state,
 			  u8 addr, /* demod_address or pll_address */
 			  u8 *buf, /* holds data bytes read */
 			  int len  /* number of bytes to read */ )
@@ -117,18 +106,19 @@ static int i2c_readbytes (struct lgdt3302_state* state,
 	int err;
 
 	if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
-		printk(KERN_WARNING "lgdt3302: %s error (addr %02x, err == %i)\n", __FUNCTION__, addr, err);
+		printk(KERN_WARNING "lgdt330x: %s error (addr %02x, err == %i)\n", __FUNCTION__, addr, err);
 		return -EREMOTEIO;
 	}
 	return 0;
 }
+#endif
 
 /*
  * This routine writes the register (reg) to the demod bus
  * then reads the data returned for (len) bytes.
  */
 
-static u8 i2c_selectreadbytes (struct lgdt3302_state* state,
+static u8 i2c_selectreadbytes (struct lgdt330x_state* state,
 			       enum I2C_REG reg, u8* buf, int len)
 {
 	u8 wr [] = { reg };
@@ -141,7 +131,7 @@ static u8 i2c_selectreadbytes (struct lgdt3302_state* state,
 	int ret;
 	ret = i2c_transfer(state->i2c, msg, 2);
 	if (ret != 2) {
-		printk(KERN_WARNING "lgdt3302: %s: addr 0x%02x select 0x%02x error (ret == %i)\n", __FUNCTION__, state->config->demod_address, reg, ret);
+		printk(KERN_WARNING "lgdt330x: %s: addr 0x%02x select 0x%02x error (ret == %i)\n", __FUNCTION__, state->config->demod_address, reg, ret);
 	} else {
 		ret = 0;
 	}
@@ -149,7 +139,7 @@ static u8 i2c_selectreadbytes (struct lgdt3302_state* state,
 }
 
 /* Software reset */
-int lgdt3302_SwReset(struct lgdt3302_state* state)
+int lgdt330x_SwReset(struct lgdt330x_state* state)
 {
 	u8 ret;
 	u8 reset[] = {
@@ -175,7 +165,7 @@ int lgdt3302_SwReset(struct lgdt3302_state* state)
 	return ret;
 }
 
-static int lgdt3302_init(struct dvb_frontend* fe)
+static int lgdt330x_init(struct dvb_frontend* fe)
 {
 	/* Hardware reset is done using gpio[0] of cx23880x chip.
 	 * I'd like to do it here, but don't know how to find chip address.
@@ -184,18 +174,18 @@ static int lgdt3302_init(struct dvb_frontend* fe)
 	 * the caller of this function needs to do it. */
 
 	dprintk("%s entered\n", __FUNCTION__);
-	return lgdt3302_SwReset((struct lgdt3302_state*) fe->demodulator_priv);
+	return lgdt330x_SwReset((struct lgdt330x_state*) fe->demodulator_priv);
 }
 
-static int lgdt3302_read_ber(struct dvb_frontend* fe, u32* ber)
+static int lgdt330x_read_ber(struct dvb_frontend* fe, u32* ber)
 {
 	*ber = 0; /* Dummy out for now */
 	return 0;
 }
 
-static int lgdt3302_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
+static int lgdt330x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
 {
-	struct lgdt3302_state* state = (struct lgdt3302_state*) fe->demodulator_priv;
+	struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
 	u8 buf[2];
 
 	i2c_selectreadbytes(state, PACKET_ERR_COUNTER1, buf, sizeof(buf));
@@ -204,12 +194,11 @@ static int lgdt3302_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
 	return 0;
 }
 
-static int lgdt3302_set_parameters(struct dvb_frontend* fe,
+static int lgdt330x_set_parameters(struct dvb_frontend* fe,
 				   struct dvb_frontend_parameters *param)
 {
-	u8 buf[4];
-	struct lgdt3302_state* state =
-		(struct lgdt3302_state*) fe->demodulator_priv;
+	struct lgdt330x_state* state =
+		(struct lgdt330x_state*) fe->demodulator_priv;
 
 	/* Use 50MHz parameter values from spec sheet since xtal is 50 */
 	static u8 top_ctrl_cfg[] = { TOP_CONTROL, 0x03 };
@@ -228,6 +217,10 @@ static int lgdt3302_set_parameters(struct dvb_frontend* fe,
 
 		/* Select VSB mode and serial MPEG interface */
 		top_ctrl_cfg[1] = 0x07;
+
+		/* Select ANT connector if supported by card */
+		if (state->config->pll_rf_set)
+			state->config->pll_rf_set(fe, 1);
 		break;
 
 	case QAM_64:
@@ -235,6 +228,10 @@ static int lgdt3302_set_parameters(struct dvb_frontend* fe,
 
 		/* Select QAM_64 mode and serial MPEG interface */
 		top_ctrl_cfg[1] = 0x04;
+
+		/* Select CABLE connector if supported by card */
+		if (state->config->pll_rf_set)
+			state->config->pll_rf_set(fe, 0);
 		break;
 
 	case QAM_256:
@@ -242,9 +239,13 @@ static int lgdt3302_set_parameters(struct dvb_frontend* fe,
 
 		/* Select QAM_256 mode and serial MPEG interface */
 		top_ctrl_cfg[1] = 0x05;
+
+		/* Select CABLE connector if supported by card */
+		if (state->config->pll_rf_set)
+			state->config->pll_rf_set(fe, 0);
 		break;
 	default:
-		printk(KERN_WARNING "lgdt3302: %s: Modulation type(%d) UNSUPPORTED\n", __FUNCTION__, param->u.vsb.modulation);
+		printk(KERN_WARNING "lgdt330x: %s: Modulation type(%d) UNSUPPORTED\n", __FUNCTION__, param->u.vsb.modulation);
 		return -1;
 	}
 	/* Initializations common to all modes */
@@ -290,44 +291,50 @@ static int lgdt3302_set_parameters(struct dvb_frontend* fe,
 
 	/* Change only if we are actually changing the channel */
 	if (state->current_frequency != param->frequency) {
-		dvb_pll_configure(state->config->pll_desc, buf,
-				  param->frequency, 0);
-		dprintk("%s: tuner bytes: 0x%02x 0x%02x "
-			"0x%02x 0x%02x\n", __FUNCTION__, buf[0],buf[1],buf[2],buf[3]);
-		i2c_writebytes(state, state->config->pll_address ,buf, 4);
+		u8 buf[5];
+		struct i2c_msg msg = { .flags = 0, .buf = &buf[1], .len = 4 };
+		int err;
 
-		/* Check the status of the tuner pll */
-		i2c_readbytes(state, state->config->pll_address, buf, 1);
-		dprintk("%s: tuner status byte = 0x%02x\n", __FUNCTION__, buf[0]);
+		state->config->pll_set(fe, param, buf);
+		msg.addr = buf[0];
 
+		dprintk("%s: tuner at 0x%02x bytes: 0x%02x 0x%02x "
+			"0x%02x 0x%02x\n", __FUNCTION__,
+			buf[0],buf[1],buf[2],buf[3],buf[4]);
+		if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
+			printk(KERN_WARNING "lgdt330x: %s error (addr %02x <- %02x, err = %i)\n", __FUNCTION__, buf[0], buf[1], err);
+			if (err < 0)
+				return err;
+			else
+				return -EREMOTEIO;
+		}
+#if 0
+		/* Check the status of the tuner pll */
+		i2c_readbytes(state, buf[0], &buf[1], 1);
+		dprintk("%s: tuner status byte = 0x%02x\n", __FUNCTION__, buf[1]);
+#endif
 		/* Update current frequency */
 		state->current_frequency = param->frequency;
 	}
-	lgdt3302_SwReset(state);
+	lgdt330x_SwReset(state);
 	return 0;
 }
 
-static int lgdt3302_get_frontend(struct dvb_frontend* fe,
+static int lgdt330x_get_frontend(struct dvb_frontend* fe,
 				 struct dvb_frontend_parameters* param)
 {
-	struct lgdt3302_state *state = fe->demodulator_priv;
+	struct lgdt330x_state *state = fe->demodulator_priv;
 	param->frequency = state->current_frequency;
 	return 0;
 }
 
-static int lgdt3302_read_status(struct dvb_frontend* fe, fe_status_t* status)
+static int lgdt330x_read_status(struct dvb_frontend* fe, fe_status_t* status)
 {
-	struct lgdt3302_state* state = (struct lgdt3302_state*) fe->demodulator_priv;
+	struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
 	u8 buf[3];
 
 	*status = 0; /* Reset status result */
 
-	/* Check the status of the tuner pll */
-	i2c_readbytes(state, state->config->pll_address, buf, 1);
-	dprintk("%s: tuner status byte = 0x%02x\n", __FUNCTION__, buf[0]);
-	if ((buf[0] & 0xc0) != 0x40)
-		return 0; /* Tuner PLL not locked or not powered on */
-
 	/*
 	 * You must set the Mask bits to 1 in the IRQ_MASK in order
 	 * to see that status bit in the IRQ_STATUS register.
@@ -383,19 +390,19 @@ static int lgdt3302_read_status(struct dvb_frontend* fe, fe_status_t* status)
 		*status |= FE_HAS_CARRIER;
 		break;
 	default:
-		printk("KERN_WARNING lgdt3302: %s: Modulation set to unsupported value\n", __FUNCTION__);
+		printk("KERN_WARNING lgdt330x: %s: Modulation set to unsupported value\n", __FUNCTION__);
 	}
 
 	return 0;
 }
 
-static int lgdt3302_read_signal_strength(struct dvb_frontend* fe, u16* strength)
+static int lgdt330x_read_signal_strength(struct dvb_frontend* fe, u16* strength)
 {
 	/* not directly available. */
 	return 0;
 }
 
-static int lgdt3302_read_snr(struct dvb_frontend* fe, u16* snr)
+static int lgdt330x_read_snr(struct dvb_frontend* fe, u16* snr)
 {
 #ifdef SNR_IN_DB
 	/*
@@ -450,7 +457,7 @@ static int lgdt3302_read_snr(struct dvb_frontend* fe, u16* snr)
 	static u8 buf[5];/* read data buffer */
 	static u32 noise; /* noise value */
 	static u32 snr_db; /* index into SNR_EQ[] */
-	struct lgdt3302_state* state = (struct lgdt3302_state*) fe->demodulator_priv;
+	struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
 
 	/* read both equalizer and pase tracker noise data */
 	i2c_selectreadbytes(state, EQPH_ERR0, buf, sizeof(buf));
@@ -486,7 +493,7 @@ static int lgdt3302_read_snr(struct dvb_frontend* fe, u16* snr)
 	/* Return the raw noise value */
 	static u8 buf[5];/* read data buffer */
 	static u32 noise; /* noise value */
-	struct lgdt3302_state* state = (struct lgdt3302_state*) fe->demodulator_priv;
+	struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
 
 	/* read both equalizer and pase tracker noise data */
 	i2c_selectreadbytes(state, EQPH_ERR0, buf, sizeof(buf));
@@ -509,7 +516,7 @@ static int lgdt3302_read_snr(struct dvb_frontend* fe, u16* snr)
 	return 0;
 }
 
-static int lgdt3302_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fe_tune_settings)
+static int lgdt330x_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fe_tune_settings)
 {
 	/* I have no idea about this - it may not be needed */
 	fe_tune_settings->min_delay_ms = 500;
@@ -518,22 +525,22 @@ static int lgdt3302_get_tune_settings(struct dvb_frontend* fe, struct dvb_fronte
 	return 0;
 }
 
-static void lgdt3302_release(struct dvb_frontend* fe)
+static void lgdt330x_release(struct dvb_frontend* fe)
 {
-	struct lgdt3302_state* state = (struct lgdt3302_state*) fe->demodulator_priv;
+	struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
 	kfree(state);
 }
 
-static struct dvb_frontend_ops lgdt3302_ops;
+static struct dvb_frontend_ops lgdt330x_ops;
 
-struct dvb_frontend* lgdt3302_attach(const struct lgdt3302_config* config,
+struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
 				     struct i2c_adapter* i2c)
 {
-	struct lgdt3302_state* state = NULL;
+	struct lgdt330x_state* state = NULL;
 	u8 buf[1];
 
 	/* Allocate memory for the internal state */
-	state = (struct lgdt3302_state*) kmalloc(sizeof(struct lgdt3302_state), GFP_KERNEL);
+	state = (struct lgdt330x_state*) kmalloc(sizeof(struct lgdt330x_state), GFP_KERNEL);
 	if (state == NULL)
 		goto error;
 	memset(state,0,sizeof(*state));
@@ -541,7 +548,7 @@ struct dvb_frontend* lgdt3302_attach(const struct lgdt3302_config* config,
 	/* Setup the state */
 	state->config = config;
 	state->i2c = i2c;
-	memcpy(&state->ops, &lgdt3302_ops, sizeof(struct dvb_frontend_ops));
+	memcpy(&state->ops, &lgdt330x_ops, sizeof(struct dvb_frontend_ops));
 	/* Verify communication with demod chip */
 	if (i2c_selectreadbytes(state, 2, buf, 1))
 		goto error;
@@ -561,9 +568,9 @@ error:
 	return NULL;
 }
 
-static struct dvb_frontend_ops lgdt3302_ops = {
+static struct dvb_frontend_ops lgdt330x_ops = {
 	.info = {
-		.name= "LG Electronics LGDT3302 VSB/QAM Frontend",
+		.name= "LG Electronics lgdt330x VSB/QAM Frontend",
 		.type = FE_ATSC,
 		.frequency_min= 54000000,
 		.frequency_max= 858000000,
@@ -573,23 +580,23 @@ static struct dvb_frontend_ops lgdt3302_ops = {
 	.symbol_rate_max = 10762000,
 	.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
 	},
-	.init = lgdt3302_init,
-	.set_frontend = lgdt3302_set_parameters,
-	.get_frontend = lgdt3302_get_frontend,
-	.get_tune_settings = lgdt3302_get_tune_settings,
-	.read_status = lgdt3302_read_status,
-	.read_ber = lgdt3302_read_ber,
-	.read_signal_strength = lgdt3302_read_signal_strength,
-	.read_snr = lgdt3302_read_snr,
-	.read_ucblocks = lgdt3302_read_ucblocks,
-	.release = lgdt3302_release,
+	.init = lgdt330x_init,
+	.set_frontend = lgdt330x_set_parameters,
+	.get_frontend = lgdt330x_get_frontend,
+	.get_tune_settings = lgdt330x_get_tune_settings,
+	.read_status = lgdt330x_read_status,
+	.read_ber = lgdt330x_read_ber,
+	.read_signal_strength = lgdt330x_read_signal_strength,
+	.read_snr = lgdt330x_read_snr,
+	.read_ucblocks = lgdt330x_read_ucblocks,
+	.release = lgdt330x_release,
 };
 
-MODULE_DESCRIPTION("LGDT3302 [DViCO FusionHDTV 3 Gold] (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulator Driver");
+MODULE_DESCRIPTION("lgdt330x [DViCO FusionHDTV 3 Gold] (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulator Driver");
 MODULE_AUTHOR("Wilson Michaels");
 MODULE_LICENSE("GPL");
 
-EXPORT_SYMBOL(lgdt3302_attach);
+EXPORT_SYMBOL(lgdt330x_attach);
 
 /*
  * Local variables:
diff --git a/drivers/media/dvb/frontends/lgdt3302.h b/drivers/media/dvb/frontends/lgdt330x.h
index 81587a40032b..04986f8e7565 100644
--- a/drivers/media/dvb/frontends/lgdt3302.h
+++ b/drivers/media/dvb/frontends/lgdt330x.h
@@ -1,7 +1,5 @@
 /*
- * $Id: lgdt3302.h,v 1.2 2005/06/28 23:50:48 mkrufky Exp $
- *
- * Support for LGDT3302 (DViCO FustionHDTV 3 Gold) - VSB/QAM
+ * Support for LGDT3302 & LGDT3303 (DViCO FustionHDTV Gold) - VSB/QAM
  *
  *    Copyright (C) 2005 Wilson Michaels <wilsonmichaels@earthlink.net>
  *
@@ -21,26 +19,28 @@
  *
  */
 
-#ifndef LGDT3302_H
-#define LGDT3302_H
+#ifndef LGDT330X_H
+#define LGDT330X_H
 
 #include <linux/dvb/frontend.h>
 
-struct lgdt3302_config
+struct lgdt330x_config
 {
 	/* The demodulator's i2c address */
 	u8 demod_address;
-	u8 pll_address;
-	struct dvb_pll_desc *pll_desc;
+
+	/* PLL interface */
+	int (*pll_rf_set) (struct dvb_frontend* fe, int index);
+	int (*pll_set)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params, u8* pll_address);
 
 	/* Need to set device param for start_dma */
 	int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
 };
 
-extern struct dvb_frontend* lgdt3302_attach(const struct lgdt3302_config* config,
+extern struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
 					    struct i2c_adapter* i2c);
 
-#endif /* LGDT3302_H */
+#endif /* LGDT330X_H */
 
 /*
  * Local variables:
diff --git a/drivers/media/dvb/frontends/lgdt3302_priv.h b/drivers/media/dvb/frontends/lgdt330x_priv.h
index 6193fa7a569d..4143ce8f1a95 100644
--- a/drivers/media/dvb/frontends/lgdt3302_priv.h
+++ b/drivers/media/dvb/frontends/lgdt330x_priv.h
@@ -1,7 +1,5 @@
 /*
- * $Id: lgdt3302_priv.h,v 1.2 2005/06/28 23:50:48 mkrufky Exp $
- *
- * Support for LGDT3302 (DViCO FustionHDTV 3 Gold) - VSB/QAM
+ * Support for LGDT3302 & LGDT3303 (DViCO FustionHDTV Gold) - VSB/QAM
  *
  *    Copyright (C) 2005 Wilson Michaels <wilsonmichaels@earthlink.net>
  *
@@ -21,8 +19,8 @@
  *
  */
 
-#ifndef _LGDT3302_PRIV_
-#define _LGDT3302_PRIV_
+#ifndef _LGDT330X_PRIV_
+#define _LGDT330X_PRIV_
 
 /* i2c control register addresses */
 enum I2C_REG {
@@ -63,7 +61,7 @@ enum I2C_REG {
 	PACKET_ERR_COUNTER2= 0x6b,
 };
 
-#endif /* _LGDT3302_PRIV_ */
+#endif /* _LGDT330X_PRIV_ */
 
 /*
  * Local variables:
diff --git a/drivers/media/radio/radio-maestro.c b/drivers/media/radio/radio-maestro.c
index e62147e4ed1b..e5e2021a7312 100644
--- a/drivers/media/radio/radio-maestro.c
+++ b/drivers/media/radio/radio-maestro.c
@@ -154,7 +154,7 @@ static void radio_bits_set(struct radio_device *dev, __u32 data)
 	msleep(125);
 }
 
-inline static int radio_function(struct inode *inode, struct file *file,
+static inline int radio_function(struct inode *inode, struct file *file,
 				 unsigned int cmd, void *arg)
 {
 	struct video_device *dev = video_devdata(file);
@@ -283,7 +283,7 @@ static int __init maestro_radio_init(void)
 module_init(maestro_radio_init);
 module_exit(maestro_radio_exit);
 
-inline static __u16 radio_power_on(struct radio_device *dev)
+static inline __u16 radio_power_on(struct radio_device *dev)
 {
 	register __u16 io=dev->io;
 	register __u32 ofreq;
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 5b748a48ce72..02d39a50d5ed 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -166,7 +166,7 @@ static int get_tune(__u16 io)
 }
 
 
-inline static int radio_function(struct inode *inode, struct file *file,
+static inline int radio_function(struct inode *inode, struct file *file,
 				 unsigned int cmd, void *arg)
 {
 	struct video_device *dev = video_devdata(file);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f461750c7646..ac81e5e01a9a 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -236,7 +236,7 @@ config VIDEO_MEYE
 
 config VIDEO_SAA7134
 	tristate "Philips SAA7134 support"
-	depends on VIDEO_DEV && PCI && I2C
+	depends on VIDEO_DEV && PCI && I2C && SOUND
 	select VIDEO_BUF
 	select VIDEO_IR
 	select VIDEO_TUNER
@@ -331,7 +331,7 @@ config VIDEO_CX88_DVB
 	select DVB_MT352
 	select DVB_OR51132
 	select DVB_CX22702
-	select DVB_LGDT3302
+	select DVB_LGDT330X
 	---help---
 	  This adds support for DVB/ATSC cards based on the
 	  Connexant 2388x chip.
diff --git a/drivers/media/video/bttv-cards.c b/drivers/media/video/bttv-cards.c
index 2dbf5ec43abd..6c52fd0bb7df 100644
--- a/drivers/media/video/bttv-cards.c
+++ b/drivers/media/video/bttv-cards.c
@@ -1,5 +1,5 @@
 /*
-    $Id: bttv-cards.c,v 1.53 2005/07/05 17:37:35 nsh Exp $
+    $Id: bttv-cards.c,v 1.54 2005/07/19 18:26:46 mkrufky Exp $
 
     bttv-cards.c
 
@@ -2772,8 +2772,6 @@ void __devinit bttv_init_card2(struct bttv *btv)
 	}
 	btv->pll.pll_current = -1;
 
-	bttv_reset_audio(btv);
-
 	/* tuner configuration (from card list / autodetect / insmod option) */
 	if (UNSET != bttv_tvcards[btv->c.type].tuner_type)
 		if(UNSET == btv->tuner_type)
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 606d0348da2c..107e48645e3a 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -9,3 +9,15 @@ obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
 EXTRA_CFLAGS += -I$(src)/..
 EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/frontends
+ifneq ($(CONFIG_DVB_CX22702),n)
+ EXTRA_CFLAGS += -DHAVE_CX22702=1
+endif
+ifneq ($(CONFIG_DVB_OR51132),n)
+ EXTRA_CFLAGS += -DHAVE_OR51132=1
+endif
+ifneq ($(CONFIG_DVB_LGDT330X),n)
+ EXTRA_CFLAGS += -DHAVE_LGDT330X=1
+endif
+ifneq ($(CONFIG_DVB_MT352),n)
+ EXTRA_CFLAGS += -DHAVE_MT352=1
+endif
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 6ad1458ab652..ef0e9a85c359 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -1,5 +1,5 @@
 /*
- * $Id: cx88-dvb.c,v 1.42 2005/07/12 15:44:55 mkrufky Exp $
+ * $Id: cx88-dvb.c,v 1.54 2005/07/25 05:13:50 mkrufky Exp $
  *
  * device driver for Conexant 2388x based TV cards
  * MPEG Transport Stream (DVB) routines
@@ -29,27 +29,23 @@
 #include <linux/kthread.h>
 #include <linux/file.h>
 #include <linux/suspend.h>
-
-#define CONFIG_DVB_MT352 1
-#define CONFIG_DVB_CX22702 1
-#define CONFIG_DVB_OR51132 1
-#define CONFIG_DVB_LGDT3302 1
+#include <linux/config.h>
 
 #include "cx88.h"
 #include "dvb-pll.h"
 
-#if CONFIG_DVB_MT352
+#ifdef HAVE_MT352
 # include "mt352.h"
 # include "mt352_priv.h"
 #endif
-#if CONFIG_DVB_CX22702
+#ifdef HAVE_CX22702
 # include "cx22702.h"
 #endif
-#if CONFIG_DVB_OR51132
+#ifdef HAVE_OR51132
 # include "or51132.h"
 #endif
-#if CONFIG_DVB_LGDT3302
-# include "lgdt3302.h"
+#ifdef HAVE_LGDT330X
+# include "lgdt330x.h"
 #endif
 
 MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
@@ -107,7 +103,7 @@ static struct videobuf_queue_ops dvb_qops = {
 
 /* ------------------------------------------------------------------ */
 
-#if CONFIG_DVB_MT352
+#ifdef HAVE_MT352
 static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
 {
 	static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
@@ -177,7 +173,7 @@ static struct mt352_config dntv_live_dvbt_config = {
 };
 #endif
 
-#if CONFIG_DVB_CX22702
+#ifdef HAVE_CX22702
 static struct cx22702_config connexant_refboard_config = {
 	.demod_address = 0x43,
 	.output_mode = CX22702_SERIAL_OUTPUT,
@@ -193,7 +189,7 @@ static struct cx22702_config hauppauge_novat_config = {
 };
 #endif
 
-#if CONFIG_DVB_OR51132
+#ifdef HAVE_OR51132
 static int or51132_set_ts_param(struct dvb_frontend* fe,
 				int is_punctured)
 {
@@ -210,8 +206,33 @@ static struct or51132_config pchdtv_hd3000 = {
 };
 #endif
 
-#if CONFIG_DVB_LGDT3302
-static int lgdt3302_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+#ifdef HAVE_LGDT330X
+static int lgdt330x_pll_set(struct dvb_frontend* fe,
+			    struct dvb_frontend_parameters* params,
+			    u8* pllbuf)
+{
+	struct cx8802_dev *dev= fe->dvb->priv;
+
+	pllbuf[0] = dev->core->pll_addr;
+	dvb_pll_configure(dev->core->pll_desc, &pllbuf[1],
+			  params->frequency, 0);
+	return 0;
+}
+
+static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
+{
+	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx88_core *core = dev->core;
+
+	dprintk(1, "%s: index = %d\n", __FUNCTION__, index);
+	if (index == 0)
+		cx_clear(MO_GP0_IO, 8);
+	else
+		cx_set(MO_GP0_IO, 8);
+	return 0;
+}
+
+static int lgdt330x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
 {
 	struct cx8802_dev *dev= fe->dvb->priv;
 	if (is_punctured)
@@ -221,18 +242,10 @@ static int lgdt3302_set_ts_param(struct dvb_frontend* fe, int is_punctured)
 	return 0;
 }
 
-static struct lgdt3302_config fusionhdtv_3_gold_q = {
+static struct lgdt330x_config fusionhdtv_3_gold = {
 	.demod_address = 0x0e,
-	.pll_address   = 0x61,
-	.pll_desc      = &dvb_pll_microtune_4042,
-	.set_ts_params = lgdt3302_set_ts_param,
-};
-
-static struct lgdt3302_config fusionhdtv_3_gold_t = {
-	.demod_address = 0x0e,
-	.pll_address   = 0x61,
-	.pll_desc      = &dvb_pll_thomson_dtt7611,
-	.set_ts_params = lgdt3302_set_ts_param,
+	.pll_set       = lgdt330x_pll_set,
+	.set_ts_params = lgdt330x_set_ts_param,
 };
 #endif
 
@@ -244,7 +257,7 @@ static int dvb_register(struct cx8802_dev *dev)
 
 	/* init frontend */
 	switch (dev->core->board) {
-#if CONFIG_DVB_CX22702
+#ifdef HAVE_CX22702
 	case CX88_BOARD_HAUPPAUGE_DVB_T1:
 		dev->dvb.frontend = cx22702_attach(&hauppauge_novat_config,
 						   &dev->core->i2c_adap);
@@ -255,7 +268,7 @@ static int dvb_register(struct cx8802_dev *dev)
 						   &dev->core->i2c_adap);
 		break;
 #endif
-#if CONFIG_DVB_MT352
+#ifdef HAVE_MT352
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
 		dev->core->pll_addr = 0x61;
 		dev->core->pll_desc = &dvb_pll_lg_z201;
@@ -277,13 +290,13 @@ static int dvb_register(struct cx8802_dev *dev)
 						   &dev->core->i2c_adap);
 		break;
 #endif
-#if CONFIG_DVB_OR51132
+#ifdef HAVE_OR51132
 	case CX88_BOARD_PCHDTV_HD3000:
 		dev->dvb.frontend = or51132_attach(&pchdtv_hd3000,
 						   &dev->core->i2c_adap);
 		break;
 #endif
-#if CONFIG_DVB_LGDT3302
+#ifdef HAVE_LGDT330X
 	case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
 		dev->ts_gen_cntrl = 0x08;
 		{
@@ -292,9 +305,14 @@ static int dvb_register(struct cx8802_dev *dev)
 
 		cx_clear(MO_GP0_IO, 1);
 		mdelay(100);
-		cx_set(MO_GP0_IO, 9); // ANT connector too FIXME
+		cx_set(MO_GP0_IO, 1);
 		mdelay(200);
-		dev->dvb.frontend = lgdt3302_attach(&fusionhdtv_3_gold_q,
+
+		/* Select RF connector callback */
+		fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set;
+		dev->core->pll_addr = 0x61;
+		dev->core->pll_desc = &dvb_pll_microtune_4042;
+		dev->dvb.frontend = lgdt330x_attach(&fusionhdtv_3_gold,
 						    &dev->core->i2c_adap);
 		}
 		break;
@@ -306,9 +324,11 @@ static int dvb_register(struct cx8802_dev *dev)
 
 		cx_clear(MO_GP0_IO, 1);
 		mdelay(100);
-		cx_set(MO_GP0_IO, 9); /* ANT connector too FIXME */
+		cx_set(MO_GP0_IO, 9);
 		mdelay(200);
-		dev->dvb.frontend = lgdt3302_attach(&fusionhdtv_3_gold_t,
+		dev->core->pll_addr = 0x61;
+		dev->core->pll_desc = &dvb_pll_thomson_dtt7611;
+		dev->dvb.frontend = lgdt330x_attach(&fusionhdtv_3_gold,
 						    &dev->core->i2c_adap);
 		}
 		break;
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 8403c4e95050..a628a55299c6 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -1,5 +1,5 @@
 /*
-    $Id: cx88-i2c.c,v 1.28 2005/07/05 17:37:35 nsh Exp $
+    $Id: cx88-i2c.c,v 1.30 2005/07/25 05:10:13 mkrufky Exp $
 
     cx88-i2c.c -- all the i2c code is here
 
@@ -164,7 +164,7 @@ static struct i2c_client cx8800_i2c_client_template = {
 };
 
 static char *i2c_devs[128] = {
-	[ 0x1c >> 1 ] = "lgdt3302",
+	[ 0x1c >> 1 ] = "lgdt330x",
 	[ 0x86 >> 1 ] = "tda9887/cx22702",
 	[ 0xa0 >> 1 ] = "eeprom",
 	[ 0xc0 >> 1 ] = "tuner (analog)",
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 486234d41b56..d04793fb80fc 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -142,8 +142,8 @@ struct mxb
 
 	int	cur_mode;	/* current audio mode (mono, stereo, ...) */
 	int	cur_input;	/* current input */
-	int	cur_freq;	/* current frequency the tuner is tuned to */
 	int	cur_mute;	/* current mute status */
+	struct v4l2_frequency	cur_freq;	/* current frequency the tuner is tuned to */
 };
 
 static struct saa7146_extension extension;
@@ -352,9 +352,15 @@ static int mxb_init_done(struct saa7146_dev* dev)
 	/* select a tuner type */
 	tun_setup.mode_mask = T_ANALOG_TV;
 	tun_setup.addr = ADDR_UNSET;
-	tun_setup.type = 5;
+	tun_setup.type = TUNER_PHILIPS_PAL;
 	mxb->tuner->driver->command(mxb->tuner,TUNER_SET_TYPE_ADDR, &tun_setup);
-
+	/* tune in some frequency on tuner */
+	mxb->cur_freq.tuner = 0;
+	mxb->cur_freq.type = V4L2_TUNER_ANALOG_TV;
+	mxb->cur_freq.frequency = freq;
+	mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY,
+		&mxb->cur_freq);
+
 	/* mute audio on tea6420s */
 	mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[6][0]);
 	mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[6][1]);
@@ -371,12 +377,8 @@ static int mxb_init_done(struct saa7146_dev* dev)
 	vm.out = 13;
 	mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm);
 
-	/* tune in some frequency on tuner */
-	mxb->tuner->driver->command(mxb->tuner, VIDIOCSFREQ, &freq);
-
 	/* the rest for mxb */
 	mxb->cur_input = 0;
-	mxb->cur_freq = freq;
 	mxb->cur_mute = 1;
 
 	mxb->cur_mode = V4L2_TUNER_MODE_STEREO;
@@ -819,18 +821,14 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
 			return -EINVAL;
 		}
 
-		memset(f,0,sizeof(*f));
-		f->type = V4L2_TUNER_ANALOG_TV;
-		f->frequency = mxb->cur_freq;
+		*f = mxb->cur_freq;
 
-		DEB_EE(("VIDIOC_G_FREQ: freq:0x%08x.\n", mxb->cur_freq));
+		DEB_EE(("VIDIOC_G_FREQ: freq:0x%08x.\n", mxb->cur_freq.frequency));
 		return 0;
 	}
 	case VIDIOC_S_FREQUENCY:
 	{
 		struct v4l2_frequency *f = arg;
-		int t_locked = 0;
-		int v_byte = 0;
 
 		if (0 != f->tuner)
 			return -EINVAL;
@@ -843,20 +841,11 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
 			return -EINVAL;
 		}
 
-		DEB_EE(("VIDIOC_S_FREQUENCY: freq:0x%08x.\n",f->frequency));
-
-		mxb->cur_freq = f->frequency;
+		mxb->cur_freq = *f;
+		DEB_EE(("VIDIOC_S_FREQUENCY: freq:0x%08x.\n", mxb->cur_freq.frequency));
 
 		/* tune in desired frequency */
-		mxb->tuner->driver->command(mxb->tuner, VIDIOCSFREQ, &mxb->cur_freq);
-
-		/* check if pll of tuner & saa7111a is locked */
-//		mxb->tuner->driver->command(mxb->tuner,TUNER_IS_LOCKED, &t_locked);
-		mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_GET_STATUS, &v_byte);
-
-		/* not locked -- anything to do here ? */
-		if( 0 == t_locked || 0 == (v_byte & DECODER_STATUS_GOOD)) {
-		}
+		mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY, &mxb->cur_freq);
 
 		/* hack: changing the frequency should invalidate the vbi-counter (=> alevt) */
 		spin_lock(&dev->slock);
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index e577a06b136b..b778ffd94e65 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -9,3 +9,9 @@ obj-$(CONFIG_VIDEO_SAA7134_DVB) += saa7134-dvb.o
 EXTRA_CFLAGS += -I$(src)/..
 EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/frontends
+ifneq ($(CONFIG_DVB_MT352),n)
+ EXTRA_CFLAGS += -DHAVE_MT352=1
+endif
+ifneq ($(CONFIG_DVB_TDA1004X),n)
+ EXTRA_CFLAGS += -DHAVE_TDA1004X=1
+endif
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 334bc1850092..8be6a90358c8 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1,5 +1,5 @@
 /*
- * $Id: saa7134-dvb.c,v 1.18 2005/07/04 16:05:50 mkrufky Exp $
+ * $Id: saa7134-dvb.c,v 1.23 2005/07/24 22:12:47 mkrufky Exp $
  *
  * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
  *
@@ -29,18 +29,17 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/suspend.h>
+#include <linux/config.h>
 
-#define CONFIG_DVB_MT352 1
-#define CONFIG_DVB_TDA1004X 1
 
 #include "saa7134-reg.h"
 #include "saa7134.h"
 
-#if CONFIG_DVB_MT352
+#ifdef HAVE_MT352
 # include "mt352.h"
 # include "mt352_priv.h" /* FIXME */
 #endif
-#if CONFIG_DVB_TDA1004X
+#ifdef HAVE_TDA1004X
 # include "tda1004x.h"
 #endif
 
@@ -54,7 +53,7 @@ MODULE_PARM_DESC(antenna_pwr,"enable antenna power (Pinnacle 300i)");
 
 /* ------------------------------------------------------------------ */
 
-#if CONFIG_DVB_MT352
+#ifdef HAVE_MT352
 static int pinnacle_antenna_pwr(struct saa7134_dev *dev, int on)
 {
 	u32 ok;
@@ -153,7 +152,7 @@ static struct mt352_config pinnacle_300i = {
 
 /* ------------------------------------------------------------------ */
 
-#if CONFIG_DVB_TDA1004X
+#ifdef HAVE_TDA1004X
 static int philips_tu1216_pll_init(struct dvb_frontend *fe)
 {
 	struct saa7134_dev *dev = fe->dvb->priv;
@@ -385,7 +384,7 @@ static int philips_fmd1216_pll_set(struct dvb_frontend *fe, struct dvb_frontend_
 	return 0;
 }
 
-
+#ifdef HAVE_TDA1004X
 static struct tda1004x_config medion_cardbus = {
 	.demod_address = 0x08,
 	.invert = 1,
@@ -398,6 +397,7 @@ static struct tda1004x_config medion_cardbus = {
 	.pll_sleep = philips_fmd1216_analog,
 	.request_firmware = NULL,
 };
+#endif
 
 /* ------------------------------------------------------------------ */
 
@@ -547,14 +547,14 @@ static int dvb_init(struct saa7134_dev *dev)
 			    dev);
 
 	switch (dev->board) {
-#if CONFIG_DVB_MT352
+#ifdef HAVE_MT352
 	case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL:
 		printk("%s: pinnacle 300i dvb setup\n",dev->name);
 		dev->dvb.frontend = mt352_attach(&pinnacle_300i,
 						 &dev->i2c_adap);
 		break;
 #endif
-#if CONFIG_DVB_TDA1004X
+#ifdef HAVE_TDA1004X
 	case SAA7134_BOARD_MD7134:
 		dev->dvb.frontend = tda10046_attach(&medion_cardbus,
 						    &dev->i2c_adap);
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index d8b78f1d686b..f42a1efa8fcf 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -285,6 +285,7 @@ static int chip_thread(void *data)
 			schedule();
 		}
 		remove_wait_queue(&chip->wq, &wait);
+		try_to_freeze();
 		if (chip->done || signal_pending(current))
 			break;
 		dprintk("%s: thread wakeup\n", i2c_clientname(&chip->c));
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index e8d9440977cb..62b03ef091e0 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -445,6 +445,7 @@ int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len)
 }
 EXPORT_SYMBOL(tveeprom_read);
 
+#if 0
 int tveeprom_dump(unsigned char *eedata, int len)
 {
 	int i;
@@ -460,6 +461,7 @@ int tveeprom_dump(unsigned char *eedata, int len)
 	return 0;
 }
 EXPORT_SYMBOL(tveeprom_dump);
+#endif /* 0 */
 
 /* ----------------------------------------------------------------------- */
 /* needed for ivtv.sf.net at the moment. Should go away in the long */
@@ -477,7 +479,7 @@ static unsigned short normal_i2c[] = {
 
 I2C_CLIENT_INSMOD;
 
-struct i2c_driver i2c_driver_tveeprom;
+static struct i2c_driver i2c_driver_tveeprom;
 
 static int
 tveeprom_command(struct i2c_client *client,
@@ -549,7 +551,7 @@ tveeprom_detach_client (struct i2c_client *client)
 	return 0;
 }
 
-struct i2c_driver i2c_driver_tveeprom = {
+static struct i2c_driver i2c_driver_tveeprom = {
 	.owner = THIS_MODULE,
 	.name = "tveeprom",
 	.id = I2C_DRIVERID_TVEEPROM,
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 0c41d4b41a65..8b487ed1069c 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1053,7 +1053,7 @@ static void wbsd_detect_card(unsigned long data)
  * Tasklets
  */
 
-inline static struct mmc_data* wbsd_get_data(struct wbsd_host* host)
+static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
 {
 	WARN_ON(!host->mrq);
 	if (!host->mrq)
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 933877ff4d88..9a087c1fb0b7 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -40,6 +40,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/doc2000.h>
 
+#define DEBUG 0
 /* need to undef it (from asm/termbits.h) */
 #undef B0
 
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index ad17f17e8e7a..111601ca4ca3 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -272,7 +272,7 @@ static inline void set_hsf(struct net_device *dev, int hsf)
 
 static int start_receive(struct net_device *, pcb_struct *);
 
-inline static void adapter_reset(struct net_device *dev)
+static inline void adapter_reset(struct net_device *dev)
 {
 	unsigned long timeout;
 	elp_device *adapter = dev->priv;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 5a4a08a7c951..4c2cf7bbd252 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -126,14 +126,14 @@
 #define USE_IO_OPS 1
 #endif
 
-/* define to 1 to enable copious debugging info */
-#undef RTL8139_DEBUG
+/* define to 1, 2 or 3 to enable copious debugging info */
+#define RTL8139_DEBUG 0
 
 /* define to 1 to disable lightweight runtime debugging checks */
 #undef RTL8139_NDEBUG
 
 
-#ifdef RTL8139_DEBUG
+#if RTL8139_DEBUG
 /* note: prints function name for you */
 #  define DPRINTK(fmt, args...)  printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
 #else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 534b598866b3..8a835eb58808 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -23,9 +23,12 @@ config NETDEVICES
 
 	  If unsure, say Y.
 
+# All the following symbols are dependent on NETDEVICES - do not repeat
+# that for each of the symbols.
+if NETDEVICES
+
 config DUMMY
 	tristate "Dummy net driver support"
-	depends on NETDEVICES
 	---help---
 	  This is essentially a bit-bucket device (i.e. traffic you send to
 	  this device is consigned into oblivion) with a configurable IP
@@ -45,7 +48,6 @@ config DUMMY
 
 config BONDING
 	tristate "Bonding driver support"
-	depends on NETDEVICES
 	depends on INET
 	---help---
 	  Say 'Y' or 'M' if you wish to be able to 'bond' multiple Ethernet
@@ -63,7 +65,6 @@ config BONDING
 
 config EQUALIZER
 	tristate "EQL (serial line load balancing) support"
-	depends on NETDEVICES
 	---help---
 	  If you have two serial connections to some other computer (this
 	  usually requires two modems and two telephone lines) and you use
@@ -83,7 +84,6 @@ config EQUALIZER
 
 config TUN
 	tristate "Universal TUN/TAP device driver support"
-	depends on NETDEVICES
 	select CRC32
 	---help---
 	  TUN/TAP provides packet reception and transmission for user space
@@ -107,7 +107,7 @@ config TUN
 
 config NET_SB1000
 	tristate "General Instruments Surfboard 1000"
-	depends on NETDEVICES && PNP
+	depends on PNP
 	---help---
 	  This is a driver for the General Instrument (also known as
 	  NextLevel) SURFboard 1000 internal
@@ -129,16 +129,14 @@ config NET_SB1000
 
 	  If you don't have this card, of course say N.
 
-if NETDEVICES
 	source "drivers/net/arcnet/Kconfig"
-endif
 
 #
 #	Ethernet
 #
 
 menu "Ethernet (10 or 100Mbit)"
-	depends on NETDEVICES && !UML
+	depends on !UML
 
 config NET_ETHERNET
 	bool "Ethernet (10 or 100Mbit)"
@@ -1137,7 +1135,7 @@ config IBMLANA
 
 config IBMVETH
 	tristate "IBM LAN Virtual Ethernet support"
-	depends on NETDEVICES && NET_ETHERNET && PPC_PSERIES
+	depends on NET_ETHERNET && PPC_PSERIES
 	---help---
 	  This driver supports virtual ethernet adapters on newer IBM iSeries
 	  and pSeries systems.
@@ -1760,7 +1758,7 @@ endmenu
 #
 
 menu "Ethernet (1000 Mbit)"
-	depends on NETDEVICES && !UML
+	depends on !UML
 
 config ACENIC
 	tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support"
@@ -2091,7 +2089,7 @@ endmenu
 #
 
 menu "Ethernet (10000 Mbit)"
-	depends on NETDEVICES && !UML
+	depends on !UML
 
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
@@ -2186,11 +2184,11 @@ source "drivers/s390/net/Kconfig"
 
 config ISERIES_VETH
 	tristate "iSeries Virtual Ethernet driver support"
-	depends on NETDEVICES && PPC_ISERIES
+	depends on PPC_ISERIES
 
 config FDDI
 	bool "FDDI driver support"
-	depends on NETDEVICES && (PCI || EISA)
+	depends on (PCI || EISA)
 	help
 	  Fiber Distributed Data Interface is a high speed local area network
 	  design; essentially a replacement for high speed Ethernet. FDDI can
@@ -2239,7 +2237,7 @@ config SKFP
 
 config HIPPI
 	bool "HIPPI driver support (EXPERIMENTAL)"
-	depends on NETDEVICES && EXPERIMENTAL && INET && PCI
+	depends on EXPERIMENTAL && INET && PCI
 	help
 	  HIgh Performance Parallel Interface (HIPPI) is a 800Mbit/sec and
 	  1600Mbit/sec dual-simplex switched or point-to-point network. HIPPI
@@ -2271,7 +2269,7 @@ config ROADRUNNER_LARGE_RINGS
 
 config PLIP
 	tristate "PLIP (parallel port) support"
-	depends on NETDEVICES && PARPORT
+	depends on PARPORT
 	---help---
 	  PLIP (Parallel Line Internet Protocol) is used to create a
 	  reasonably fast mini network consisting of two (or, rarely, more)
@@ -2307,7 +2305,6 @@ config PLIP
2307 2305
2308config PPP 2306config PPP
2309 tristate "PPP (point-to-point protocol) support" 2307 tristate "PPP (point-to-point protocol) support"
2310 depends on NETDEVICES
2311 ---help--- 2308 ---help---
2312 PPP (Point to Point Protocol) is a newer and better SLIP. It serves 2309 PPP (Point to Point Protocol) is a newer and better SLIP. It serves
2313 the same purpose: sending Internet traffic over telephone (and other 2310 the same purpose: sending Internet traffic over telephone (and other
@@ -2443,7 +2440,6 @@ config PPPOATM
2443 2440
2444config SLIP 2441config SLIP
2445 tristate "SLIP (serial line) support" 2442 tristate "SLIP (serial line) support"
2446 depends on NETDEVICES
2447 ---help--- 2443 ---help---
2448 Say Y if you intend to use SLIP or CSLIP (compressed SLIP) to 2444 Say Y if you intend to use SLIP or CSLIP (compressed SLIP) to
2449 connect to your Internet service provider or to connect to some 2445 connect to your Internet service provider or to connect to some
@@ -2510,7 +2506,7 @@ config SLIP_MODE_SLIP6
2510 2506
2511config NET_FC 2507config NET_FC
2512 bool "Fibre Channel driver support" 2508 bool "Fibre Channel driver support"
2513 depends on NETDEVICES && SCSI && PCI 2509 depends on SCSI && PCI
2514 help 2510 help
2515 Fibre Channel is a high speed serial protocol mainly used to connect 2511 Fibre Channel is a high speed serial protocol mainly used to connect
2516 large storage devices to the computer; it is compatible with and 2512 large storage devices to the computer; it is compatible with and
@@ -2523,7 +2519,7 @@ config NET_FC
2523 2519
2524config SHAPER 2520config SHAPER
2525 tristate "Traffic Shaper (EXPERIMENTAL)" 2521 tristate "Traffic Shaper (EXPERIMENTAL)"
2526 depends on NETDEVICES && EXPERIMENTAL 2522 depends on EXPERIMENTAL
2527 ---help--- 2523 ---help---
2528 The traffic shaper is a virtual network device that allows you to 2524 The traffic shaper is a virtual network device that allows you to
2529 limit the rate of outgoing data flow over some other network device. 2525 limit the rate of outgoing data flow over some other network device.
@@ -2544,11 +2540,13 @@ config SHAPER
2544 2540
2545config NETCONSOLE 2541config NETCONSOLE
2546 tristate "Network console logging support (EXPERIMENTAL)" 2542 tristate "Network console logging support (EXPERIMENTAL)"
2547 depends on NETDEVICES && INET && EXPERIMENTAL 2543 depends on EXPERIMENTAL
2548 ---help--- 2544 ---help---
2549 If you want to log kernel messages over the network, enable this. 2545 If you want to log kernel messages over the network, enable this.
2550 See <file:Documentation/networking/netconsole.txt> for details. 2546 See <file:Documentation/networking/netconsole.txt> for details.
2551 2547
2548endif #NETDEVICES
2549
2552config NETPOLL 2550config NETPOLL
2553 def_bool NETCONSOLE 2551 def_bool NETCONSOLE
2554 2552
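
The Kconfig hunks above are all one refactor: the per-entry "depends on NETDEVICES" clauses are dropped in favour of a single if/endif block spanning the whole file (the matching "if NETDEVICES" is opened earlier in the file, outside the quoted hunks; only the closing "endif #NETDEVICES" is visible here). A minimal Kconfig sketch of the pattern, with a hypothetical FOO_NET entry:

if NETDEVICES

config FOO_NET
	tristate "Hypothetical FOO network driver"
	depends on PCI
	help
	  Example only: every entry inside the block inherits the
	  NETDEVICES dependency without spelling it out.

endif # NETDEVICES
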
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 8618012df06a..d9ba8be72af8 100755
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1290,7 +1290,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
1290 writel(intr0, mmio + INT0); 1290 writel(intr0, mmio + INT0);
1291 1291
1292 /* Check if Receive Interrupt has occurred. */ 1292 /* Check if Receive Interrupt has occurred. */
1293#if CONFIG_AMD8111E_NAPI 1293#ifdef CONFIG_AMD8111E_NAPI
1294 if(intr0 & RINT0){ 1294 if(intr0 & RINT0){
1295 if(netif_rx_schedule_prep(dev)){ 1295 if(netif_rx_schedule_prep(dev)){
1296 /* Disable receive interrupts */ 1296 /* Disable receive interrupts */
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 6c57096aa2e1..d209a1556b2e 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -129,9 +129,9 @@ bad_clone_list[] __initdata = {
129#define NESM_START_PG 0x40 /* First page of TX buffer */ 129#define NESM_START_PG 0x40 /* First page of TX buffer */
130#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ 130#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
131 131
132#ifdef CONFIG_PLAT_MAPPI 132#if defined(CONFIG_PLAT_MAPPI)
133# define DCR_VAL 0x4b 133# define DCR_VAL 0x4b
134#elif CONFIG_PLAT_OAKS32R 134#elif defined(CONFIG_PLAT_OAKS32R)
135# define DCR_VAL 0x48 135# define DCR_VAL 0x48
136#else 136#else
137# define DCR_VAL 0x49 137# define DCR_VAL 0x49
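
The last two hunks (amd8111e.c and ne.c) replace "#if CONFIG_X" with a definedness test. A stand-alone sketch of why that matters (CONFIG_FOO is a placeholder, deliberately left undefined; with the option set to 1 both forms behave the same, but a bare "#if" warns under gcc -Wundef and breaks outright if the macro is defined empty):

#include <stdio.h>

/* CONFIG_FOO is intentionally not defined. */

int main(void)
{
#if CONFIG_FOO		/* undefined identifier: silently evaluates to 0 */
	puts("#if branch taken");
#endif

#ifdef CONFIG_FOO	/* tests whether the macro exists at all */
	puts("#ifdef branch taken");
#else
	puts("CONFIG_FOO is not defined");
#endif
	return 0;
}
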
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 21537ee3a6a7..1bd22cd40c75 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -160,7 +160,7 @@ static struct net_device_stats *plip_get_stats(struct net_device *dev);
160static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 160static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
161static int plip_preempt(void *handle); 161static int plip_preempt(void *handle);
162static void plip_wakeup(void *handle); 162static void plip_wakeup(void *handle);
163 163
164enum plip_connection_state { 164enum plip_connection_state {
165 PLIP_CN_NONE=0, 165 PLIP_CN_NONE=0,
166 PLIP_CN_RECEIVE, 166 PLIP_CN_RECEIVE,
@@ -231,8 +231,8 @@ struct net_local {
231 atomic_t kill_timer; 231 atomic_t kill_timer;
232 struct semaphore killed_timer_sem; 232 struct semaphore killed_timer_sem;
233}; 233};
234 234
235inline static void enable_parport_interrupts (struct net_device *dev) 235static inline void enable_parport_interrupts (struct net_device *dev)
236{ 236{
237 if (dev->irq != -1) 237 if (dev->irq != -1)
238 { 238 {
@@ -242,7 +242,7 @@ inline static void enable_parport_interrupts (struct net_device *dev)
242 } 242 }
243} 243}
244 244
245inline static void disable_parport_interrupts (struct net_device *dev) 245static inline void disable_parport_interrupts (struct net_device *dev)
246{ 246{
247 if (dev->irq != -1) 247 if (dev->irq != -1)
248 { 248 {
@@ -252,7 +252,7 @@ inline static void disable_parport_interrupts (struct net_device *dev)
252 } 252 }
253} 253}
254 254
255inline static void write_data (struct net_device *dev, unsigned char data) 255static inline void write_data (struct net_device *dev, unsigned char data)
256{ 256{
257 struct parport *port = 257 struct parport *port =
258 ((struct net_local *)dev->priv)->pardev->port; 258 ((struct net_local *)dev->priv)->pardev->port;
@@ -260,14 +260,14 @@ inline static void write_data (struct net_device *dev, unsigned char data)
260 port->ops->write_data (port, data); 260 port->ops->write_data (port, data);
261} 261}
262 262
263inline static unsigned char read_status (struct net_device *dev) 263static inline unsigned char read_status (struct net_device *dev)
264{ 264{
265 struct parport *port = 265 struct parport *port =
266 ((struct net_local *)dev->priv)->pardev->port; 266 ((struct net_local *)dev->priv)->pardev->port;
267 267
268 return port->ops->read_status (port); 268 return port->ops->read_status (port);
269} 269}
270 270
271/* Entry point of PLIP driver. 271/* Entry point of PLIP driver.
272 Probe the hardware, and register/initialize the driver. 272 Probe the hardware, and register/initialize the driver.
273 273
@@ -316,7 +316,7 @@ plip_init_netdev(struct net_device *dev)
316 316
317 spin_lock_init(&nl->lock); 317 spin_lock_init(&nl->lock);
318} 318}
319 319
320/* Bottom half handler for the delayed request. 320/* Bottom half handler for the delayed request.
321 This routine is kicked by do_timer(). 321 This routine is kicked by do_timer().
322 Request `plip_bh' to be invoked. */ 322 Request `plip_bh' to be invoked. */
@@ -471,7 +471,7 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
471 471
472 return TIMEOUT; 472 return TIMEOUT;
473} 473}
474 474
475static int 475static int
476plip_none(struct net_device *dev, struct net_local *nl, 476plip_none(struct net_device *dev, struct net_local *nl,
477 struct plip_local *snd, struct plip_local *rcv) 477 struct plip_local *snd, struct plip_local *rcv)
@@ -481,7 +481,7 @@ plip_none(struct net_device *dev, struct net_local *nl,
481 481
482/* PLIP_RECEIVE --- receive a byte(two nibbles) 482/* PLIP_RECEIVE --- receive a byte(two nibbles)
483 Returns OK on success, TIMEOUT on timeout */ 483 Returns OK on success, TIMEOUT on timeout */
484inline static int 484static inline int
485plip_receive(unsigned short nibble_timeout, struct net_device *dev, 485plip_receive(unsigned short nibble_timeout, struct net_device *dev,
486 enum plip_nibble_state *ns_p, unsigned char *data_p) 486 enum plip_nibble_state *ns_p, unsigned char *data_p)
487{ 487{
@@ -582,7 +582,6 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
582 return htons(ETH_P_802_2); 582 return htons(ETH_P_802_2);
583} 583}
584 584
585
586/* PLIP_RECEIVE_PACKET --- receive a packet */ 585/* PLIP_RECEIVE_PACKET --- receive a packet */
587static int 586static int
588plip_receive_packet(struct net_device *dev, struct net_local *nl, 587plip_receive_packet(struct net_device *dev, struct net_local *nl,
@@ -702,7 +701,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
702 701
703/* PLIP_SEND --- send a byte (two nibbles) 702/* PLIP_SEND --- send a byte (two nibbles)
704 Returns OK on success, TIMEOUT when timeout */ 703 Returns OK on success, TIMEOUT when timeout */
705inline static int 704static inline int
706plip_send(unsigned short nibble_timeout, struct net_device *dev, 705plip_send(unsigned short nibble_timeout, struct net_device *dev,
707 enum plip_nibble_state *ns_p, unsigned char data) 706 enum plip_nibble_state *ns_p, unsigned char data)
708{ 707{
@@ -902,7 +901,7 @@ plip_error(struct net_device *dev, struct net_local *nl,
902 901
903 return OK; 902 return OK;
904} 903}
905 904
906/* Handle the parallel port interrupts. */ 905/* Handle the parallel port interrupts. */
907static void 906static void
908plip_interrupt(int irq, void *dev_id, struct pt_regs * regs) 907plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
@@ -957,7 +956,7 @@ plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
957 956
958 spin_unlock_irq(&nl->lock); 957 spin_unlock_irq(&nl->lock);
959} 958}
960 959
961static int 960static int
962plip_tx_packet(struct sk_buff *skb, struct net_device *dev) 961plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
963{ 962{
@@ -1238,7 +1237,7 @@ plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1238 } 1237 }
1239 return 0; 1238 return 0;
1240} 1239}
1241 1240
1242static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 }; 1241static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1243static int timid; 1242static int timid;
1244 1243
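
plip.c, like several files below, reorders "inline static" to "static inline". Both orders compile today, but C99 (6.11.5) makes a storage-class specifier that is not first in the declaration obsolescent, so the kernel standardizes on "static inline". A trivial stand-alone illustration:

#include <stdio.h>

static inline int add_one(int x)	/* preferred, future-proof order */
{
	return x + 1;
}

int main(void)
{
	printf("%d\n", add_one(41));	/* prints 42 */
	return 0;
}
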
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 1b70b7c97580..d9a774b91ddc 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1414,7 +1414,7 @@ static inline void mac_get_cam(struct mac_regs __iomem * regs, int idx, u8 *addr
1414 * the rest of the logic from the result of sleep/wakeup 1414 * the rest of the logic from the result of sleep/wakeup
1415 */ 1415 */
1416 1416
1417inline static void mac_wol_reset(struct mac_regs __iomem * regs) 1417static inline void mac_wol_reset(struct mac_regs __iomem * regs)
1418{ 1418{
1419 1419
1420 /* Turn off SWPTAG right after leaving power mode */ 1420 /* Turn off SWPTAG right after leaving power mode */
@@ -1811,7 +1811,7 @@ struct velocity_info {
1811 * CHECK ME: locking 1811 * CHECK ME: locking
1812 */ 1812 */
1813 1813
1814inline static int velocity_get_ip(struct velocity_info *vptr) 1814static inline int velocity_get_ip(struct velocity_info *vptr)
1815{ 1815{
1816 struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr; 1816 struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr;
1817 struct in_ifaddr *ifa; 1817 struct in_ifaddr *ifa;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 47f3c5d0203d..df20adcd0730 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5013,7 +5013,7 @@ static void proc_SSID_on_close( struct inode *inode, struct file *file ) {
5013 enable_MAC(ai, &rsp, 1); 5013 enable_MAC(ai, &rsp, 1);
5014} 5014}
5015 5015
5016inline static u8 hexVal(char c) { 5016static inline u8 hexVal(char c) {
5017 if (c>='0' && c<='9') return c -= '0'; 5017 if (c>='0' && c<='9') return c -= '0';
5018 if (c>='a' && c<='f') return c -= 'a'-10; 5018 if (c>='a' && c<='f') return c -= 'a'-10;
5019 if (c>='A' && c<='F') return c -= 'A'-10; 5019 if (c>='A' && c<='F') return c -= 'A'-10;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e9b1772a3a28..026f671ea558 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -42,8 +42,7 @@ void free_cpu_buffers(void)
42 vfree(cpu_buffer[i].buffer); 42 vfree(cpu_buffer[i].buffer);
43 } 43 }
44} 44}
45 45
46
47int alloc_cpu_buffers(void) 46int alloc_cpu_buffers(void)
48{ 47{
49 int i; 48 int i;
@@ -74,7 +73,6 @@ fail:
74 free_cpu_buffers(); 73 free_cpu_buffers();
75 return -ENOMEM; 74 return -ENOMEM;
76} 75}
77
78 76
79void start_cpu_work(void) 77void start_cpu_work(void)
80{ 78{
@@ -93,7 +91,6 @@ void start_cpu_work(void)
93 } 91 }
94} 92}
95 93
96
97void end_cpu_work(void) 94void end_cpu_work(void)
98{ 95{
99 int i; 96 int i;
@@ -109,7 +106,6 @@ void end_cpu_work(void)
109 flush_scheduled_work(); 106 flush_scheduled_work();
110} 107}
111 108
112
113/* Resets the cpu buffer to a sane state. */ 109/* Resets the cpu buffer to a sane state. */
114void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf) 110void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
115{ 111{
@@ -121,7 +117,6 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
121 cpu_buf->last_task = NULL; 117 cpu_buf->last_task = NULL;
122} 118}
123 119
124
125/* compute number of available slots in cpu_buffer queue */ 120/* compute number of available slots in cpu_buffer queue */
126static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b) 121static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
127{ 122{
@@ -134,7 +129,6 @@ static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
134 return tail + (b->buffer_size - head) - 1; 129 return tail + (b->buffer_size - head) - 1;
135} 130}
136 131
137
138static void increment_head(struct oprofile_cpu_buffer * b) 132static void increment_head(struct oprofile_cpu_buffer * b)
139{ 133{
140 unsigned long new_head = b->head_pos + 1; 134 unsigned long new_head = b->head_pos + 1;
@@ -149,10 +143,7 @@ static void increment_head(struct oprofile_cpu_buffer * b)
149 b->head_pos = 0; 143 b->head_pos = 0;
150} 144}
151 145
152 146static inline void
153
154
155inline static void
156add_sample(struct oprofile_cpu_buffer * cpu_buf, 147add_sample(struct oprofile_cpu_buffer * cpu_buf,
157 unsigned long pc, unsigned long event) 148 unsigned long pc, unsigned long event)
158{ 149{
@@ -162,14 +153,12 @@ add_sample(struct oprofile_cpu_buffer * cpu_buf,
162 increment_head(cpu_buf); 153 increment_head(cpu_buf);
163} 154}
164 155
165 156static inline void
166inline static void
167add_code(struct oprofile_cpu_buffer * buffer, unsigned long value) 157add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
168{ 158{
169 add_sample(buffer, ESCAPE_CODE, value); 159 add_sample(buffer, ESCAPE_CODE, value);
170} 160}
171 161
172
173/* This must be safe from any context. It's safe writing here 162/* This must be safe from any context. It's safe writing here
174 * because of the head/tail separation of the writer and reader 163 * because of the head/tail separation of the writer and reader
175 * of the CPU buffer. 164 * of the CPU buffer.
@@ -223,13 +212,11 @@ static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
223 return 1; 212 return 1;
224} 213}
225 214
226
227static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf) 215static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
228{ 216{
229 cpu_buf->tracing = 0; 217 cpu_buf->tracing = 0;
230} 218}
231 219
232
233void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) 220void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
234{ 221{
235 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; 222 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
@@ -251,14 +238,12 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
251 oprofile_end_trace(cpu_buf); 238 oprofile_end_trace(cpu_buf);
252} 239}
253 240
254
255void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) 241void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
256{ 242{
257 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; 243 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
258 log_sample(cpu_buf, pc, is_kernel, event); 244 log_sample(cpu_buf, pc, is_kernel, event);
259} 245}
260 246
261
262void oprofile_add_trace(unsigned long pc) 247void oprofile_add_trace(unsigned long pc)
263{ 248{
264 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; 249 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
@@ -283,8 +268,6 @@ void oprofile_add_trace(unsigned long pc)
283 add_sample(cpu_buf, pc, 0); 268 add_sample(cpu_buf, pc, 0);
284} 269}
285 270
286
287
288/* 271/*
289 * This serves to avoid cpu buffer overflow, and makes sure 272 * This serves to avoid cpu buffer overflow, and makes sure
290 * the task mortuary progresses 273 * the task mortuary progresses
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index 9001b6f0204d..e305bb132c24 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -11,7 +11,7 @@
11#ifdef CONFIG_PCI 11#ifdef CONFIG_PCI
12#include <linux/pci.h> 12#include <linux/pci.h>
13#else 13#else
14inline void pcibios_penalize_isa_irq(int irq) {} 14inline void pcibios_penalize_isa_irq(int irq, int active) {}
15#endif /* CONFIG_PCI */ 15#endif /* CONFIG_PCI */
16 16
17#include "pnpbios.h" 17#include "pnpbios.h"
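
The point of the one-line rsparser.c change: when CONFIG_PCI is off, the empty stub must keep the same signature as the real pcibios_penalize_isa_irq(), which has grown an "active" argument; otherwise callers compile against a mismatched prototype. The shape of the pattern (a sketch, bodies illustrative only):

#ifdef CONFIG_PCI
#include <linux/pci.h>		/* real prototype lives here */
#else
/* Stub for PCI-less builds: must mirror the real signature exactly. */
static inline void pcibios_penalize_isa_irq(int irq, int active) { }
#endif
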
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 6527ff6f4706..d5f53980749b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 * 9 *
10 * $Revision: 1.164 $ 10 * $Revision: 1.165 $
11 */ 11 */
12 12
13#include <linux/config.h> 13#include <linux/config.h>
@@ -1740,6 +1740,10 @@ dasd_exit(void)
1740 dasd_proc_exit(); 1740 dasd_proc_exit();
1741#endif 1741#endif
1742 dasd_ioctl_exit(); 1742 dasd_ioctl_exit();
1743 if (dasd_page_cache != NULL) {
1744 kmem_cache_destroy(dasd_page_cache);
1745 dasd_page_cache = NULL;
1746 }
1743 dasd_gendisk_exit(); 1747 dasd_gendisk_exit();
1744 dasd_devmap_exit(); 1748 dasd_devmap_exit();
1745 devfs_remove("dasd"); 1749 devfs_remove("dasd");
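
The dasd_exit() hunk adds the usual slab-cache teardown: a cache that may have been created during init is destroyed on module exit and the pointer cleared. A minimal sketch of that pattern in 2005-era kernel style (the demo_* identifiers are hypothetical):

#include <linux/module.h>
#include <linux/slab.h>

static kmem_cache_t *demo_page_cache;	/* may remain NULL if never used */

static void __exit demo_exit(void)
{
	if (demo_page_cache != NULL) {
		kmem_cache_destroy(demo_page_cache);
		demo_page_cache = NULL;	/* guard against double destroy */
	}
}
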
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 7963ae343eef..28cb4613b7f5 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -4,7 +4,7 @@
4 * Bugreports.to..: <Linux390@de.ibm.com> 4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
6 * 6 *
7 * $Revision: 1.39 $ 7 * $Revision: 1.40 $
8 */ 8 */
9 9
10#include <linux/config.h> 10#include <linux/config.h>
@@ -354,6 +354,8 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
354 } 354 }
355 cqr->device = device; 355 cqr->device = device;
356 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 356 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
357 cqr->retries = 32;
358 cqr->buildclk = get_clock();
357 cqr->status = DASD_CQR_FILLED; 359 cqr->status = DASD_CQR_FILLED;
358 return cqr; 360 return cqr;
359} 361}
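
The dasd_fba hunk fills in two fields the FBA path previously left unset when building a channel request, both assigned before the request is marked FILLED. Annotated restatement of the build sequence (surrounding code elided, comments are interpretation):

	cqr->device   = device;
	cqr->expires  = 5 * 60 * HZ;	/* 5 minutes */
	cqr->retries  = 32;		/* give the request a retry budget */
	cqr->buildclk = get_clock();	/* timestamp the request was built */
	cqr->status   = DASD_CQR_FILLED;
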
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index d04e6c2c3cc1..01d865d93791 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,10 +3,11 @@
3 * tape device driver for 3480/3490E/3590 tapes. 3 * tape device driver for 3480/3490E/3590 tapes.
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Stefan Bader <shbader@de.ibm.com>
10 */ 11 */
11 12
12#ifndef _TAPE_H 13#ifndef _TAPE_H
@@ -111,6 +112,7 @@ enum tape_request_status {
111 TAPE_REQUEST_QUEUED, /* request is queued to be processed */ 112 TAPE_REQUEST_QUEUED, /* request is queued to be processed */
112 TAPE_REQUEST_IN_IO, /* request is currently in IO */ 113 TAPE_REQUEST_IN_IO, /* request is currently in IO */
113 TAPE_REQUEST_DONE, /* request is completed. */ 114 TAPE_REQUEST_DONE, /* request is completed. */
115 TAPE_REQUEST_CANCEL, /* request should be canceled. */
114}; 116};
115 117
116/* Tape CCW request */ 118/* Tape CCW request */
@@ -237,6 +239,9 @@ struct tape_device {
237 /* Block dev frontend data */ 239 /* Block dev frontend data */
238 struct tape_blk_data blk_data; 240 struct tape_blk_data blk_data;
239#endif 241#endif
242
243 /* Function to start or stop the next request later. */
244 struct work_struct tape_dnr;
240}; 245};
241 246
242/* Externals from tape_core.c */ 247/* Externals from tape_core.c */
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 0597aa0e27ee..6c52e8307dc5 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,11 +3,12 @@
3 * basic function of the tape device driver 3 * basic function of the tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Stefan Bader <shbader@de.ibm.com>
11 */ 12 */
12 13
13#include <linux/config.h> 14#include <linux/config.h>
@@ -28,7 +29,7 @@
28#define PRINTK_HEADER "TAPE_CORE: " 29#define PRINTK_HEADER "TAPE_CORE: "
29 30
30static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); 31static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
31static void __tape_remove_request(struct tape_device *, struct tape_request *); 32static void tape_delayed_next_request(void * data);
32 33
33/* 34/*
34 * One list to contain all tape devices of all disciplines, so 35 * One list to contain all tape devices of all disciplines, so
@@ -257,7 +258,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
257 * Stop running ccw. Has to be called with the device lock held. 258 * Stop running ccw. Has to be called with the device lock held.
258 */ 259 */
259static inline int 260static inline int
260__tape_halt_io(struct tape_device *device, struct tape_request *request) 261__tape_cancel_io(struct tape_device *device, struct tape_request *request)
261{ 262{
262 int retries; 263 int retries;
263 int rc; 264 int rc;
@@ -270,20 +271,23 @@ __tape_halt_io(struct tape_device *device, struct tape_request *request)
270 for (retries = 0; retries < 5; retries++) { 271 for (retries = 0; retries < 5; retries++) {
271 rc = ccw_device_clear(device->cdev, (long) request); 272 rc = ccw_device_clear(device->cdev, (long) request);
272 273
273 if (rc == 0) { /* Termination successful */ 274 switch (rc) {
274 request->rc = -EIO; 275 case 0:
275 request->status = TAPE_REQUEST_DONE; 276 request->status = TAPE_REQUEST_DONE;
276 return 0; 277 return 0;
278 case -EBUSY:
279 request->status = TAPE_REQUEST_CANCEL;
280 schedule_work(&device->tape_dnr);
281 return 0;
282 case -ENODEV:
283 DBF_EXCEPTION(2, "device gone, retry\n");
284 break;
285 case -EIO:
286 DBF_EXCEPTION(2, "I/O error, retry\n");
287 break;
288 default:
289 BUG();
277 } 290 }
278
279 if (rc == -ENODEV)
280 DBF_EXCEPTION(2, "device gone, retry\n");
281 else if (rc == -EIO)
282 DBF_EXCEPTION(2, "I/O error, retry\n");
283 else if (rc == -EBUSY)
284 DBF_EXCEPTION(2, "device busy, retry late\n");
285 else
286 BUG();
287 } 291 }
288 292
289 return rc; 293 return rc;
@@ -473,6 +477,7 @@ tape_alloc_device(void)
473 *device->modeset_byte = 0; 477 *device->modeset_byte = 0;
474 device->first_minor = -1; 478 device->first_minor = -1;
475 atomic_set(&device->ref_count, 1); 479 atomic_set(&device->ref_count, 1);
480 INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
476 481
477 return device; 482 return device;
478} 483}
@@ -708,54 +713,119 @@ tape_free_request (struct tape_request * request)
708 kfree(request); 713 kfree(request);
709} 714}
710 715
716static inline int
717__tape_start_io(struct tape_device *device, struct tape_request *request)
718{
719 int rc;
720
721#ifdef CONFIG_S390_TAPE_BLOCK
722 if (request->op == TO_BLOCK)
723 device->discipline->check_locate(device, request);
724#endif
725 rc = ccw_device_start(
726 device->cdev,
727 request->cpaddr,
728 (unsigned long) request,
729 0x00,
730 request->options
731 );
732 if (rc == 0) {
733 request->status = TAPE_REQUEST_IN_IO;
734 } else if (rc == -EBUSY) {
735 /* The common I/O subsystem is currently busy. Retry later. */
736 request->status = TAPE_REQUEST_QUEUED;
737 schedule_work(&device->tape_dnr);
738 rc = 0;
739 } else {
740 /* Start failed. Remove request and indicate failure. */
741 DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
742 }
743 return rc;
744}
745
711static inline void 746static inline void
712__tape_do_io_list(struct tape_device *device) 747__tape_start_next_request(struct tape_device *device)
713{ 748{
714 struct list_head *l, *n; 749 struct list_head *l, *n;
715 struct tape_request *request; 750 struct tape_request *request;
716 int rc; 751 int rc;
717 752
718 DBF_LH(6, "__tape_do_io_list(%p)\n", device); 753 DBF_LH(6, "__tape_start_next_request(%p)\n", device);
719 /* 754 /*
720 * Try to start each request on the request queue until one is 755 * Try to start each request on the request queue until one is
721 * started successfully. 756 * started successfully.
722 */ 757 */
723 list_for_each_safe(l, n, &device->req_queue) { 758 list_for_each_safe(l, n, &device->req_queue) {
724 request = list_entry(l, struct tape_request, list); 759 request = list_entry(l, struct tape_request, list);
725#ifdef CONFIG_S390_TAPE_BLOCK 760
726 if (request->op == TO_BLOCK) 761 /*
727 device->discipline->check_locate(device, request); 762 * Avoid race condition if bottom-half was triggered more than
728#endif 763 * once.
729 rc = ccw_device_start(device->cdev, request->cpaddr, 764 */
730 (unsigned long) request, 0x00, 765 if (request->status == TAPE_REQUEST_IN_IO)
731 request->options); 766 return;
732 if (rc == 0) { 767
733 request->status = TAPE_REQUEST_IN_IO; 768 /*
734 break; 769 * We wanted to cancel the request but the common I/O layer
770 * was busy at that time. This can only happen if this
771 * function is called by delayed_next_request.
772 * Otherwise we start the next request on the queue.
773 */
774 if (request->status == TAPE_REQUEST_CANCEL) {
775 rc = __tape_cancel_io(device, request);
776 } else {
777 rc = __tape_start_io(device, request);
735 } 778 }
736 /* Start failed. Remove request and indicate failure. */ 779 if (rc == 0)
737 DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc); 780 return;
738 781
739 /* Set ending status and do callback. */ 782 /* Set ending status. */
740 request->rc = rc; 783 request->rc = rc;
741 request->status = TAPE_REQUEST_DONE; 784 request->status = TAPE_REQUEST_DONE;
742 __tape_remove_request(device, request); 785
786 /* Remove from request queue. */
787 list_del(&request->list);
788
789 /* Do callback. */
790 if (request->callback != NULL)
791 request->callback(request, request->callback_data);
743 } 792 }
744} 793}
745 794
746static void 795static void
747__tape_remove_request(struct tape_device *device, struct tape_request *request) 796tape_delayed_next_request(void *data)
748{ 797{
749 /* Remove from request queue. */ 798 struct tape_device * device;
750 list_del(&request->list);
751 799
752 /* Do callback. */ 800 device = (struct tape_device *) data;
753 if (request->callback != NULL) 801 DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
754 request->callback(request, request->callback_data); 802 spin_lock_irq(get_ccwdev_lock(device->cdev));
803 __tape_start_next_request(device);
804 spin_unlock_irq(get_ccwdev_lock(device->cdev));
805}
806
807static inline void
808__tape_end_request(
809 struct tape_device * device,
810 struct tape_request * request,
811 int rc)
812{
813 DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
814 if (request) {
815 request->rc = rc;
816 request->status = TAPE_REQUEST_DONE;
817
818 /* Remove from request queue. */
819 list_del(&request->list);
820
821 /* Do callback. */
822 if (request->callback != NULL)
823 request->callback(request, request->callback_data);
824 }
755 825
756 /* Start next request. */ 826 /* Start next request. */
757 if (!list_empty(&device->req_queue)) 827 if (!list_empty(&device->req_queue))
758 __tape_do_io_list(device); 828 __tape_start_next_request(device);
759} 829}
760 830
761/* 831/*
@@ -812,7 +882,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
812 * the device lock held. 882 * the device lock held.
813 */ 883 */
814static inline int 884static inline int
815__tape_do_io(struct tape_device *device, struct tape_request *request) 885__tape_start_request(struct tape_device *device, struct tape_request *request)
816{ 886{
817 int rc; 887 int rc;
818 888
@@ -837,24 +907,16 @@ __tape_do_io(struct tape_device *device, struct tape_request *request)
837 907
838 if (list_empty(&device->req_queue)) { 908 if (list_empty(&device->req_queue)) {
839 /* No other requests are on the queue. Start this one. */ 909 /* No other requests are on the queue. Start this one. */
840#ifdef CONFIG_S390_TAPE_BLOCK 910 rc = __tape_start_io(device, request);
841 if (request->op == TO_BLOCK) 911 if (rc)
842 device->discipline->check_locate(device, request);
843#endif
844 rc = ccw_device_start(device->cdev, request->cpaddr,
845 (unsigned long) request, 0x00,
846 request->options);
847 if (rc) {
848 DBF_EVENT(1, "tape: DOIO failed with rc = %i\n", rc);
849 return rc; 912 return rc;
850 } 913
851 DBF_LH(5, "Request %p added for execution.\n", request); 914 DBF_LH(5, "Request %p added for execution.\n", request);
852 list_add(&request->list, &device->req_queue); 915 list_add(&request->list, &device->req_queue);
853 request->status = TAPE_REQUEST_IN_IO;
854 } else { 916 } else {
855 DBF_LH(5, "Request %p add to queue.\n", request); 917 DBF_LH(5, "Request %p add to queue.\n", request);
856 list_add_tail(&request->list, &device->req_queue);
857 request->status = TAPE_REQUEST_QUEUED; 918 request->status = TAPE_REQUEST_QUEUED;
919 list_add_tail(&request->list, &device->req_queue);
858 } 920 }
859 return 0; 921 return 0;
860} 922}
@@ -872,7 +934,7 @@ tape_do_io_async(struct tape_device *device, struct tape_request *request)
872 934
873 spin_lock_irq(get_ccwdev_lock(device->cdev)); 935 spin_lock_irq(get_ccwdev_lock(device->cdev));
874 /* Add request to request queue and try to start it. */ 936 /* Add request to request queue and try to start it. */
875 rc = __tape_do_io(device, request); 937 rc = __tape_start_request(device, request);
876 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 938 spin_unlock_irq(get_ccwdev_lock(device->cdev));
877 return rc; 939 return rc;
878} 940}
@@ -901,7 +963,7 @@ tape_do_io(struct tape_device *device, struct tape_request *request)
901 request->callback = __tape_wake_up; 963 request->callback = __tape_wake_up;
902 request->callback_data = &wq; 964 request->callback_data = &wq;
903 /* Add request to request queue and try to start it. */ 965 /* Add request to request queue and try to start it. */
904 rc = __tape_do_io(device, request); 966 rc = __tape_start_request(device, request);
905 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 967 spin_unlock_irq(get_ccwdev_lock(device->cdev));
906 if (rc) 968 if (rc)
907 return rc; 969 return rc;
@@ -935,7 +997,7 @@ tape_do_io_interruptible(struct tape_device *device,
935 /* Setup callback */ 997 /* Setup callback */
936 request->callback = __tape_wake_up_interruptible; 998 request->callback = __tape_wake_up_interruptible;
937 request->callback_data = &wq; 999 request->callback_data = &wq;
938 rc = __tape_do_io(device, request); 1000 rc = __tape_start_request(device, request);
939 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1001 spin_unlock_irq(get_ccwdev_lock(device->cdev));
940 if (rc) 1002 if (rc)
941 return rc; 1003 return rc;
@@ -944,36 +1006,27 @@ tape_do_io_interruptible(struct tape_device *device,
944 if (rc != -ERESTARTSYS) 1006 if (rc != -ERESTARTSYS)
945 /* Request finished normally. */ 1007 /* Request finished normally. */
946 return request->rc; 1008 return request->rc;
1009
947 /* Interrupted by a signal. We have to stop the current request. */ 1010 /* Interrupted by a signal. We have to stop the current request. */
948 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1011 spin_lock_irq(get_ccwdev_lock(device->cdev));
949 rc = __tape_halt_io(device, request); 1012 rc = __tape_cancel_io(device, request);
1013 spin_unlock_irq(get_ccwdev_lock(device->cdev));
950 if (rc == 0) { 1014 if (rc == 0) {
1015 /* Wait for the interrupt that acknowledges the halt. */
1016 do {
1017 rc = wait_event_interruptible(
1018 wq,
1019 (request->callback == NULL)
1020 );
1021 } while (rc == -ERESTARTSYS);
1022
951 DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id); 1023 DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
952 rc = -ERESTARTSYS; 1024 rc = -ERESTARTSYS;
953 } 1025 }
954 spin_unlock_irq(get_ccwdev_lock(device->cdev));
955 return rc; 1026 return rc;
956} 1027}
957 1028
958/* 1029/*
959 * Handle requests that return an i/o error in the irb.
960 */
961static inline void
962tape_handle_killed_request(
963 struct tape_device *device,
964 struct tape_request *request)
965{
966 if(request != NULL) {
967 /* Set ending status. FIXME: Should the request be retried? */
968 request->rc = -EIO;
969 request->status = TAPE_REQUEST_DONE;
970 __tape_remove_request(device, request);
971 } else {
972 __tape_do_io_list(device);
973 }
974}
975
976/*
977 * Tape interrupt routine, called from the ccw_device layer 1030 * Tape interrupt routine, called from the ccw_device layer
978 */ 1031 */
979static void 1032static void
@@ -981,7 +1034,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
981{ 1034{
982 struct tape_device *device; 1035 struct tape_device *device;
983 struct tape_request *request; 1036 struct tape_request *request;
984 int final;
985 int rc; 1037 int rc;
986 1038
987 device = (struct tape_device *) cdev->dev.driver_data; 1039 device = (struct tape_device *) cdev->dev.driver_data;
@@ -996,12 +1048,13 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
996 1048
997 /* On special conditions irb is an error pointer */ 1049 /* On special conditions irb is an error pointer */
998 if (IS_ERR(irb)) { 1050 if (IS_ERR(irb)) {
1051 /* FIXME: What to do with the request? */
999 switch (PTR_ERR(irb)) { 1052 switch (PTR_ERR(irb)) {
1000 case -ETIMEDOUT: 1053 case -ETIMEDOUT:
1001 PRINT_WARN("(%s): Request timed out\n", 1054 PRINT_WARN("(%s): Request timed out\n",
1002 cdev->dev.bus_id); 1055 cdev->dev.bus_id);
1003 case -EIO: 1056 case -EIO:
1004 tape_handle_killed_request(device, request); 1057 __tape_end_request(device, request, -EIO);
1005 break; 1058 break;
1006 default: 1059 default:
1007 PRINT_ERR("(%s): Unexpected i/o error %li\n", 1060 PRINT_ERR("(%s): Unexpected i/o error %li\n",
@@ -1011,6 +1064,21 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1011 return; 1064 return;
1012 } 1065 }
1013 1066
1067 /*
1068 * If the condition code is not zero and the start function bit is
1069 * still set, this is a deferred error and the last start I/O did
1070 * not succeed. Restart the request now.
1071 */
1072 if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
1073 PRINT_WARN("(%s): deferred cc=%i. restarting\n",
1074 cdev->dev.bus_id,
1075 irb->scsw.cc);
1076 rc = __tape_start_io(device, request);
1077 if (rc)
1078 __tape_end_request(device, request, rc);
1079 return;
1080 }
1081
1014 /* May be an unsolicited irq */ 1082 /* May be an unsolicited irq */
1015 if(request != NULL) 1083 if(request != NULL)
1016 request->rescnt = irb->scsw.count; 1084 request->rescnt = irb->scsw.count;
@@ -1042,7 +1110,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1042 * To detect these requests the state will be set to TAPE_REQUEST_DONE. 1110 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
1043 */ 1111 */
1044 if(request != NULL && request->status == TAPE_REQUEST_DONE) { 1112 if(request != NULL && request->status == TAPE_REQUEST_DONE) {
1045 __tape_remove_request(device, request); 1113 __tape_end_request(device, request, -EIO);
1046 return; 1114 return;
1047 } 1115 }
1048 1116
@@ -1054,51 +1122,34 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1054 * rc == TAPE_IO_RETRY: request finished but needs another go. 1122 * rc == TAPE_IO_RETRY: request finished but needs another go.
1055 * rc == TAPE_IO_STOP: request needs to get terminated. 1123 * rc == TAPE_IO_STOP: request needs to get terminated.
1056 */ 1124 */
1057 final = 0;
1058 switch (rc) { 1125 switch (rc) {
1059 case TAPE_IO_SUCCESS: 1126 case TAPE_IO_SUCCESS:
1060 /* Upon normal completion the device _is_ online */ 1127 /* Upon normal completion the device _is_ online */
1061 device->tape_generic_status |= GMT_ONLINE(~0); 1128 device->tape_generic_status |= GMT_ONLINE(~0);
1062 final = 1; 1129 __tape_end_request(device, request, rc);
1063 break; 1130 break;
1064 case TAPE_IO_PENDING: 1131 case TAPE_IO_PENDING:
1065 break; 1132 break;
1066 case TAPE_IO_RETRY: 1133 case TAPE_IO_RETRY:
1067#ifdef CONFIG_S390_TAPE_BLOCK 1134 rc = __tape_start_io(device, request);
1068 if (request->op == TO_BLOCK) 1135 if (rc)
1069 device->discipline->check_locate(device, request); 1136 __tape_end_request(device, request, rc);
1070#endif 1137 break;
1071 rc = ccw_device_start(cdev, request->cpaddr, 1138 case TAPE_IO_STOP:
1072 (unsigned long) request, 0x00, 1139 rc = __tape_cancel_io(device, request);
1073 request->options); 1140 if (rc)
1074 if (rc) { 1141 __tape_end_request(device, request, rc);
1075 DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc); 1142 break;
1076 final = 1; 1143 default:
1077 } 1144 if (rc > 0) {
1078 break; 1145 DBF_EVENT(6, "xunknownrc\n");
1079 case TAPE_IO_STOP: 1146 PRINT_ERR("Invalid return code from discipline "
1080 __tape_halt_io(device, request); 1147 "interrupt function.\n");
1081 break; 1148 __tape_end_request(device, request, -EIO);
1082 default: 1149 } else {
1083 if (rc > 0) { 1150 __tape_end_request(device, request, rc);
1084 DBF_EVENT(6, "xunknownrc\n"); 1151 }
1085 PRINT_ERR("Invalid return code from discipline " 1152 break;
1086 "interrupt function.\n");
1087 rc = -EIO;
1088 }
1089 final = 1;
1090 break;
1091 }
1092 if (final) {
1093 /* May be an unsolicited irq */
1094 if(request != NULL) {
1095 /* Set ending status. */
1096 request->rc = rc;
1097 request->status = TAPE_REQUEST_DONE;
1098 __tape_remove_request(device, request);
1099 } else {
1100 __tape_do_io_list(device);
1101 }
1102 } 1153 }
1103} 1154}
1104 1155
@@ -1191,7 +1242,7 @@ tape_init (void)
1191#ifdef DBF_LIKE_HELL 1242#ifdef DBF_LIKE_HELL
1192 debug_set_level(TAPE_DBF_AREA, 6); 1243 debug_set_level(TAPE_DBF_AREA, 6);
1193#endif 1244#endif
1194 DBF_EVENT(3, "tape init: ($Revision: 1.51 $)\n"); 1245 DBF_EVENT(3, "tape init: ($Revision: 1.54 $)\n");
1195 tape_proc_init(); 1246 tape_proc_init();
1196 tapechar_init (); 1247 tapechar_init ();
1197 tapeblock_init (); 1248 tapeblock_init ();
@@ -1216,7 +1267,7 @@ tape_exit(void)
1216MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and " 1267MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
1217 "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)"); 1268 "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
1218MODULE_DESCRIPTION("Linux on zSeries channel attached " 1269MODULE_DESCRIPTION("Linux on zSeries channel attached "
1219 "tape device driver ($Revision: 1.51 $)"); 1270 "tape device driver ($Revision: 1.54 $)");
1220MODULE_LICENSE("GPL"); 1271MODULE_LICENSE("GPL");
1221 1272
1222module_init(tape_init); 1273module_init(tape_init);
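
The core of the tape_core.c rework: when ccw_device_start() or ccw_device_clear() returns -EBUSY, the request is left on the queue in QUEUED or CANCEL state and a work item retries it from process context instead of failing it on the spot. A condensed sketch of that pattern using the 2.6.13-era three-argument INIT_WORK (the demo_* names are hypothetical):

#include <linux/workqueue.h>
#include <linux/errno.h>

struct demo_device {
	struct work_struct dnr;		/* "do next request" retry hook */
	/* ... ccw device, request queue, lock ... */
};

static void demo_delayed_next_request(void *data)
{
	struct demo_device *device = data;
	/* take the ccw device lock, then restart or cancel whatever
	   is still pending: __demo_start_next_request(device); */
}

static void demo_setup(struct demo_device *device)
{
	INIT_WORK(&device->dnr, demo_delayed_next_request, device);
}

static int demo_start_io(struct demo_device *device, int rc)
{
	if (rc == -EBUSY) {		/* subsystem busy: retry later */
		schedule_work(&device->dnr);
		return 0;		/* request stays queued */
	}
	return rc;			/* 0 on success, error otherwise */
}
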
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 7f11a608a633..8990d8076e7d 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -115,9 +115,9 @@ vmcp_write(struct file *file, const char __user * buff, size_t count,
115 return -ENOMEM; 115 return -ENOMEM;
116 } 116 }
117 debug_text_event(vmcp_debug, 1, cmd); 117 debug_text_event(vmcp_debug, 1, cmd);
118 session->resp_size = cpcmd(cmd, session->response, 118 session->resp_size = __cpcmd(cmd, session->response,
119 session->bufsize, 119 session->bufsize,
120 &session->resp_code); 120 &session->resp_code);
121 up(&session->mutex); 121 up(&session->mutex);
122 kfree(cmd); 122 kfree(cmd);
123 *ppos = 0; /* reset the file pointer after a command */ 123 *ppos = 0; /* reset the file pointer after a command */
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 22cf4fec8da9..5473c23fcb52 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -23,11 +23,7 @@
23static char vmwdt_cmd[MAX_CMDLEN] = "IPL"; 23static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
24static int vmwdt_conceal; 24static int vmwdt_conceal;
25 25
26#ifdef CONFIG_WATCHDOG_NOWAYOUT 26static int vmwdt_nowayout = WATCHDOG_NOWAYOUT;
27static int vmwdt_nowayout = 1;
28#else
29static int vmwdt_nowayout = 0;
30#endif
31 27
32MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); 29MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
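
The vmwatchdog hunk is part of the tree-wide watchdog cleanup visible in this merge's diffstat: the per-driver #ifdef block shrinks to one line using the WATCHDOG_NOWAYOUT constant from <linux/watchdog.h>. The macro it relies on amounts to the following (reconstructed; check the header itself):

#ifdef CONFIG_WATCHDOG_NOWAYOUT
#define WATCHDOG_NOWAYOUT	1
#else
#define WATCHDOG_NOWAYOUT	0
#endif
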
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index b86f94ecd874..fa3c23b80e3a 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/chsc.c 2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call 3 * S/390 common I/O routines -- channel subsystem call
4 * $Revision: 1.119 $ 4 * $Revision: 1.120 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -412,11 +412,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
412 if (chp_mask == 0) { 412 if (chp_mask == 0) {
413 413
414 spin_unlock_irq(&sch->lock); 414 spin_unlock_irq(&sch->lock);
415 415 continue;
416 if (fla_mask != 0)
417 break;
418 else
419 continue;
420 } 416 }
421 old_lpm = sch->lpm; 417 old_lpm = sch->lpm;
422 sch->lpm = ((sch->schib.pmcw.pim & 418 sch->lpm = ((sch->schib.pmcw.pim &
@@ -430,7 +426,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
430 426
431 spin_unlock_irq(&sch->lock); 427 spin_unlock_irq(&sch->lock);
432 put_device(&sch->dev); 428 put_device(&sch->dev);
433 if (fla_mask != 0) 429 if (fla_mask == 0xffff)
434 break; 430 break;
435 } 431 }
436 return rc; 432 return rc;
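
The chsc.c fix changes the early-exit rule: a non-matching subchannel must always continue the scan, and the loop may only break after a hit when the full link address was given (fla_mask == 0xffff), since only then is the match unique. A small runnable sketch of the corrected flow (data and names are illustrative):

#include <stdio.h>

struct sch { int id; unsigned short fla; };

int main(void)
{
	struct sch subch[] = { { 0, 0x1234 }, { 1, 0x12ff }, { 2, 0x1234 } };
	unsigned short fla = 0x1234, fla_mask = 0xff00;	/* partial mask */
	int i;

	for (i = 0; i < 3; i++) {
		if ((subch[i].fla & fla_mask) != (fla & fla_mask))
			continue;	/* a miss never ends the scan */
		printf("processing subchannel %d\n", subch[i].id);
		if (fla_mask == 0xffff)
			break;		/* unique only if fully specified */
	}
	return 0;
}
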
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 4ab2e0d95009..12a24d4331a2 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -39,15 +39,14 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
39 " ... device %04X on subchannel %04X, dev_stat " 39 " ... device %04X on subchannel %04X, dev_stat "
40 ": %02X sch_stat : %02X\n", 40 ": %02X sch_stat : %02X\n",
41 cdev->private->devno, cdev->private->irq, 41 cdev->private->devno, cdev->private->irq,
42 cdev->private->irb.scsw.dstat, 42 irb->scsw.dstat, irb->scsw.cstat);
43 cdev->private->irb.scsw.cstat);
44 43
45 if (irb->scsw.cc != 3) { 44 if (irb->scsw.cc != 3) {
46 char dbf_text[15]; 45 char dbf_text[15];
47 46
48 sprintf(dbf_text, "chk%x", cdev->private->irq); 47 sprintf(dbf_text, "chk%x", cdev->private->irq);
49 CIO_TRACE_EVENT(0, dbf_text); 48 CIO_TRACE_EVENT(0, dbf_text);
50 CIO_HEX_EVENT(0, &cdev->private->irb, sizeof (struct irb)); 49 CIO_HEX_EVENT(0, irb, sizeof (struct irb));
51 } 50 }
52} 51}
53 52
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 82194c4eadfb..d36258d6665f 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -432,7 +432,7 @@ tiqdio_clear_global_summary(void)
432 432
433/************************* OUTBOUND ROUTINES *******************************/ 433/************************* OUTBOUND ROUTINES *******************************/
434 434
435inline static int 435static inline int
436qdio_get_outbound_buffer_frontier(struct qdio_q *q) 436qdio_get_outbound_buffer_frontier(struct qdio_q *q)
437{ 437{
438 int f,f_mod_no; 438 int f,f_mod_no;
@@ -510,7 +510,7 @@ out:
510} 510}
511 511
512/* all buffers are processed */ 512/* all buffers are processed */
513inline static int 513static inline int
514qdio_is_outbound_q_done(struct qdio_q *q) 514qdio_is_outbound_q_done(struct qdio_q *q)
515{ 515{
516 int no_used; 516 int no_used;
@@ -532,7 +532,7 @@ qdio_is_outbound_q_done(struct qdio_q *q)
532 return (no_used==0); 532 return (no_used==0);
533} 533}
534 534
535inline static int 535static inline int
536qdio_has_outbound_q_moved(struct qdio_q *q) 536qdio_has_outbound_q_moved(struct qdio_q *q)
537{ 537{
538 int i; 538 int i;
@@ -552,7 +552,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q)
552 } 552 }
553} 553}
554 554
555inline static void 555static inline void
556qdio_kick_outbound_q(struct qdio_q *q) 556qdio_kick_outbound_q(struct qdio_q *q)
557{ 557{
558 int result; 558 int result;
@@ -641,7 +641,7 @@ qdio_kick_outbound_q(struct qdio_q *q)
641 } 641 }
642} 642}
643 643
644inline static void 644static inline void
645qdio_kick_outbound_handler(struct qdio_q *q) 645qdio_kick_outbound_handler(struct qdio_q *q)
646{ 646{
647 int start, end, real_end, count; 647 int start, end, real_end, count;
@@ -740,7 +740,7 @@ qdio_outbound_processing(struct qdio_q *q)
740/************************* INBOUND ROUTINES *******************************/ 740/************************* INBOUND ROUTINES *******************************/
741 741
742 742
743inline static int 743static inline int
744qdio_get_inbound_buffer_frontier(struct qdio_q *q) 744qdio_get_inbound_buffer_frontier(struct qdio_q *q)
745{ 745{
746 int f,f_mod_no; 746 int f,f_mod_no;
@@ -865,7 +865,7 @@ out:
865 return q->first_to_check; 865 return q->first_to_check;
866} 866}
867 867
868inline static int 868static inline int
869qdio_has_inbound_q_moved(struct qdio_q *q) 869qdio_has_inbound_q_moved(struct qdio_q *q)
870{ 870{
871 int i; 871 int i;
@@ -898,7 +898,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
898} 898}
899 899
900/* means, no more buffers to be filled */ 900/* means, no more buffers to be filled */
901inline static int 901static inline int
902tiqdio_is_inbound_q_done(struct qdio_q *q) 902tiqdio_is_inbound_q_done(struct qdio_q *q)
903{ 903{
904 int no_used; 904 int no_used;
@@ -951,7 +951,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
951 return 0; 951 return 0;
952} 952}
953 953
954inline static int 954static inline int
955qdio_is_inbound_q_done(struct qdio_q *q) 955qdio_is_inbound_q_done(struct qdio_q *q)
956{ 956{
957 int no_used; 957 int no_used;
@@ -1010,7 +1010,7 @@ qdio_is_inbound_q_done(struct qdio_q *q)
1010 } 1010 }
1011} 1011}
1012 1012
1013inline static void 1013static inline void
1014qdio_kick_inbound_handler(struct qdio_q *q) 1014qdio_kick_inbound_handler(struct qdio_q *q)
1015{ 1015{
1016 int count, start, end, real_end, i; 1016 int count, start, end, real_end, i;
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 008e0a5d2eb3..3a0285669adf 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -824,7 +824,7 @@ extern struct list_head qeth_notify_list;
824 824
825#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") 825#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
826 826
827inline static __u8 827static inline __u8
828qeth_get_ipa_adp_type(enum qeth_link_types link_type) 828qeth_get_ipa_adp_type(enum qeth_link_types link_type)
829{ 829{
830 switch (link_type) { 830 switch (link_type) {
@@ -835,7 +835,7 @@ qeth_get_ipa_adp_type(enum qeth_link_types link_type)
835 } 835 }
836} 836}
837 837
838inline static int 838static inline int
839qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size) 839qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
840{ 840{
841 struct sk_buff *new_skb = NULL; 841 struct sk_buff *new_skb = NULL;
@@ -852,6 +852,7 @@ qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
852 } 852 }
853 return 0; 853 return 0;
854} 854}
855
855static inline struct sk_buff * 856static inline struct sk_buff *
856qeth_pskb_unshare(struct sk_buff *skb, int pri) 857qeth_pskb_unshare(struct sk_buff *skb, int pri)
857{ 858{
@@ -863,8 +864,7 @@ qeth_pskb_unshare(struct sk_buff *skb, int pri)
863 return nskb; 864 return nskb;
864} 865}
865 866
866 867static inline void *
867inline static void *
868qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size) 868qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
869{ 869{
870 void *hdr; 870 void *hdr;
@@ -887,7 +887,7 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
887} 887}
888 888
889 889
890inline static int 890static inline int
891qeth_get_hlen(__u8 link_type) 891qeth_get_hlen(__u8 link_type)
892{ 892{
893#ifdef CONFIG_QETH_IPV6 893#ifdef CONFIG_QETH_IPV6
@@ -911,7 +911,7 @@ qeth_get_hlen(__u8 link_type)
911#endif /* CONFIG_QETH_IPV6 */ 911#endif /* CONFIG_QETH_IPV6 */
912} 912}
913 913
914inline static unsigned short 914static inline unsigned short
915qeth_get_netdev_flags(struct qeth_card *card) 915qeth_get_netdev_flags(struct qeth_card *card)
916{ 916{
917 if (card->options.layer2) 917 if (card->options.layer2)
@@ -929,7 +929,7 @@ qeth_get_netdev_flags(struct qeth_card *card)
929 } 929 }
930} 930}
931 931
932inline static int 932static inline int
933qeth_get_initial_mtu_for_card(struct qeth_card * card) 933qeth_get_initial_mtu_for_card(struct qeth_card * card)
934{ 934{
935 switch (card->info.type) { 935 switch (card->info.type) {
@@ -950,7 +950,7 @@ qeth_get_initial_mtu_for_card(struct qeth_card * card)
950 } 950 }
951} 951}
952 952
953inline static int 953static inline int
954qeth_get_max_mtu_for_card(int cardtype) 954qeth_get_max_mtu_for_card(int cardtype)
955{ 955{
956 switch (cardtype) { 956 switch (cardtype) {
@@ -965,7 +965,7 @@ qeth_get_max_mtu_for_card(int cardtype)
965 } 965 }
966} 966}
967 967
968inline static int 968static inline int
969qeth_get_mtu_out_of_mpc(int cardtype) 969qeth_get_mtu_out_of_mpc(int cardtype)
970{ 970{
971 switch (cardtype) { 971 switch (cardtype) {
@@ -976,7 +976,7 @@ qeth_get_mtu_out_of_mpc(int cardtype)
976 } 976 }
977} 977}
978 978
979inline static int 979static inline int
980qeth_get_mtu_outof_framesize(int framesize) 980qeth_get_mtu_outof_framesize(int framesize)
981{ 981{
982 switch (framesize) { 982 switch (framesize) {
@@ -993,7 +993,7 @@ qeth_get_mtu_outof_framesize(int framesize)
993 } 993 }
994} 994}
995 995
996inline static int 996static inline int
997qeth_mtu_is_valid(struct qeth_card * card, int mtu) 997qeth_mtu_is_valid(struct qeth_card * card, int mtu)
998{ 998{
999 switch (card->info.type) { 999 switch (card->info.type) {
@@ -1008,7 +1008,7 @@ qeth_mtu_is_valid(struct qeth_card * card, int mtu)
1008 } 1008 }
1009} 1009}
1010 1010
1011inline static int 1011static inline int
1012qeth_get_arphdr_type(int cardtype, int linktype) 1012qeth_get_arphdr_type(int cardtype, int linktype)
1013{ 1013{
1014 switch (cardtype) { 1014 switch (cardtype) {
@@ -1027,7 +1027,7 @@ qeth_get_arphdr_type(int cardtype, int linktype)
1027} 1027}
1028 1028
1029#ifdef CONFIG_QETH_PERF_STATS 1029#ifdef CONFIG_QETH_PERF_STATS
1030inline static int 1030static inline int
1031qeth_get_micros(void) 1031qeth_get_micros(void)
1032{ 1032{
1033 return (int) (get_clock() >> 12); 1033 return (int) (get_clock() >> 12);
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index b2002ba6e2aa..79ae73b23680 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -182,13 +182,13 @@ static int irq_probe(void);
182static void *bios_base; 182static void *bios_base;
183#endif 183#endif
184 184
185#if PORT_BASE 185#ifdef PORT_BASE
186static int port_base = PORT_BASE; 186static int port_base = PORT_BASE;
187#else 187#else
188static int port_base; 188static int port_base;
189#endif 189#endif
190 190
191#if IRQ_LEV 191#ifdef IRQ_LEV
192static int irq_level = IRQ_LEV; 192static int irq_level = IRQ_LEV;
193#else 193#else
194static int irq_level = -1; /* 0 is 'no irq', so use -1 for 'uninitialized' */ 194static int irq_level = -1; /* 0 is 'no irq', so use -1 for 'uninitialized' */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6466a184a141..329cb2331339 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1505,7 +1505,7 @@ ahd_linux_dev_reset(Scsi_Cmnd *cmd)
1505 memset(recovery_cmd, 0, sizeof(struct scsi_cmnd)); 1505 memset(recovery_cmd, 0, sizeof(struct scsi_cmnd));
1506 recovery_cmd->device = cmd->device; 1506 recovery_cmd->device = cmd->device;
1507 recovery_cmd->scsi_done = ahd_linux_dev_reset_complete; 1507 recovery_cmd->scsi_done = ahd_linux_dev_reset_complete;
1508#if AHD_DEBUG 1508#ifdef AHD_DEBUG
1509 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 1509 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
1510 printf("%s:%d:%d:%d: Device reset called for cmd %p\n", 1510 printf("%s:%d:%d:%d: Device reset called for cmd %p\n",
1511 ahd_name(ahd), cmd->device->channel, cmd->device->id, 1511 ahd_name(ahd), cmd->device->channel, cmd->device->id,
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 4c3bb7bb8420..703f6e44889d 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -582,7 +582,7 @@ ahd_check_extport(struct ahd_softc *ahd)
582 } 582 }
583 } 583 }
584 584
585#if AHD_DEBUG 585#ifdef AHD_DEBUG
586 if (have_seeprom != 0 586 if (have_seeprom != 0
587 && (ahd_debug & AHD_DUMP_SEEPROM) != 0) { 587 && (ahd_debug & AHD_DUMP_SEEPROM) != 0) {
588 uint16_t *sc_data; 588 uint16_t *sc_data;
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index ae13c002f60d..929170dcd3cb 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -744,7 +744,7 @@ static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
744 744
745 745
746/* Find cmd in SRB list */ 746/* Find cmd in SRB list */
747inline static struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd, 747static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
748 struct list_head *head) 748 struct list_head *head)
749{ 749{
750 struct ScsiReqBlk *i; 750 struct ScsiReqBlk *i;
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
index 95a4cce6c892..4bf447792129 100644
--- a/drivers/scsi/dpt/dptsig.h
+++ b/drivers/scsi/dpt/dptsig.h
@@ -76,7 +76,7 @@ typedef unsigned long sigLONG;
76#endif /* aix */ 76#endif /* aix */
77#endif 77#endif
78/* For the Macintosh */ 78/* For the Macintosh */
79#if STRUCTALIGNMENTSUPPORTED 79#ifdef STRUCTALIGNMENTSUPPORTED
80#pragma options align=mac68k 80#pragma options align=mac68k
81#endif 81#endif
82 82
@@ -332,7 +332,7 @@ typedef struct dpt_sig {
332#endif /* aix */ 332#endif /* aix */
333#endif 333#endif
334/* For the Macintosh */ 334/* For the Macintosh */
335#if STRUCTALIGNMENTSUPPORTED 335#ifdef STRUCTALIGNMENTSUPPORTED
336#pragma options align=reset 336#pragma options align=reset
337#endif 337#endif
338 338
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index ab9de39bb50b..897743b23342 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -92,10 +92,6 @@
 
 #define DTC_PUBLIC_RELEASE 2
 
-/*#define DTCDEBUG 0x1*/
-#define DTCDEBUG_INIT	0x1
-#define DTCDEBUG_TRANSFER 0x2
-
 /*
  * The DTC3180 & 3280 boards are memory mapped.
  *
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
index ed73629eb2f9..277cd015ee4e 100644
--- a/drivers/scsi/dtc.h
+++ b/drivers/scsi/dtc.h
@@ -28,6 +28,10 @@
 #ifndef DTC3280_H
 #define DTC3280_H
 
+#define DTCDEBUG 0
+#define DTCDEBUG_INIT	0x1
+#define DTCDEBUG_TRANSFER 0x2
+
 static int dtc_abort(Scsi_Cmnd *);
 static int dtc_biosparam(struct scsi_device *, struct block_device *,
 			sector_t, int*);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index aecf32dd0bde..3b2a5bf5c43e 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -570,7 +570,7 @@ static void do_pause(unsigned amount) /* Pause for amount*10 milliseconds */
 	mdelay(10*amount);
 }
 
-inline static void fdomain_make_bus_idle( void )
+static inline void fdomain_make_bus_idle( void )
 {
 	outb(0, port_base + SCSI_Cntl);
 	outb(0, port_base + SCSI_Mode_Cntl);
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 2094d4811d61..ea6f3c0e05d9 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -716,7 +716,7 @@ static int init_tulip(HCS * pCurHcb, SCB * scbp, int tul_num_scb,
 		pCurHcb->HCS_SCSI_ID = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
 	pCurHcb->HCS_IdMask = ~(1 << pCurHcb->HCS_SCSI_ID);
 
-#if CHK_PARITY
+#ifdef CHK_PARITY
 	/* Enable parity error response */
 	TUL_WR(pCurHcb->HCS_Base + TUL_PCMD, TUL_RD(pCurHcb->HCS_Base, TUL_PCMD) | 0x40);
 #endif
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 275ba34b3c9d..a11f1ae7b98e 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -30,8 +30,9 @@ memcpy_toio() and memcpy_fromio() can be used.
 However on a big-endian host, copy 4 bytes at a time,
 using writel() and readl().
  *******************************************************************/
+#include <asm/byteorder.h>
 
-#if __BIG_ENDIAN
+#ifdef __BIG_ENDIAN
 
 static inline void
 lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
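
The new include is what makes the corrected test meaningful: in the kernel, <asm/byteorder.h> defines exactly one of __BIG_ENDIAN or __LITTLE_ENDIAN, so without it "#ifdef __BIG_ENDIAN" is false on every build, and the old "#if __BIG_ENDIAN" was likewise always 0 when the header never got pulled in. A sketch under that assumption (note that userspace <endian.h> defines both names, so there the test must compare __BYTE_ORDER instead):

#include <asm/byteorder.h>	/* defines exactly one of the two names */

#ifdef __BIG_ENDIAN
#define DEMO_LUN_SHIFT	16	/* lun lands in the high payload word */
#else
#define DEMO_LUN_SHIFT	8
#endif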
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index d8fd2010ef41..0fd9ba14e1b5 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -18,6 +18,8 @@
  * included with this package.                                     *
  *******************************************************************/
 
+#include <asm/byteorder.h>
+
 struct lpfc_hba;
 
 #define list_remove_head(list, entry, type, member)	\
@@ -81,7 +83,7 @@ struct fcp_cmnd {
 	/* # of bits to shift lun id to end up in right
 	 * payload word, little endian = 8, big = 16.
 	 */
-#if __BIG_ENDIAN
+#ifdef __BIG_ENDIAN
 #define FC_LUN_SHIFT		16
 #define FC_ADDR_MODE_SHIFT	24
 #else	/* __LITTLE_ENDIAN */
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 363e0ebd4a39..72bc947e45b6 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -2,6 +2,7 @@
 #define PSEUDO_DMA
 #define FOO
 #define UNSAFE	/* Not unsafe for PAS16 -- use it */
+#define PDEBUG 0
 
 /*
  * This driver adapted from Drew Eckhardt's Trantor T128 driver
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index fccecf67423e..6c73b84c6e64 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -2,7 +2,6 @@ config SCSI_QLA2XXX
 	tristate
 	default (SCSI && PCI)
 	depends on SCSI && PCI
-	select SCSI_FC_ATTRS
 
 config SCSI_QLA21XX
 	tristate "QLogic ISP2100 host adapter family support"
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 982b83604b41..00d2e3c21ef6 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,5 +1,4 @@
 EXTRA_CFLAGS += -DUNIQUE_FW_NAME
-CONFIG_SCSI_QLA24XX=m
 EXTRA_CFLAGS += -DCONFIG_SCSI_QLA24XX -DCONFIG_SCSI_QLA24XX_MODULE
 
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ad3a5b142468..2d3c4ac475f2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -756,7 +756,8 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
 	 * register it and tell the rest of the kernel
 	 * about it.
 	 */
-	scsi_sysfs_add_sdev(sdev);
+	if (scsi_sysfs_add_sdev(sdev) != 0)
+		return SCSI_SCAN_NO_RESPONSE;
 
 	return SCSI_SCAN_LUN_PRESENT;
 }
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index c55c7a57afa0..3131a6bf7ab7 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -151,6 +151,16 @@
  */
 #define SYM_CONF_MIN_ASYNC (40)
 
+
+/*
+ *  MEMORY ALLOCATOR.
+ */
+
+#define SYM_MEM_WARN	1	/* Warn on failed operations */
+
+#define SYM_MEM_PAGE_ORDER 0	/* 1 PAGE maximum */
+#define SYM_MEM_CLUSTER_SHIFT	(PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
+#define SYM_MEM_FREE_UNUSED	/* Free unused pages immediately */
 /*
  * Shortest memory chunk is (1<<SYM_MEM_SHIFT), currently 16.
  * Actual allocations happen as SYM_MEM_CLUSTER_SIZE sized.
@@ -1192,12 +1202,6 @@ static inline void sym_setup_data_pointers(struct sym_hcb *np,
  *  MEMORY ALLOCATOR.
  */
 
-#define SYM_MEM_PAGE_ORDER 0	/* 1 PAGE maximum */
-#define SYM_MEM_CLUSTER_SHIFT	(PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
-#define SYM_MEM_FREE_UNUSED	/* Free unused pages immediately */
-
-#define SYM_MEM_WARN	1	/* Warn on failed operations */
-
 #define sym_get_mem_cluster()	\
 	(void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
 #define sym_free_mem_cluster(p)	\
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index cd9140e158cf..994b7566bcac 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -367,7 +367,7 @@ static void S24C16_read_byte(struct sym_device *np, u_char *read_data, u_char ac
 	S24C16_write_ack(np, ack_data, gpreg, gpcntl);
 }
 
-#if SYM_CONF_NVRAM_WRITE_SUPPORT
+#ifdef SYM_CONF_NVRAM_WRITE_SUPPORT
 /*
  *  Write 'len' bytes starting at 'offset'.
  */
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index 9ad1d68827a7..596f3a32a1c6 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -43,6 +43,7 @@
 
 #define T128_PUBLIC_RELEASE 3
 
+#define TDEBUG 0
 #define TDEBUG_INIT	0x1
 #define TDEBUG_TRANSFER	0x2
 
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 52b0a0558ed4..0e21f583690e 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -865,6 +865,8 @@ enum pci_board_num_t {
 	pbn_b0_2_921600,
 	pbn_b0_4_921600,
 
+	pbn_b0_2_1130000,
+
 	pbn_b0_4_1152000,
 
 	pbn_b0_bt_1_115200,
@@ -999,6 +1001,14 @@ static struct pciserial_board pci_boards[] __devinitdata = {
 		.base_baud	= 921600,
 		.uart_offset	= 8,
 	},
+
+	[pbn_b0_2_1130000] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 2,
+		.base_baud	= 1130000,
+		.uart_offset	= 8,
+	},
+
 	[pbn_b0_4_1152000] = {
 		.flags		= FL_BASE0,
 		.num_ports	= 4,
@@ -1868,6 +1878,16 @@ static struct pci_device_id serial_pci_tbl[] = {
 	{	PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
 		PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, 0, 0,
 		pbn_b0_4_1152000 },
+
+	/*
+	 * The below card is a little controversial since it is the
+	 * subject of a PCI vendor/device ID clash.  (See
+	 * www.ussg.iu.edu/hypermail/linux/kernel/0303.1/0516.html).
+	 * For now just used the hex ID 0x950a.
+	 */
+	{	PCI_VENDOR_ID_OXSEMI, 0x950a,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_2_1130000 },
 	{	PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_b0_4_115200 },
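
For readers following how the three 8250_pci additions connect: the new enum value names a slot in pci_boards[], the new pci_boards[] entry describes the UART cluster, and the new match-table row carries that enum value in its driver_data field so probe can look the description up. A condensed, hypothetical sketch of the same wiring (demo names; the flag and offset comments are assumptions about intent):

enum { pbn_demo };			/* stands in for pbn_b0_2_1130000 */

static struct pciserial_board demo_boards[] = {
	[pbn_demo] = {
		.flags		= FL_BASE0,	/* registers behind BAR 0 */
		.num_ports	= 2,
		.base_baud	= 1130000,	/* UART input clock / 16 */
		.uart_offset	= 8,		/* register stride per port */
	},
};

static struct pci_device_id demo_ids[] = {
	{ PCI_VENDOR_ID_OXSEMI, 0x950a,		/* see the ID-clash note above */
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  pbn_demo },				/* driver_data = board index */
	{ 0, },
};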
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
index 7911912f50c7..8efbd6d1d6a4 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
@@ -185,7 +185,7 @@ int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
 	memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
 		L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
 	if (is_con) {
-		mem_addr = (u8 *) m8xx_cpm_hostalloc(memsz);
+		mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8));
 		dma_addr = 0;
 	} else
 		mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr,
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index 5bf3c45521f4..18753193f59b 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -89,7 +89,7 @@ enum {
 #define WRITEBUFLEN	((4096) + 4)
 #define MYFLIPLEN	N_TTY_BUF_SIZE
 
-#define JSM_VERSION	"jsm: 1.1-1-INKERNEL"
+#define JSM_VERSION	"jsm: 1.2-1-INKERNEL"
 #define JSM_PARTNUM	"40002438_A-INKERNEL"
 
 struct jsm_board;
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index cc5d21300ed3..7e56c7824194 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -22,6 +22,7 @@
  * Scott H Kilau <Scott_Kilau@digi.com>
  * Wendy Xiong   <wendyx@us.ltcfwd.linux.ibm.com>
  *
+ *
  ***********************************************************************/
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
@@ -42,7 +43,7 @@ struct uart_driver jsm_uart_driver = {
 	.owner		= THIS_MODULE,
 	.driver_name	= JSM_DRIVER_NAME,
 	.dev_name	= "ttyn",
-	.major		= 253,
+	.major		= 0,
 	.minor		= JSM_MINOR_START,
 	.nr		= NR_PORTS,
 };
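
A major number of 0 is the conventional request for dynamic allocation, so the driver stops squatting on major 253 and lets the TTY core hand it a free one at registration time. The same convention for a plain character device, as a hedged, hypothetical sketch:

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>

static struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};
static int demo_major;

static int __init demo_init(void)
{
	/* major 0 => the kernel picks a free major and returns it */
	demo_major = register_chrdev(0, "demo", &demo_fops);
	return demo_major < 0 ? demo_major : 0;
}

static void __exit demo_exit(void)
{
	unregister_chrdev(demo_major, "demo");
}

module_init(demo_init);
module_exit(demo_exit);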
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/serial/jsm/jsm_neo.c
index 3a11a69feb44..6f22b42d9337 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/serial/jsm/jsm_neo.c
@@ -48,8 +48,9 @@ static inline void neo_pci_posting_flush(struct jsm_board *bd)
 
 static void neo_set_cts_flow_control(struct jsm_channel *ch)
 {
-	u8 ier = readb(&ch->ch_neo_uart->ier);
-	u8 efr = readb(&ch->ch_neo_uart->efr);
+	u8 ier, efr;
+	ier = readb(&ch->ch_neo_uart->ier);
+	efr = readb(&ch->ch_neo_uart->efr);
 
 	jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting CTSFLOW\n");
 
@@ -78,8 +79,9 @@ static void neo_set_cts_flow_control(struct jsm_channel *ch)
 
 static void neo_set_rts_flow_control(struct jsm_channel *ch)
 {
-	u8 ier = readb(&ch->ch_neo_uart->ier);
-	u8 efr = readb(&ch->ch_neo_uart->efr);
+	u8 ier, efr;
+	ier = readb(&ch->ch_neo_uart->ier);
+	efr = readb(&ch->ch_neo_uart->efr);
 
 	jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting RTSFLOW\n");
 
@@ -117,8 +119,9 @@ static void neo_set_rts_flow_control(struct jsm_channel *ch)
 
 static void neo_set_ixon_flow_control(struct jsm_channel *ch)
 {
-	u8 ier = readb(&ch->ch_neo_uart->ier);
-	u8 efr = readb(&ch->ch_neo_uart->efr);
+	u8 ier, efr;
+	ier = readb(&ch->ch_neo_uart->ier);
+	efr = readb(&ch->ch_neo_uart->efr);
 
 	jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXON FLOW\n");
 
@@ -153,8 +156,9 @@ static void neo_set_ixon_flow_control(struct jsm_channel *ch)
 
 static void neo_set_ixoff_flow_control(struct jsm_channel *ch)
 {
-	u8 ier = readb(&ch->ch_neo_uart->ier);
-	u8 efr = readb(&ch->ch_neo_uart->efr);
+	u8 ier, efr;
+	ier = readb(&ch->ch_neo_uart->ier);
+	efr = readb(&ch->ch_neo_uart->efr);
 
 	jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXOFF FLOW\n");
 
@@ -190,8 +194,9 @@ static void neo_set_ixoff_flow_control(struct jsm_channel *ch)
 
 static void neo_set_no_input_flow_control(struct jsm_channel *ch)
 {
-	u8 ier = readb(&ch->ch_neo_uart->ier);
-	u8 efr = readb(&ch->ch_neo_uart->efr);
+	u8 ier, efr;
+	ier = readb(&ch->ch_neo_uart->ier);
+	efr = readb(&ch->ch_neo_uart->efr);
 
 	jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Input FLOW\n");
 
@@ -228,8 +233,9 @@ static void neo_set_no_input_flow_control(struct jsm_channel *ch)
 
 static void neo_set_no_output_flow_control(struct jsm_channel *ch)
 {
-	u8 ier = readb(&ch->ch_neo_uart->ier);
-	u8 efr = readb(&ch->ch_neo_uart->efr);
+	u8 ier, efr;
+	ier = readb(&ch->ch_neo_uart->ier);
+	efr = readb(&ch->ch_neo_uart->efr);
 
 	jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Output FLOW\n");
 
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 7d21a4f5c425..c84e1486054f 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -361,8 +361,7 @@ int mts_scsi_queuecommand (Scsi_Cmnd *srb, mts_scsi_cmnd_callback callback );
 static void mts_transfer_cleanup( struct urb *transfer );
 static void mts_do_sg(struct urb * transfer, struct pt_regs *regs);
 
-
-inline static
+static inline
 void mts_int_submit_urb (struct urb* transfer,
 			int pipe,
 			void* data,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 04d3120f7236..cde0ed097af6 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1399,8 +1399,8 @@ config FB_TX3912
 	  Say Y here to enable kernel support for the on-board framebuffer.
 
 config FB_G364
-	bool
-	depends on MIPS_MAGNUM_4000 || OLIVETTI_M700
+	bool "G364 frame buffer support"
+	depends on (FB = y) && (MIPS_MAGNUM_4000 || OLIVETTI_M700)
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 47a6b12bc968..e7e8b52014c3 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2521,6 +2521,11 @@ static void __devexit radeonfb_pci_unregister (struct pci_dev *pdev)
 
 	radeonfb_pm_exit(rinfo);
 
+	if (rinfo->mon1_EDID)
+		sysfs_remove_bin_file(&rinfo->pdev->dev.kobj, &edid1_attr);
+	if (rinfo->mon2_EDID)
+		sysfs_remove_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr);
+
 #if 0
 	/* restore original state
 	 *
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 9dd0fbccf994..35c88bd7ba5e 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -275,7 +275,8 @@ static void fb_flashcursor(void *private)
 
 	if (!vc || !CON_IS_VISIBLE(vc) ||
 	    fbcon_is_inactive(vc, info) ||
-	    registered_fb[con2fb_map[vc->vc_num]] != info)
+	    registered_fb[con2fb_map[vc->vc_num]] != info ||
+	    vc_cons[ops->currcon].d->vc_deccm != 1)
 		return;
 	acquire_console_sem();
 	p = &fb_display[vc->vc_num];
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index 4e5ce8f7d65e..c32a2a50bfa2 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -212,7 +212,7 @@ int fb_cmap_to_user(struct fb_cmap *from, struct fb_cmap_user *to)
 
 int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
 {
-	int i, start;
+	int i, start, rc = 0;
 	u16 *red, *green, *blue, *transp;
 	u_int hred, hgreen, hblue, htransp = 0xffff;
 
@@ -225,75 +225,51 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
 	if (start < 0 || (!info->fbops->fb_setcolreg &&
 	    !info->fbops->fb_setcmap))
 		return -EINVAL;
-	if (info->fbops->fb_setcmap)
-		return info->fbops->fb_setcmap(cmap, info);
-	for (i = 0; i < cmap->len; i++) {
-		hred = *red++;
-		hgreen = *green++;
-		hblue = *blue++;
-		if (transp)
-			htransp = *transp++;
-		if (info->fbops->fb_setcolreg(start++,
-					      hred, hgreen, hblue, htransp,
-					      info))
-			break;
+	if (info->fbops->fb_setcmap) {
+		rc = info->fbops->fb_setcmap(cmap, info);
+	} else {
+		for (i = 0; i < cmap->len; i++) {
+			hred = *red++;
+			hgreen = *green++;
+			hblue = *blue++;
+			if (transp)
+				htransp = *transp++;
+			if (info->fbops->fb_setcolreg(start++,
+						      hred, hgreen, hblue,
+						      htransp, info))
+				break;
+		}
 	}
-	return 0;
+	if (rc == 0)
+		fb_copy_cmap(cmap, &info->cmap);
+
+	return rc;
 }
 
 int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
 {
-	int i, start;
-	u16 __user *red, *green, *blue, *transp;
-	u_int hred, hgreen, hblue, htransp = 0xffff;
-
-	red = cmap->red;
-	green = cmap->green;
-	blue = cmap->blue;
-	transp = cmap->transp;
-	start = cmap->start;
+	int rc, size = cmap->len * sizeof(u16);
+	struct fb_cmap umap;
 
-	if (start < 0 || (!info->fbops->fb_setcolreg &&
+	if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
 	    !info->fbops->fb_setcmap))
 		return -EINVAL;
 
-	/* If we can batch, do it */
-	if (info->fbops->fb_setcmap && cmap->len > 1) {
-		struct fb_cmap umap;
-		int size = cmap->len * sizeof(u16);
-		int rc;
-
-		memset(&umap, 0, sizeof(struct fb_cmap));
-		rc = fb_alloc_cmap(&umap, cmap->len, transp != NULL);
-		if (rc)
-			return rc;
-		if (copy_from_user(umap.red, red, size) ||
-		    copy_from_user(umap.green, green, size) ||
-		    copy_from_user(umap.blue, blue, size) ||
-		    (transp && copy_from_user(umap.transp, transp, size))) {
-			rc = -EFAULT;
-		}
-		umap.start = start;
-		if (rc == 0)
-			rc = info->fbops->fb_setcmap(&umap, info);
-		fb_dealloc_cmap(&umap);
+	memset(&umap, 0, sizeof(struct fb_cmap));
+	rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL);
+	if (rc)
 		return rc;
+	if (copy_from_user(umap.red, cmap->red, size) ||
+	    copy_from_user(umap.green, cmap->green, size) ||
+	    copy_from_user(umap.blue, cmap->blue, size) ||
+	    (cmap->transp && copy_from_user(umap.transp, cmap->transp, size))) {
+		fb_dealloc_cmap(&umap);
+		return -EFAULT;
 	}
-
-	for (i = 0; i < cmap->len; i++, red++, blue++, green++) {
-		if (get_user(hred, red) ||
-		    get_user(hgreen, green) ||
-		    get_user(hblue, blue) ||
-		    (transp && get_user(htransp, transp)))
-			return -EFAULT;
-		if (info->fbops->fb_setcolreg(start++,
-					      hred, hgreen, hblue, htransp,
-					      info))
-			return 0;
-		if (transp)
-			transp++;
-	}
-	return 0;
+	umap.start = cmap->start;
+	rc = fb_set_cmap(&umap, info);
+	fb_dealloc_cmap(&umap);
+	return rc;
 }
 
 /**
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 2222de6ad844..40784a944d05 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1164,6 +1164,7 @@ static void __exit
 fbmem_exit(void)
 {
 	class_destroy(fb_class);
+	unregister_chrdev(FB_MAJOR, "fb");
 }
 
 module_exit(fbmem_exit);
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 6cd1976548d4..c2718bb94949 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -1241,6 +1241,8 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
 		vtotal *= 2;
 
 	hfreq = pixclock/htotal;
+	hfreq = (hfreq + 500) / 1000 * 1000;
+
 	vfreq = hfreq/vtotal;
 
 	return (vfreq < vfmin || vfreq > vfmax ||
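
The inserted line snaps hfreq to the nearest kHz using pure integer arithmetic: adding half the divisor before the truncating division rounds to nearest, and the multiply restores the units. A one-liner to convince yourself (assuming hfreq is nowhere near the unsigned overflow limit):

#include <stdio.h>

int main(void)
{
	unsigned int hfreq = 31740;		/* Hz: 31.74 kHz */

	hfreq = (hfreq + 500) / 1000 * 1000;	/* round to nearest kHz */
	printf("%u\n", hfreq);			/* prints 32000 */
	return 0;
}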
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index ddc9443254d9..63b505cce4ec 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -242,10 +242,68 @@ static ssize_t show_virtual(struct class_device *class_device, char *buf)
 			fb_info->var.yres_virtual);
 }
 
-static ssize_t store_cmap(struct class_device *class_device, const char * buf,
+/* Format for cmap is "%02x%c%4x%4x%4x\n" */
+/* %02x entry %c transp %4x red %4x blue %4x green \n */
+/* 255 rows at 16 chars equals 4096 */
+/* PAGE_SIZE can be 4096 or larger */
+static ssize_t store_cmap(struct class_device *class_device, const char *buf,
 			  size_t count)
 {
-//	struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+	struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+	int rc, i, start, length, transp = 0;
+
+	if ((count > 4096) || ((count % 16) != 0) || (PAGE_SIZE < 4096))
+		return -EINVAL;
+
+	if (!fb_info->fbops->fb_setcolreg && !fb_info->fbops->fb_setcmap)
+		return -EINVAL;
+
+	sscanf(buf, "%02x", &start);
+	length = count / 16;
+
+	for (i = 0; i < length; i++)
+		if (buf[i * 16 + 2] != ' ')
+			transp = 1;
+
+	/* If we can batch, do it */
+	if (fb_info->fbops->fb_setcmap && length > 1) {
+		struct fb_cmap umap;
+
+		memset(&umap, 0, sizeof(umap));
+		if ((rc = fb_alloc_cmap(&umap, length, transp)))
+			return rc;
+
+		umap.start = start;
+		for (i = 0; i < length; i++) {
+			sscanf(&buf[i * 16 + 3], "%4hx", &umap.red[i]);
+			sscanf(&buf[i * 16 + 7], "%4hx", &umap.blue[i]);
+			sscanf(&buf[i * 16 + 11], "%4hx", &umap.green[i]);
+			if (transp)
+				umap.transp[i] = (buf[i * 16 + 2] != ' ');
+		}
+		rc = fb_info->fbops->fb_setcmap(&umap, fb_info);
+		fb_copy_cmap(&umap, &fb_info->cmap);
+		fb_dealloc_cmap(&umap);
+
+		return rc;
+	}
+	for (i = 0; i < length; i++) {
+		u16 red, blue, green, tsp;
+
+		sscanf(&buf[i * 16 + 3], "%4hx", &red);
+		sscanf(&buf[i * 16 + 7], "%4hx", &blue);
+		sscanf(&buf[i * 16 + 11], "%4hx", &green);
+		tsp = (buf[i * 16 + 2] != ' ');
+		if ((rc = fb_info->fbops->fb_setcolreg(start++,
+				red, green, blue, tsp, fb_info)))
+			return rc;
+
+		fb_info->cmap.red[i] = red;
+		fb_info->cmap.blue[i] = blue;
+		fb_info->cmap.green[i] = green;
+		if (transp)
+			fb_info->cmap.transp[i] = tsp;
+	}
 	return 0;
 }
 
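
The new store_cmap() parses fixed-width 16-character records: a two-digit hex entry index, one transparency character (anything but a space marks the entry transparent), then red, blue and green as four hex digits each, newline-terminated. A user-space sketch emitting one record in exactly that shape (the sysfs file name and path are assumptions; they vary with the fb number):

#include <stdio.h>

int main(void)
{
	unsigned short red = 0xffff, blue = 0, green = 0x7fff;

	/* 2 + 1 + 4 + 4 + 4 + '\n' = 16 characters per entry; redirect
	 * the output to the fb device's cmap attribute under sysfs,
	 * e.g. /sys/class/graphics/fb0/... (exact name assumed). */
	printf("%02x%c%4x%4x%4x\n", 0x01, ' ', red, blue, green);
	return 0;
}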
@@ -253,20 +311,24 @@ static ssize_t show_cmap(struct class_device *class_device, char *buf)
 {
 	struct fb_info *fb_info =
 		(struct fb_info *)class_get_devdata(class_device);
-	unsigned int offset = 0, i;
+	unsigned int i;
 
 	if (!fb_info->cmap.red || !fb_info->cmap.blue ||
-	    !fb_info->cmap.green || !fb_info->cmap.transp)
+	    !fb_info->cmap.green)
+		return -EINVAL;
+
+	if (PAGE_SIZE < 4096)
 		return -EINVAL;
 
+	/* don't mess with the format, the buffer is PAGE_SIZE */
+	/* 255 entries at 16 chars per line equals 4096 = PAGE_SIZE */
 	for (i = 0; i < fb_info->cmap.len; i++) {
-		offset += snprintf(buf, PAGE_SIZE - offset,
-				"%d,%d,%d,%d,%d\n", i + fb_info->cmap.start,
-				fb_info->cmap.red[i], fb_info->cmap.blue[i],
-				fb_info->cmap.green[i],
-				fb_info->cmap.transp[i]);
+		sprintf(&buf[ i * 16], "%02x%c%4x%4x%4x\n", i + fb_info->cmap.start,
+			((fb_info->cmap.transp && fb_info->cmap.transp[i]) ? '*' : ' '),
+			fb_info->cmap.red[i], fb_info->cmap.blue[i],
+			fb_info->cmap.green[i]);
 	}
-	return offset;
+	return 4096;
 }
 
 static ssize_t store_blank(struct class_device *class_device, const char * buf,
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 5dceddedf507..42c17efa9fb0 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -138,27 +138,27 @@ static struct fb_var_screeninfo pm2fb_var __devinitdata = {
  * Utility functions
  */
 
-inline static u32 RD32(unsigned char __iomem *base, s32 off)
+static inline u32 RD32(unsigned char __iomem *base, s32 off)
 {
 	return fb_readl(base + off);
 }
 
-inline static void WR32(unsigned char __iomem *base, s32 off, u32 v)
+static inline void WR32(unsigned char __iomem *base, s32 off, u32 v)
 {
 	fb_writel(v, base + off);
 }
 
-inline static u32 pm2_RD(struct pm2fb_par* p, s32 off)
+static inline u32 pm2_RD(struct pm2fb_par* p, s32 off)
 {
 	return RD32(p->v_regs, off);
 }
 
-inline static void pm2_WR(struct pm2fb_par* p, s32 off, u32 v)
+static inline void pm2_WR(struct pm2fb_par* p, s32 off, u32 v)
 {
 	WR32(p->v_regs, off, v);
 }
 
-inline static u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
+static inline u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
 {
 	int index = PM2R_RD_INDEXED_DATA;
 	switch (p->type) {
@@ -174,7 +174,7 @@ inline static u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
 	return pm2_RD(p, index);
 }
 
-inline static void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
+static inline void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
 {
 	int index = PM2R_RD_INDEXED_DATA;
 	switch (p->type) {
@@ -190,7 +190,7 @@ inline static void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
 	pm2_WR(p, index, v);
 }
 
-inline static void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
+static inline void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
 {
 	pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
 	mb();
@@ -200,7 +200,7 @@ inline static void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
 #ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
 #define WAIT_FIFO(p,a)
 #else
-inline static void WAIT_FIFO(struct pm2fb_par* p, u32 a)
+static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
 {
 	while( pm2_RD(p, PM2R_IN_FIFO_SPACE) < a );
 	mb();
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 6a9e183be41b..ae297e222681 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -1826,7 +1826,7 @@ static void __devinit riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
 #ifdef CONFIG_PPC_OF
 	if (!riva_get_EDID_OF(info, pdev))
 		printk(PFX "could not retrieve EDID from OF\n");
-#elif CONFIG_FB_RIVA_I2C
+#elif defined(CONFIG_FB_RIVA_I2C)
 	if (!riva_get_EDID_i2c(info))
 		printk(PFX "could not retrieve EDID from DDC/I2C\n");
 #endif