Diffstat (limited to 'drivers/scsi/mvsas/mv_sas.c')
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 2117
1 files changed, 1228 insertions, 889 deletions
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 6a583c19c6e5..d79ac179eaff 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1,97 +1,65 @@ | |||
1 | /* | 1 | /* |
2 | mv_sas.c - Marvell 88SE6440 SAS/SATA support | 2 | * Marvell 88SE64xx/88SE94xx main function |
3 | 3 | * | |
4 | Copyright 2007 Red Hat, Inc. | 4 | * Copyright 2007 Red Hat, Inc. |
5 | Copyright 2008 Marvell. <kewei@marvell.com> | 5 | * Copyright 2008 Marvell. <kewei@marvell.com> |
6 | 6 | * | |
7 | This program is free software; you can redistribute it and/or | 7 | * This file is licensed under GPLv2. |
8 | modify it under the terms of the GNU General Public License as | 8 | * |
9 | published by the Free Software Foundation; either version 2, | 9 | * This program is free software; you can redistribute it and/or |
10 | or (at your option) any later version. | 10 | * modify it under the terms of the GNU General Public License as |
11 | 11 | * published by the Free Software Foundation; version 2 of the | |
12 | This program is distributed in the hope that it will be useful, | 12 | * License. |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty | 13 | * |
14 | of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | 14 | * This program is distributed in the hope that it will be useful, |
15 | See the GNU General Public License for more details. | 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
17 | You should have received a copy of the GNU General Public | 17 | * General Public License for more details. |
18 | License along with this program; see the file COPYING. If not, | 18 | * |
19 | write to the Free Software Foundation, 675 Mass Ave, Cambridge, | 19 | * You should have received a copy of the GNU General Public License |
20 | MA 02139, USA. | 20 | * along with this program; if not, write to the Free Software |
21 | 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | |
22 | --------------------------------------------------------------- | 22 | * USA |
23 | 23 | */ | |
24 | Random notes: | ||
25 | * hardware supports controlling the endian-ness of data | ||
26 | structures. this permits elimination of all the le32_to_cpu() | ||
27 | and cpu_to_le32() conversions. | ||
28 | |||
29 | */ | ||
30 | 24 | ||
31 | #include "mv_sas.h" | 25 | #include "mv_sas.h" |
32 | #include "mv_64xx.h" | ||
33 | #include "mv_chips.h" | ||
34 | |||
35 | /* offset for D2H FIS in the Received FIS List Structure */ | ||
36 | #define SATA_RECEIVED_D2H_FIS(reg_set) \ | ||
37 | ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) | ||
38 | #define SATA_RECEIVED_PIO_FIS(reg_set) \ | ||
39 | ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) | ||
40 | #define UNASSOC_D2H_FIS(id) \ | ||
41 | ((void *) mvi->rx_fis + 0x100 * id) | ||
42 | |||
43 | struct mvs_task_exec_info { | ||
44 | struct sas_task *task; | ||
45 | struct mvs_cmd_hdr *hdr; | ||
46 | struct mvs_port *port; | ||
47 | u32 tag; | ||
48 | int n_elem; | ||
49 | }; | ||
50 | |||
51 | static void mvs_release_task(struct mvs_info *mvi, int phy_no); | ||
52 | static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); | ||
53 | static void mvs_update_phyinfo(struct mvs_info *mvi, int i, | ||
54 | int get_st); | ||
55 | static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); | ||
56 | static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, | ||
57 | u32 slot_idx); | ||
58 | 26 | ||
59 | static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) | 27 | static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) |
60 | { | 28 | { |
61 | if (task->lldd_task) { | 29 | if (task->lldd_task) { |
62 | struct mvs_slot_info *slot; | 30 | struct mvs_slot_info *slot; |
63 | slot = (struct mvs_slot_info *) task->lldd_task; | 31 | slot = (struct mvs_slot_info *) task->lldd_task; |
64 | *tag = slot - mvi->slot_info; | 32 | *tag = slot->slot_tag; |
65 | return 1; | 33 | return 1; |
66 | } | 34 | } |
67 | return 0; | 35 | return 0; |
68 | } | 36 | } |
69 | 37 | ||
70 | static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) | 38 | void mvs_tag_clear(struct mvs_info *mvi, u32 tag) |
71 | { | 39 | { |
72 | void *bitmap = (void *) &mvi->tags; | 40 | void *bitmap = (void *) &mvi->tags; |
73 | clear_bit(tag, bitmap); | 41 | clear_bit(tag, bitmap); |
74 | } | 42 | } |
75 | 43 | ||
76 | static void mvs_tag_free(struct mvs_info *mvi, u32 tag) | 44 | void mvs_tag_free(struct mvs_info *mvi, u32 tag) |
77 | { | 45 | { |
78 | mvs_tag_clear(mvi, tag); | 46 | mvs_tag_clear(mvi, tag); |
79 | } | 47 | } |
80 | 48 | ||
81 | static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) | 49 | void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) |
82 | { | 50 | { |
83 | void *bitmap = (void *) &mvi->tags; | 51 | void *bitmap = (void *) &mvi->tags; |
84 | set_bit(tag, bitmap); | 52 | set_bit(tag, bitmap); |
85 | } | 53 | } |
86 | 54 | ||
87 | static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) | 55 | inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) |
88 | { | 56 | { |
89 | unsigned int index, tag; | 57 | unsigned int index, tag; |
90 | void *bitmap = (void *) &mvi->tags; | 58 | void *bitmap = (void *) &mvi->tags; |
91 | 59 | ||
92 | index = find_first_zero_bit(bitmap, MVS_SLOTS); | 60 | index = find_first_zero_bit(bitmap, mvi->tags_num); |
93 | tag = index; | 61 | tag = index; |
94 | if (tag >= MVS_SLOTS) | 62 | if (tag >= mvi->tags_num) |
95 | return -SAS_QUEUE_FULL; | 63 | return -SAS_QUEUE_FULL; |
96 | mvs_tag_set(mvi, tag); | 64 | mvs_tag_set(mvi, tag); |
97 | *tag_out = tag; | 65 | *tag_out = tag; |
@@ -101,11 +69,11 @@ static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) | |||
101 | void mvs_tag_init(struct mvs_info *mvi) | 69 | void mvs_tag_init(struct mvs_info *mvi) |
102 | { | 70 | { |
103 | int i; | 71 | int i; |
104 | for (i = 0; i < MVS_SLOTS; ++i) | 72 | for (i = 0; i < mvi->tags_num; ++i) |
105 | mvs_tag_clear(mvi, i); | 73 | mvs_tag_clear(mvi, i); |
106 | } | 74 | } |
107 | 75 | ||
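
[Editor's note] The tag handlers above switch the slot-tag pool from the fixed MVS_SLOTS constant to the per-host mvi->tags_num. A stand-alone user-space sketch of the same allocate/free logic follows; TAGS_NUM, the single 64-bit bitmap and the -1 error code are illustrative stand-ins for mvi->tags_num, mvi->tags and -SAS_QUEUE_FULL.

/* Minimal sketch of the bitmap tag allocator the driver implements with
 * the kernel's find_first_zero_bit()/set_bit()/clear_bit(). */
#include <stdint.h>
#include <stdio.h>

#define TAGS_NUM 64
static uint64_t tag_bitmap;			/* one bit per outstanding slot */

static int tag_alloc(uint32_t *tag_out)
{
	uint32_t tag;

	for (tag = 0; tag < TAGS_NUM; tag++)	/* find_first_zero_bit() */
		if (!(tag_bitmap & (1ULL << tag)))
			break;
	if (tag >= TAGS_NUM)
		return -1;			/* -SAS_QUEUE_FULL */
	tag_bitmap |= 1ULL << tag;		/* mvs_tag_set() */
	*tag_out = tag;
	return 0;
}

static void tag_free(uint32_t tag)
{
	tag_bitmap &= ~(1ULL << tag);		/* mvs_tag_clear() */
}

int main(void)
{
	uint32_t t;

	while (!tag_alloc(&t))			/* drain the pool: tags 0..63 */
		printf("allocated tag %u\n", t);
	tag_free(3);				/* slot 3 completes */
	tag_alloc(&t);				/* next allocation reuses tag 3 */
	printf("reallocated tag %u\n", t);
	return 0;
}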
108 | static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) | 76 | void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) |
109 | { | 77 | { |
110 | u32 i; | 78 | u32 i; |
111 | u32 run; | 79 | u32 run; |
@@ -113,7 +81,7 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) | |||
113 | 81 | ||
114 | offset = 0; | 82 | offset = 0; |
115 | while (size) { | 83 | while (size) { |
116 | printk("%08X : ", baseaddr + offset); | 84 | printk(KERN_DEBUG"%08X : ", baseaddr + offset); |
117 | if (size >= 16) | 85 | if (size >= 16) |
118 | run = 16; | 86 | run = 16; |
119 | else | 87 | else |
@@ -121,31 +89,31 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) | |||
121 | size -= run; | 89 | size -= run; |
122 | for (i = 0; i < 16; i++) { | 90 | for (i = 0; i < 16; i++) { |
123 | if (i < run) | 91 | if (i < run) |
124 | printk("%02X ", (u32)data[i]); | 92 | printk(KERN_DEBUG"%02X ", (u32)data[i]); |
125 | else | 93 | else |
126 | printk(" "); | 94 | printk(KERN_DEBUG" "); |
127 | } | 95 | } |
128 | printk(": "); | 96 | printk(KERN_DEBUG": "); |
129 | for (i = 0; i < run; i++) | 97 | for (i = 0; i < run; i++) |
130 | printk("%c", isalnum(data[i]) ? data[i] : '.'); | 98 | printk(KERN_DEBUG"%c", |
131 | printk("\n"); | 99 | isalnum(data[i]) ? data[i] : '.'); |
100 | printk(KERN_DEBUG"\n"); | ||
132 | data = &data[16]; | 101 | data = &data[16]; |
133 | offset += run; | 102 | offset += run; |
134 | } | 103 | } |
135 | printk("\n"); | 104 | printk(KERN_DEBUG"\n"); |
136 | } | 105 | } |
137 | 106 | ||
138 | #if _MV_DUMP | 107 | #if (_MV_DUMP > 1) |
139 | static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, | 108 | static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, |
140 | enum sas_protocol proto) | 109 | enum sas_protocol proto) |
141 | { | 110 | { |
142 | u32 offset; | 111 | u32 offset; |
143 | struct pci_dev *pdev = mvi->pdev; | ||
144 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | 112 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; |
145 | 113 | ||
146 | offset = slot->cmd_size + MVS_OAF_SZ + | 114 | offset = slot->cmd_size + MVS_OAF_SZ + |
147 | sizeof(struct mvs_prd) * slot->n_elem; | 115 | MVS_CHIP_DISP->prd_size() * slot->n_elem; |
148 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", | 116 | dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n", |
149 | tag); | 117 | tag); |
150 | mvs_hexdump(32, (u8 *) slot->response, | 118 | mvs_hexdump(32, (u8 *) slot->response, |
151 | (u32) slot->buf_dma + offset); | 119 | (u32) slot->buf_dma + offset); |
@@ -155,47 +123,45 @@ static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, | |||
155 | static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, | 123 | static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, |
156 | enum sas_protocol proto) | 124 | enum sas_protocol proto) |
157 | { | 125 | { |
158 | #if _MV_DUMP | 126 | #if (_MV_DUMP > 1) |
159 | u32 sz, w_ptr; | 127 | u32 sz, w_ptr; |
160 | u64 addr; | 128 | u64 addr; |
161 | void __iomem *regs = mvi->regs; | ||
162 | struct pci_dev *pdev = mvi->pdev; | ||
163 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | 129 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; |
164 | 130 | ||
165 | /*Delivery Queue */ | 131 | /*Delivery Queue */ |
166 | sz = mr32(TX_CFG) & TX_RING_SZ_MASK; | 132 | sz = MVS_CHIP_SLOT_SZ; |
167 | w_ptr = slot->tx; | 133 | w_ptr = slot->tx; |
168 | addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); | 134 | addr = mvi->tx_dma; |
169 | dev_printk(KERN_DEBUG, &pdev->dev, | 135 | dev_printk(KERN_DEBUG, mvi->dev, |
170 | "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); | 136 | "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); |
171 | dev_printk(KERN_DEBUG, &pdev->dev, | 137 | dev_printk(KERN_DEBUG, mvi->dev, |
172 | "Delivery Queue Base Address=0x%llX (PA)" | 138 | "Delivery Queue Base Address=0x%llX (PA)" |
173 | "(tx_dma=0x%llX), Entry=%04d\n", | 139 | "(tx_dma=0x%llX), Entry=%04d\n", |
174 | addr, mvi->tx_dma, w_ptr); | 140 | addr, (unsigned long long)mvi->tx_dma, w_ptr); |
175 | mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), | 141 | mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), |
176 | (u32) mvi->tx_dma + sizeof(u32) * w_ptr); | 142 | (u32) mvi->tx_dma + sizeof(u32) * w_ptr); |
177 | /*Command List */ | 143 | /*Command List */ |
178 | addr = mvi->slot_dma; | 144 | addr = mvi->slot_dma; |
179 | dev_printk(KERN_DEBUG, &pdev->dev, | 145 | dev_printk(KERN_DEBUG, mvi->dev, |
180 | "Command List Base Address=0x%llX (PA)" | 146 | "Command List Base Address=0x%llX (PA)" |
181 | "(slot_dma=0x%llX), Header=%03d\n", | 147 | "(slot_dma=0x%llX), Header=%03d\n", |
182 | addr, slot->buf_dma, tag); | 148 | addr, (unsigned long long)slot->buf_dma, tag); |
183 | dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); | 149 | dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag); |
184 | /*mvs_cmd_hdr */ | 150 | /*mvs_cmd_hdr */ |
185 | mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), | 151 | mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), |
186 | (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); | 152 | (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); |
187 | /*1.command table area */ | 153 | /*1.command table area */ |
188 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); | 154 | dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n"); |
189 | mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); | 155 | mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); |
190 | /*2.open address frame area */ | 156 | /*2.open address frame area */ |
191 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); | 157 | dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n"); |
192 | mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, | 158 | mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, |
193 | (u32) slot->buf_dma + slot->cmd_size); | 159 | (u32) slot->buf_dma + slot->cmd_size); |
194 | /*3.status buffer */ | 160 | /*3.status buffer */ |
195 | mvs_hba_sb_dump(mvi, tag, proto); | 161 | mvs_hba_sb_dump(mvi, tag, proto); |
196 | /*4.PRD table */ | 162 | /*4.PRD table */ |
197 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); | 163 | dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n"); |
198 | mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, | 164 | mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem, |
199 | (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, | 165 | (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, |
200 | (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); | 166 | (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); |
201 | #endif | 167 | #endif |
@@ -206,15 +172,14 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi) | |||
206 | #if (_MV_DUMP > 2) | 172 | #if (_MV_DUMP > 2) |
207 | u64 addr; | 173 | u64 addr; |
208 | void __iomem *regs = mvi->regs; | 174 | void __iomem *regs = mvi->regs; |
209 | struct pci_dev *pdev = mvi->pdev; | ||
210 | u32 entry = mvi->rx_cons + 1; | 175 | u32 entry = mvi->rx_cons + 1; |
211 | u32 rx_desc = le32_to_cpu(mvi->rx[entry]); | 176 | u32 rx_desc = le32_to_cpu(mvi->rx[entry]); |
212 | 177 | ||
213 | /*Completion Queue */ | 178 | /*Completion Queue */ |
214 | addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); | 179 | addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); |
215 | dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", | 180 | dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n", |
216 | mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); | 181 | mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); |
217 | dev_printk(KERN_DEBUG, &pdev->dev, | 182 | dev_printk(KERN_DEBUG, mvi->dev, |
218 | "Completion List Base Address=0x%llX (PA), " | 183 | "Completion List Base Address=0x%llX (PA), " |
219 | "CQ_Entry=%04d, CQ_WP=0x%08X\n", | 184 | "CQ_Entry=%04d, CQ_WP=0x%08X\n", |
220 | addr, entry - 1, mvi->rx[0]); | 185 | addr, entry - 1, mvi->rx[0]); |
@@ -223,62 +188,174 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi) | |||
223 | #endif | 188 | #endif |
224 | } | 189 | } |
225 | 190 | ||
226 | /* FIXME: locking? */ | 191 | void mvs_get_sas_addr(void *buf, u32 buflen) |
227 | int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) | ||
228 | { | 192 | { |
229 | struct mvs_info *mvi = sas_phy->ha->lldd_ha; | 193 | /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/ |
230 | int rc = 0, phy_id = sas_phy->id; | 194 | } |
231 | u32 tmp; | ||
232 | 195 | ||
233 | tmp = mvs_read_phy_ctl(mvi, phy_id); | 196 | struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) |
197 | { | ||
198 | unsigned long i = 0, j = 0, hi = 0; | ||
199 | struct sas_ha_struct *sha = dev->port->ha; | ||
200 | struct mvs_info *mvi = NULL; | ||
201 | struct asd_sas_phy *phy; | ||
202 | |||
203 | while (sha->sas_port[i]) { | ||
204 | if (sha->sas_port[i] == dev->port) { | ||
205 | phy = container_of(sha->sas_port[i]->phy_list.next, | ||
206 | struct asd_sas_phy, port_phy_el); | ||
207 | j = 0; | ||
208 | while (sha->sas_phy[j]) { | ||
209 | if (sha->sas_phy[j] == phy) | ||
210 | break; | ||
211 | j++; | ||
212 | } | ||
213 | break; | ||
214 | } | ||
215 | i++; | ||
216 | } | ||
217 | hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; | ||
218 | mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; | ||
234 | 219 | ||
235 | switch (func) { | 220 | return mvi; |
236 | case PHY_FUNC_SET_LINK_RATE:{ | ||
237 | struct sas_phy_linkrates *rates = funcdata; | ||
238 | u32 lrmin = 0, lrmax = 0; | ||
239 | 221 | ||
240 | lrmin = (rates->minimum_linkrate << 8); | 222 | } |
241 | lrmax = (rates->maximum_linkrate << 12); | ||
242 | 223 | ||
243 | if (lrmin) { | 224 | /* FIXME */ |
244 | tmp &= ~(0xf << 8); | 225 | int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) |
245 | tmp |= lrmin; | 226 | { |
246 | } | 227 | unsigned long i = 0, j = 0, n = 0, num = 0; |
247 | if (lrmax) { | 228 | struct mvs_info *mvi = mvs_find_dev_mvi(dev); |
248 | tmp &= ~(0xf << 12); | 229 | struct sas_ha_struct *sha = dev->port->ha; |
249 | tmp |= lrmax; | 230 | |
231 | while (sha->sas_port[i]) { | ||
232 | if (sha->sas_port[i] == dev->port) { | ||
233 | struct asd_sas_phy *phy; | ||
234 | list_for_each_entry(phy, | ||
235 | &sha->sas_port[i]->phy_list, port_phy_el) { | ||
236 | j = 0; | ||
237 | while (sha->sas_phy[j]) { | ||
238 | if (sha->sas_phy[j] == phy) | ||
239 | break; | ||
240 | j++; | ||
241 | } | ||
242 | phyno[n] = (j >= mvi->chip->n_phy) ? | ||
243 | (j - mvi->chip->n_phy) : j; | ||
244 | num++; | ||
245 | n++; | ||
250 | } | 246 | } |
251 | mvs_write_phy_ctl(mvi, phy_id, tmp); | ||
252 | break; | 247 | break; |
253 | } | 248 | } |
249 | i++; | ||
250 | } | ||
251 | return num; | ||
252 | } | ||
253 | |||
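
[Editor's note] mvs_find_dev_mvi() and mvs_find_dev_phyno() above translate a libsas-wide phy index into a controller instance plus a controller-local phy number by dividing with n_phy. A small illustration with assumed values (8 phys per controller, two controllers behind one sas_ha):

#include <stdio.h>

int main(void)
{
	unsigned int n_phy = 8;			/* phys per controller (illustrative) */
	unsigned int global_phy = 11;		/* index within sha->sas_phy[]        */

	unsigned int host  = global_phy / n_phy;	/* which mvi[] instance       */
	unsigned int local = global_phy % n_phy;	/* phy index on that instance */

	printf("global phy %u -> host %u, local phy %u\n",
	       global_phy, host, local);	/* -> host 1, local phy 3 */
	return 0;
}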
254 | static inline void mvs_free_reg_set(struct mvs_info *mvi, | ||
255 | struct mvs_device *dev) | ||
256 | { | ||
257 | if (!dev) { | ||
258 | mv_printk("device has been free.\n"); | ||
259 | return; | ||
260 | } | ||
261 | if (dev->runing_req != 0) | ||
262 | return; | ||
263 | if (dev->taskfileset == MVS_ID_NOT_MAPPED) | ||
264 | return; | ||
265 | MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); | ||
266 | } | ||
267 | |||
268 | static inline u8 mvs_assign_reg_set(struct mvs_info *mvi, | ||
269 | struct mvs_device *dev) | ||
270 | { | ||
271 | if (dev->taskfileset != MVS_ID_NOT_MAPPED) | ||
272 | return 0; | ||
273 | return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); | ||
274 | } | ||
275 | |||
276 | void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) | ||
277 | { | ||
278 | u32 no; | ||
279 | for_each_phy(phy_mask, phy_mask, no) { | ||
280 | if (!(phy_mask & 1)) | ||
281 | continue; | ||
282 | MVS_CHIP_DISP->phy_reset(mvi, no, hard); | ||
283 | } | ||
284 | } | ||
285 | |||
286 | /* FIXME: locking? */ | ||
287 | int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, | ||
288 | void *funcdata) | ||
289 | { | ||
290 | int rc = 0, phy_id = sas_phy->id; | ||
291 | u32 tmp, i = 0, hi; | ||
292 | struct sas_ha_struct *sha = sas_phy->ha; | ||
293 | struct mvs_info *mvi = NULL; | ||
294 | |||
295 | while (sha->sas_phy[i]) { | ||
296 | if (sha->sas_phy[i] == sas_phy) | ||
297 | break; | ||
298 | i++; | ||
299 | } | ||
300 | hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; | ||
301 | mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; | ||
302 | |||
303 | switch (func) { | ||
304 | case PHY_FUNC_SET_LINK_RATE: | ||
305 | MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); | ||
306 | break; | ||
254 | 307 | ||
255 | case PHY_FUNC_HARD_RESET: | 308 | case PHY_FUNC_HARD_RESET: |
309 | tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); | ||
256 | if (tmp & PHY_RST_HARD) | 310 | if (tmp & PHY_RST_HARD) |
257 | break; | 311 | break; |
258 | mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); | 312 | MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); |
259 | break; | 313 | break; |
260 | 314 | ||
261 | case PHY_FUNC_LINK_RESET: | 315 | case PHY_FUNC_LINK_RESET: |
262 | mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); | 316 | MVS_CHIP_DISP->phy_enable(mvi, phy_id); |
317 | MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); | ||
263 | break; | 318 | break; |
264 | 319 | ||
265 | case PHY_FUNC_DISABLE: | 320 | case PHY_FUNC_DISABLE: |
321 | MVS_CHIP_DISP->phy_disable(mvi, phy_id); | ||
322 | break; | ||
266 | case PHY_FUNC_RELEASE_SPINUP_HOLD: | 323 | case PHY_FUNC_RELEASE_SPINUP_HOLD: |
267 | default: | 324 | default: |
268 | rc = -EOPNOTSUPP; | 325 | rc = -EOPNOTSUPP; |
269 | } | 326 | } |
270 | 327 | msleep(200); | |
271 | return rc; | 328 | return rc; |
272 | } | 329 | } |
273 | 330 | ||
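
[Editor's note] Nearly every register access in this patch now goes through MVS_CHIP_DISP, a per-chip operations table, so the same core code can drive both the 64xx and 94xx families. The sketch below shows that dispatch pattern in miniature; the struct name, members and the two backends are illustrative, not the driver's real struct mvs_dispatch.

#include <stdio.h>

struct chip_ops {				/* stands in for the real dispatch table */
	void (*phy_reset)(int phy_id, int hard);
	unsigned int (*prd_size)(void);
};

static void chip_a_phy_reset(int phy_id, int hard)
{
	printf("chip A: %s reset of phy %d\n", hard ? "hard" : "link", phy_id);
}
static unsigned int chip_a_prd_size(void) { return 16; }

static void chip_b_phy_reset(int phy_id, int hard)
{
	printf("chip B: %s reset of phy %d\n", hard ? "hard" : "link", phy_id);
}
static unsigned int chip_b_prd_size(void) { return 32; }

static const struct chip_ops chip_a_ops = { chip_a_phy_reset, chip_a_prd_size };
static const struct chip_ops chip_b_ops = { chip_b_phy_reset, chip_b_prd_size };

/* generic code only ever sees the ops pointer, like MVS_CHIP_DISP->phy_reset() */
static void generic_hard_reset(const struct chip_ops *ops, int phy_id)
{
	ops->phy_reset(phy_id, 1);
}

int main(void)
{
	generic_hard_reset(&chip_a_ops, 0);
	generic_hard_reset(&chip_b_ops, 3);
	return 0;
}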
331 | void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, | ||
332 | u32 off_lo, u32 off_hi, u64 sas_addr) | ||
333 | { | ||
334 | u32 lo = (u32)sas_addr; | ||
335 | u32 hi = (u32)(sas_addr>>32); | ||
336 | |||
337 | MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); | ||
338 | MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); | ||
339 | MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); | ||
340 | MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); | ||
341 | } | ||
342 | |||
274 | static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) | 343 | static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) |
275 | { | 344 | { |
276 | struct mvs_phy *phy = &mvi->phy[i]; | 345 | struct mvs_phy *phy = &mvi->phy[i]; |
277 | struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; | 346 | struct asd_sas_phy *sas_phy = &phy->sas_phy; |
278 | 347 | struct sas_ha_struct *sas_ha; | |
279 | if (!phy->phy_attached) | 348 | if (!phy->phy_attached) |
280 | return; | 349 | return; |
281 | 350 | ||
351 | if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) | ||
352 | && phy->phy_type & PORT_TYPE_SAS) { | ||
353 | return; | ||
354 | } | ||
355 | |||
356 | sas_ha = mvi->sas; | ||
357 | sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); | ||
358 | |||
282 | if (sas_phy->phy) { | 359 | if (sas_phy->phy) { |
283 | struct sas_phy *sphy = sas_phy->phy; | 360 | struct sas_phy *sphy = sas_phy->phy; |
284 | 361 | ||
@@ -286,7 +363,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) | |||
286 | sphy->minimum_linkrate = phy->minimum_linkrate; | 363 | sphy->minimum_linkrate = phy->minimum_linkrate; |
287 | sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; | 364 | sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; |
288 | sphy->maximum_linkrate = phy->maximum_linkrate; | 365 | sphy->maximum_linkrate = phy->maximum_linkrate; |
289 | sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; | 366 | sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); |
290 | } | 367 | } |
291 | 368 | ||
292 | if (phy->phy_type & PORT_TYPE_SAS) { | 369 | if (phy->phy_type & PORT_TYPE_SAS) { |
@@ -297,13 +374,31 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) | |||
297 | id->initiator_bits = SAS_PROTOCOL_ALL; | 374 | id->initiator_bits = SAS_PROTOCOL_ALL; |
298 | id->target_bits = phy->identify.target_port_protocols; | 375 | id->target_bits = phy->identify.target_port_protocols; |
299 | } else if (phy->phy_type & PORT_TYPE_SATA) { | 376 | } else if (phy->phy_type & PORT_TYPE_SATA) { |
300 | /* TODO */ | 377 | /*Nothing*/ |
301 | } | 378 | } |
302 | mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; | 379 | mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); |
303 | mvi->sas.notify_port_event(mvi->sas.sas_phy[i], | 380 | |
381 | sas_phy->frame_rcvd_size = phy->frame_rcvd_size; | ||
382 | |||
383 | mvi->sas->notify_port_event(sas_phy, | ||
304 | PORTE_BYTES_DMAED); | 384 | PORTE_BYTES_DMAED); |
305 | } | 385 | } |
306 | 386 | ||
387 | int mvs_slave_alloc(struct scsi_device *scsi_dev) | ||
388 | { | ||
389 | struct domain_device *dev = sdev_to_domain_dev(scsi_dev); | ||
390 | if (dev_is_sata(dev)) { | ||
391 | /* We don't need to rescan targets | ||
392 | * if REPORT_LUNS request is failed | ||
393 | */ | ||
394 | if (scsi_dev->lun > 0) | ||
395 | return -ENXIO; | ||
396 | scsi_dev->tagged_supported = 1; | ||
397 | } | ||
398 | |||
399 | return sas_slave_alloc(scsi_dev); | ||
400 | } | ||
401 | |||
307 | int mvs_slave_configure(struct scsi_device *sdev) | 402 | int mvs_slave_configure(struct scsi_device *sdev) |
308 | { | 403 | { |
309 | struct domain_device *dev = sdev_to_domain_dev(sdev); | 404 | struct domain_device *dev = sdev_to_domain_dev(sdev); |
@@ -311,25 +406,31 @@ int mvs_slave_configure(struct scsi_device *sdev) | |||
311 | 406 | ||
312 | if (ret) | 407 | if (ret) |
313 | return ret; | 408 | return ret; |
314 | |||
315 | if (dev_is_sata(dev)) { | 409 | if (dev_is_sata(dev)) { |
316 | /* struct ata_port *ap = dev->sata_dev.ap; */ | 410 | /* may set PIO mode */ |
317 | /* struct ata_device *adev = ap->link.device; */ | 411 | #if MV_DISABLE_NCQ |
318 | 412 | struct ata_port *ap = dev->sata_dev.ap; | |
319 | /* clamp at no NCQ for the time being */ | 413 | struct ata_device *adev = ap->link.device; |
320 | /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ | 414 | adev->flags |= ATA_DFLAG_NCQ_OFF; |
321 | scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); | 415 | scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); |
416 | #endif | ||
322 | } | 417 | } |
323 | return 0; | 418 | return 0; |
324 | } | 419 | } |
325 | 420 | ||
326 | void mvs_scan_start(struct Scsi_Host *shost) | 421 | void mvs_scan_start(struct Scsi_Host *shost) |
327 | { | 422 | { |
328 | int i; | 423 | int i, j; |
329 | struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; | 424 | unsigned short core_nr; |
425 | struct mvs_info *mvi; | ||
426 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | ||
330 | 427 | ||
331 | for (i = 0; i < mvi->chip->n_phy; ++i) { | 428 | core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; |
332 | mvs_bytes_dmaed(mvi, i); | 429 | |
430 | for (j = 0; j < core_nr; j++) { | ||
431 | mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; | ||
432 | for (i = 0; i < mvi->chip->n_phy; ++i) | ||
433 | mvs_bytes_dmaed(mvi, i); | ||
333 | } | 434 | } |
334 | } | 435 | } |
335 | 436 | ||
@@ -350,15 +451,15 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, | |||
350 | int elem, rc, i; | 451 | int elem, rc, i; |
351 | struct sas_task *task = tei->task; | 452 | struct sas_task *task = tei->task; |
352 | struct mvs_cmd_hdr *hdr = tei->hdr; | 453 | struct mvs_cmd_hdr *hdr = tei->hdr; |
454 | struct domain_device *dev = task->dev; | ||
455 | struct asd_sas_port *sas_port = dev->port; | ||
353 | struct scatterlist *sg_req, *sg_resp; | 456 | struct scatterlist *sg_req, *sg_resp; |
354 | u32 req_len, resp_len, tag = tei->tag; | 457 | u32 req_len, resp_len, tag = tei->tag; |
355 | void *buf_tmp; | 458 | void *buf_tmp; |
356 | u8 *buf_oaf; | 459 | u8 *buf_oaf; |
357 | dma_addr_t buf_tmp_dma; | 460 | dma_addr_t buf_tmp_dma; |
358 | struct mvs_prd *buf_prd; | 461 | void *buf_prd; |
359 | struct scatterlist *sg; | ||
360 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | 462 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; |
361 | struct asd_sas_port *sas_port = task->dev->port; | ||
362 | u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); | 463 | u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); |
363 | #if _MV_DUMP | 464 | #if _MV_DUMP |
364 | u8 *buf_cmd; | 465 | u8 *buf_cmd; |
@@ -368,18 +469,18 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, | |||
368 | * DMA-map SMP request, response buffers | 469 | * DMA-map SMP request, response buffers |
369 | */ | 470 | */ |
370 | sg_req = &task->smp_task.smp_req; | 471 | sg_req = &task->smp_task.smp_req; |
371 | elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); | 472 | elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE); |
372 | if (!elem) | 473 | if (!elem) |
373 | return -ENOMEM; | 474 | return -ENOMEM; |
374 | req_len = sg_dma_len(sg_req); | 475 | req_len = sg_dma_len(sg_req); |
375 | 476 | ||
376 | sg_resp = &task->smp_task.smp_resp; | 477 | sg_resp = &task->smp_task.smp_resp; |
377 | elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); | 478 | elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); |
378 | if (!elem) { | 479 | if (!elem) { |
379 | rc = -ENOMEM; | 480 | rc = -ENOMEM; |
380 | goto err_out; | 481 | goto err_out; |
381 | } | 482 | } |
382 | resp_len = sg_dma_len(sg_resp); | 483 | resp_len = SB_RFB_MAX; |
383 | 484 | ||
384 | /* must be in dwords */ | 485 | /* must be in dwords */ |
385 | if ((req_len & 0x3) || (resp_len & 0x3)) { | 486 | if ((req_len & 0x3) || (resp_len & 0x3)) { |
@@ -391,7 +492,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, | |||
391 | * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs | 492 | * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs |
392 | */ | 493 | */ |
393 | 494 | ||
394 | /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ | 495 | /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ |
395 | buf_tmp = slot->buf; | 496 | buf_tmp = slot->buf; |
396 | buf_tmp_dma = slot->buf_dma; | 497 | buf_tmp_dma = slot->buf_dma; |
397 | 498 | ||
@@ -412,20 +513,22 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, | |||
412 | buf_tmp += MVS_OAF_SZ; | 513 | buf_tmp += MVS_OAF_SZ; |
413 | buf_tmp_dma += MVS_OAF_SZ; | 514 | buf_tmp_dma += MVS_OAF_SZ; |
414 | 515 | ||
415 | /* region 3: PRD table ********************************************* */ | 516 | /* region 3: PRD table *********************************** */ |
416 | buf_prd = buf_tmp; | 517 | buf_prd = buf_tmp; |
417 | if (tei->n_elem) | 518 | if (tei->n_elem) |
418 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); | 519 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); |
419 | else | 520 | else |
420 | hdr->prd_tbl = 0; | 521 | hdr->prd_tbl = 0; |
421 | 522 | ||
422 | i = sizeof(struct mvs_prd) * tei->n_elem; | 523 | i = MVS_CHIP_DISP->prd_size() * tei->n_elem; |
423 | buf_tmp += i; | 524 | buf_tmp += i; |
424 | buf_tmp_dma += i; | 525 | buf_tmp_dma += i; |
425 | 526 | ||
426 | /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ | 527 | /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ |
427 | slot->response = buf_tmp; | 528 | slot->response = buf_tmp; |
428 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | 529 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); |
530 | if (mvi->flags & MVF_FLAG_SOC) | ||
531 | hdr->reserved[0] = 0; | ||
429 | 532 | ||
430 | /* | 533 | /* |
431 | * Fill in TX ring and command slot header | 534 | * Fill in TX ring and command slot header |
@@ -441,17 +544,14 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, | |||
441 | hdr->data_len = 0; | 544 | hdr->data_len = 0; |
442 | 545 | ||
443 | /* generate open address frame hdr (first 12 bytes) */ | 546 | /* generate open address frame hdr (first 12 bytes) */ |
444 | buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ | 547 | /* initiator, SMP, ftype 1h */ |
445 | buf_oaf[1] = task->dev->linkrate & 0xf; | 548 | buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; |
549 | buf_oaf[1] = dev->linkrate & 0xf; | ||
446 | *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ | 550 | *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ |
447 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | 551 | memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); |
448 | 552 | ||
449 | /* fill in PRD (scatter/gather) table, if any */ | 553 | /* fill in PRD (scatter/gather) table, if any */ |
450 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | 554 | MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); |
451 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | ||
452 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | ||
453 | buf_prd++; | ||
454 | } | ||
455 | 555 | ||
456 | #if _MV_DUMP | 556 | #if _MV_DUMP |
457 | /* copy cmd table */ | 557 | /* copy cmd table */ |
@@ -462,10 +562,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, | |||
462 | return 0; | 562 | return 0; |
463 | 563 | ||
464 | err_out_2: | 564 | err_out_2: |
465 | pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, | 565 | dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, |
466 | PCI_DMA_FROMDEVICE); | 566 | PCI_DMA_FROMDEVICE); |
467 | err_out: | 567 | err_out: |
468 | pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, | 568 | dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, |
469 | PCI_DMA_TODEVICE); | 569 | PCI_DMA_TODEVICE); |
470 | return rc; | 570 | return rc; |
471 | } | 571 | } |
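
[Editor's note] The SMP prep path above is also where pci_map_sg()/pci_unmap_sg() become dma_map_sg()/dma_unmap_sg() against mvi->dev, so the mapping code no longer assumes a PCI parent device. A kernel-style sketch of that pairing is shown below; the helper names are hypothetical, and it uses the generic DMA_TO_DEVICE constant rather than the legacy PCI_DMA_TODEVICE value the patch still passes.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map the single-entry SMP request SG list against a generic struct device
 * (mvi->dev in the driver), plus the matching unmap for the error path. */
static int example_map_smp_req(struct device *dev, struct scatterlist *sg_req,
			       u32 *req_len)
{
	if (!dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE))
		return -ENOMEM;
	*req_len = sg_dma_len(sg_req);		/* DMA length as seen by the HBA */
	return 0;
}

static void example_unmap_smp_req(struct device *dev, struct scatterlist *sg_req)
{
	dma_unmap_sg(dev, sg_req, 1, DMA_TO_DEVICE);
}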
@@ -490,30 +590,41 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, | |||
490 | { | 590 | { |
491 | struct sas_task *task = tei->task; | 591 | struct sas_task *task = tei->task; |
492 | struct domain_device *dev = task->dev; | 592 | struct domain_device *dev = task->dev; |
593 | struct mvs_device *mvi_dev = | ||
594 | (struct mvs_device *)dev->lldd_dev; | ||
493 | struct mvs_cmd_hdr *hdr = tei->hdr; | 595 | struct mvs_cmd_hdr *hdr = tei->hdr; |
494 | struct asd_sas_port *sas_port = dev->port; | 596 | struct asd_sas_port *sas_port = dev->port; |
495 | struct mvs_slot_info *slot; | 597 | struct mvs_slot_info *slot; |
496 | struct scatterlist *sg; | 598 | void *buf_prd; |
497 | struct mvs_prd *buf_prd; | 599 | u32 tag = tei->tag, hdr_tag; |
498 | struct mvs_port *port = tei->port; | 600 | u32 flags, del_q; |
499 | u32 tag = tei->tag; | ||
500 | u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); | ||
501 | void *buf_tmp; | 601 | void *buf_tmp; |
502 | u8 *buf_cmd, *buf_oaf; | 602 | u8 *buf_cmd, *buf_oaf; |
503 | dma_addr_t buf_tmp_dma; | 603 | dma_addr_t buf_tmp_dma; |
504 | u32 i, req_len, resp_len; | 604 | u32 i, req_len, resp_len; |
505 | const u32 max_resp_len = SB_RFB_MAX; | 605 | const u32 max_resp_len = SB_RFB_MAX; |
506 | 606 | ||
507 | if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) | 607 | if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { |
608 | mv_dprintk("Have not enough regiset for dev %d.\n", | ||
609 | mvi_dev->device_id); | ||
508 | return -EBUSY; | 610 | return -EBUSY; |
509 | 611 | } | |
510 | slot = &mvi->slot_info[tag]; | 612 | slot = &mvi->slot_info[tag]; |
511 | slot->tx = mvi->tx_prod; | 613 | slot->tx = mvi->tx_prod; |
512 | mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | | 614 | del_q = TXQ_MODE_I | tag | |
513 | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | | 615 | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | |
514 | (sas_port->phy_mask << TXQ_PHY_SHIFT) | | 616 | (sas_port->phy_mask << TXQ_PHY_SHIFT) | |
515 | (port->taskfileset << TXQ_SRS_SHIFT)); | 617 | (mvi_dev->taskfileset << TXQ_SRS_SHIFT); |
516 | 618 | mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); | |
619 | |||
620 | #ifndef DISABLE_HOTPLUG_DMA_FIX | ||
621 | if (task->data_dir == DMA_FROM_DEVICE) | ||
622 | flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); | ||
623 | else | ||
624 | flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); | ||
625 | #else | ||
626 | flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); | ||
627 | #endif | ||
517 | if (task->ata_task.use_ncq) | 628 | if (task->ata_task.use_ncq) |
518 | flags |= MCH_FPDMA; | 629 | flags |= MCH_FPDMA; |
519 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { | 630 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { |
@@ -526,10 +637,13 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, | |||
526 | hdr->flags = cpu_to_le32(flags); | 637 | hdr->flags = cpu_to_le32(flags); |
527 | 638 | ||
528 | /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ | 639 | /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ |
529 | if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags)) | 640 | if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) |
530 | task->ata_task.fis.sector_count |= hdr->tags << 3; | 641 | task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); |
531 | else | 642 | else |
532 | hdr->tags = cpu_to_le32(tag); | 643 | hdr_tag = tag; |
644 | |||
645 | hdr->tags = cpu_to_le32(hdr_tag); | ||
646 | |||
533 | hdr->data_len = cpu_to_le32(task->total_xfer_len); | 647 | hdr->data_len = cpu_to_le32(task->total_xfer_len); |
534 | 648 | ||
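
[Editor's note] For NCQ commands the queue tag travels in bits 7:3 of the FIS sector-count field, which is why the rewritten code ORs (hdr_tag << 3) into fis.sector_count instead of storing the raw value in hdr->tags alone. A tiny illustration (the tag value is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int hdr_tag = 5;		/* illustrative NCQ queue tag */
	unsigned char sector_count = 0;

	sector_count |= (unsigned char)(hdr_tag << 3);
	printf("sector_count = 0x%02x (tag %u in bits 7:3)\n",
	       sector_count, (sector_count >> 3) & 0x1f);
	return 0;
}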
535 | /* | 649 | /* |
@@ -558,12 +672,13 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, | |||
558 | 672 | ||
559 | /* region 3: PRD table ********************************************* */ | 673 | /* region 3: PRD table ********************************************* */ |
560 | buf_prd = buf_tmp; | 674 | buf_prd = buf_tmp; |
675 | |||
561 | if (tei->n_elem) | 676 | if (tei->n_elem) |
562 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); | 677 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); |
563 | else | 678 | else |
564 | hdr->prd_tbl = 0; | 679 | hdr->prd_tbl = 0; |
680 | i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); | ||
565 | 681 | ||
566 | i = sizeof(struct mvs_prd) * tei->n_elem; | ||
567 | buf_tmp += i; | 682 | buf_tmp += i; |
568 | buf_tmp_dma += i; | 683 | buf_tmp_dma += i; |
569 | 684 | ||
@@ -573,6 +688,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, | |||
573 | */ | 688 | */ |
574 | slot->response = buf_tmp; | 689 | slot->response = buf_tmp; |
575 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | 690 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); |
691 | if (mvi->flags & MVF_FLAG_SOC) | ||
692 | hdr->reserved[0] = 0; | ||
576 | 693 | ||
577 | req_len = sizeof(struct host_to_dev_fis); | 694 | req_len = sizeof(struct host_to_dev_fis); |
578 | resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - | 695 | resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - |
@@ -582,7 +699,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, | |||
582 | resp_len = min(resp_len, max_resp_len); | 699 | resp_len = min(resp_len, max_resp_len); |
583 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); | 700 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); |
584 | 701 | ||
585 | task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ | 702 | if (likely(!task->ata_task.device_control_reg_update)) |
703 | task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ | ||
586 | /* fill in command FIS and ATAPI CDB */ | 704 | /* fill in command FIS and ATAPI CDB */ |
587 | memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); | 705 | memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); |
588 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) | 706 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) |
@@ -590,30 +708,35 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, | |||
590 | task->ata_task.atapi_packet, 16); | 708 | task->ata_task.atapi_packet, 16); |
591 | 709 | ||
592 | /* generate open address frame hdr (first 12 bytes) */ | 710 | /* generate open address frame hdr (first 12 bytes) */ |
593 | buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ | 711 | /* initiator, STP, ftype 1h */ |
594 | buf_oaf[1] = task->dev->linkrate & 0xf; | 712 | buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; |
595 | *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); | 713 | buf_oaf[1] = dev->linkrate & 0xf; |
596 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | 714 | *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); |
715 | memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); | ||
597 | 716 | ||
598 | /* fill in PRD (scatter/gather) table, if any */ | 717 | /* fill in PRD (scatter/gather) table, if any */ |
599 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | 718 | MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); |
600 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | 719 | #ifndef DISABLE_HOTPLUG_DMA_FIX |
601 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | 720 | if (task->data_dir == DMA_FROM_DEVICE) |
602 | buf_prd++; | 721 | MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, |
603 | } | 722 | TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); |
604 | 723 | #endif | |
605 | return 0; | 724 | return 0; |
606 | } | 725 | } |
607 | 726 | ||
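
[Editor's note] All three prep routines carve one per-slot DMA buffer into the same four regions: command table, open address frame, PRD table, and whatever space is left for the status buffer. A user-space sketch of that offset arithmetic with made-up sizes; the real values come from MVS_ATA_CMD_SZ/MVS_SSP_CMD_SZ, MVS_OAF_SZ and MVS_CHIP_DISP->prd_size().

#include <stdio.h>

int main(void)
{
	unsigned int slot_buf_sz = 8192;	/* MVS_SLOT_BUF_SZ (illustrative)   */
	unsigned int cmd_sz      = 64;		/* command table (FIS / SSP frame)  */
	unsigned int oaf_sz      = 64;		/* MVS_OAF_SZ, open address frame   */
	unsigned int prd_sz      = 16;		/* per-entry PRD size               */
	unsigned int n_elem      = 4;		/* scatter/gather entries           */

	unsigned int off_cmd  = 0;
	unsigned int off_oaf  = off_cmd + cmd_sz;
	unsigned int off_prd  = off_oaf + oaf_sz;
	unsigned int off_stat = off_prd + prd_sz * n_elem;

	printf("cmd@%u oaf@%u prd@%u status@%u (response room %u bytes)\n",
	       off_cmd, off_oaf, off_prd, off_stat, slot_buf_sz - off_stat);
	return 0;
}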
608 | static int mvs_task_prep_ssp(struct mvs_info *mvi, | 727 | static int mvs_task_prep_ssp(struct mvs_info *mvi, |
609 | struct mvs_task_exec_info *tei) | 728 | struct mvs_task_exec_info *tei, int is_tmf, |
729 | struct mvs_tmf_task *tmf) | ||
610 | { | 730 | { |
611 | struct sas_task *task = tei->task; | 731 | struct sas_task *task = tei->task; |
612 | struct mvs_cmd_hdr *hdr = tei->hdr; | 732 | struct mvs_cmd_hdr *hdr = tei->hdr; |
613 | struct mvs_port *port = tei->port; | 733 | struct mvs_port *port = tei->port; |
734 | struct domain_device *dev = task->dev; | ||
735 | struct mvs_device *mvi_dev = | ||
736 | (struct mvs_device *)dev->lldd_dev; | ||
737 | struct asd_sas_port *sas_port = dev->port; | ||
614 | struct mvs_slot_info *slot; | 738 | struct mvs_slot_info *slot; |
615 | struct scatterlist *sg; | 739 | void *buf_prd; |
616 | struct mvs_prd *buf_prd; | ||
617 | struct ssp_frame_hdr *ssp_hdr; | 740 | struct ssp_frame_hdr *ssp_hdr; |
618 | void *buf_tmp; | 741 | void *buf_tmp; |
619 | u8 *buf_cmd, *buf_oaf, fburst = 0; | 742 | u8 *buf_cmd, *buf_oaf, fburst = 0; |
@@ -621,12 +744,13 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, | |||
621 | u32 flags; | 744 | u32 flags; |
622 | u32 resp_len, req_len, i, tag = tei->tag; | 745 | u32 resp_len, req_len, i, tag = tei->tag; |
623 | const u32 max_resp_len = SB_RFB_MAX; | 746 | const u32 max_resp_len = SB_RFB_MAX; |
624 | u8 phy_mask; | 747 | u32 phy_mask; |
625 | 748 | ||
626 | slot = &mvi->slot_info[tag]; | 749 | slot = &mvi->slot_info[tag]; |
627 | 750 | ||
628 | phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap : | 751 | phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap : |
629 | task->dev->port->phy_mask; | 752 | sas_port->phy_mask) & TXQ_PHY_MASK; |
753 | |||
630 | slot->tx = mvi->tx_prod; | 754 | slot->tx = mvi->tx_prod; |
631 | mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | | 755 | mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | |
632 | (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | | 756 | (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | |
@@ -640,7 +764,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, | |||
640 | hdr->flags = cpu_to_le32(flags | | 764 | hdr->flags = cpu_to_le32(flags | |
641 | (tei->n_elem << MCH_PRD_LEN_SHIFT) | | 765 | (tei->n_elem << MCH_PRD_LEN_SHIFT) | |
642 | (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); | 766 | (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); |
643 | |||
644 | hdr->tags = cpu_to_le32(tag); | 767 | hdr->tags = cpu_to_le32(tag); |
645 | hdr->data_len = cpu_to_le32(task->total_xfer_len); | 768 | hdr->data_len = cpu_to_le32(task->total_xfer_len); |
646 | 769 | ||
@@ -674,13 +797,15 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, | |||
674 | else | 797 | else |
675 | hdr->prd_tbl = 0; | 798 | hdr->prd_tbl = 0; |
676 | 799 | ||
677 | i = sizeof(struct mvs_prd) * tei->n_elem; | 800 | i = MVS_CHIP_DISP->prd_size() * tei->n_elem; |
678 | buf_tmp += i; | 801 | buf_tmp += i; |
679 | buf_tmp_dma += i; | 802 | buf_tmp_dma += i; |
680 | 803 | ||
681 | /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ | 804 | /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ |
682 | slot->response = buf_tmp; | 805 | slot->response = buf_tmp; |
683 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | 806 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); |
807 | if (mvi->flags & MVF_FLAG_SOC) | ||
808 | hdr->reserved[0] = 0; | ||
684 | 809 | ||
685 | resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - | 810 | resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - |
686 | sizeof(struct mvs_err_info) - i; | 811 | sizeof(struct mvs_err_info) - i; |
@@ -692,57 +817,105 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, | |||
692 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); | 817 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); |
693 | 818 | ||
694 | /* generate open address frame hdr (first 12 bytes) */ | 819 | /* generate open address frame hdr (first 12 bytes) */ |
695 | buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ | 820 | /* initiator, SSP, ftype 1h */ |
696 | buf_oaf[1] = task->dev->linkrate & 0xf; | 821 | buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; |
697 | *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); | 822 | buf_oaf[1] = dev->linkrate & 0xf; |
698 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | 823 | *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); |
824 | memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); | ||
699 | 825 | ||
700 | /* fill in SSP frame header (Command Table.SSP frame header) */ | 826 | /* fill in SSP frame header (Command Table.SSP frame header) */ |
701 | ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; | 827 | ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; |
702 | ssp_hdr->frame_type = SSP_COMMAND; | 828 | |
703 | memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, | 829 | if (is_tmf) |
830 | ssp_hdr->frame_type = SSP_TASK; | ||
831 | else | ||
832 | ssp_hdr->frame_type = SSP_COMMAND; | ||
833 | |||
834 | memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, | ||
704 | HASHED_SAS_ADDR_SIZE); | 835 | HASHED_SAS_ADDR_SIZE); |
705 | memcpy(ssp_hdr->hashed_src_addr, | 836 | memcpy(ssp_hdr->hashed_src_addr, |
706 | task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); | 837 | dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); |
707 | ssp_hdr->tag = cpu_to_be16(tag); | 838 | ssp_hdr->tag = cpu_to_be16(tag); |
708 | 839 | ||
709 | /* fill in command frame IU */ | 840 | /* fill in IU for TASK and Command Frame */ |
710 | buf_cmd += sizeof(*ssp_hdr); | 841 | buf_cmd += sizeof(*ssp_hdr); |
711 | memcpy(buf_cmd, &task->ssp_task.LUN, 8); | 842 | memcpy(buf_cmd, &task->ssp_task.LUN, 8); |
712 | buf_cmd[9] = fburst | task->ssp_task.task_attr | | ||
713 | (task->ssp_task.task_prio << 3); | ||
714 | memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); | ||
715 | 843 | ||
716 | /* fill in PRD (scatter/gather) table, if any */ | 844 | if (ssp_hdr->frame_type != SSP_TASK) { |
717 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | 845 | buf_cmd[9] = fburst | task->ssp_task.task_attr | |
718 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | 846 | (task->ssp_task.task_prio << 3); |
719 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | 847 | memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); |
720 | buf_prd++; | 848 | } else{ |
849 | buf_cmd[10] = tmf->tmf; | ||
850 | switch (tmf->tmf) { | ||
851 | case TMF_ABORT_TASK: | ||
852 | case TMF_QUERY_TASK: | ||
853 | buf_cmd[12] = | ||
854 | (tmf->tag_of_task_to_be_managed >> 8) & 0xff; | ||
855 | buf_cmd[13] = | ||
856 | tmf->tag_of_task_to_be_managed & 0xff; | ||
857 | break; | ||
858 | default: | ||
859 | break; | ||
860 | } | ||
721 | } | 861 | } |
722 | 862 | /* fill in PRD (scatter/gather) table, if any */ | |
863 | MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); | ||
723 | return 0; | 864 | return 0; |
724 | } | 865 | } |
725 | 866 | ||
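
[Editor's note] Each prep routine also builds the first 12 bytes of the open address frame by hand: byte 0 packs the initiator bit, protocol and frame type, byte 1 the connection rate, bytes 2-3 a big-endian initiator connection tag (now device_id + 1), and bytes 4-11 the destination SAS address. A stand-alone sketch of that packing; the PROTO_* values mirror the PROTOCOL_* constants used above, everything else is illustrative.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

enum { PROTO_SMP = 0, PROTO_SSP = 1, PROTO_STP = 2 };	/* as in the patch */

static void build_oaf_hdr(uint8_t *oaf, unsigned int proto,
			  unsigned int linkrate, uint16_t conn_tag,
			  const uint8_t dest_sas_addr[8])
{
	oaf[0] = (uint8_t)((1u << 7) | (proto << 4) | 0x1); /* initiator, proto, Open */
	oaf[1] = linkrate & 0xf;			    /* connection rate        */
	oaf[2] = conn_tag >> 8;				    /* connection tag,        */
	oaf[3] = conn_tag & 0xff;			    /* stored big-endian      */
	memcpy(oaf + 4, dest_sas_addr, 8);		    /* destination SAS addr   */
}

int main(void)
{
	uint8_t oaf[12];
	const uint8_t addr[8] = { 0x50, 0x05, 0x04, 0x30, 0x11, 0xab, 0x00, 0x01 };

	build_oaf_hdr(oaf, PROTO_SSP, 9 /* illustrative link-rate code */, 0x0001, addr);
	printf("oaf[0..3] = %02x %02x %02x %02x\n", oaf[0], oaf[1], oaf[2], oaf[3]);
	return 0;
}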
726 | int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) | 867 | #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) |
868 | static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, | ||
869 | struct completion *completion, int lock, | ||
870 | int is_tmf, struct mvs_tmf_task *tmf) | ||
727 | { | 871 | { |
728 | struct domain_device *dev = task->dev; | 872 | struct domain_device *dev = task->dev; |
729 | struct mvs_info *mvi = dev->port->ha->lldd_ha; | 873 | struct mvs_info *mvi; |
730 | struct pci_dev *pdev = mvi->pdev; | 874 | struct mvs_device *mvi_dev; |
731 | void __iomem *regs = mvi->regs; | ||
732 | struct mvs_task_exec_info tei; | 875 | struct mvs_task_exec_info tei; |
733 | struct sas_task *t = task; | 876 | struct sas_task *t = task; |
734 | struct mvs_slot_info *slot; | 877 | struct mvs_slot_info *slot; |
735 | u32 tag = 0xdeadbeef, rc, n_elem = 0; | 878 | u32 tag = 0xdeadbeef, rc, n_elem = 0; |
736 | unsigned long flags; | ||
737 | u32 n = num, pass = 0; | 879 | u32 n = num, pass = 0; |
880 | unsigned long flags = 0; | ||
738 | 881 | ||
739 | spin_lock_irqsave(&mvi->lock, flags); | 882 | if (!dev->port) { |
883 | struct task_status_struct *tsm = &t->task_status; | ||
884 | |||
885 | tsm->resp = SAS_TASK_UNDELIVERED; | ||
886 | tsm->stat = SAS_PHY_DOWN; | ||
887 | t->task_done(t); | ||
888 | return 0; | ||
889 | } | ||
890 | |||
891 | mvi = mvs_find_dev_mvi(task->dev); | ||
892 | |||
893 | if (lock) | ||
894 | spin_lock_irqsave(&mvi->lock, flags); | ||
740 | do { | 895 | do { |
741 | dev = t->dev; | 896 | dev = t->dev; |
742 | tei.port = &mvi->port[dev->port->id]; | 897 | mvi_dev = (struct mvs_device *)dev->lldd_dev; |
898 | if (DEV_IS_GONE(mvi_dev)) { | ||
899 | if (mvi_dev) | ||
900 | mv_dprintk("device %d not ready.\n", | ||
901 | mvi_dev->device_id); | ||
902 | else | ||
903 | mv_dprintk("device %016llx not ready.\n", | ||
904 | SAS_ADDR(dev->sas_addr)); | ||
905 | |||
906 | rc = SAS_PHY_DOWN; | ||
907 | goto out_done; | ||
908 | } | ||
909 | |||
910 | if (dev->port->id >= mvi->chip->n_phy) | ||
911 | tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy]; | ||
912 | else | ||
913 | tei.port = &mvi->port[dev->port->id]; | ||
743 | 914 | ||
744 | if (!tei.port->port_attached) { | 915 | if (!tei.port->port_attached) { |
745 | if (sas_protocol_ata(t->task_proto)) { | 916 | if (sas_protocol_ata(t->task_proto)) { |
917 | mv_dprintk("port %d does not" | ||
918 | "attached device.\n", dev->port->id); | ||
746 | rc = SAS_PHY_DOWN; | 919 | rc = SAS_PHY_DOWN; |
747 | goto out_done; | 920 | goto out_done; |
748 | } else { | 921 | } else { |
@@ -759,7 +932,8 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) | |||
759 | 932 | ||
760 | if (!sas_protocol_ata(t->task_proto)) { | 933 | if (!sas_protocol_ata(t->task_proto)) { |
761 | if (t->num_scatter) { | 934 | if (t->num_scatter) { |
762 | n_elem = pci_map_sg(mvi->pdev, t->scatter, | 935 | n_elem = dma_map_sg(mvi->dev, |
936 | t->scatter, | ||
763 | t->num_scatter, | 937 | t->num_scatter, |
764 | t->data_dir); | 938 | t->data_dir); |
765 | if (!n_elem) { | 939 | if (!n_elem) { |
@@ -776,20 +950,23 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) | |||
776 | goto err_out; | 950 | goto err_out; |
777 | 951 | ||
778 | slot = &mvi->slot_info[tag]; | 952 | slot = &mvi->slot_info[tag]; |
953 | |||
954 | |||
779 | t->lldd_task = NULL; | 955 | t->lldd_task = NULL; |
780 | slot->n_elem = n_elem; | 956 | slot->n_elem = n_elem; |
957 | slot->slot_tag = tag; | ||
781 | memset(slot->buf, 0, MVS_SLOT_BUF_SZ); | 958 | memset(slot->buf, 0, MVS_SLOT_BUF_SZ); |
959 | |||
782 | tei.task = t; | 960 | tei.task = t; |
783 | tei.hdr = &mvi->slot[tag]; | 961 | tei.hdr = &mvi->slot[tag]; |
784 | tei.tag = tag; | 962 | tei.tag = tag; |
785 | tei.n_elem = n_elem; | 963 | tei.n_elem = n_elem; |
786 | |||
787 | switch (t->task_proto) { | 964 | switch (t->task_proto) { |
788 | case SAS_PROTOCOL_SMP: | 965 | case SAS_PROTOCOL_SMP: |
789 | rc = mvs_task_prep_smp(mvi, &tei); | 966 | rc = mvs_task_prep_smp(mvi, &tei); |
790 | break; | 967 | break; |
791 | case SAS_PROTOCOL_SSP: | 968 | case SAS_PROTOCOL_SSP: |
792 | rc = mvs_task_prep_ssp(mvi, &tei); | 969 | rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); |
793 | break; | 970 | break; |
794 | case SAS_PROTOCOL_SATA: | 971 | case SAS_PROTOCOL_SATA: |
795 | case SAS_PROTOCOL_STP: | 972 | case SAS_PROTOCOL_STP: |
@@ -797,52 +974,61 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) | |||
797 | rc = mvs_task_prep_ata(mvi, &tei); | 974 | rc = mvs_task_prep_ata(mvi, &tei); |
798 | break; | 975 | break; |
799 | default: | 976 | default: |
800 | dev_printk(KERN_ERR, &pdev->dev, | 977 | dev_printk(KERN_ERR, mvi->dev, |
801 | "unknown sas_task proto: 0x%x\n", | 978 | "unknown sas_task proto: 0x%x\n", |
802 | t->task_proto); | 979 | t->task_proto); |
803 | rc = -EINVAL; | 980 | rc = -EINVAL; |
804 | break; | 981 | break; |
805 | } | 982 | } |
806 | 983 | ||
807 | if (rc) | 984 | if (rc) { |
985 | mv_dprintk("rc is %x\n", rc); | ||
808 | goto err_out_tag; | 986 | goto err_out_tag; |
809 | 987 | } | |
810 | slot->task = t; | 988 | slot->task = t; |
811 | slot->port = tei.port; | 989 | slot->port = tei.port; |
812 | t->lldd_task = (void *) slot; | 990 | t->lldd_task = (void *) slot; |
813 | list_add_tail(&slot->list, &slot->port->list); | 991 | list_add_tail(&slot->entry, &tei.port->list); |
814 | /* TODO: select normal or high priority */ | 992 | /* TODO: select normal or high priority */ |
815 | |||
816 | spin_lock(&t->task_state_lock); | 993 | spin_lock(&t->task_state_lock); |
817 | t->task_state_flags |= SAS_TASK_AT_INITIATOR; | 994 | t->task_state_flags |= SAS_TASK_AT_INITIATOR; |
818 | spin_unlock(&t->task_state_lock); | 995 | spin_unlock(&t->task_state_lock); |
819 | 996 | ||
820 | mvs_hba_memory_dump(mvi, tag, t->task_proto); | 997 | mvs_hba_memory_dump(mvi, tag, t->task_proto); |
821 | 998 | mvi_dev->runing_req++; | |
822 | ++pass; | 999 | ++pass; |
823 | mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); | 1000 | mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); |
824 | if (n > 1) | 1001 | if (n > 1) |
825 | t = list_entry(t->list.next, struct sas_task, list); | 1002 | t = list_entry(t->list.next, struct sas_task, list); |
826 | } while (--n); | 1003 | } while (--n); |
827 | |||
828 | rc = 0; | 1004 | rc = 0; |
829 | goto out_done; | 1005 | goto out_done; |
830 | 1006 | ||
831 | err_out_tag: | 1007 | err_out_tag: |
832 | mvs_tag_free(mvi, tag); | 1008 | mvs_tag_free(mvi, tag); |
833 | err_out: | 1009 | err_out: |
834 | dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); | 1010 | |
1011 | dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); | ||
835 | if (!sas_protocol_ata(t->task_proto)) | 1012 | if (!sas_protocol_ata(t->task_proto)) |
836 | if (n_elem) | 1013 | if (n_elem) |
837 | pci_unmap_sg(mvi->pdev, t->scatter, n_elem, | 1014 | dma_unmap_sg(mvi->dev, t->scatter, n_elem, |
838 | t->data_dir); | 1015 | t->data_dir); |
839 | out_done: | 1016 | out_done: |
840 | if (pass) | 1017 | if (likely(pass)) { |
841 | mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); | 1018 | MVS_CHIP_DISP->start_delivery(mvi, |
842 | spin_unlock_irqrestore(&mvi->lock, flags); | 1019 | (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); |
1020 | } | ||
1021 | if (lock) | ||
1022 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
843 | return rc; | 1023 | return rc; |
844 | } | 1024 | } |
845 | 1025 | ||
1026 | int mvs_queue_command(struct sas_task *task, const int num, | ||
1027 | gfp_t gfp_flags) | ||
1028 | { | ||
1029 | return mvs_task_exec(task, num, gfp_flags, NULL, 1, 0, NULL); | ||
1030 | } | ||
1031 | |||
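
[Editor's note] mvs_task_exec() advances the delivery-queue producer with (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1), the usual power-of-two ring trick, and only kicks the hardware via start_delivery once per batch. A quick demonstration of the wrap-around with an assumed queue depth:

#include <stdio.h>

int main(void)
{
	unsigned int slot_sz = 512;		/* must be a power of two */
	unsigned int tx_prod = 510;
	int i;

	for (i = 0; i < 4; i++) {
		tx_prod = (tx_prod + 1) & (slot_sz - 1);
		printf("tx_prod -> %u\n", tx_prod);	/* 511, 0, 1, 2 */
	}
	return 0;
}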
846 | static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) | 1032 | static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) |
847 | { | 1033 | { |
848 | u32 slot_idx = rx_desc & RXQ_SLOT_MASK; | 1034 | u32 slot_idx = rx_desc & RXQ_SLOT_MASK; |
@@ -852,16 +1038,18 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) | |||
852 | static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, | 1038 | static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, |
853 | struct mvs_slot_info *slot, u32 slot_idx) | 1039 | struct mvs_slot_info *slot, u32 slot_idx) |
854 | { | 1040 | { |
1041 | if (!slot->task) | ||
1042 | return; | ||
855 | if (!sas_protocol_ata(task->task_proto)) | 1043 | if (!sas_protocol_ata(task->task_proto)) |
856 | if (slot->n_elem) | 1044 | if (slot->n_elem) |
857 | pci_unmap_sg(mvi->pdev, task->scatter, | 1045 | dma_unmap_sg(mvi->dev, task->scatter, |
858 | slot->n_elem, task->data_dir); | 1046 | slot->n_elem, task->data_dir); |
859 | 1047 | ||
860 | switch (task->task_proto) { | 1048 | switch (task->task_proto) { |
861 | case SAS_PROTOCOL_SMP: | 1049 | case SAS_PROTOCOL_SMP: |
862 | pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, | 1050 | dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, |
863 | PCI_DMA_FROMDEVICE); | 1051 | PCI_DMA_FROMDEVICE); |
864 | pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, | 1052 | dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, |
865 | PCI_DMA_TODEVICE); | 1053 | PCI_DMA_TODEVICE); |
866 | break; | 1054 | break; |
867 | 1055 | ||
@@ -872,10 +1060,12 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, | |||
872 | /* do nothing */ | 1060 | /* do nothing */ |
873 | break; | 1061 | break; |
874 | } | 1062 | } |
875 | list_del(&slot->list); | 1063 | list_del_init(&slot->entry); |
876 | task->lldd_task = NULL; | 1064 | task->lldd_task = NULL; |
877 | slot->task = NULL; | 1065 | slot->task = NULL; |
878 | slot->port = NULL; | 1066 | slot->port = NULL; |
1067 | slot->slot_tag = 0xFFFFFFFF; | ||
1068 | mvs_slot_free(mvi, slot_idx); | ||
879 | } | 1069 | } |
880 | 1070 | ||
881 | static void mvs_update_wideport(struct mvs_info *mvi, int i) | 1071 | static void mvs_update_wideport(struct mvs_info *mvi, int i) |
@@ -884,25 +1074,28 @@ static void mvs_update_wideport(struct mvs_info *mvi, int i) | |||
884 | struct mvs_port *port = phy->port; | 1074 | struct mvs_port *port = phy->port; |
885 | int j, no; | 1075 | int j, no; |
886 | 1076 | ||
887 | for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) | 1077 | for_each_phy(port->wide_port_phymap, j, no) { |
888 | if (no & 1) { | 1078 | if (j & 1) { |
889 | mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); | 1079 | MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, |
890 | mvs_write_port_cfg_data(mvi, no, | 1080 | PHYR_WIDE_PORT); |
1081 | MVS_CHIP_DISP->write_port_cfg_data(mvi, no, | ||
891 | port->wide_port_phymap); | 1082 | port->wide_port_phymap); |
892 | } else { | 1083 | } else { |
893 | mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); | 1084 | MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, |
894 | mvs_write_port_cfg_data(mvi, no, 0); | 1085 | PHYR_WIDE_PORT); |
1086 | MVS_CHIP_DISP->write_port_cfg_data(mvi, no, | ||
1087 | 0); | ||
895 | } | 1088 | } |
1089 | } | ||
896 | } | 1090 | } |
897 | 1091 | ||
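[Editor's note] As a reading aid: the loop above walks the port's phy bitmap and, for each member phy, programs PHYR_WIDE_PORT with the whole map, while non-member phys get 0. A minimal sketch of the same logic using a plain bitmask walk instead of the driver's for_each_phy macro (function name hypothetical, illustrative only):

	static void mvs_wideport_sketch(struct mvs_info *mvi,
					struct mvs_port *port)
	{
		int no;

		for (no = 0; no < mvi->chip->n_phy; no++) {
			/* members get the full map, others are cleared */
			u32 map = (port->wide_port_phymap & (1U << no)) ?
				   port->wide_port_phymap : 0;

			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
							   PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no, map);
		}
	}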
898 | static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) | 1092 | static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) |
899 | { | 1093 | { |
900 | u32 tmp; | 1094 | u32 tmp; |
901 | struct mvs_phy *phy = &mvi->phy[i]; | 1095 | struct mvs_phy *phy = &mvi->phy[i]; |
902 | struct mvs_port *port = phy->port;; | 1096 | struct mvs_port *port = phy->port; |
903 | |||
904 | tmp = mvs_read_phy_ctl(mvi, i); | ||
905 | 1097 | ||
1098 | tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); | ||
906 | if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { | 1099 | if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { |
907 | if (!port) | 1100 | if (!port) |
908 | phy->phy_attached = 1; | 1101 | phy->phy_attached = 1; |
@@ -917,7 +1110,6 @@ static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) | |||
917 | mvs_update_wideport(mvi, i); | 1110 | mvs_update_wideport(mvi, i); |
918 | } else if (phy->phy_type & PORT_TYPE_SATA) | 1111 | } else if (phy->phy_type & PORT_TYPE_SATA) |
919 | port->port_attached = 0; | 1112 | port->port_attached = 0; |
920 | mvs_free_reg_set(mvi, phy->port); | ||
921 | phy->port = NULL; | 1113 | phy->port = NULL; |
922 | phy->phy_attached = 0; | 1114 | phy->phy_attached = 0; |
923 | phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); | 1115 | phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); |
@@ -932,17 +1124,21 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) | |||
932 | if (!s) | 1124 | if (!s) |
933 | return NULL; | 1125 | return NULL; |
934 | 1126 | ||
935 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); | 1127 | MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); |
936 | s[3] = mvs_read_port_cfg_data(mvi, i); | 1128 | s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); |
1129 | |||
1130 | MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); | ||
1131 | s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); | ||
937 | 1132 | ||
938 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); | 1133 | MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); |
939 | s[2] = mvs_read_port_cfg_data(mvi, i); | 1134 | s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); |
940 | 1135 | ||
941 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); | 1136 | MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); |
942 | s[1] = mvs_read_port_cfg_data(mvi, i); | 1137 | s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); |
943 | 1138 | ||
944 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); | 1139 | /* Workaround: treat some ATAPI devices as ATA */ |
945 | s[0] = mvs_read_port_cfg_data(mvi, i); | 1140 | if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) |
1141 | s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); | ||
946 | 1142 | ||
947 | return (void *)s; | 1143 | return (void *)s; |
948 | } | 1144 | } |
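[Editor's note] The workaround just above keys off the standard ATAPI device signature reported in the D2H register FIS (LBA low/mid/high = 0x01/0x14/0xEB, which is the 0x00EB1401 word being compared), so that such devices can be driven like ATA. Expressed against struct dev_to_host_fis for readability, the same signature test looks roughly like this (sketch only, assuming the usual lbal/lbam/lbah field names from <scsi/sas.h>, not the driver's packed-word form):

	static bool mvs_sig_is_atapi_sketch(const struct dev_to_host_fis *fis)
	{
		/* Classic ATAPI signature left in the task-file registers. */
		return fis->lbal == 0x01 && fis->lbam == 0x14 &&
		       fis->lbah == 0xEB;
	}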
@@ -952,56 +1148,53 @@ static u32 mvs_is_sig_fis_received(u32 irq_status) | |||
952 | return irq_status & PHYEV_SIG_FIS; | 1148 | return irq_status & PHYEV_SIG_FIS; |
953 | } | 1149 | } |
954 | 1150 | ||
955 | static void mvs_update_phyinfo(struct mvs_info *mvi, int i, | 1151 | void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) |
956 | int get_st) | ||
957 | { | 1152 | { |
958 | struct mvs_phy *phy = &mvi->phy[i]; | 1153 | struct mvs_phy *phy = &mvi->phy[i]; |
959 | struct pci_dev *pdev = mvi->pdev; | 1154 | struct sas_identify_frame *id; |
960 | u32 tmp; | ||
961 | u64 tmp64; | ||
962 | |||
963 | mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); | ||
964 | phy->dev_info = mvs_read_port_cfg_data(mvi, i); | ||
965 | |||
966 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); | ||
967 | phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; | ||
968 | 1155 | ||
969 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); | 1156 | id = (struct sas_identify_frame *)phy->frame_rcvd; |
970 | phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); | ||
971 | 1157 | ||
972 | if (get_st) { | 1158 | if (get_st) { |
973 | phy->irq_status = mvs_read_port_irq_stat(mvi, i); | 1159 | phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); |
974 | phy->phy_status = mvs_is_phy_ready(mvi, i); | 1160 | phy->phy_status = mvs_is_phy_ready(mvi, i); |
975 | } | 1161 | } |
976 | 1162 | ||
977 | if (phy->phy_status) { | 1163 | if (phy->phy_status) { |
978 | u32 phy_st; | 1164 | int oob_done = 0; |
979 | struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; | 1165 | struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; |
980 | |||
981 | mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); | ||
982 | phy_st = mvs_read_port_cfg_data(mvi, i); | ||
983 | |||
984 | sas_phy->linkrate = | ||
985 | (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> | ||
986 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; | ||
987 | phy->minimum_linkrate = | ||
988 | (phy->phy_status & | ||
989 | PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; | ||
990 | phy->maximum_linkrate = | ||
991 | (phy->phy_status & | ||
992 | PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; | ||
993 | 1166 | ||
994 | if (phy->phy_type & PORT_TYPE_SAS) { | 1167 | oob_done = MVS_CHIP_DISP->oob_done(mvi, i); |
995 | /* Updated attached_sas_addr */ | 1168 | |
996 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); | 1169 | MVS_CHIP_DISP->fix_phy_info(mvi, i, id); |
997 | phy->att_dev_sas_addr = | 1170 | if (phy->phy_type & PORT_TYPE_SATA) { |
998 | (u64) mvs_read_port_cfg_data(mvi, i) << 32; | 1171 | phy->identify.target_port_protocols = SAS_PROTOCOL_STP; |
999 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); | 1172 | if (mvs_is_sig_fis_received(phy->irq_status)) { |
1000 | phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); | 1173 | phy->phy_attached = 1; |
1001 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); | 1174 | phy->att_dev_sas_addr = |
1002 | phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); | 1175 | i + mvi->id * mvi->chip->n_phy; |
1176 | if (oob_done) | ||
1177 | sas_phy->oob_mode = SATA_OOB_MODE; | ||
1178 | phy->frame_rcvd_size = | ||
1179 | sizeof(struct dev_to_host_fis); | ||
1180 | mvs_get_d2h_reg(mvi, i, (void *)id); | ||
1181 | } else { | ||
1182 | u32 tmp; | ||
1183 | dev_printk(KERN_DEBUG, mvi->dev, | ||
1184 | "Phy%d : No sig fis\n", i); | ||
1185 | tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); | ||
1186 | MVS_CHIP_DISP->write_port_irq_mask(mvi, i, | ||
1187 | tmp | PHYEV_SIG_FIS); | ||
1188 | phy->phy_attached = 0; | ||
1189 | phy->phy_type &= ~PORT_TYPE_SATA; | ||
1190 | MVS_CHIP_DISP->phy_reset(mvi, i, 0); | ||
1191 | goto out_done; | ||
1192 | } | ||
1193 | } else if (phy->phy_type & PORT_TYPE_SAS | ||
1194 | || phy->att_dev_info & PORT_SSP_INIT_MASK) { | ||
1195 | phy->phy_attached = 1; | ||
1003 | phy->identify.device_type = | 1196 | phy->identify.device_type = |
1004 | phy->att_dev_info & PORT_DEV_TYPE_MASK; | 1197 | phy->att_dev_info & PORT_DEV_TYPE_MASK; |
1005 | 1198 | ||
1006 | if (phy->identify.device_type == SAS_END_DEV) | 1199 | if (phy->identify.device_type == SAS_END_DEV) |
1007 | phy->identify.target_port_protocols = | 1200 | phy->identify.target_port_protocols = |
@@ -1009,93 +1202,522 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i, | |||
1009 | else if (phy->identify.device_type != NO_DEVICE) | 1202 | else if (phy->identify.device_type != NO_DEVICE) |
1010 | phy->identify.target_port_protocols = | 1203 | phy->identify.target_port_protocols = |
1011 | SAS_PROTOCOL_SMP; | 1204 | SAS_PROTOCOL_SMP; |
1012 | if (phy_st & PHY_OOB_DTCTD) | 1205 | if (oob_done) |
1013 | sas_phy->oob_mode = SAS_OOB_MODE; | 1206 | sas_phy->oob_mode = SAS_OOB_MODE; |
1014 | phy->frame_rcvd_size = | 1207 | phy->frame_rcvd_size = |
1015 | sizeof(struct sas_identify_frame); | 1208 | sizeof(struct sas_identify_frame); |
1016 | } else if (phy->phy_type & PORT_TYPE_SATA) { | ||
1017 | phy->identify.target_port_protocols = SAS_PROTOCOL_STP; | ||
1018 | if (mvs_is_sig_fis_received(phy->irq_status)) { | ||
1019 | phy->att_dev_sas_addr = i; /* temp */ | ||
1020 | if (phy_st & PHY_OOB_DTCTD) | ||
1021 | sas_phy->oob_mode = SATA_OOB_MODE; | ||
1022 | phy->frame_rcvd_size = | ||
1023 | sizeof(struct dev_to_host_fis); | ||
1024 | mvs_get_d2h_reg(mvi, i, | ||
1025 | (void *)sas_phy->frame_rcvd); | ||
1026 | } else { | ||
1027 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1028 | "No sig fis\n"); | ||
1029 | phy->phy_type &= ~(PORT_TYPE_SATA); | ||
1030 | goto out_done; | ||
1031 | } | ||
1032 | } | 1209 | } |
1033 | tmp64 = cpu_to_be64(phy->att_dev_sas_addr); | 1210 | memcpy(sas_phy->attached_sas_addr, |
1034 | memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); | 1211 | &phy->att_dev_sas_addr, SAS_ADDR_SIZE); |
1035 | |||
1036 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1037 | "phy[%d] Get Attached Address 0x%llX ," | ||
1038 | " SAS Address 0x%llX\n", | ||
1039 | i, | ||
1040 | (unsigned long long)phy->att_dev_sas_addr, | ||
1041 | (unsigned long long)phy->dev_sas_addr); | ||
1042 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1043 | "Rate = %x , type = %d\n", | ||
1044 | sas_phy->linkrate, phy->phy_type); | ||
1045 | |||
1046 | /* workaround for HW phy decoding error on 1.5g disk drive */ | ||
1047 | mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); | ||
1048 | tmp = mvs_read_port_vsr_data(mvi, i); | ||
1049 | if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> | ||
1050 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == | ||
1051 | SAS_LINK_RATE_1_5_GBPS) | ||
1052 | tmp &= ~PHY_MODE6_LATECLK; | ||
1053 | else | ||
1054 | tmp |= PHY_MODE6_LATECLK; | ||
1055 | mvs_write_port_vsr_data(mvi, i, tmp); | ||
1056 | 1212 | ||
1213 | if (MVS_CHIP_DISP->phy_work_around) | ||
1214 | MVS_CHIP_DISP->phy_work_around(mvi, i); | ||
1057 | } | 1215 | } |
1216 | mv_dprintk("port %d attach dev info is %x\n", | ||
1217 | i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); | ||
1218 | mv_dprintk("port %d attach sas addr is %llx\n", | ||
1219 | i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); | ||
1058 | out_done: | 1220 | out_done: |
1059 | if (get_st) | 1221 | if (get_st) |
1060 | mvs_write_port_irq_stat(mvi, i, phy->irq_status); | 1222 | MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); |
1061 | } | 1223 | } |
1062 | 1224 | ||
1063 | void mvs_port_formed(struct asd_sas_phy *sas_phy) | 1225 | static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) |
1064 | { | 1226 | { |
1065 | struct sas_ha_struct *sas_ha = sas_phy->ha; | 1227 | struct sas_ha_struct *sas_ha = sas_phy->ha; |
1066 | struct mvs_info *mvi = sas_ha->lldd_ha; | 1228 | struct mvs_info *mvi = NULL; int i = 0, hi; |
1067 | struct asd_sas_port *sas_port = sas_phy->port; | ||
1068 | struct mvs_phy *phy = sas_phy->lldd_phy; | 1229 | struct mvs_phy *phy = sas_phy->lldd_phy; |
1069 | struct mvs_port *port = &mvi->port[sas_port->id]; | 1230 | struct asd_sas_port *sas_port = sas_phy->port; |
1070 | unsigned long flags; | 1231 | struct mvs_port *port; |
1232 | unsigned long flags = 0; | ||
1233 | if (!sas_port) | ||
1234 | return; | ||
1071 | 1235 | ||
1072 | spin_lock_irqsave(&mvi->lock, flags); | 1236 | while (sas_ha->sas_phy[i]) { |
1237 | if (sas_ha->sas_phy[i] == sas_phy) | ||
1238 | break; | ||
1239 | i++; | ||
1240 | } | ||
1241 | hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; | ||
1242 | mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; | ||
1243 | if (sas_port->id >= mvi->chip->n_phy) | ||
1244 | port = &mvi->port[sas_port->id - mvi->chip->n_phy]; | ||
1245 | else | ||
1246 | port = &mvi->port[sas_port->id]; | ||
1247 | if (lock) | ||
1248 | spin_lock_irqsave(&mvi->lock, flags); | ||
1073 | port->port_attached = 1; | 1249 | port->port_attached = 1; |
1074 | phy->port = port; | 1250 | phy->port = port; |
1075 | port->taskfileset = MVS_ID_NOT_MAPPED; | ||
1076 | if (phy->phy_type & PORT_TYPE_SAS) { | 1251 | if (phy->phy_type & PORT_TYPE_SAS) { |
1077 | port->wide_port_phymap = sas_port->phy_mask; | 1252 | port->wide_port_phymap = sas_port->phy_mask; |
1253 | mv_printk("set wide port phy map %x\n", sas_port->phy_mask); | ||
1078 | mvs_update_wideport(mvi, sas_phy->id); | 1254 | mvs_update_wideport(mvi, sas_phy->id); |
1079 | } | 1255 | } |
1080 | spin_unlock_irqrestore(&mvi->lock, flags); | 1256 | if (lock) |
1257 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1258 | } | ||
1259 | |||
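[Editor's note] One structural change worth calling out: with the 94xx support a single sas_ha can now front several mvs_info instances, so mvs_port_notify_formed() first locates the phy's index in sas_ha->sas_phy[] and divides by the per-chip phy count to pick the right controller. A reduced sketch of that mapping (helper name hypothetical, assuming the mvs_prv_info layout referenced above):

	static struct mvs_info *mvs_phy_to_mvi_sketch(struct sas_ha_struct *sas_ha,
						      struct asd_sas_phy *sas_phy,
						      int *phy_no)
	{
		struct mvs_prv_info *prv = sas_ha->lldd_ha;
		int i = 0;

		while (sas_ha->sas_phy[i] && sas_ha->sas_phy[i] != sas_phy)
			i++;

		*phy_no = i % prv->n_phy;		/* phy index on that chip */
		return prv->mvi[i / prv->n_phy];	/* controller instance */
	}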
1260 | static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) | ||
1261 | { | ||
1262 | /*Nothing*/ | ||
1263 | } | ||
1264 | |||
1265 | |||
1266 | void mvs_port_formed(struct asd_sas_phy *sas_phy) | ||
1267 | { | ||
1268 | mvs_port_notify_formed(sas_phy, 1); | ||
1269 | } | ||
1270 | |||
1271 | void mvs_port_deformed(struct asd_sas_phy *sas_phy) | ||
1272 | { | ||
1273 | mvs_port_notify_deformed(sas_phy, 1); | ||
1274 | } | ||
1275 | |||
1276 | struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) | ||
1277 | { | ||
1278 | u32 dev; | ||
1279 | for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { | ||
1280 | if (mvi->devices[dev].dev_type == NO_DEVICE) { | ||
1281 | mvi->devices[dev].device_id = dev; | ||
1282 | return &mvi->devices[dev]; | ||
1283 | } | ||
1284 | } | ||
1285 | |||
1286 | if (dev == MVS_MAX_DEVICES) | ||
1287 | mv_printk("supports at most %d devices, ignoring.\n", | ||
1288 | MVS_MAX_DEVICES); | ||
1289 | |||
1290 | return NULL; | ||
1291 | } | ||
1292 | |||
1293 | void mvs_free_dev(struct mvs_device *mvi_dev) | ||
1294 | { | ||
1295 | u32 id = mvi_dev->device_id; | ||
1296 | memset(mvi_dev, 0, sizeof(*mvi_dev)); | ||
1297 | mvi_dev->device_id = id; | ||
1298 | mvi_dev->dev_type = NO_DEVICE; | ||
1299 | mvi_dev->dev_status = MVS_DEV_NORMAL; | ||
1300 | mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; | ||
1301 | } | ||
1302 | |||
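[Editor's note] mvs_alloc_dev()/mvs_free_dev() above manage a fixed, per-controller table indexed by device_id: alloc hands out the first NO_DEVICE slot, free wipes the entry but preserves its id. Finding an entry again is therefore a plain array index; a hedged sketch (function name hypothetical, bounds check added for illustration):

	static struct mvs_device *mvs_id_to_dev_sketch(struct mvs_info *mvi,
						       u32 id)
	{
		if (id >= MVS_MAX_DEVICES ||
		    mvi->devices[id].dev_type == NO_DEVICE)
			return NULL;	/* never allocated, or already freed */
		return &mvi->devices[id];
	}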
1303 | int mvs_dev_found_notify(struct domain_device *dev, int lock) | ||
1304 | { | ||
1305 | unsigned long flags = 0; | ||
1306 | int res = 0; | ||
1307 | struct mvs_info *mvi = NULL; | ||
1308 | struct domain_device *parent_dev = dev->parent; | ||
1309 | struct mvs_device *mvi_device; | ||
1310 | |||
1311 | mvi = mvs_find_dev_mvi(dev); | ||
1312 | |||
1313 | if (lock) | ||
1314 | spin_lock_irqsave(&mvi->lock, flags); | ||
1315 | |||
1316 | mvi_device = mvs_alloc_dev(mvi); | ||
1317 | if (!mvi_device) { | ||
1318 | res = -1; | ||
1319 | goto found_out; | ||
1320 | } | ||
1321 | dev->lldd_dev = (void *)mvi_device; | ||
1322 | mvi_device->dev_type = dev->dev_type; | ||
1323 | |||
1324 | if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { | ||
1325 | int phy_id; | ||
1326 | u8 phy_num = parent_dev->ex_dev.num_phys; | ||
1327 | struct ex_phy *phy; | ||
1328 | for (phy_id = 0; phy_id < phy_num; phy_id++) { | ||
1329 | phy = &parent_dev->ex_dev.ex_phy[phy_id]; | ||
1330 | if (SAS_ADDR(phy->attached_sas_addr) == | ||
1331 | SAS_ADDR(dev->sas_addr)) { | ||
1332 | mvi_device->attached_phy = phy_id; | ||
1333 | break; | ||
1334 | } | ||
1335 | } | ||
1336 | |||
1337 | if (phy_id == phy_num) { | ||
1338 | mv_printk("Error: no attached dev:%016llx " | ||
1339 | "at ex:%016llx.\n", | ||
1340 | SAS_ADDR(dev->sas_addr), | ||
1341 | SAS_ADDR(parent_dev->sas_addr)); | ||
1342 | res = -1; | ||
1343 | } | ||
1344 | } | ||
1345 | |||
1346 | found_out: | ||
1347 | if (lock) | ||
1348 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1349 | return res; | ||
1350 | } | ||
1351 | |||
1352 | int mvs_dev_found(struct domain_device *dev) | ||
1353 | { | ||
1354 | return mvs_dev_found_notify(dev, 1); | ||
1355 | } | ||
1356 | |||
1357 | void mvs_dev_gone_notify(struct domain_device *dev, int lock) | ||
1358 | { | ||
1359 | unsigned long flags = 0; | ||
1360 | struct mvs_info *mvi; | ||
1361 | struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; | ||
1362 | |||
1363 | mvi = mvs_find_dev_mvi(dev); | ||
1364 | |||
1365 | if (lock) | ||
1366 | spin_lock_irqsave(&mvi->lock, flags); | ||
1367 | |||
1368 | if (mvi_dev) { | ||
1369 | mv_dprintk("found dev[%d:%x] is gone.\n", | ||
1370 | mvi_dev->device_id, mvi_dev->dev_type); | ||
1371 | mvs_free_reg_set(mvi, mvi_dev); | ||
1372 | mvs_free_dev(mvi_dev); | ||
1373 | } else { | ||
1374 | mv_dprintk("found dev has gone.\n"); | ||
1375 | } | ||
1376 | dev->lldd_dev = NULL; | ||
1377 | |||
1378 | if (lock) | ||
1379 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1380 | } | ||
1381 | |||
1382 | |||
1383 | void mvs_dev_gone(struct domain_device *dev) | ||
1384 | { | ||
1385 | mvs_dev_gone_notify(dev, 1); | ||
1386 | } | ||
1387 | |||
1388 | static struct sas_task *mvs_alloc_task(void) | ||
1389 | { | ||
1390 | struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL); | ||
1391 | |||
1392 | if (task) { | ||
1393 | INIT_LIST_HEAD(&task->list); | ||
1394 | spin_lock_init(&task->task_state_lock); | ||
1395 | task->task_state_flags = SAS_TASK_STATE_PENDING; | ||
1396 | init_timer(&task->timer); | ||
1397 | init_completion(&task->completion); | ||
1398 | } | ||
1399 | return task; | ||
1400 | } | ||
1401 | |||
1402 | static void mvs_free_task(struct sas_task *task) | ||
1403 | { | ||
1404 | if (task) { | ||
1405 | BUG_ON(!list_empty(&task->list)); | ||
1406 | kfree(task); | ||
1407 | } | ||
1408 | } | ||
1409 | |||
1410 | static void mvs_task_done(struct sas_task *task) | ||
1411 | { | ||
1412 | if (!del_timer(&task->timer)) | ||
1413 | return; | ||
1414 | complete(&task->completion); | ||
1415 | } | ||
1416 | |||
1417 | static void mvs_tmf_timedout(unsigned long data) | ||
1418 | { | ||
1419 | struct sas_task *task = (struct sas_task *)data; | ||
1420 | |||
1421 | task->task_state_flags |= SAS_TASK_STATE_ABORTED; | ||
1422 | complete(&task->completion); | ||
1423 | } | ||
1424 | |||
1425 | /* XXX */ | ||
1426 | #define MVS_TASK_TIMEOUT 20 | ||
1427 | static int mvs_exec_internal_tmf_task(struct domain_device *dev, | ||
1428 | void *parameter, u32 para_len, struct mvs_tmf_task *tmf) | ||
1429 | { | ||
1430 | int res, retry; | ||
1431 | struct sas_task *task = NULL; | ||
1432 | |||
1433 | for (retry = 0; retry < 3; retry++) { | ||
1434 | task = mvs_alloc_task(); | ||
1435 | if (!task) | ||
1436 | return -ENOMEM; | ||
1437 | |||
1438 | task->dev = dev; | ||
1439 | task->task_proto = dev->tproto; | ||
1440 | |||
1441 | memcpy(&task->ssp_task, parameter, para_len); | ||
1442 | task->task_done = mvs_task_done; | ||
1443 | |||
1444 | task->timer.data = (unsigned long) task; | ||
1445 | task->timer.function = mvs_tmf_timedout; | ||
1446 | task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; | ||
1447 | add_timer(&task->timer); | ||
1448 | |||
1449 | res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 0, 1, tmf); | ||
1450 | |||
1451 | if (res) { | ||
1452 | del_timer(&task->timer); | ||
1453 | mv_printk("executing internal task failed: %d\n", res); | ||
1454 | goto ex_err; | ||
1455 | } | ||
1456 | |||
1457 | wait_for_completion(&task->completion); | ||
1458 | res = -TMF_RESP_FUNC_FAILED; | ||
1459 | /* Return directly even if the TMF timed out. */ | ||
1460 | if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
1461 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { | ||
1462 | mv_printk("TMF task[%x] timeout.\n", tmf->tmf); | ||
1463 | goto ex_err; | ||
1464 | } | ||
1465 | } | ||
1466 | |||
1467 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
1468 | task->task_status.stat == SAM_GOOD) { | ||
1469 | res = TMF_RESP_FUNC_COMPLETE; | ||
1470 | break; | ||
1471 | } | ||
1472 | |||
1473 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
1474 | task->task_status.stat == SAS_DATA_UNDERRUN) { | ||
1475 | /* no error, but return the number of bytes of | ||
1476 | * underrun */ | ||
1477 | res = task->task_status.residual; | ||
1478 | break; | ||
1479 | } | ||
1480 | |||
1481 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
1482 | task->task_status.stat == SAS_DATA_OVERRUN) { | ||
1483 | mv_dprintk("blocked task error.\n"); | ||
1484 | res = -EMSGSIZE; | ||
1485 | break; | ||
1486 | } else { | ||
1487 | mv_dprintk(" task to dev %016llx response: 0x%x " | ||
1488 | "status 0x%x\n", | ||
1489 | SAS_ADDR(dev->sas_addr), | ||
1490 | task->task_status.resp, | ||
1491 | task->task_status.stat); | ||
1492 | mvs_free_task(task); | ||
1493 | task = NULL; | ||
1494 | |||
1495 | } | ||
1496 | } | ||
1497 | ex_err: | ||
1498 | BUG_ON(retry == 3 && task != NULL); | ||
1499 | if (task != NULL) | ||
1500 | mvs_free_task(task); | ||
1501 | return res; | ||
1502 | } | ||
1503 | |||
1504 | static int mvs_debug_issue_ssp_tmf(struct domain_device *dev, | ||
1505 | u8 *lun, struct mvs_tmf_task *tmf) | ||
1506 | { | ||
1507 | struct sas_ssp_task ssp_task; | ||
1508 | DECLARE_COMPLETION_ONSTACK(completion); | ||
1509 | if (!(dev->tproto & SAS_PROTOCOL_SSP)) | ||
1510 | return TMF_RESP_FUNC_ESUPP; | ||
1511 | |||
1512 | strncpy((u8 *)&ssp_task.LUN, lun, 8); | ||
1513 | |||
1514 | return mvs_exec_internal_tmf_task(dev, &ssp_task, | ||
1515 | sizeof(ssp_task), tmf); | ||
1516 | } | ||
1517 | |||
1518 | |||
1519 | /* The standard mandates a link reset for ATA (type 0) | ||
1520 | and a hard reset for SSP (type 1); used only for RECOVERY */ | ||
1521 | static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) | ||
1522 | { | ||
1523 | int rc; | ||
1524 | struct sas_phy *phy = sas_find_local_phy(dev); | ||
1525 | int reset_type = (dev->dev_type == SATA_DEV || | ||
1526 | (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; | ||
1527 | rc = sas_phy_reset(phy, reset_type); | ||
1528 | msleep(2000); | ||
1529 | return rc; | ||
1530 | } | ||
1531 | |||
1532 | /* mandatory SAM-3 */ | ||
1533 | int mvs_lu_reset(struct domain_device *dev, u8 *lun) | ||
1534 | { | ||
1535 | unsigned long flags; | ||
1536 | int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED; | ||
1537 | struct mvs_tmf_task tmf_task; | ||
1538 | struct mvs_info *mvi = mvs_find_dev_mvi(dev); | ||
1539 | struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; | ||
1540 | |||
1541 | tmf_task.tmf = TMF_LU_RESET; | ||
1542 | mvi_dev->dev_status = MVS_DEV_EH; | ||
1543 | rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); | ||
1544 | if (rc == TMF_RESP_FUNC_COMPLETE) { | ||
1545 | num = mvs_find_dev_phyno(dev, phyno); | ||
1546 | spin_lock_irqsave(&mvi->lock, flags); | ||
1547 | for (i = 0; i < num; i++) | ||
1548 | mvs_release_task(mvi, phyno[i], dev); | ||
1549 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1550 | } | ||
1551 | /* If failed, fall-through I_T_Nexus reset */ | ||
1552 | mv_printk("%s for device[%x]:rc= %d\n", __func__, | ||
1553 | mvi_dev->device_id, rc); | ||
1554 | return rc; | ||
1081 | } | 1555 | } |
1082 | 1556 | ||
1083 | int mvs_I_T_nexus_reset(struct domain_device *dev) | 1557 | int mvs_I_T_nexus_reset(struct domain_device *dev) |
1084 | { | 1558 | { |
1085 | return TMF_RESP_FUNC_FAILED; | 1559 | unsigned long flags; |
1560 | int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED; | ||
1561 | struct mvs_info *mvi = mvs_find_dev_mvi(dev); | ||
1562 | struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; | ||
1563 | |||
1564 | if (mvi_dev->dev_status != MVS_DEV_EH) | ||
1565 | return TMF_RESP_FUNC_COMPLETE; | ||
1566 | rc = mvs_debug_I_T_nexus_reset(dev); | ||
1567 | mv_printk("%s for device[%x]:rc= %d\n", | ||
1568 | __func__, mvi_dev->device_id, rc); | ||
1569 | |||
1570 | /* housekeeper */ | ||
1571 | num = mvs_find_dev_phyno(dev, phyno); | ||
1572 | spin_lock_irqsave(&mvi->lock, flags); | ||
1573 | for (i = 0; i < num; i++) | ||
1574 | mvs_release_task(mvi, phyno[i], dev); | ||
1575 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1576 | |||
1577 | return rc; | ||
1578 | } | ||
1579 | /* optional SAM-3 */ | ||
1580 | int mvs_query_task(struct sas_task *task) | ||
1581 | { | ||
1582 | u32 tag; | ||
1583 | struct scsi_lun lun; | ||
1584 | struct mvs_tmf_task tmf_task; | ||
1585 | int rc = TMF_RESP_FUNC_FAILED; | ||
1586 | |||
1587 | if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { | ||
1588 | struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; | ||
1589 | struct domain_device *dev = task->dev; | ||
1590 | struct mvs_info *mvi = mvs_find_dev_mvi(dev); | ||
1591 | |||
1592 | int_to_scsilun(cmnd->device->lun, &lun); | ||
1593 | rc = mvs_find_tag(mvi, task, &tag); | ||
1594 | if (rc == 0) { | ||
1595 | rc = TMF_RESP_FUNC_FAILED; | ||
1596 | return rc; | ||
1597 | } | ||
1598 | |||
1599 | tmf_task.tmf = TMF_QUERY_TASK; | ||
1600 | tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); | ||
1601 | |||
1602 | rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); | ||
1603 | switch (rc) { | ||
1604 | /* The task is still in the LUN; release it */ | ||
1605 | case TMF_RESP_FUNC_SUCC: | ||
1606 | /* The task is not in the LUN, or the TMF failed; reset the phy */ | ||
1607 | case TMF_RESP_FUNC_FAILED: | ||
1608 | case TMF_RESP_FUNC_COMPLETE: | ||
1609 | break; | ||
1610 | } | ||
1611 | } | ||
1612 | mv_printk("%s:rc= %d\n", __func__, rc); | ||
1613 | return rc; | ||
1614 | } | ||
1615 | |||
1616 | /* mandatory SAM-3, still need free task/slot info */ | ||
1617 | int mvs_abort_task(struct sas_task *task) | ||
1618 | { | ||
1619 | struct scsi_lun lun; | ||
1620 | struct mvs_tmf_task tmf_task; | ||
1621 | struct domain_device *dev = task->dev; | ||
1622 | struct mvs_info *mvi = mvs_find_dev_mvi(dev); | ||
1623 | int rc = TMF_RESP_FUNC_FAILED; | ||
1624 | unsigned long flags; | ||
1625 | u32 tag; | ||
1626 | if (mvi->exp_req) | ||
1627 | mvi->exp_req--; | ||
1628 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1629 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | ||
1630 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1631 | rc = TMF_RESP_FUNC_COMPLETE; | ||
1632 | goto out; | ||
1633 | } | ||
1634 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1635 | if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { | ||
1636 | struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; | ||
1637 | |||
1638 | int_to_scsilun(cmnd->device->lun, &lun); | ||
1639 | rc = mvs_find_tag(mvi, task, &tag); | ||
1640 | if (rc == 0) { | ||
1641 | mv_printk("No such tag in %s\n", __func__); | ||
1642 | rc = TMF_RESP_FUNC_FAILED; | ||
1643 | return rc; | ||
1644 | } | ||
1645 | |||
1646 | tmf_task.tmf = TMF_ABORT_TASK; | ||
1647 | tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); | ||
1648 | |||
1649 | rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); | ||
1650 | |||
1651 | /* if successful, clear the task and callback forwards.*/ | ||
1652 | if (rc == TMF_RESP_FUNC_COMPLETE) { | ||
1653 | u32 slot_no; | ||
1654 | struct mvs_slot_info *slot; | ||
1655 | struct mvs_info *mvi = mvs_find_dev_mvi(dev); | ||
1656 | |||
1657 | if (task->lldd_task) { | ||
1658 | slot = (struct mvs_slot_info *)task->lldd_task; | ||
1659 | slot_no = (u32) (slot - mvi->slot_info); | ||
1660 | mvs_slot_complete(mvi, slot_no, 1); | ||
1661 | } | ||
1662 | } | ||
1663 | } else if (task->task_proto & SAS_PROTOCOL_SATA || | ||
1664 | task->task_proto & SAS_PROTOCOL_STP) { | ||
1665 | /* to do free register_set */ | ||
1666 | } else { | ||
1667 | /* SMP */ | ||
1668 | |||
1669 | } | ||
1670 | out: | ||
1671 | if (rc != TMF_RESP_FUNC_COMPLETE) | ||
1672 | mv_printk("%s:rc= %d\n", __func__, rc); | ||
1673 | return rc; | ||
1674 | } | ||
1675 | |||
1676 | int mvs_abort_task_set(struct domain_device *dev, u8 *lun) | ||
1677 | { | ||
1678 | int rc = TMF_RESP_FUNC_FAILED; | ||
1679 | struct mvs_tmf_task tmf_task; | ||
1680 | |||
1681 | tmf_task.tmf = TMF_ABORT_TASK_SET; | ||
1682 | rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); | ||
1683 | |||
1684 | return rc; | ||
1685 | } | ||
1686 | |||
1687 | int mvs_clear_aca(struct domain_device *dev, u8 *lun) | ||
1688 | { | ||
1689 | int rc = TMF_RESP_FUNC_FAILED; | ||
1690 | struct mvs_tmf_task tmf_task; | ||
1691 | |||
1692 | tmf_task.tmf = TMF_CLEAR_ACA; | ||
1693 | rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); | ||
1694 | |||
1695 | return rc; | ||
1696 | } | ||
1697 | |||
1698 | int mvs_clear_task_set(struct domain_device *dev, u8 *lun) | ||
1699 | { | ||
1700 | int rc = TMF_RESP_FUNC_FAILED; | ||
1701 | struct mvs_tmf_task tmf_task; | ||
1702 | |||
1703 | tmf_task.tmf = TMF_CLEAR_TASK_SET; | ||
1704 | rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); | ||
1705 | |||
1706 | return rc; | ||
1086 | } | 1707 | } |
1087 | 1708 | ||
1088 | static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, | 1709 | static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, |
1089 | u32 slot_idx, int err) | 1710 | u32 slot_idx, int err) |
1090 | { | 1711 | { |
1091 | struct mvs_port *port = mvi->slot_info[slot_idx].port; | 1712 | struct mvs_device *mvi_dev = (struct mvs_device *)task->dev->lldd_dev; |
1092 | struct task_status_struct *tstat = &task->task_status; | 1713 | struct task_status_struct *tstat = &task->task_status; |
1093 | struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; | 1714 | struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; |
1094 | int stat = SAM_GOOD; | 1715 | int stat = SAM_GOOD; |
1095 | 1716 | ||
1717 | |||
1096 | resp->frame_len = sizeof(struct dev_to_host_fis); | 1718 | resp->frame_len = sizeof(struct dev_to_host_fis); |
1097 | memcpy(&resp->ending_fis[0], | 1719 | memcpy(&resp->ending_fis[0], |
1098 | SATA_RECEIVED_D2H_FIS(port->taskfileset), | 1720 | SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), |
1099 | sizeof(struct dev_to_host_fis)); | 1721 | sizeof(struct dev_to_host_fis)); |
1100 | tstat->buf_valid_size = sizeof(*resp); | 1722 | tstat->buf_valid_size = sizeof(*resp); |
1101 | if (unlikely(err)) | 1723 | if (unlikely(err)) |
@@ -1107,75 +1729,104 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, | |||
1107 | u32 slot_idx) | 1729 | u32 slot_idx) |
1108 | { | 1730 | { |
1109 | struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; | 1731 | struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; |
1732 | int stat; | ||
1110 | u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); | 1733 | u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); |
1111 | u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); | 1734 | u32 tfs = 0; |
1112 | int stat = SAM_CHECK_COND; | 1735 | enum mvs_port_type type = PORT_TYPE_SAS; |
1113 | 1736 | ||
1114 | if (err_dw1 & SLOT_BSY_ERR) { | 1737 | if (err_dw0 & CMD_ISS_STPD) |
1115 | stat = SAS_QUEUE_FULL; | 1738 | MVS_CHIP_DISP->issue_stop(mvi, type, tfs); |
1116 | mvs_slot_reset(mvi, task, slot_idx); | 1739 | |
1117 | } | 1740 | MVS_CHIP_DISP->command_active(mvi, slot_idx); |
1741 | |||
1742 | stat = SAM_CHECK_COND; | ||
1118 | switch (task->task_proto) { | 1743 | switch (task->task_proto) { |
1119 | case SAS_PROTOCOL_SSP: | 1744 | case SAS_PROTOCOL_SSP: |
1745 | stat = SAS_ABORTED_TASK; | ||
1120 | break; | 1746 | break; |
1121 | case SAS_PROTOCOL_SMP: | 1747 | case SAS_PROTOCOL_SMP: |
1748 | stat = SAM_CHECK_COND; | ||
1122 | break; | 1749 | break; |
1750 | |||
1123 | case SAS_PROTOCOL_SATA: | 1751 | case SAS_PROTOCOL_SATA: |
1124 | case SAS_PROTOCOL_STP: | 1752 | case SAS_PROTOCOL_STP: |
1125 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: | 1753 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: |
1126 | if (err_dw0 & TFILE_ERR) | 1754 | { |
1127 | stat = mvs_sata_done(mvi, task, slot_idx, 1); | 1755 | if (err_dw0 == 0x80400002) |
1756 | mv_printk("find reserved error, why?\n"); | ||
1757 | |||
1758 | task->ata_task.use_ncq = 0; | ||
1759 | stat = SAS_PROTO_RESPONSE; | ||
1760 | mvs_sata_done(mvi, task, slot_idx, 1); | ||
1761 | |||
1762 | } | ||
1128 | break; | 1763 | break; |
1129 | default: | 1764 | default: |
1130 | break; | 1765 | break; |
1131 | } | 1766 | } |
1132 | 1767 | ||
1133 | mvs_hexdump(16, (u8 *) slot->response, 0); | ||
1134 | return stat; | 1768 | return stat; |
1135 | } | 1769 | } |
1136 | 1770 | ||
1137 | static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) | 1771 | int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) |
1138 | { | 1772 | { |
1139 | u32 slot_idx = rx_desc & RXQ_SLOT_MASK; | 1773 | u32 slot_idx = rx_desc & RXQ_SLOT_MASK; |
1140 | struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; | 1774 | struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; |
1141 | struct sas_task *task = slot->task; | 1775 | struct sas_task *task = slot->task; |
1776 | struct mvs_device *mvi_dev = NULL; | ||
1142 | struct task_status_struct *tstat; | 1777 | struct task_status_struct *tstat; |
1143 | struct mvs_port *port; | 1778 | |
1144 | bool aborted; | 1779 | bool aborted; |
1145 | void *to; | 1780 | void *to; |
1781 | enum exec_status sts; | ||
1146 | 1782 | ||
1783 | if (mvi->exp_req) | ||
1784 | mvi->exp_req--; | ||
1147 | if (unlikely(!task || !task->lldd_task)) | 1785 | if (unlikely(!task || !task->lldd_task)) |
1148 | return -1; | 1786 | return -1; |
1149 | 1787 | ||
1788 | tstat = &task->task_status; | ||
1789 | mvi_dev = (struct mvs_device *)task->dev->lldd_dev; | ||
1790 | |||
1150 | mvs_hba_cq_dump(mvi); | 1791 | mvs_hba_cq_dump(mvi); |
1151 | 1792 | ||
1152 | spin_lock(&task->task_state_lock); | 1793 | spin_lock(&task->task_state_lock); |
1794 | task->task_state_flags &= | ||
1795 | ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); | ||
1796 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
1797 | /* race condition*/ | ||
1153 | aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; | 1798 | aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; |
1154 | if (!aborted) { | ||
1155 | task->task_state_flags &= | ||
1156 | ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); | ||
1157 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
1158 | } | ||
1159 | spin_unlock(&task->task_state_lock); | 1799 | spin_unlock(&task->task_state_lock); |
1160 | 1800 | ||
1161 | if (aborted) { | 1801 | memset(tstat, 0, sizeof(*tstat)); |
1802 | tstat->resp = SAS_TASK_COMPLETE; | ||
1803 | |||
1804 | if (unlikely(aborted)) { | ||
1805 | tstat->stat = SAS_ABORTED_TASK; | ||
1806 | if (mvi_dev) | ||
1807 | mvi_dev->runing_req--; | ||
1808 | if (sas_protocol_ata(task->task_proto)) | ||
1809 | mvs_free_reg_set(mvi, mvi_dev); | ||
1810 | |||
1162 | mvs_slot_task_free(mvi, task, slot, slot_idx); | 1811 | mvs_slot_task_free(mvi, task, slot, slot_idx); |
1163 | mvs_slot_free(mvi, rx_desc); | ||
1164 | return -1; | 1812 | return -1; |
1165 | } | 1813 | } |
1166 | 1814 | ||
1167 | port = slot->port; | 1815 | if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) { |
1168 | tstat = &task->task_status; | 1816 | mv_dprintk("port has not device.\n"); |
1169 | memset(tstat, 0, sizeof(*tstat)); | 1817 | tstat->stat = SAS_PHY_DOWN; |
1170 | tstat->resp = SAS_TASK_COMPLETE; | ||
1171 | |||
1172 | if (unlikely(!port->port_attached || flags)) { | ||
1173 | mvs_slot_err(mvi, task, slot_idx); | ||
1174 | if (!sas_protocol_ata(task->task_proto)) | ||
1175 | tstat->stat = SAS_PHY_DOWN; | ||
1176 | goto out; | 1818 | goto out; |
1177 | } | 1819 | } |
1178 | 1820 | ||
1821 | /* | ||
1822 | if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) { | ||
1823 | mv_dprintk("Find device[%016llx] RXQ_ERR %X, | ||
1824 | err info:%016llx\n", | ||
1825 | SAS_ADDR(task->dev->sas_addr), | ||
1826 | rx_desc, (u64)(*(u64 *) slot->response)); | ||
1827 | } | ||
1828 | */ | ||
1829 | |||
1179 | /* error info record present */ | 1830 | /* error info record present */ |
1180 | if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { | 1831 | if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { |
1181 | tstat->stat = mvs_slot_err(mvi, task, slot_idx); | 1832 | tstat->stat = mvs_slot_err(mvi, task, slot_idx); |
@@ -1191,13 +1842,10 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) | |||
1191 | } | 1842 | } |
1192 | /* response frame present */ | 1843 | /* response frame present */ |
1193 | else if (rx_desc & RXQ_RSP) { | 1844 | else if (rx_desc & RXQ_RSP) { |
1194 | struct ssp_response_iu *iu = | 1845 | struct ssp_response_iu *iu = slot->response + |
1195 | slot->response + sizeof(struct mvs_err_info); | 1846 | sizeof(struct mvs_err_info); |
1196 | sas_ssp_task_response(&mvi->pdev->dev, task, iu); | 1847 | sas_ssp_task_response(mvi->dev, task, iu); |
1197 | } | 1848 | } else |
1198 | |||
1199 | /* should never happen? */ | ||
1200 | else | ||
1201 | tstat->stat = SAM_CHECK_COND; | 1849 | tstat->stat = SAM_CHECK_COND; |
1202 | break; | 1850 | break; |
1203 | 1851 | ||
@@ -1225,105 +1873,245 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) | |||
1225 | } | 1873 | } |
1226 | 1874 | ||
1227 | out: | 1875 | out: |
1876 | if (mvi_dev) | ||
1877 | mvi_dev->runing_req--; | ||
1878 | if (sas_protocol_ata(task->task_proto)) | ||
1879 | mvs_free_reg_set(mvi, mvi_dev); | ||
1880 | |||
1228 | mvs_slot_task_free(mvi, task, slot, slot_idx); | 1881 | mvs_slot_task_free(mvi, task, slot, slot_idx); |
1229 | if (unlikely(tstat->stat != SAS_QUEUE_FULL)) | 1882 | sts = tstat->stat; |
1230 | mvs_slot_free(mvi, rx_desc); | ||
1231 | 1883 | ||
1232 | spin_unlock(&mvi->lock); | 1884 | spin_unlock(&mvi->lock); |
1233 | task->task_done(task); | 1885 | if (task->task_done) |
1886 | task->task_done(task); | ||
1887 | else | ||
1888 | mv_dprintk("task has no task_done callback.\n"); | ||
1234 | spin_lock(&mvi->lock); | 1889 | spin_lock(&mvi->lock); |
1235 | return tstat->stat; | 1890 | |
1891 | return sts; | ||
1236 | } | 1892 | } |
1237 | 1893 | ||
1238 | static void mvs_release_task(struct mvs_info *mvi, int phy_no) | 1894 | void mvs_release_task(struct mvs_info *mvi, |
1895 | int phy_no, struct domain_device *dev) | ||
1239 | { | 1896 | { |
1240 | struct list_head *pos, *n; | 1897 | int i = 0; u32 slot_idx; |
1241 | struct mvs_slot_info *slot; | 1898 | struct mvs_phy *phy; |
1242 | struct mvs_phy *phy = &mvi->phy[phy_no]; | 1899 | struct mvs_port *port; |
1243 | struct mvs_port *port = phy->port; | 1900 | struct mvs_slot_info *slot, *slot2; |
1244 | u32 rx_desc; | ||
1245 | 1901 | ||
1902 | phy = &mvi->phy[phy_no]; | ||
1903 | port = phy->port; | ||
1246 | if (!port) | 1904 | if (!port) |
1247 | return; | 1905 | return; |
1248 | 1906 | ||
1249 | list_for_each_safe(pos, n, &port->list) { | 1907 | list_for_each_entry_safe(slot, slot2, &port->list, entry) { |
1250 | slot = container_of(pos, struct mvs_slot_info, list); | 1908 | struct sas_task *task; |
1251 | rx_desc = (u32) (slot - mvi->slot_info); | 1909 | slot_idx = (u32) (slot - mvi->slot_info); |
1252 | mvs_slot_complete(mvi, rx_desc, 1); | 1910 | task = slot->task; |
1911 | |||
1912 | if (dev && task->dev != dev) | ||
1913 | continue; | ||
1914 | |||
1915 | mv_printk("Release slot [%x] tag[%x], task [%p]:\n", | ||
1916 | slot_idx, slot->slot_tag, task); | ||
1917 | |||
1918 | if (task->task_proto & SAS_PROTOCOL_SSP) { | ||
1919 | mv_printk("attached with SSP task CDB["); | ||
1920 | for (i = 0; i < 16; i++) | ||
1921 | mv_printk(" %02x", task->ssp_task.cdb[i]); | ||
1922 | mv_printk(" ]\n"); | ||
1923 | } | ||
1924 | |||
1925 | mvs_slot_complete(mvi, slot_idx, 1); | ||
1926 | } | ||
1927 | } | ||
1928 | |||
1929 | static void mvs_phy_disconnected(struct mvs_phy *phy) | ||
1930 | { | ||
1931 | phy->phy_attached = 0; | ||
1932 | phy->att_dev_info = 0; | ||
1933 | phy->att_dev_sas_addr = 0; | ||
1934 | } | ||
1935 | |||
1936 | static void mvs_work_queue(struct work_struct *work) | ||
1937 | { | ||
1938 | struct delayed_work *dw = container_of(work, struct delayed_work, work); | ||
1939 | struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); | ||
1940 | struct mvs_info *mvi = mwq->mvi; | ||
1941 | unsigned long flags; | ||
1942 | |||
1943 | spin_lock_irqsave(&mvi->lock, flags); | ||
1944 | if (mwq->handler & PHY_PLUG_EVENT) { | ||
1945 | u32 phy_no = (unsigned long) mwq->data; | ||
1946 | struct sas_ha_struct *sas_ha = mvi->sas; | ||
1947 | struct mvs_phy *phy = &mvi->phy[phy_no]; | ||
1948 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | ||
1949 | |||
1950 | if (phy->phy_event & PHY_PLUG_OUT) { | ||
1951 | u32 tmp; | ||
1952 | struct sas_identify_frame *id; | ||
1953 | id = (struct sas_identify_frame *)phy->frame_rcvd; | ||
1954 | tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); | ||
1955 | phy->phy_event &= ~PHY_PLUG_OUT; | ||
1956 | if (!(tmp & PHY_READY_MASK)) { | ||
1957 | sas_phy_disconnected(sas_phy); | ||
1958 | mvs_phy_disconnected(phy); | ||
1959 | sas_ha->notify_phy_event(sas_phy, | ||
1960 | PHYE_LOSS_OF_SIGNAL); | ||
1961 | mv_dprintk("phy%d Removed Device\n", phy_no); | ||
1962 | } else { | ||
1963 | MVS_CHIP_DISP->detect_porttype(mvi, phy_no); | ||
1964 | mvs_update_phyinfo(mvi, phy_no, 1); | ||
1965 | mvs_bytes_dmaed(mvi, phy_no); | ||
1966 | mvs_port_notify_formed(sas_phy, 0); | ||
1967 | mv_dprintk("phy%d Attached Device\n", phy_no); | ||
1968 | } | ||
1969 | } | ||
1970 | } | ||
1971 | list_del(&mwq->entry); | ||
1972 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1973 | kfree(mwq); | ||
1974 | } | ||
1975 | |||
1976 | static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) | ||
1977 | { | ||
1978 | struct mvs_wq *mwq; | ||
1979 | int ret = 0; | ||
1980 | |||
1981 | mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC); | ||
1982 | if (mwq) { | ||
1983 | mwq->mvi = mvi; | ||
1984 | mwq->data = data; | ||
1985 | mwq->handler = handler; | ||
1986 | MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); | ||
1987 | list_add_tail(&mwq->entry, &mvi->wq_list); | ||
1988 | schedule_delayed_work(&mwq->work_q, HZ * 2); | ||
1989 | } else | ||
1990 | ret = -ENOMEM; | ||
1991 | |||
1992 | return ret; | ||
1993 | } | ||
1994 | |||
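[Editor's note] Hot-plug handling is deliberately taken out of the interrupt path: mvs_int_port() below marks the phy PHY_PLUG_OUT and queues a struct mvs_wq via mvs_handle_event(); the 2-second delayed work in mvs_work_queue() then re-reads the phy state and notifies libsas. A minimal sketch of queuing such an event, mirroring the call made from the PHYEV_POOF branch further down (function name hypothetical, illustrative only):

	/* Illustrative only: defer plug handling for phy_no to the work queue. */
	static int mvs_queue_plug_event_sketch(struct mvs_info *mvi, int phy_no)
	{
		mvi->phy[phy_no].phy_event |= PHY_PLUG_OUT;
		return mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
					PHY_PLUG_EVENT);
	}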
1995 | static void mvs_sig_time_out(unsigned long tphy) | ||
1996 | { | ||
1997 | struct mvs_phy *phy = (struct mvs_phy *)tphy; | ||
1998 | struct mvs_info *mvi = phy->mvi; | ||
1999 | u8 phy_no; | ||
2000 | |||
2001 | for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { | ||
2002 | if (&mvi->phy[phy_no] == phy) { | ||
2003 | mv_dprintk("signature FIS timed out, resetting phy %d\n", | ||
2004 | phy_no+mvi->id*mvi->chip->n_phy); | ||
2005 | MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); | ||
2006 | } | ||
1253 | } | 2007 | } |
1254 | } | 2008 | } |
1255 | 2009 | ||
1256 | static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) | 2010 | static void mvs_sig_remove_timer(struct mvs_phy *phy) |
1257 | { | 2011 | { |
1258 | struct pci_dev *pdev = mvi->pdev; | 2012 | if (phy->timer.function) |
1259 | struct sas_ha_struct *sas_ha = &mvi->sas; | 2013 | del_timer(&phy->timer); |
2014 | phy->timer.function = NULL; | ||
2015 | } | ||
2016 | |||
2017 | void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) | ||
2018 | { | ||
2019 | u32 tmp; | ||
2020 | struct sas_ha_struct *sas_ha = mvi->sas; | ||
1260 | struct mvs_phy *phy = &mvi->phy[phy_no]; | 2021 | struct mvs_phy *phy = &mvi->phy[phy_no]; |
1261 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | 2022 | struct asd_sas_phy *sas_phy = &phy->sas_phy; |
1262 | 2023 | ||
1263 | phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); | 2024 | phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); |
2025 | mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, | ||
2026 | MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); | ||
2027 | mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, | ||
2028 | phy->irq_status); | ||
2029 | |||
1264 | /* | 2030 | /* |
1265 | * events is port event now , | 2031 | * events is port event now , |
1266 | * we need check the interrupt status which belongs to per port. | 2032 | * we need check the interrupt status which belongs to per port. |
1267 | */ | 2033 | */ |
1268 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1269 | "Port %d Event = %X\n", | ||
1270 | phy_no, phy->irq_status); | ||
1271 | |||
1272 | if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { | ||
1273 | mvs_release_task(mvi, phy_no); | ||
1274 | if (!mvs_is_phy_ready(mvi, phy_no)) { | ||
1275 | sas_phy_disconnected(sas_phy); | ||
1276 | sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); | ||
1277 | dev_printk(KERN_INFO, &pdev->dev, | ||
1278 | "Port %d Unplug Notice\n", phy_no); | ||
1279 | 2034 | ||
1280 | } else | 2035 | if (phy->irq_status & PHYEV_DCDR_ERR) |
1281 | mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); | 2036 | mv_dprintk("port %d STP decoding error.\n", |
1282 | } | 2037 | phy_no+mvi->id*mvi->chip->n_phy); |
1283 | if (!(phy->irq_status & PHYEV_DEC_ERR)) { | 2038 | |
1284 | if (phy->irq_status & PHYEV_COMWAKE) { | 2039 | if (phy->irq_status & PHYEV_POOF) { |
1285 | u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); | 2040 | if (!(phy->phy_event & PHY_PLUG_OUT)) { |
1286 | mvs_write_port_irq_mask(mvi, phy_no, | 2041 | int dev_sata = phy->phy_type & PORT_TYPE_SATA; |
1287 | tmp | PHYEV_SIG_FIS); | 2042 | int ready; |
2043 | mvs_release_task(mvi, phy_no, NULL); | ||
2044 | phy->phy_event |= PHY_PLUG_OUT; | ||
2045 | mvs_handle_event(mvi, | ||
2046 | (void *)(unsigned long)phy_no, | ||
2047 | PHY_PLUG_EVENT); | ||
2048 | ready = mvs_is_phy_ready(mvi, phy_no); | ||
2049 | if (!ready) | ||
2050 | mv_dprintk("phy%d Unplug Notice\n", | ||
2051 | phy_no + | ||
2052 | mvi->id * mvi->chip->n_phy); | ||
2053 | if (ready || dev_sata) { | ||
2054 | if (MVS_CHIP_DISP->stp_reset) | ||
2055 | MVS_CHIP_DISP->stp_reset(mvi, | ||
2056 | phy_no); | ||
2057 | else | ||
2058 | MVS_CHIP_DISP->phy_reset(mvi, | ||
2059 | phy_no, 0); | ||
2060 | return; | ||
2061 | } | ||
1288 | } | 2062 | } |
1289 | if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { | 2063 | } |
1290 | phy->phy_status = mvs_is_phy_ready(mvi, phy_no); | ||
1291 | if (phy->phy_status) { | ||
1292 | mvs_detect_porttype(mvi, phy_no); | ||
1293 | |||
1294 | if (phy->phy_type & PORT_TYPE_SATA) { | ||
1295 | u32 tmp = mvs_read_port_irq_mask(mvi, | ||
1296 | phy_no); | ||
1297 | tmp &= ~PHYEV_SIG_FIS; | ||
1298 | mvs_write_port_irq_mask(mvi, | ||
1299 | phy_no, tmp); | ||
1300 | } | ||
1301 | 2064 | ||
1302 | mvs_update_phyinfo(mvi, phy_no, 0); | 2065 | if (phy->irq_status & PHYEV_COMWAKE) { |
1303 | sas_ha->notify_phy_event(sas_phy, | 2066 | tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); |
1304 | PHYE_OOB_DONE); | 2067 | MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, |
1305 | mvs_bytes_dmaed(mvi, phy_no); | 2068 | tmp | PHYEV_SIG_FIS); |
1306 | } else { | 2069 | if (phy->timer.function == NULL) { |
1307 | dev_printk(KERN_DEBUG, &pdev->dev, | 2070 | phy->timer.data = (unsigned long)phy; |
1308 | "plugin interrupt but phy is gone\n"); | 2071 | phy->timer.function = mvs_sig_time_out; |
1309 | mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, | 2072 | phy->timer.expires = jiffies + 10*HZ; |
1310 | NULL); | 2073 | add_timer(&phy->timer); |
2074 | } | ||
2075 | } | ||
2076 | if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { | ||
2077 | phy->phy_status = mvs_is_phy_ready(mvi, phy_no); | ||
2078 | mvs_sig_remove_timer(phy); | ||
2079 | mv_dprintk("notify plug in on phy[%d]\n", phy_no); | ||
2080 | if (phy->phy_status) { | ||
2081 | mdelay(10); | ||
2082 | MVS_CHIP_DISP->detect_porttype(mvi, phy_no); | ||
2083 | if (phy->phy_type & PORT_TYPE_SATA) { | ||
2084 | tmp = MVS_CHIP_DISP->read_port_irq_mask( | ||
2085 | mvi, phy_no); | ||
2086 | tmp &= ~PHYEV_SIG_FIS; | ||
2087 | MVS_CHIP_DISP->write_port_irq_mask(mvi, | ||
2088 | phy_no, tmp); | ||
2089 | } | ||
2090 | mvs_update_phyinfo(mvi, phy_no, 0); | ||
2091 | mvs_bytes_dmaed(mvi, phy_no); | ||
2092 | /* whether driver is going to handle hot plug */ | ||
2093 | if (phy->phy_event & PHY_PLUG_OUT) { | ||
2094 | mvs_port_notify_formed(sas_phy, 0); | ||
2095 | phy->phy_event &= ~PHY_PLUG_OUT; | ||
1311 | } | 2096 | } |
1312 | } else if (phy->irq_status & PHYEV_BROAD_CH) { | 2097 | } else { |
1313 | mvs_release_task(mvi, phy_no); | 2098 | mv_dprintk("plugin interrupt but phy%d is gone\n", |
1314 | sas_ha->notify_port_event(sas_phy, | 2099 | phy_no + mvi->id*mvi->chip->n_phy); |
1315 | PORTE_BROADCAST_RCVD); | ||
1316 | } | 2100 | } |
2101 | } else if (phy->irq_status & PHYEV_BROAD_CH) { | ||
2102 | mv_dprintk("port %d broadcast change.\n", | ||
2103 | phy_no + mvi->id*mvi->chip->n_phy); | ||
2104 | /* exception for Samsung disk drive */ | ||
2105 | mdelay(1000); | ||
2106 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | ||
1317 | } | 2107 | } |
1318 | mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); | 2108 | MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); |
1319 | } | 2109 | } |
1320 | 2110 | ||
1321 | static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) | 2111 | int mvs_int_rx(struct mvs_info *mvi, bool self_clear) |
1322 | { | 2112 | { |
1323 | void __iomem *regs = mvi->regs; | ||
1324 | u32 rx_prod_idx, rx_desc; | 2113 | u32 rx_prod_idx, rx_desc; |
1325 | bool attn = false; | 2114 | bool attn = false; |
1326 | struct pci_dev *pdev = mvi->pdev; | ||
1327 | 2115 | ||
1328 | /* the first dword in the RX ring is special: it contains | 2116 | /* the first dword in the RX ring is special: it contains |
1329 | * a mirror of the hardware's RX producer index, so that | 2117 | * a mirror of the hardware's RX producer index, so that |
@@ -1339,480 +2127,31 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) | |||
1339 | * note: if coalescing is enabled, | 2127 | * note: if coalescing is enabled, |
1340 | * it will need to read from register every time for sure | 2128 | * it will need to read from register every time for sure |
1341 | */ | 2129 | */ |
1342 | if (mvi->rx_cons == rx_prod_idx) | 2130 | if (unlikely(mvi->rx_cons == rx_prod_idx)) |
1343 | mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; | 2131 | mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; |
1344 | 2132 | ||
1345 | if (mvi->rx_cons == rx_prod_idx) | 2133 | if (mvi->rx_cons == rx_prod_idx) |
1346 | return 0; | 2134 | return 0; |
1347 | 2135 | ||
1348 | while (mvi->rx_cons != rx_prod_idx) { | 2136 | while (mvi->rx_cons != rx_prod_idx) { |
1349 | |||
1350 | /* increment our internal RX consumer pointer */ | 2137 | /* increment our internal RX consumer pointer */ |
1351 | rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); | 2138 | rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); |
1352 | |||
1353 | rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); | 2139 | rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); |
1354 | 2140 | ||
1355 | if (likely(rx_desc & RXQ_DONE)) | 2141 | if (likely(rx_desc & RXQ_DONE)) |
1356 | mvs_slot_complete(mvi, rx_desc, 0); | 2142 | mvs_slot_complete(mvi, rx_desc, 0); |
1357 | if (rx_desc & RXQ_ATTN) { | 2143 | if (rx_desc & RXQ_ATTN) { |
1358 | attn = true; | 2144 | attn = true; |
1359 | dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", | ||
1360 | rx_desc); | ||
1361 | } else if (rx_desc & RXQ_ERR) { | 2145 | } else if (rx_desc & RXQ_ERR) { |
1362 | if (!(rx_desc & RXQ_DONE)) | 2146 | if (!(rx_desc & RXQ_DONE)) |
1363 | mvs_slot_complete(mvi, rx_desc, 0); | 2147 | mvs_slot_complete(mvi, rx_desc, 0); |
1364 | dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", | ||
1365 | rx_desc); | ||
1366 | } else if (rx_desc & RXQ_SLOT_RESET) { | 2148 | } else if (rx_desc & RXQ_SLOT_RESET) { |
1367 | dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", | ||
1368 | rx_desc); | ||
1369 | mvs_slot_free(mvi, rx_desc); | 2149 | mvs_slot_free(mvi, rx_desc); |
1370 | } | 2150 | } |
1371 | } | 2151 | } |
1372 | 2152 | ||
1373 | if (attn && self_clear) | 2153 | if (attn && self_clear) |
1374 | mvs_int_full(mvi); | 2154 | MVS_CHIP_DISP->int_full(mvi); |
1375 | |||
1376 | return 0; | 2155 | return 0; |
1377 | } | 2156 | } |
1378 | 2157 | ||
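[Editor's note] The comment at the top of mvs_int_rx() describes the ring convention: rx[0] mirrors the hardware's RX producer index and the descriptors themselves start at rx[1], so draining the queue is just walking a wrapped consumer index up to the mirrored producer. A simplified restatement of that loop (the function above keeps its two indices the other way round; function name hypothetical, illustrative only):

	static void mvs_drain_rx_sketch(struct mvs_info *mvi)
	{
		u32 prod = le32_to_cpu(mvi->rx[0]) & RX_RING_SZ_MASK;

		while (mvi->rx_cons != prod) {
			u32 desc;

			/* advance the consumer, ring size is a power of two */
			mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);
			desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);
			if (desc & RXQ_DONE)
				mvs_slot_complete(mvi, desc, 0);
		}
	}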
1379 | #ifndef MVS_DISABLE_NVRAM | ||
1380 | static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) | ||
1381 | { | ||
1382 | int timeout = 1000; | ||
1383 | |||
1384 | if (addr & ~SPI_ADDR_MASK) | ||
1385 | return -EINVAL; | ||
1386 | |||
1387 | writel(addr, regs + SPI_CMD); | ||
1388 | writel(TWSI_RD, regs + SPI_CTL); | ||
1389 | |||
1390 | while (timeout-- > 0) { | ||
1391 | if (readl(regs + SPI_CTL) & TWSI_RDY) { | ||
1392 | *data = readl(regs + SPI_DATA); | ||
1393 | return 0; | ||
1394 | } | ||
1395 | |||
1396 | udelay(10); | ||
1397 | } | ||
1398 | |||
1399 | return -EBUSY; | ||
1400 | } | ||
1401 | |||
1402 | static int mvs_eep_read_buf(void __iomem *regs, u32 addr, | ||
1403 | void *buf, u32 buflen) | ||
1404 | { | ||
1405 | u32 addr_end, tmp_addr, i, j; | ||
1406 | u32 tmp = 0; | ||
1407 | int rc; | ||
1408 | u8 *tmp8, *buf8 = buf; | ||
1409 | |||
1410 | addr_end = addr + buflen; | ||
1411 | tmp_addr = ALIGN(addr, 4); | ||
1412 | if (addr > 0xff) | ||
1413 | return -EINVAL; | ||
1414 | |||
1415 | j = addr & 0x3; | ||
1416 | if (j) { | ||
1417 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
1418 | if (rc) | ||
1419 | return rc; | ||
1420 | |||
1421 | tmp8 = (u8 *)&tmp; | ||
1422 | for (i = j; i < 4; i++) | ||
1423 | *buf8++ = tmp8[i]; | ||
1424 | |||
1425 | tmp_addr += 4; | ||
1426 | } | ||
1427 | |||
1428 | for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { | ||
1429 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
1430 | if (rc) | ||
1431 | return rc; | ||
1432 | |||
1433 | memcpy(buf8, &tmp, 4); | ||
1434 | buf8 += 4; | ||
1435 | } | ||
1436 | |||
1437 | if (tmp_addr < addr_end) { | ||
1438 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
1439 | if (rc) | ||
1440 | return rc; | ||
1441 | |||
1442 | tmp8 = (u8 *)&tmp; | ||
1443 | j = addr_end - tmp_addr; | ||
1444 | for (i = 0; i < j; i++) | ||
1445 | *buf8++ = tmp8[i]; | ||
1446 | |||
1447 | tmp_addr += 4; | ||
1448 | } | ||
1449 | |||
1450 | return 0; | ||
1451 | } | ||
1452 | #endif | ||
1453 | |||
1454 | int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen) | ||
1455 | { | ||
1456 | #ifndef MVS_DISABLE_NVRAM | ||
1457 | void __iomem *regs = mvi->regs; | ||
1458 | int rc, i; | ||
1459 | u32 sum; | ||
1460 | u8 hdr[2], *tmp; | ||
1461 | const char *msg; | ||
1462 | |||
1463 | rc = mvs_eep_read_buf(regs, addr, &hdr, 2); | ||
1464 | if (rc) { | ||
1465 | msg = "nvram hdr read failed"; | ||
1466 | goto err_out; | ||
1467 | } | ||
1468 | rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); | ||
1469 | if (rc) { | ||
1470 | msg = "nvram read failed"; | ||
1471 | goto err_out; | ||
1472 | } | ||
1473 | |||
1474 | if (hdr[0] != 0x5A) { | ||
1475 | /* entry id */ | ||
1476 | msg = "invalid nvram entry id"; | ||
1477 | rc = -ENOENT; | ||
1478 | goto err_out; | ||
1479 | } | ||
1480 | |||
1481 | tmp = buf; | ||
1482 | sum = ((u32)hdr[0]) + ((u32)hdr[1]); | ||
1483 | for (i = 0; i < buflen; i++) | ||
1484 | sum += ((u32)tmp[i]); | ||
1485 | |||
1486 | if (sum) { | ||
1487 | msg = "nvram checksum failure"; | ||
1488 | rc = -EILSEQ; | ||
1489 | goto err_out; | ||
1490 | } | ||
1491 | |||
1492 | return 0; | ||
1493 | |||
1494 | err_out: | ||
1495 | dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); | ||
1496 | return rc; | ||
1497 | #else | ||
1498 | /* FIXME , For SAS target mode */ | ||
1499 | memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); | ||
1500 | return 0; | ||
1501 | #endif | ||
1502 | } | ||
1503 | |||
1504 | static void mvs_int_sata(struct mvs_info *mvi) | ||
1505 | { | ||
1506 | u32 tmp; | ||
1507 | void __iomem *regs = mvi->regs; | ||
1508 | tmp = mr32(INT_STAT_SRS); | ||
1509 | mw32(INT_STAT_SRS, tmp & 0xFFFF); | ||
1510 | } | ||
1511 | |||
1512 | static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, | ||
1513 | u32 slot_idx) | ||
1514 | { | ||
1515 | void __iomem *regs = mvi->regs; | ||
1516 | struct domain_device *dev = task->dev; | ||
1517 | struct asd_sas_port *sas_port = dev->port; | ||
1518 | struct mvs_port *port = mvi->slot_info[slot_idx].port; | ||
1519 | u32 reg_set, phy_mask; | ||
1520 | |||
1521 | if (!sas_protocol_ata(task->task_proto)) { | ||
1522 | reg_set = 0; | ||
1523 | phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap : | ||
1524 | sas_port->phy_mask; | ||
1525 | } else { | ||
1526 | reg_set = port->taskfileset; | ||
1527 | phy_mask = sas_port->phy_mask; | ||
1528 | } | ||
1529 | mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | | ||
1530 | (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | | ||
1531 | (phy_mask << TXQ_PHY_SHIFT) | | ||
1532 | (reg_set << TXQ_SRS_SHIFT)); | ||
1533 | |||
1534 | mw32(TX_PROD_IDX, mvi->tx_prod); | ||
1535 | mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); | ||
1536 | } | ||
1537 | |||
1538 | void mvs_int_full(struct mvs_info *mvi) | ||
1539 | { | ||
1540 | void __iomem *regs = mvi->regs; | ||
1541 | u32 tmp, stat; | ||
1542 | int i; | ||
1543 | |||
1544 | stat = mr32(INT_STAT); | ||
1545 | |||
1546 | mvs_int_rx(mvi, false); | ||
1547 | |||
1548 | for (i = 0; i < MVS_MAX_PORTS; i++) { | ||
1549 | tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); | ||
1550 | if (tmp) | ||
1551 | mvs_int_port(mvi, i, tmp); | ||
1552 | } | ||
1553 | |||
1554 | if (stat & CINT_SRS) | ||
1555 | mvs_int_sata(mvi); | ||
1556 | |||
1557 | mw32(INT_STAT, stat); | ||
1558 | } | ||
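mvs_int_full() reads the aggregated cause register once, drains the RX ring, fans the per-port cause bits out to mvs_int_port(), and finally acknowledges only the causes it actually saw by writing the saved value back to INT_STAT. Shifting the status right by the port index lines each port's bits up with the port-0 mask positions before the CINT_PORT | CINT_PORT_STOPPED test. A standalone sketch of that fan-out, using placeholder bit definitions rather than the real ones:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder layout: port i's "event" bit at position i and its
     * "stopped" bit at position i + 16; not the real chip layout. */
    #define DEMO_PORT           (1u << 0)
    #define DEMO_PORT_STOPPED   (1u << 16)
    #define DEMO_MAX_PORTS      8

    static void demo_fan_out(uint32_t stat)
    {
            int i;

            for (i = 0; i < DEMO_MAX_PORTS; i++) {
                    uint32_t tmp = (stat >> i) & (DEMO_PORT | DEMO_PORT_STOPPED);

                    if (tmp)
                            printf("port %d: cause bits 0x%x\n", i, tmp);
            }
    }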
1559 | |||
1560 | #ifndef MVS_DISABLE_MSI | ||
1561 | static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) | ||
1562 | { | ||
1563 | struct mvs_info *mvi = opaque; | ||
1564 | |||
1565 | #ifndef MVS_USE_TASKLET | ||
1566 | spin_lock(&mvi->lock); | ||
1567 | |||
1568 | mvs_int_rx(mvi, true); | ||
1569 | |||
1570 | spin_unlock(&mvi->lock); | ||
1571 | #else | ||
1572 | tasklet_schedule(&mvi->tasklet); | ||
1573 | #endif | ||
1574 | return IRQ_HANDLED; | ||
1575 | } | ||
1576 | #endif | ||
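The MSI handler is compiled two ways: with MVS_USE_TASKLET unset it drains the RX ring directly under mvi->lock, otherwise it only schedules a tasklet so the bulk of the work runs outside hard-IRQ context. A hedged sketch of the deferred variant; the real tasklet body lives elsewhere in the driver and is not part of this hunk, so the function below is illustrative:

    /* Sketch only: mirrors what a bottom half paired with the handler
     * above might do, namely take the lock and drain the RX ring. */
    static void mvs_tasklet_demo(unsigned long data)
    {
            struct mvs_info *mvi = (struct mvs_info *)data;

            spin_lock(&mvi->lock);
            mvs_int_rx(mvi, true);
            spin_unlock(&mvi->lock);
    }

At probe time this would be paired with tasklet_init(&mvi->tasklet, mvs_tasklet_demo, (unsigned long)mvi) so that the tasklet_schedule() call in the handler above has something to run.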
1577 | |||
1578 | int mvs_task_abort(struct sas_task *task) | ||
1579 | { | ||
1580 | int rc; | ||
1581 | unsigned long flags; | ||
1582 | struct mvs_info *mvi = task->dev->port->ha->lldd_ha; | ||
1583 | struct pci_dev *pdev = mvi->pdev; | ||
1584 | int tag; | ||
1585 | |||
1586 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1587 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | ||
1588 | rc = TMF_RESP_FUNC_COMPLETE; | ||
1589 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1590 | goto out_done; | ||
1591 | } | ||
1592 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1593 | |||
1594 | switch (task->task_proto) { | ||
1595 | case SAS_PROTOCOL_SMP: | ||
1596 | dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort!\n"); | ||
1597 | break; | ||
1598 | case SAS_PROTOCOL_SSP: | ||
1599 | dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort!\n"); | ||
1600 | break; | ||
1601 | case SAS_PROTOCOL_SATA: | ||
1602 | case SAS_PROTOCOL_STP: | ||
1603 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { | ||
1604 | dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort!\n"); | ||
1605 | #if _MV_DUMP | ||
1606 | dev_printk(KERN_DEBUG, &pdev->dev, "Dump H2D command FIS:\n"); | ||
1607 | mvs_hexdump(sizeof(struct host_to_dev_fis), | ||
1608 | (void *)&task->ata_task.fis, 0); | ||
1609 | dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI command:\n"); | ||
1610 | mvs_hexdump(16, task->ata_task.atapi_packet, 0); | ||
1611 | #endif | ||
1612 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1613 | if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { | ||
1614 | /* TODO */ | ||
1615 | ; | ||
1616 | } | ||
1617 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1618 | break; | ||
1619 | } | ||
1620 | default: | ||
1621 | break; | ||
1622 | } | ||
1623 | |||
1624 | if (mvs_find_tag(mvi, task, &tag)) { | ||
1625 | spin_lock_irqsave(&mvi->lock, flags); | ||
1626 | mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); | ||
1627 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1628 | } | ||
1629 | if (!mvs_task_exec(task, 1, GFP_ATOMIC)) | ||
1630 | rc = TMF_RESP_FUNC_COMPLETE; | ||
1631 | else | ||
1632 | rc = TMF_RESP_FUNC_FAILED; | ||
1633 | out_done: | ||
1634 | return rc; | ||
1635 | } | ||
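mvs_task_abort() first tests SAS_TASK_STATE_DONE under task->task_state_lock so that a completion racing with the abort is simply answered with TMF_RESP_FUNC_COMPLETE; only then does it free the task's slot by tag and re-drive the task through mvs_task_exec(). That early return can be read as a small predicate, sketched here with a hypothetical helper name:

    /* Sketch: has this task already completed?  Mirrors the check at the
     * top of mvs_task_abort() above. */
    static bool mvs_task_already_done(struct sas_task *task)
    {
            unsigned long flags;
            bool done;

            spin_lock_irqsave(&task->task_state_lock, flags);
            done = !!(task->task_state_flags & SAS_TASK_STATE_DONE);
            spin_unlock_irqrestore(&task->task_state_lock, flags);

            return done;
    }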
1636 | |||
1637 | int __devinit mvs_hw_init(struct mvs_info *mvi) | ||
1638 | { | ||
1639 | void __iomem *regs = mvi->regs; | ||
1640 | int i; | ||
1641 | u32 tmp, cctl; | ||
1642 | |||
1643 | /* make sure interrupts are masked immediately (paranoia) */ | ||
1644 | mw32(GBL_CTL, 0); | ||
1645 | tmp = mr32(GBL_CTL); | ||
1646 | |||
1647 | /* Reset Controller */ | ||
1648 | if (!(tmp & HBA_RST)) { | ||
1649 | if (mvi->flags & MVF_PHY_PWR_FIX) { | ||
1650 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); | ||
1651 | tmp &= ~PCTL_PWR_ON; | ||
1652 | tmp |= PCTL_OFF; | ||
1653 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); | ||
1654 | |||
1655 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); | ||
1656 | tmp &= ~PCTL_PWR_ON; | ||
1657 | tmp |= PCTL_OFF; | ||
1658 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); | ||
1659 | } | ||
1660 | |||
1661 | /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ | ||
1662 | mw32_f(GBL_CTL, HBA_RST); | ||
1663 | } | ||
1664 | |||
1665 | /* wait for reset to finish; timeout is just a guess */ | ||
1666 | i = 1000; | ||
1667 | while (i-- > 0) { | ||
1668 | msleep(10); | ||
1669 | |||
1670 | if (!(mr32(GBL_CTL) & HBA_RST)) | ||
1671 | break; | ||
1672 | } | ||
1673 | if (mr32(GBL_CTL) & HBA_RST) { | ||
1674 | dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); | ||
1675 | return -EBUSY; | ||
1676 | } | ||
1677 | |||
1678 | /* Init Chip */ | ||
1679 | /* make sure RST is set; HBA_RST /should/ have done that for us */ | ||
1680 | cctl = mr32(CTL); | ||
1681 | if (cctl & CCTL_RST) | ||
1682 | cctl &= ~CCTL_RST; | ||
1683 | else | ||
1684 | mw32_f(CTL, cctl | CCTL_RST); | ||
1685 | |||
1686 | /* write to device control _AND_ device status register? - A.C. */ | ||
1687 | pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); | ||
1688 | tmp &= ~PRD_REQ_MASK; | ||
1689 | tmp |= PRD_REQ_SIZE; | ||
1690 | pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); | ||
1691 | |||
1692 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); | ||
1693 | tmp |= PCTL_PWR_ON; | ||
1694 | tmp &= ~PCTL_OFF; | ||
1695 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); | ||
1696 | |||
1697 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); | ||
1698 | tmp |= PCTL_PWR_ON; | ||
1699 | tmp &= ~PCTL_OFF; | ||
1700 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); | ||
1701 | |||
1702 | mw32_f(CTL, cctl); | ||
1703 | |||
1704 | /* reset control */ | ||
1705 | mw32(PCS, 0); /* MVS_PCS */ | ||
1706 | |||
1707 | mvs_phy_hacks(mvi); | ||
1708 | |||
1709 | mw32(CMD_LIST_LO, mvi->slot_dma); | ||
1710 | mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); | ||
1711 | |||
1712 | mw32(RX_FIS_LO, mvi->rx_fis_dma); | ||
1713 | mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); | ||
1714 | |||
1715 | mw32(TX_CFG, MVS_CHIP_SLOT_SZ); | ||
1716 | mw32(TX_LO, mvi->tx_dma); | ||
1717 | mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); | ||
1718 | |||
1719 | mw32(RX_CFG, MVS_RX_RING_SZ); | ||
1720 | mw32(RX_LO, mvi->rx_dma); | ||
1721 | mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); | ||
1722 | |||
1723 | /* enable auto port detection */ | ||
1724 | mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); | ||
1725 | msleep(1100); | ||
1726 | /* init and reset phys */ | ||
1727 | for (i = 0; i < mvi->chip->n_phy; i++) { | ||
1728 | u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); | ||
1729 | u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); | ||
1730 | |||
1731 | mvs_detect_porttype(mvi, i); | ||
1732 | |||
1733 | /* set phy local SAS address */ | ||
1734 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); | ||
1735 | mvs_write_port_cfg_data(mvi, i, lo); | ||
1736 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); | ||
1737 | mvs_write_port_cfg_data(mvi, i, hi); | ||
1738 | |||
1739 | /* reset phy */ | ||
1740 | tmp = mvs_read_phy_ctl(mvi, i); | ||
1741 | tmp |= PHY_RST; | ||
1742 | mvs_write_phy_ctl(mvi, i, tmp); | ||
1743 | } | ||
1744 | |||
1745 | msleep(100); | ||
1746 | |||
1747 | for (i = 0; i < mvi->chip->n_phy; i++) { | ||
1748 | /* clear phy int status */ | ||
1749 | tmp = mvs_read_port_irq_stat(mvi, i); | ||
1750 | tmp &= ~PHYEV_SIG_FIS; | ||
1751 | mvs_write_port_irq_stat(mvi, i, tmp); | ||
1752 | |||
1753 | /* set phy int mask */ | ||
1754 | tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | | ||
1755 | PHYEV_ID_DONE | PHYEV_DEC_ERR; | ||
1756 | mvs_write_port_irq_mask(mvi, i, tmp); | ||
1757 | |||
1758 | msleep(100); | ||
1759 | mvs_update_phyinfo(mvi, i, 1); | ||
1760 | mvs_enable_xmt(mvi, i); | ||
1761 | } | ||
1762 | |||
1763 | /* FIXME: update wide port bitmaps */ | ||
1764 | |||
1765 | /* use little endian for the open address frame, command table, etc. */ | ||
1766 | /* A.C. | ||
1767 | * per the spec, turning on big-endian mode doesn't seem to buy us | ||
1768 | * anything on big-endian machines; needs further confirmation. | ||
1769 | */ | ||
1770 | cctl = mr32(CTL); | ||
1771 | cctl |= CCTL_ENDIAN_CMD; | ||
1772 | cctl |= CCTL_ENDIAN_DATA; | ||
1773 | cctl &= ~CCTL_ENDIAN_OPEN; | ||
1774 | cctl |= CCTL_ENDIAN_RSP; | ||
1775 | mw32_f(CTL, cctl); | ||
1776 | |||
1777 | /* reset CMD queue */ | ||
1778 | tmp = mr32(PCS); | ||
1779 | tmp |= PCS_CMD_RST; | ||
1780 | mw32(PCS, tmp); | ||
1781 | /* interrupt coalescing may cause missed HW interrupts in some cases; | ||
1782 | * the max coalescing count is 0x1ff while our max slot count is 0x200, | ||
1783 | * which would wrap the count field to 0, so disable coalescing. | ||
1784 | */ | ||
1785 | tmp = 0; | ||
1786 | mw32(INT_COAL, tmp); | ||
1787 | |||
1788 | tmp = 0x100; | ||
1789 | mw32(INT_COAL_TMOUT, tmp); | ||
1790 | |||
1791 | /* ladies and gentlemen, start your engines */ | ||
1792 | mw32(TX_CFG, 0); | ||
1793 | mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); | ||
1794 | mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); | ||
1795 | /* enable CMD/CMPL_Q/RESP mode */ | ||
1796 | mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN); | ||
1797 | |||
1798 | /* enable completion queue interrupt */ | ||
1799 | tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS); | ||
1800 | mw32(INT_MASK, tmp); | ||
1801 | |||
1802 | /* Enable SRS interrupt */ | ||
1803 | mw32(INT_MASK_SRS, 0xFF); | ||
1804 | return 0; | ||
1805 | } | ||
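The queue-base writes in mvs_hw_init() split each dma_addr_t into CMD_LIST_LO/HI, RX_FIS_LO/HI, TX_LO/HI and RX_LO/HI pairs using the (dma >> 16) >> 16 idiom: a plain >> 32 would be undefined (and provoke a shift-count warning) when dma_addr_t is only 32 bits wide, while two 16-bit shifts are valid in both configurations and simply produce 0 in the 32-bit case. A minimal standalone illustration, using a stand-in typedef:

    #include <stdint.h>
    #include <stdio.h>

    /* demo_dma_addr_t stands in for dma_addr_t, which may be 32 or 64 bits
     * wide depending on the kernel configuration. */
    typedef uint64_t demo_dma_addr_t;

    int main(void)
    {
            demo_dma_addr_t dma = 0x123456789abcdef0ULL;
            uint32_t lo = (uint32_t)dma;
            uint32_t hi = (dma >> 16) >> 16;  /* upper 32 bits without a >=32-bit shift */

            printf("lo=0x%08x hi=0x%08x\n", lo, hi);
            return 0;
    }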
1806 | |||
1807 | void __devinit mvs_print_info(struct mvs_info *mvi) | ||
1808 | { | ||
1809 | struct pci_dev *pdev = mvi->pdev; | ||
1810 | static int printed_version; | ||
1811 | |||
1812 | if (!printed_version++) | ||
1813 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | ||
1814 | |||
1815 | dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n", | ||
1816 | mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); | ||
1817 | } | ||
1818 | |||