 drivers/net/wireless/iwlwifi/pcie/tx.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 58 insertions(+), 17 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8e9e3212fe78..8b625a7f5685 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1152,10 +1152,12 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	void *dup_buf = NULL;
 	dma_addr_t phys_addr;
 	int idx;
-	u16 copy_size, cmd_size;
+	u16 copy_size, cmd_size, dma_size;
 	bool had_nocopy = false;
 	int i;
 	u32 cmd_pos;
+	const u8 *cmddata[IWL_MAX_CMD_TFDS];
+	u16 cmdlen[IWL_MAX_CMD_TFDS];
 
 	copy_size = sizeof(out_cmd->hdr);
 	cmd_size = sizeof(out_cmd->hdr);
@@ -1164,8 +1166,23 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
 
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		cmddata[i] = cmd->data[i];
+		cmdlen[i] = cmd->len[i];
+
 		if (!cmd->len[i])
 			continue;
+
+		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
+		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
+			int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+
+			if (copy > cmdlen[i])
+				copy = cmdlen[i];
+			cmdlen[i] -= copy;
+			cmddata[i] += copy;
+			copy_size += copy;
+		}
+
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
 			had_nocopy = true;
 			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1185,7 +1202,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 			goto free_dup_buf;
 		}
 
-		dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+		dup_buf = kmemdup(cmddata[i], cmdlen[i],
 				  GFP_ATOMIC);
 		if (!dup_buf)
 			return -ENOMEM;
@@ -1195,7 +1212,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 				idx = -EINVAL;
 				goto free_dup_buf;
 			}
-			copy_size += cmd->len[i];
+			copy_size += cmdlen[i];
 		}
 		cmd_size += cmd->len[i];
 	}
@@ -1242,14 +1259,31 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
 	/* and copy the data that needs to be copied */
 	cmd_pos = offsetof(struct iwl_device_cmd, payload);
+	copy_size = sizeof(out_cmd->hdr);
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
-		if (!cmd->len[i])
+		int copy = 0;
+
+		if (!cmd->len[i])
 			continue;
-		if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
-					 IWL_HCMD_DFL_DUP))
-			break;
-		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
-		cmd_pos += cmd->len[i];
+
+		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
+		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
+			copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+
+			if (copy > cmd->len[i])
+				copy = cmd->len[i];
+		}
+
+		/* copy everything if not nocopy/dup */
+		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					   IWL_HCMD_DFL_DUP)))
+			copy = cmd->len[i];
+
+		if (copy) {
+			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+			cmd_pos += copy;
+			copy_size += copy;
+		}
 	}
 
 	WARN_ON_ONCE(txq->entries[idx].copy_cmd);
@@ -1275,7 +1309,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
 		 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
-	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
+	/*
+	 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
+	 * still map at least that many bytes for the hardware to write back to.
+	 * We have enough space, so that's not a problem.
+	 */
+	dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
+
+	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
 				   DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
 		idx = -ENOMEM;
@@ -1283,14 +1324,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	}
 
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
-	dma_unmap_len_set(out_meta, len, copy_size);
+	dma_unmap_len_set(out_meta, len, dma_size);
 
 	iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
 
+	/* map the remaining (adjusted) nocopy/dup fragments */
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
-		const void *data = cmd->data[i];
+		const void *data = cmddata[i];
 
-		if (!cmd->len[i])
+		if (!cmdlen[i])
 			continue;
 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
 					   IWL_HCMD_DFL_DUP)))
@@ -1298,7 +1340,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 			data = dup_buf;
 		phys_addr = dma_map_single(trans->dev, (void *)data,
-					   cmd->len[i], DMA_BIDIRECTIONAL);
+					   cmdlen[i], DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
 			iwl_pcie_tfd_unmap(trans, out_meta,
 					   &txq->tfds[q->write_ptr],
@@ -1307,7 +1349,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 			goto out;
 		}
 
-		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
 	}
 
 	out_meta->flags = cmd->flags;
@@ -1317,8 +1359,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
 	txq->need_update = 1;
 
-	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
-			       &out_cmd->hdr, copy_size);
+	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
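
The change above boils down to a small piece of bookkeeping: before building the TFDs, the driver peels the first IWL_HCMD_MIN_COPY_SIZE bytes of the command payload off the (possibly nocopy/dup) fragments so that they always land in the region that is copied into the static command buffer and DMA-mapped together with the header. The following standalone C sketch mimics only that bookkeeping; MIN_COPY, HDR_SIZE, struct frag and the demo fragment sizes are hypothetical stand-ins, not the driver's types or values.

/*
 * Standalone sketch of the min-copy bookkeeping added by this patch.
 * Not the driver code: MIN_COPY, HDR_SIZE, struct frag and the demo
 * sizes are stand-ins for IWL_HCMD_MIN_COPY_SIZE, the command header
 * size, and the cmd->data[]/cmd->len[] fragments.
 */
#include <stdio.h>

#define MIN_COPY  16	/* stand-in for IWL_HCMD_MIN_COPY_SIZE */
#define MAX_FRAGS  2	/* stand-in for IWL_MAX_CMD_TFDS */
#define HDR_SIZE   4	/* hypothetical command header size */

struct frag {
	const unsigned char *data;	/* like cmddata[i] */
	unsigned int len;		/* like cmdlen[i] */
};

int main(void)
{
	unsigned char buf0[8] = { 0 }, buf1[64] = { 0 };
	struct frag frags[MAX_FRAGS] = {
		{ buf0, 6 },	/* short first fragment */
		{ buf1, 40 },	/* longer second fragment */
	};
	unsigned int copy_size = HDR_SIZE;	/* header is always copied */
	int i;

	/*
	 * First pass (mirrors the loop added at the top of
	 * iwl_pcie_enqueue_hcmd): pull the leading bytes of each fragment
	 * into the copied region until at least MIN_COPY bytes are there,
	 * shrinking what the nocopy/dup path will map later.
	 */
	for (i = 0; i < MAX_FRAGS; i++) {
		if (!frags[i].len)
			continue;
		if (copy_size < MIN_COPY) {
			unsigned int copy = MIN_COPY - copy_size;

			if (copy > frags[i].len)
				copy = frags[i].len;
			frags[i].len -= copy;
			frags[i].data += copy;
			copy_size += copy;
		}
	}

	printf("copied region: %u bytes, frag0 left: %u, frag1 left: %u\n",
	       copy_size, frags[0].len, frags[1].len);
	return 0;
}

With the demo sizes this prints that 16 bytes end up in the copied region, the 6-byte first fragment is consumed entirely, and 34 bytes of the second fragment remain for the nocopy/dup mapping loop, matching how the patch shrinks cmdlen[] and advances cmddata[].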
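The other, smaller point of the patch is the length passed to dma_map_single() for the copied part: even when the whole command is shorter than IWL_HCMD_MIN_COPY_SIZE, at least that many bytes are mapped so the hardware can write back into the buffer, which the in-diff comment notes is large enough. A hypothetical helper (not the driver's API) that mirrors the max_t() expression:

/* mirrors: dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE); */
#include <stddef.h>

#define MIN_COPY 16	/* stand-in for IWL_HCMD_MIN_COPY_SIZE */

static inline size_t cmd_dma_size(size_t copy_size)
{
	return copy_size > MIN_COPY ? copy_size : MIN_COPY;
}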