diff options
author | Bruce Allan <bruce.w.allan@intel.com> | 2018-10-26 14:44:39 -0400 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2018-11-20 14:39:03 -0500 |
commit | 7afdbc903a7c22510bbf5a82866e1a607e2f0997 (patch) | |
tree | 778feeb2923909b85313365ca53670fc880e7490 /drivers/net/ethernet/intel/ice/ice_controlq.c | |
parent | d38b08834fc35720ad685e9e2f6d865f681fbc34 (diff) |
ice: Cleanup duplicate control queue code
1. Assigning the register offset and mask values contains duplicate code
that can easily be replaced with a macro.
2. Separate functions for freeing send queue and receive queue rings are
not needed; replace with a single function that uses a pointer to the
struct ice_ctl_q_ring structure as a parameter instead of a pointer to
the struct ice_ctl_q_info structure.
3. Initializing register settings for both send queue and receive queue
contains duplicate code that can easily be replaced with a helper
function.
4. Separate functions for freeing send queue and receive queue buffers are
not needed; duplicate code can easily be replaced with a macro.
Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_controlq.c')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_controlq.c | 218 |
1 file changed, 76 insertions(+), 142 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 84c967294eaf..b920403c6616 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c | |||
@@ -3,6 +3,26 @@ | |||
3 | 3 | ||
4 | #include "ice_common.h" | 4 | #include "ice_common.h" |
5 | 5 | ||
6 | #define ICE_CQ_INIT_REGS(qinfo, prefix) \ | ||
7 | do { \ | ||
8 | (qinfo)->sq.head = prefix##_ATQH; \ | ||
9 | (qinfo)->sq.tail = prefix##_ATQT; \ | ||
10 | (qinfo)->sq.len = prefix##_ATQLEN; \ | ||
11 | (qinfo)->sq.bah = prefix##_ATQBAH; \ | ||
12 | (qinfo)->sq.bal = prefix##_ATQBAL; \ | ||
13 | (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ | ||
14 | (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ | ||
15 | (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ | ||
16 | (qinfo)->rq.head = prefix##_ARQH; \ | ||
17 | (qinfo)->rq.tail = prefix##_ARQT; \ | ||
18 | (qinfo)->rq.len = prefix##_ARQLEN; \ | ||
19 | (qinfo)->rq.bah = prefix##_ARQBAH; \ | ||
20 | (qinfo)->rq.bal = prefix##_ARQBAL; \ | ||
21 | (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ | ||
22 | (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ | ||
23 | (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ | ||
24 | } while (0) | ||
25 | |||
6 | /** | 26 | /** |
7 | * ice_adminq_init_regs - Initialize AdminQ registers | 27 | * ice_adminq_init_regs - Initialize AdminQ registers |
8 | * @hw: pointer to the hardware structure | 28 | * @hw: pointer to the hardware structure |
@@ -13,23 +33,7 @@ static void ice_adminq_init_regs(struct ice_hw *hw) | |||
13 | { | 33 | { |
14 | struct ice_ctl_q_info *cq = &hw->adminq; | 34 | struct ice_ctl_q_info *cq = &hw->adminq; |
15 | 35 | ||
16 | cq->sq.head = PF_FW_ATQH; | 36 | ICE_CQ_INIT_REGS(cq, PF_FW); |
17 | cq->sq.tail = PF_FW_ATQT; | ||
18 | cq->sq.len = PF_FW_ATQLEN; | ||
19 | cq->sq.bah = PF_FW_ATQBAH; | ||
20 | cq->sq.bal = PF_FW_ATQBAL; | ||
21 | cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M; | ||
22 | cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; | ||
23 | cq->sq.head_mask = PF_FW_ATQH_ATQH_M; | ||
24 | |||
25 | cq->rq.head = PF_FW_ARQH; | ||
26 | cq->rq.tail = PF_FW_ARQT; | ||
27 | cq->rq.len = PF_FW_ARQLEN; | ||
28 | cq->rq.bah = PF_FW_ARQBAH; | ||
29 | cq->rq.bal = PF_FW_ARQBAL; | ||
30 | cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M; | ||
31 | cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M; | ||
32 | cq->rq.head_mask = PF_FW_ARQH_ARQH_M; | ||
33 | } | 37 | } |
34 | 38 | ||
35 | /** | 39 | /** |
@@ -42,24 +46,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw) | |||
42 | { | 46 | { |
43 | struct ice_ctl_q_info *cq = &hw->mailboxq; | 47 | struct ice_ctl_q_info *cq = &hw->mailboxq; |
44 | 48 | ||
45 | /* set head and tail registers in our local struct */ | 49 | ICE_CQ_INIT_REGS(cq, PF_MBX); |
46 | cq->sq.head = PF_MBX_ATQH; | ||
47 | cq->sq.tail = PF_MBX_ATQT; | ||
48 | cq->sq.len = PF_MBX_ATQLEN; | ||
49 | cq->sq.bah = PF_MBX_ATQBAH; | ||
50 | cq->sq.bal = PF_MBX_ATQBAL; | ||
51 | cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M; | ||
52 | cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M; | ||
53 | cq->sq.head_mask = PF_MBX_ATQH_ATQH_M; | ||
54 | |||
55 | cq->rq.head = PF_MBX_ARQH; | ||
56 | cq->rq.tail = PF_MBX_ARQT; | ||
57 | cq->rq.len = PF_MBX_ARQLEN; | ||
58 | cq->rq.bah = PF_MBX_ARQBAH; | ||
59 | cq->rq.bal = PF_MBX_ARQBAL; | ||
60 | cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M; | ||
61 | cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M; | ||
62 | cq->rq.head_mask = PF_MBX_ARQH_ARQH_M; | ||
63 | } | 50 | } |
64 | 51 | ||
65 | /** | 52 | /** |
@@ -131,37 +118,20 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |||
131 | } | 118 | } |
132 | 119 | ||
133 | /** | 120 | /** |
134 | * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings | 121 | * ice_free_cq_ring - Free control queue ring |
135 | * @hw: pointer to the hardware structure | ||
136 | * @cq: pointer to the specific Control queue | ||
137 | * | ||
138 | * This assumes the posted send buffers have already been cleaned | ||
139 | * and de-allocated | ||
140 | */ | ||
141 | static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) | ||
142 | { | ||
143 | dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, | ||
144 | cq->sq.desc_buf.va, cq->sq.desc_buf.pa); | ||
145 | cq->sq.desc_buf.va = NULL; | ||
146 | cq->sq.desc_buf.pa = 0; | ||
147 | cq->sq.desc_buf.size = 0; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings | ||
152 | * @hw: pointer to the hardware structure | 122 | * @hw: pointer to the hardware structure |
153 | * @cq: pointer to the specific Control queue | 123 | * @ring: pointer to the specific control queue ring |
154 | * | 124 | * |
155 | * This assumes the posted receive buffers have already been cleaned | 125 | * This assumes the posted buffers have already been cleaned |
156 | * and de-allocated | 126 | * and de-allocated |
157 | */ | 127 | */ |
158 | static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) | 128 | static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) |
159 | { | 129 | { |
160 | dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size, | 130 | dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size, |
161 | cq->rq.desc_buf.va, cq->rq.desc_buf.pa); | 131 | ring->desc_buf.va, ring->desc_buf.pa); |
162 | cq->rq.desc_buf.va = NULL; | 132 | ring->desc_buf.va = NULL; |
163 | cq->rq.desc_buf.pa = 0; | 133 | ring->desc_buf.pa = 0; |
164 | cq->rq.desc_buf.size = 0; | 134 | ring->desc_buf.size = 0; |
165 | } | 135 | } |
166 | 136 | ||
167 | /** | 137 | /** |
@@ -280,54 +250,23 @@ unwind_alloc_sq_bufs: | |||
280 | return ICE_ERR_NO_MEMORY; | 250 | return ICE_ERR_NO_MEMORY; |
281 | } | 251 | } |
282 | 252 | ||
283 | /** | 253 | static enum ice_status |
284 | * ice_free_rq_bufs - Free ARQ buffer info elements | 254 | ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) |
285 | * @hw: pointer to the hardware structure | ||
286 | * @cq: pointer to the specific Control queue | ||
287 | */ | ||
288 | static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) | ||
289 | { | ||
290 | int i; | ||
291 | |||
292 | /* free descriptors */ | ||
293 | for (i = 0; i < cq->num_rq_entries; i++) { | ||
294 | dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size, | ||
295 | cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); | ||
296 | cq->rq.r.rq_bi[i].va = NULL; | ||
297 | cq->rq.r.rq_bi[i].pa = 0; | ||
298 | cq->rq.r.rq_bi[i].size = 0; | ||
299 | } | ||
300 | |||
301 | /* free the dma header */ | ||
302 | devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); | ||
303 | } | ||
304 | |||
305 | /** | ||
306 | * ice_free_sq_bufs - Free ATQ buffer info elements | ||
307 | * @hw: pointer to the hardware structure | ||
308 | * @cq: pointer to the specific Control queue | ||
309 | */ | ||
310 | static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) | ||
311 | { | 255 | { |
312 | int i; | 256 | /* Clear Head and Tail */ |
257 | wr32(hw, ring->head, 0); | ||
258 | wr32(hw, ring->tail, 0); | ||
313 | 259 | ||
314 | /* only unmap if the address is non-NULL */ | 260 | /* set starting point */ |
315 | for (i = 0; i < cq->num_sq_entries; i++) | 261 | wr32(hw, ring->len, (num_entries | ring->len_ena_mask)); |
316 | if (cq->sq.r.sq_bi[i].pa) { | 262 | wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa)); |
317 | dmam_free_coherent(ice_hw_to_dev(hw), | 263 | wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa)); |
318 | cq->sq.r.sq_bi[i].size, | ||
319 | cq->sq.r.sq_bi[i].va, | ||
320 | cq->sq.r.sq_bi[i].pa); | ||
321 | cq->sq.r.sq_bi[i].va = NULL; | ||
322 | cq->sq.r.sq_bi[i].pa = 0; | ||
323 | cq->sq.r.sq_bi[i].size = 0; | ||
324 | } | ||
325 | 264 | ||
326 | /* free the buffer info list */ | 265 | /* Check one register to verify that config was applied */ |
327 | devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf); | 266 | if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa)) |
267 | return ICE_ERR_AQ_ERROR; | ||
328 | 268 | ||
329 | /* free the dma header */ | 269 | return 0; |
330 | devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); | ||
331 | } | 270 | } |
332 | 271 | ||
333 | /** | 272 | /** |
@@ -340,23 +279,7 @@ static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |||
340 | static enum ice_status | 279 | static enum ice_status |
341 | ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) | 280 | ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) |
342 | { | 281 | { |
343 | u32 reg = 0; | 282 | return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); |
344 | |||
345 | /* Clear Head and Tail */ | ||
346 | wr32(hw, cq->sq.head, 0); | ||
347 | wr32(hw, cq->sq.tail, 0); | ||
348 | |||
349 | /* set starting point */ | ||
350 | wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask)); | ||
351 | wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa)); | ||
352 | wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa)); | ||
353 | |||
354 | /* Check one register to verify that config was applied */ | ||
355 | reg = rd32(hw, cq->sq.bal); | ||
356 | if (reg != lower_32_bits(cq->sq.desc_buf.pa)) | ||
357 | return ICE_ERR_AQ_ERROR; | ||
358 | |||
359 | return 0; | ||
360 | } | 283 | } |
361 | 284 | ||
362 | /** | 285 | /** |
@@ -369,25 +292,15 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |||
369 | static enum ice_status | 292 | static enum ice_status |
370 | ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) | 293 | ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) |
371 | { | 294 | { |
372 | u32 reg = 0; | 295 | enum ice_status status; |
373 | |||
374 | /* Clear Head and Tail */ | ||
375 | wr32(hw, cq->rq.head, 0); | ||
376 | wr32(hw, cq->rq.tail, 0); | ||
377 | 296 | ||
378 | /* set starting point */ | 297 | status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); |
379 | wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask)); | 298 | if (status) |
380 | wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa)); | 299 | return status; |
381 | wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa)); | ||
382 | 300 | ||
383 | /* Update tail in the HW to post pre-allocated buffers */ | 301 | /* Update tail in the HW to post pre-allocated buffers */ |
384 | wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); | 302 | wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); |
385 | 303 | ||
386 | /* Check one register to verify that config was applied */ | ||
387 | reg = rd32(hw, cq->rq.bal); | ||
388 | if (reg != lower_32_bits(cq->rq.desc_buf.pa)) | ||
389 | return ICE_ERR_AQ_ERROR; | ||
390 | |||
391 | return 0; | 304 | return 0; |
392 | } | 305 | } |
393 | 306 | ||
@@ -444,7 +357,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |||
444 | goto init_ctrlq_exit; | 357 | goto init_ctrlq_exit; |
445 | 358 | ||
446 | init_ctrlq_free_rings: | 359 | init_ctrlq_free_rings: |
447 | ice_free_ctrlq_sq_ring(hw, cq); | 360 | ice_free_cq_ring(hw, &cq->sq); |
448 | 361 | ||
449 | init_ctrlq_exit: | 362 | init_ctrlq_exit: |
450 | return ret_code; | 363 | return ret_code; |
@@ -503,12 +416,33 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |||
503 | goto init_ctrlq_exit; | 416 | goto init_ctrlq_exit; |
504 | 417 | ||
505 | init_ctrlq_free_rings: | 418 | init_ctrlq_free_rings: |
506 | ice_free_ctrlq_rq_ring(hw, cq); | 419 | ice_free_cq_ring(hw, &cq->rq); |
507 | 420 | ||
508 | init_ctrlq_exit: | 421 | init_ctrlq_exit: |
509 | return ret_code; | 422 | return ret_code; |
510 | } | 423 | } |
511 | 424 | ||
425 | #define ICE_FREE_CQ_BUFS(hw, qi, ring) \ | ||
426 | do { \ | ||
427 | int i; \ | ||
428 | /* free descriptors */ \ | ||
429 | for (i = 0; i < (qi)->num_##ring##_entries; i++) \ | ||
430 | if ((qi)->ring.r.ring##_bi[i].pa) { \ | ||
431 | dmam_free_coherent(ice_hw_to_dev(hw), \ | ||
432 | (qi)->ring.r.ring##_bi[i].size,\ | ||
433 | (qi)->ring.r.ring##_bi[i].va,\ | ||
434 | (qi)->ring.r.ring##_bi[i].pa);\ | ||
435 | (qi)->ring.r.ring##_bi[i].va = NULL; \ | ||
436 | (qi)->ring.r.ring##_bi[i].pa = 0; \ | ||
437 | (qi)->ring.r.ring##_bi[i].size = 0; \ | ||
438 | } \ | ||
439 | /* free the buffer info list */ \ | ||
440 | if ((qi)->ring.cmd_buf) \ | ||
441 | devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ | ||
442 | /* free dma head */ \ | ||
443 | devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ | ||
444 | } while (0) | ||
445 | |||
512 | /** | 446 | /** |
513 | * ice_shutdown_sq - shutdown the Control ATQ | 447 | * ice_shutdown_sq - shutdown the Control ATQ |
514 | * @hw: pointer to the hardware structure | 448 | * @hw: pointer to the hardware structure |
@@ -538,8 +472,8 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |||
538 | cq->sq.count = 0; /* to indicate uninitialized queue */ | 472 | cq->sq.count = 0; /* to indicate uninitialized queue */ |
539 | 473 | ||
540 | /* free ring buffers and the ring itself */ | 474 | /* free ring buffers and the ring itself */ |
541 | ice_free_sq_bufs(hw, cq); | 475 | ICE_FREE_CQ_BUFS(hw, cq, sq); |
542 | ice_free_ctrlq_sq_ring(hw, cq); | 476 | ice_free_cq_ring(hw, &cq->sq); |
543 | 477 | ||
544 | shutdown_sq_out: | 478 | shutdown_sq_out: |
545 | mutex_unlock(&cq->sq_lock); | 479 | mutex_unlock(&cq->sq_lock); |
@@ -606,8 +540,8 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |||
606 | cq->rq.count = 0; | 540 | cq->rq.count = 0; |
607 | 541 | ||
608 | /* free ring buffers and the ring itself */ | 542 | /* free ring buffers and the ring itself */ |
609 | ice_free_rq_bufs(hw, cq); | 543 | ICE_FREE_CQ_BUFS(hw, cq, rq); |
610 | ice_free_ctrlq_rq_ring(hw, cq); | 544 | ice_free_cq_ring(hw, &cq->rq); |
611 | 545 | ||
612 | shutdown_rq_out: | 546 | shutdown_rq_out: |
613 | mutex_unlock(&cq->rq_lock); | 547 | mutex_unlock(&cq->rq_lock); |