author		Salil Mehta <salil.mehta@huawei.com>	2018-03-22 10:28:58 -0400
committer	David S. Miller <davem@davemloft.net>	2018-03-22 15:29:04 -0400
commit		07a0556a3a735f57060c274c55e895682e4055e6 (patch)
tree		cfee163c9c516d74ef9aee0d5dac86e156900927
parent		7a01c89723301c343f75862098e4fa0885b75b3b (diff)
net: hns3: Changes to support ARQ(Asynchronous Receive Queue)
The current mailbox CRQ can consist of both synchronous and asynchronous responses from the PF. Synchronous responses are time critical and should be handed over to the waiting task/context as quickly as possible, otherwise a timeout occurs. This problem is accentuated if the CRQ contains even a single async message. Hence, it is important to handle synchronous messages quickly and defer the handling of async messages.

This patch introduces a separate ARQ (async receive queue) for the async messages. These messages are processed later, in the context of the mailbox task, while synchronous messages are still processed in the context of the mailbox interrupt. The ARQ matters because VF reset introduces new async messages such as MBX_ASSERTING_RESET, which add to the pressure on the responses to synchronous messages and make them time out even more quickly.

Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
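For reference, the ARQ introduced here behaves like a bounded circular buffer: the interrupt path enqueues asynchronous messages at the tail, the deferred mailbox task drains them from the head, and a count field guards against overflow. Below is a minimal standalone sketch of that scheme, not the driver's actual code; the names, sizes, and helpers (arq_ring, arq_enqueue, arq_dequeue) are illustrative assumptions.

	/* Minimal sketch of a bounded async receive ring (illustrative only). */
	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define ARQ_MSG_WORDS	8	/* u16 words per queued message (assumed) */
	#define ARQ_MSG_NUM	1024	/* ring depth (assumed) */

	struct arq_ring {
		uint32_t head;	/* next slot the consumer (mbx task) reads */
		uint32_t tail;	/* next slot the producer (interrupt) writes */
		uint32_t count;	/* number of messages currently queued */
		uint16_t msg_q[ARQ_MSG_NUM][ARQ_MSG_WORDS];
	};

	/* Fast path: copy the async message in and advance the tail; a full
	 * ring means the message is dropped so sync handling is never delayed.
	 */
	static bool arq_enqueue(struct arq_ring *arq, const uint16_t *msg)
	{
		if (arq->count >= ARQ_MSG_NUM)
			return false;

		memcpy(arq->msg_q[arq->tail], msg, ARQ_MSG_WORDS * sizeof(uint16_t));
		arq->tail = (arq->tail + 1) % ARQ_MSG_NUM;
		arq->count++;
		return true;
	}

	/* Slow path: the deferred task drains one message per call until the
	 * ring is empty (head catches up with tail).
	 */
	static bool arq_dequeue(struct arq_ring *arq, uint16_t *msg)
	{
		if (!arq->count)
			return false;

		memcpy(msg, arq->msg_q[arq->head], ARQ_MSG_WORDS * sizeof(uint16_t));
		arq->head = (arq->head + 1) % ARQ_MSG_NUM;
		arq->count--;
		return true;
	}

Because only the producer side advances the tail and only the consumer side advances the head, synchronous responses keep being acknowledged immediately while a backlog of async events waits in the ring; whatever serialization the two contexts need around count is outside the scope of this sketch.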
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h			15
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c	6
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c	16
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h	5
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c	83
5 files changed, 111 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index e6e1d221b5c8..f3e90c29958c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -85,6 +85,21 @@ struct hclge_mbx_pf_to_vf_cmd {
 	u16 msg[8];
 };
 
+/* used by VF to store the received Async responses from PF */
+struct hclgevf_mbx_arq_ring {
+#define HCLGE_MBX_MAX_ARQ_MSG_SIZE	8
+#define HCLGE_MBX_MAX_ARQ_MSG_NUM	1024
+	struct hclgevf_dev *hdev;
+	u32 head;
+	u32 tail;
+	u32 count;
+	u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+};
+
 #define hclge_mbx_ring_ptr_move_crq(crq) \
 	(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#define hclge_mbx_tail_ptr_move_arq(arq) \
+	(arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+#define hclge_mbx_head_ptr_move_arq(arq) \
+	(arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 85985e731311..1bbfe131b596 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -315,6 +315,12 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
 		goto err_csq;
 	}
 
+	/* initialize the pointers of async rx queue of mailbox */
+	hdev->arq.hdev = hdev;
+	hdev->arq.head = 0;
+	hdev->arq.tail = 0;
+	hdev->arq.count = 0;
+
 	/* get firmware version */
 	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
 	if (ret) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 6dd75614cc67..2b8426412cc9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1010,10 +1010,13 @@ void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
 	}
 }
 
-static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
 {
-	if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
+	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
+		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
 		schedule_work(&hdev->mbx_service_task);
+	}
 }
 
 static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
@@ -1025,6 +1028,10 @@ static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
 
 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
 {
+	/* if we have any pending mailbox event then schedule the mbx task */
+	if (hdev->mbx_event_pending)
+		hclgevf_mbx_task_schedule(hdev);
+
 	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
 		hclgevf_reset_task_schedule(hdev);
 }
@@ -1118,7 +1125,7 @@ static void hclgevf_mailbox_service_task(struct work_struct *work)
 
 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
 
-	hclgevf_mbx_handler(hdev);
+	hclgevf_mbx_async_handler(hdev);
 
 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
 }
@@ -1178,8 +1185,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 	if (!hclgevf_check_event_cause(hdev, &clearval))
 		goto skip_sched;
 
-	/* schedule the VF mailbox service task, if not already scheduled */
-	hclgevf_mbx_task_schedule(hdev);
+	hclgevf_mbx_handler(hdev);
 
 	hclgevf_clear_event_cause(hdev, clearval);
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 8cdc6027671f..a477a7c36bbd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -152,7 +152,9 @@ struct hclgevf_dev {
 	int *vector_irq;
 
 	bool accept_mta_mc; /* whether to accept mta filter multicast */
+	bool mbx_event_pending;
 	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
 
 	struct timer_list service_timer;
 	struct work_struct service_task;
@@ -187,8 +189,11 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
 			  const u8 *msg_data, u8 msg_len, bool need_resp,
 			  u8 *resp_data, u16 resp_len);
 void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);
+
 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 				 u8 duplex);
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index a63ed3aa2c00..7687911d3eb8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -132,9 +132,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 	struct hclge_mbx_pf_to_vf_cmd *req;
 	struct hclgevf_cmq_ring *crq;
 	struct hclgevf_desc *desc;
-	u16 link_status, flag;
-	u32 speed;
-	u8 duplex;
+	u16 *msg_q;
+	u16 flag;
 	u8 *temp;
 	int i;
 
@@ -146,6 +145,12 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		desc = &crq->desc[crq->next_to_use];
 		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
+		/* synchronous messages are time critical and need preferential
+		 * treatment. Therefore, we need to acknowledge all the sync
+		 * responses as quickly as possible so that waiting tasks do not
+		 * timeout and simultaneously queue the async messages for later
+		 * prcessing in context of mailbox task i.e. the slow path.
+		 */
 		switch (req->msg[0]) {
 		case HCLGE_MBX_PF_VF_RESP:
 			if (resp->received_resp)
@@ -165,13 +170,30 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 			}
 			break;
 		case HCLGE_MBX_LINK_STAT_CHANGE:
-			link_status = le16_to_cpu(req->msg[1]);
-			memcpy(&speed, &req->msg[2], sizeof(speed));
-			duplex = (u8)le16_to_cpu(req->msg[4]);
+			/* set this mbx event as pending. This is required as we
+			 * might loose interrupt event when mbx task is busy
+			 * handling. This shall be cleared when mbx task just
+			 * enters handling state.
+			 */
+			hdev->mbx_event_pending = true;
 
-			/* update upper layer with new link link status */
-			hclgevf_update_link_status(hdev, link_status);
-			hclgevf_update_speed_duplex(hdev, speed, duplex);
+			/* we will drop the async msg if we find ARQ as full
+			 * and continue with next message
+			 */
+			if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+				dev_warn(&hdev->pdev->dev,
+					 "Async Q full, dropping msg(%d)\n",
+					 req->msg[1]);
+				break;
+			}
+
+			/* tail the async message in arq */
+			msg_q = hdev->arq.msg_q[hdev->arq.tail];
+			memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE);
+			hclge_mbx_tail_ptr_move_arq(hdev->arq);
+			hdev->arq.count++;
+
+			hclgevf_mbx_task_schedule(hdev);
 
 			break;
 		default:
@@ -189,3 +211,46 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
 			  crq->next_to_use);
 }
+
+void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
+{
+	u16 link_status;
+	u16 *msg_q;
+	u8 duplex;
+	u32 speed;
+	u32 tail;
+
+	/* we can safely clear it now as we are at start of the async message
+	 * processing
+	 */
+	hdev->mbx_event_pending = false;
+
+	tail = hdev->arq.tail;
+
+	/* process all the async queue messages */
+	while (tail != hdev->arq.head) {
+		msg_q = hdev->arq.msg_q[hdev->arq.head];
+
+		switch (msg_q[0]) {
+		case HCLGE_MBX_LINK_STAT_CHANGE:
+			link_status = le16_to_cpu(msg_q[1]);
+			memcpy(&speed, &msg_q[2], sizeof(speed));
+			duplex = (u8)le16_to_cpu(msg_q[4]);
+
+			/* update upper layer with new link link status */
+			hclgevf_update_link_status(hdev, link_status);
+			hclgevf_update_speed_duplex(hdev, speed, duplex);
+
+			break;
+		default:
+			dev_err(&hdev->pdev->dev,
+				"fetched unsupported(%d) message from arq\n",
+				msg_q[0]);
+			break;
+		}
+
+		hclge_mbx_head_ptr_move_arq(hdev->arq);
+		hdev->arq.count--;
+		msg_q = hdev->arq.msg_q[hdev->arq.head];
+	}
+}