path: root/drivers/char/raw.c
/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * device to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
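
/*
 * Usage sketch (not part of the driver): binding is done from userspace by
 * an ioctl on the control node.  A minimal, hedged example, assuming
 * /dev/rawctl and /dev/sda1 exist and the caller has CAP_SYS_ADMIN:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *	#include <linux/raw.h>
 *
 *	int main(void)
 *	{
 *		struct stat st;
 *		struct raw_config_request rq;
 *		int ctl = open("/dev/rawctl", O_RDWR);
 *
 *		stat("/dev/sda1", &st);			// block device to bind
 *		rq.raw_minor   = 1;			// -> /dev/raw/raw1
 *		rq.block_major = major(st.st_rdev);
 *		rq.block_minor = minor(st.st_rdev);
 *		return ioctl(ctl, RAW_SETBIND, &rq);	// error checks omitted
 *	}
 */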

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
static struct raw_device_data raw_devices[MAX_RAW_MINORS];
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	mutex_lock(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode, 0);
	if (err)
		goto out;
	err = bd_claim(bdev, raw_open);
	if (err)
		goto out1;
	err = set_blocksize(bdev, bdev_hardsect_size(bdev));
	if (err)
		goto out2;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		filp->f_path.dentry->d_inode->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	return 0;

out2:
	bd_release(bdev);
out1:
	blkdev_put(bdev);
out:
	mutex_unlock(&raw_mutex);
	return err;
}
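
/*
 * A hedged userspace sketch of the open semantics above: I/O on a bound
 * /dev/raw/rawN behaves like O_DIRECT I/O on the block device, so buffer
 * address, file offset and transfer length must all be multiples of the
 * sector size (512 is assumed here; the real value can be queried with the
 * BLKSSZGET ioctl):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/dev/raw/raw1", O_RDONLY);
 *
 *		posix_memalign(&buf, 512, 4096);	// sector-aligned buffer
 *		return pread(fd, buf, 4096, 0) < 0;	// aligned offset/length
 *	}
 */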

/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->i_mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0) {
		/* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
		inode->i_mapping = &inode->i_data;
		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
	}
	mutex_unlock(&raw_mutex);

	bd_release(bdev);
	blkdev_put(bdev);
	return 0;
}

/*
 * Forward ioctls to the underlying block device.
 */
static int
raw_ioctl(struct inode *inode, struct file *filp,
		  unsigned int command, unsigned long arg)
{
	struct block_device *bdev = filp->private_data;

	return blkdev_ioctl(bdev->bd_inode, NULL, command, arg);
}
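
/*
 * Consequence of the forwarding above: regular block-device ioctls work on a
 * raw fd.  A small hedged sketch (assumes /dev/raw/raw1 is already bound):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	unsigned long long bytes;
 *	int fd = open("/dev/raw/raw1", O_RDONLY);
 *	ioctl(fd, BLKGETSIZE64, &bytes);	// size of the bound block device
 */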

static void bind_device(struct raw_config_request *rq)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor),
		      "raw%d", rq->raw_minor);
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
			unsigned int command, unsigned long arg)
{
	struct raw_config_request rq;
	struct raw_device_data *rawdev;
	int err = 0;

	switch (command) {
	case RAW_SETBIND:
	case RAW_GETBIND:

		/* First, find out which raw minor we want */

		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) {
			err = -EFAULT;
			goto out;
		}

		if (rq.raw_minor <= 0 || rq.raw_minor >= MAX_RAW_MINORS) {
			err = -EINVAL;
			goto out;
		}
		rawdev = &raw_devices[rq.raw_minor];

		if (command == RAW_SETBIND) {
			dev_t dev;

			/*
			 * This is like making block devices, so demand the
			 * same capability
			 */
			if (!capable(CAP_SYS_ADMIN)) {
				err = -EPERM;
				goto out;
			}

			/*
			 * For now, we don't need to check that the underlying
			 * block device is present or not: we can do that when
			 * the raw device is opened.  Just check that the
			 * major/minor numbers make sense.
			 */

			dev = MKDEV(rq.block_major, rq.block_minor);
			if ((rq.block_major == 0 && rq.block_minor != 0) ||
					MAJOR(dev) != rq.block_major ||
					MINOR(dev) != rq.block_minor) {
				err = -EINVAL;
				goto out;
			}

			mutex_lock(&raw_mutex);
			if (rawdev->inuse) {
				mutex_unlock(&raw_mutex);
				err = -EBUSY;
				goto out;
			}
			if (rawdev->binding) {
				bdput(rawdev->binding);
				module_put(THIS_MODULE);
			}
			if (rq.block_major == 0 && rq.block_minor == 0) {
				/* unbind */
				rawdev->binding = NULL;
				device_destroy(raw_class,
						MKDEV(RAW_MAJOR, rq.raw_minor));
			} else {
				rawdev->binding = bdget(dev);
				if (rawdev->binding == NULL)
					err = -ENOMEM;
				else {
					__module_get(THIS_MODULE);
					bind_device(&rq);
				}
			}
			mutex_unlock(&raw_mutex);
		} else {
			struct block_device *bdev;

			mutex_lock(&raw_mutex);
			bdev = rawdev->binding;
			if (bdev) {
				rq.block_major = MAJOR(bdev->bd_dev);
				rq.block_minor = MINOR(bdev->bd_dev);
			} else {
				rq.block_major = rq.block_minor = 0;
			}
			mutex_unlock(&raw_mutex);
			if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
				err = -EFAULT;
				goto out;
			}
		}
		break;
	default:
		err = -EINVAL;
		break;
	}
out:
	return err;
}
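
/*
 * The complementary query from userspace: RAW_GETBIND fills in
 * block_major/block_minor for a given raw_minor (both zero means "unbound").
 * Hedged fragment; ctl_fd is assumed to be an open fd for /dev/rawctl:
 *
 *	struct raw_config_request rq = { .raw_minor = 1 };
 *
 *	ioctl(ctl_fd, RAW_GETBIND, &rq);
 *	// rq.block_major / rq.block_minor now describe the binding, if any
 */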

static const struct file_operations raw_fops = {
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write_nolock,
	.open		= raw_open,
	.release	= raw_release,
	.ioctl		= raw_ioctl,
	.owner		= THIS_MODULE,
};

static const struct file_operations raw_ctl_fops = {
	.ioctl	=	raw_ctl_ioctl,
	.open	=	raw_open,
	.owner	=	THIS_MODULE,
};

static struct cdev raw_cdev;

static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;

	ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw");
	if (ret)
		goto error;

	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS);
	if (ret) {
		kobject_put(&raw_cdev.kobj);
		goto error_region;
	}

	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), "rawctl");

	return 0;

error_region:
	unregister_chrdev_region(dev, MAX_RAW_MINORS);
error:
	return ret;
}

static void __exit raw_exit(void)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
	class_destroy(raw_class);
	cdev_del(&raw_cdev);
	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS);
}

module_init(raw_init);
module_exit(raw_exit);
MODULE_LICENSE("GPL");
pt">(ht_cap, 0, sizeof(*ht_cap)); if (!ht_cap_ie) return; ht_cap->ht_supported = true; ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & sband->ht_cap.cap; ht_cap->cap &= ~IEEE80211_HT_CAP_SM_PS; ht_cap->cap |= sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS; ampdu_info = ht_cap_ie->ampdu_params_info; ht_cap->ampdu_factor = ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR; ht_cap->ampdu_density = (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; /* own MCS TX capabilities */ tx_mcs_set_cap = sband->ht_cap.mcs.tx_params; /* can we TX with MCS rates? */ if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED)) return; /* Counting from 0, therefore +1 */ if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF) max_tx_streams = ((tx_mcs_set_cap & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; else max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS; /* * 802.11n D5.0 20.3.5 / 20.6 says: * - indices 0 to 7 and 32 are single spatial stream * - 8 to 31 are multiple spatial streams using equal modulation * [8..15 for two streams, 16..23 for three and 24..31 for four] * - remainder are multiple spatial streams using unequal modulation */ for (i = 0; i < max_tx_streams; i++) ht_cap->mcs.rx_mask[i] = sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; i < IEEE80211_HT_MCS_MASK_LEN; i++) ht_cap->mcs.rx_mask[i] = sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; /* handle MCS rate 32 too */ if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) ht_cap->mcs.rx_mask[32/8] |= 1; } /* * ieee80211_enable_ht should be called only after the operating band * has been determined as ht configuration depends on the hw's * HT abilities for a specific band. */ u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, struct ieee80211_ht_info *hti, u16 ap_ht_cap_flags) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct ieee80211_bss_ht_conf ht; u32 changed = 0; bool enable_ht = true, ht_changed; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; memset(&ht, 0, sizeof(ht)); /* HT is not supported */ if (!sband->ht_cap.ht_supported) enable_ht = false; /* check that channel matches the right operating channel */ if (local->hw.conf.channel->center_freq != ieee80211_channel_to_frequency(hti->control_chan)) enable_ht = false; /* * XXX: This is totally incorrect when there are multiple virtual * interfaces, needs to be fixed later. 
*/ ht_changed = local->hw.conf.ht.enabled != enable_ht; local->hw.conf.ht.enabled = enable_ht; if (ht_changed) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_HT); /* disable HT */ if (!enable_ht) return 0; ht.secondary_channel_offset = hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET; ht.width_40_ok = !(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY); ht.operation_mode = le16_to_cpu(hti->operation_mode); /* if bss configuration changed store the new one */ if (memcmp(&sdata->vif.bss_conf.ht, &ht, sizeof(ht))) { changed |= BSS_CHANGED_HT; sdata->vif.bss_conf.ht = ht; } return changed; } static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid, u8 dialog_token, u16 start_seq_num, u16 agg_size, u16 timeout) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_sta *ifsta = &sdata->u.sta; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u16 capab; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); if (!skb) { printk(KERN_ERR "%s: failed to allocate buffer " "for addba request frame\n", sdata->dev->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP) memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); else memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); mgmt->u.action.category = WLAN_CATEGORY_BACK; mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; mgmt->u.action.u.addba_req.dialog_token = dialog_token; capab = (u16)(1 << 1); /* bit 1 aggregation policy */ capab |= (u16)(tid << 2); /* bit 5:2 TID number */ capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */ mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(start_seq_num << 4); ieee80211_tx_skb(sdata, skb, 0); } static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, u8 dialog_token, u16 status, u16 policy, u16 buf_size, u16 timeout) { struct ieee80211_if_sta *ifsta = &sdata->u.sta; struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u16 capab; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer " "for addba resp frame\n", sdata->dev->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP) memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); else memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); mgmt->u.action.category = WLAN_CATEGORY_BACK; mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; mgmt->u.action.u.addba_resp.dialog_token = dialog_token; capab = (u16)(policy << 1); /* bit 1 aggregation policy */ capab |= (u16)(tid << 2); /* bit 5:2 TID number */ capab |= (u16)(buf_size 
<< 6); /* bit 15:6 max size of aggregation */ mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); ieee80211_tx_skb(sdata, skb, 0); } static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid, u16 initiator, u16 reason_code) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_sta *ifsta = &sdata->u.sta; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u16 params; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); if (!skb) { printk(KERN_ERR "%s: failed to allocate buffer " "for delba frame\n", sdata->dev->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP) memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); else memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); mgmt->u.action.category = WLAN_CATEGORY_BACK; mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; params = (u16)(initiator << 11); /* bit 11 initiator */ params |= (u16)(tid << 12); /* bit 15:12 TID number */ mgmt->u.action.u.delba.params = cpu_to_le16(params); mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); ieee80211_tx_skb(sdata, skb, 0); } void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_bar *bar; u16 bar_control = 0; skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); if (!skb) { printk(KERN_ERR "%s: failed to allocate buffer for " "bar frame\n", sdata->dev->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); memset(bar, 0, sizeof(*bar)); bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); memcpy(bar->ra, ra, ETH_ALEN); memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN); bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; bar_control |= (u16)(tid << 12); bar->control = cpu_to_le16(bar_control); bar->start_seq_num = cpu_to_le16(ssn); ieee80211_tx_skb(sdata, skb, 0); } void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 initiator, u16 reason) { struct ieee80211_local *local = sdata->local; struct ieee80211_hw *hw = &local->hw; struct sta_info *sta; int ret, i; rcu_read_lock(); sta = sta_info_get(local, ra); if (!sta) { rcu_read_unlock(); return; } /* check if TID is in operational state */ spin_lock_bh(&sta->lock); if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) { spin_unlock_bh(&sta->lock); rcu_read_unlock(); return; } sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_REQ_STOP_BA_MSK | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); spin_unlock_bh(&sta->lock); /* stop HW Rx aggregation. 
ampdu_action existence * already verified in session init so we add the BUG_ON */ BUG_ON(!local->ops->ampdu_action); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", ra, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, &sta->sta, tid, NULL); if (ret) printk(KERN_DEBUG "HW problem - can not stop rx " "aggregation for tid %d\n", tid); /* shutdown timer has not expired */ if (initiator != WLAN_BACK_TIMER) del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer); /* check if this is a self generated aggregation halt */ if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) ieee80211_send_delba(sdata, ra, tid, 0, reason); /* free the reordering buffer */ for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { /* release the reordered frames */ dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; } } /* free resources */ kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); kfree(sta->ampdu_mlme.tid_rx[tid]); sta->ampdu_mlme.tid_rx[tid] = NULL; sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE; rcu_read_unlock(); } /* * After sending add Block Ack request we activated a timer until * add Block Ack response will arrive from the recipient. * If this timer expires sta_addba_resp_timer_expired will be executed. */ static void sta_addba_resp_timer_expired(unsigned long data) { /* not an elegant detour, but there is no choice as the timer passes * only one argument, and both sta_info and TID are needed, so init * flow in sta_info_create gives the TID as data, while the timer_to_id * array gives the sta through container_of */ u16 tid = *(u8 *)data; struct sta_info *temp_sta = container_of((void *)data, struct sta_info, timer_to_tid[tid]); struct ieee80211_local *local = temp_sta->local; struct ieee80211_hw *hw = &local->hw; struct sta_info *sta; u8 *state; rcu_read_lock(); sta = sta_info_get(local, temp_sta->sta.addr); if (!sta) { rcu_read_unlock(); return; } state = &sta->ampdu_mlme.tid_state_tx[tid]; /* check if the TID waits for addBA response */ spin_lock_bh(&sta->lock); if (!(*state & HT_ADDBA_REQUESTED_MSK)) { spin_unlock_bh(&sta->lock); *state = HT_AGG_STATE_IDLE; #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "timer expired on tid %d but we are not " "expecting addBA response there", tid); #endif goto timer_expired_exit; } #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); #endif /* go through the state check in stop_BA_session */ *state = HT_AGG_STATE_OPERATIONAL; spin_unlock_bh(&sta->lock); ieee80211_stop_tx_ba_session(hw, temp_sta->sta.addr, tid, WLAN_BACK_INITIATOR); timer_expired_exit: rcu_read_unlock(); } void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr) { struct ieee80211_local *local = sdata->local; int i; for (i = 0; i < STA_TID_NUM; i++) { ieee80211_stop_tx_ba_session(&local->hw, addr, i, WLAN_BACK_INITIATOR); ieee80211_sta_stop_rx_ba_session(sdata, addr, i, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_LEAVE_QBSS); } } int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) { struct ieee80211_local *local = hw_to_local(hw); struct sta_info *sta; struct ieee80211_sub_if_data *sdata; u16 start_seq_num; u8 *state; int ret; if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION)) return 
-EINVAL; #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n", ra, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ rcu_read_lock(); sta = sta_info_get(local, ra); if (!sta) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Could not find the station\n"); #endif ret = -ENOENT; goto exit; } spin_lock_bh(&sta->lock); /* we have tried too many times, receiver does not want A-MPDU */ if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { ret = -EBUSY; goto err_unlock_sta; } state = &sta->ampdu_mlme.tid_state_tx[tid]; /* check if the TID is not in aggregation flow already */ if (*state != HT_AGG_STATE_IDLE) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "BA request denied - session is not " "idle on tid %u\n", tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ ret = -EAGAIN; goto err_unlock_sta; } /* prepare A-MPDU MLME for Tx aggregation */ sta->ampdu_mlme.tid_tx[tid] = kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); if (!sta->ampdu_mlme.tid_tx[tid]) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_ERR "allocate tx mlme to tid %d failed\n", tid); #endif ret = -ENOMEM; goto err_unlock_sta; } /* Tx timer */ sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = sta_addba_resp_timer_expired; sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid]; init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); if (hw->ampdu_queues) { /* create a new queue for this aggregation */ ret = ieee80211_ht_agg_queue_add(local, sta, tid); /* case no queue is available to aggregation * don't switch to aggregation */ if (ret) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "BA request denied - " "queue unavailable for tid %d\n", tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ goto err_unlock_queue; } } sdata = sta->sdata; /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the * call back right away, it must see that the flow has begun */ *state |= HT_ADDBA_REQUESTED_MSK; /* This is slightly racy because the queue isn't stopped */ start_seq_num = sta->tid_seq[tid]; if (local->ops->ampdu_action) ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, &sta->sta, tid, &start_seq_num); if (ret) { /* No need to requeue the packets in the agg queue, since we * held the tx lock: no packet could be enqueued to the newly * allocated queue */ if (hw->ampdu_queues) ieee80211_ht_agg_queue_remove(local, sta, tid, 0); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "BA request denied - HW unavailable for" " tid %d\n", tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ *state = HT_AGG_STATE_IDLE; goto err_unlock_queue; } /* Will put all the packets in the new SW queue */ if (hw->ampdu_queues) ieee80211_requeue(local, ieee802_1d_to_ac[tid]); spin_unlock_bh(&sta->lock); /* send an addBA request */ sta->ampdu_mlme.dialog_token_allocator++; sta->ampdu_mlme.tid_tx[tid]->dialog_token = sta->ampdu_mlme.dialog_token_allocator; sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; ieee80211_send_addba_request(sta->sdata, ra, tid, sta->ampdu_mlme.tid_tx[tid]->dialog_token, sta->ampdu_mlme.tid_tx[tid]->ssn, 0x40, 5000); /* activate the timer for the recipient's addBA response */ sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = jiffies + ADDBA_RESP_INTERVAL; add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); #endif goto exit; err_unlock_queue: kfree(sta->ampdu_mlme.tid_tx[tid]); sta->ampdu_mlme.tid_tx[tid] = 
NULL; ret = -EBUSY; err_unlock_sta: spin_unlock_bh(&sta->lock); exit: rcu_read_unlock(); return ret; } EXPORT_SYMBOL(ieee80211_start_tx_ba_session); int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid, enum ieee80211_back_parties initiator) { struct ieee80211_local *local = hw_to_local(hw); struct sta_info *sta; u8 *state; int ret = 0; if (tid >= STA_TID_NUM) return -EINVAL; rcu_read_lock(); sta = sta_info_get(local, ra); if (!sta) { rcu_read_unlock(); return -ENOENT; } /* check if the TID is in aggregation */ state = &sta->ampdu_mlme.tid_state_tx[tid]; spin_lock_bh(&sta->lock); if (*state != HT_AGG_STATE_OPERATIONAL) { ret = -ENOENT; goto stop_BA_exit; } #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", ra, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (hw->ampdu_queues) ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]); *state = HT_AGG_STATE_REQ_STOP_BA_MSK | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); if (local->ops->ampdu_action) ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP, &sta->sta, tid, NULL); /* case HW denied going back to legacy */ if (ret) { WARN_ON(ret != -EBUSY); *state = HT_AGG_STATE_OPERATIONAL; if (hw->ampdu_queues) ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); goto stop_BA_exit; } stop_BA_exit: spin_unlock_bh(&sta->lock); rcu_read_unlock(); return ret; } EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) { struct ieee80211_local *local = hw_to_local(hw); struct sta_info *sta; u8 *state; if (tid >= STA_TID_NUM) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", tid, STA_TID_NUM); #endif return; } rcu_read_lock(); sta = sta_info_get(local, ra); if (!sta) { rcu_read_unlock(); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Could not find station: %pM\n", ra); #endif return; } state = &sta->ampdu_mlme.tid_state_tx[tid]; spin_lock_bh(&sta->lock); if (!(*state & HT_ADDBA_REQUESTED_MSK)) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", *state); #endif spin_unlock_bh(&sta->lock); rcu_read_unlock(); return; } WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK); *state |= HT_ADDBA_DRV_READY_MSK; if (*state == HT_AGG_STATE_OPERATIONAL) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); #endif if (hw->ampdu_queues) ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); } spin_unlock_bh(&sta->lock); rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) { struct ieee80211_local *local = hw_to_local(hw); struct sta_info *sta; u8 *state; int agg_queue; if (tid >= STA_TID_NUM) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", tid, STA_TID_NUM); #endif return; } #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n", ra, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ rcu_read_lock(); sta = sta_info_get(local, ra); if (!sta) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Could not find station: %pM\n", ra); #endif rcu_read_unlock(); return; } state = &sta->ampdu_mlme.tid_state_tx[tid]; /* NOTE: no need to use sta->lock in this state check, as * ieee80211_stop_tx_ba_session will let only one stop call to * pass through per sta/tid */ if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "unexpected callback to A-MPDU 
stop\n"); #endif rcu_read_unlock(); return; } if (*state & HT_AGG_STATE_INITIATOR_MSK) ieee80211_send_delba(sta->sdata, ra, tid, WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); if (hw->ampdu_queues) { agg_queue = sta->tid_to_tx_q[tid]; ieee80211_ht_agg_queue_remove(local, sta, tid, 1); /* We just requeued the all the frames that were in the * removed queue, and since we might miss a softirq we do * netif_schedule_queue. ieee80211_wake_queue is not used * here as this queue is not necessarily stopped */ netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue)); } spin_lock_bh(&sta->lock); *state = HT_AGG_STATE_IDLE; sta->ampdu_mlme.addba_req_num[tid] = 0; kfree(sta->ampdu_mlme.tid_tx[tid]); sta->ampdu_mlme.tid_tx[tid] = NULL; spin_unlock_bh(&sta->lock); rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra, u16 tid) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_ra_tid *ra_tid; struct sk_buff *skb = dev_alloc_skb(0); if (unlikely(!skb)) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_WARNING "%s: Not enough memory, " "dropping start BA session", skb->dev->name); #endif return; } ra_tid = (struct ieee80211_ra_tid *) &skb->cb; memcpy(&ra_tid->ra, ra, ETH_ALEN); ra_tid->tid = tid; skb->pkt_type = IEEE80211_ADDBA_MSG; skb_queue_tail(&local->skb_queue, skb); tasklet_schedule(&local->tasklet); } EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra, u16 tid) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_ra_tid *ra_tid; struct sk_buff *skb = dev_alloc_skb(0); if (unlikely(!skb)) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_WARNING "%s: Not enough memory, " "dropping stop BA session", skb->dev->name); #endif return; } ra_tid = (struct ieee80211_ra_tid *) &skb->cb; memcpy(&ra_tid->ra, ra, ETH_ALEN); ra_tid->tid = tid; skb->pkt_type = IEEE80211_DELBA_MSG; skb_queue_tail(&local->skb_queue, skb); tasklet_schedule(&local->tasklet); } EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); /* * After accepting the AddBA Request we activated a timer, * resetting it after each frame that arrives from the originator. * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. 
*/ static void sta_rx_agg_session_timer_expired(unsigned long data) { /* not an elegant detour, but there is no choice as the timer passes * only one argument, and various sta_info are needed here, so init * flow in sta_info_create gives the TID as data, while the timer_to_id * array gives the sta through container_of */ u8 *ptid = (u8 *)data; u8 *timer_to_id = ptid - *ptid; struct sta_info *sta = container_of(timer_to_id, struct sta_info, timer_to_tid[0]); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); #endif ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, (u16)*ptid, WLAN_BACK_TIMER, WLAN_REASON_QSTA_TIMEOUT); } void ieee80211_process_addba_request(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_hw *hw = &local->hw; struct ieee80211_conf *conf = &hw->conf; struct tid_ampdu_rx *tid_agg_rx; u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; u8 dialog_token; int ret = -EOPNOTSUPP; /* extract session parameters from addba request frame */ dialog_token = mgmt->u.action.u.addba_req.dialog_token; timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); start_seq_num = le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; status = WLAN_STATUS_REQUEST_DECLINED; /* sanity check for incoming parameters: * check if configuration can support the BA policy * and if buffer size does not exceeds max value */ /* XXX: check own ht delayed BA capability?? */ if (((ba_policy != 1) && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || (buf_size > IEEE80211_MAX_AMPDU_BUF)) { status = WLAN_STATUS_INVALID_QOS_PARAM; #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "AddBA Req with bad params from " "%pM on tid %u. 
policy %d, buffer size %d\n", mgmt->sa, tid, ba_policy, buf_size); #endif /* CONFIG_MAC80211_HT_DEBUG */ goto end_no_lock; } /* determine default buffer size */ if (buf_size == 0) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[conf->channel->band]; buf_size = IEEE80211_MIN_AMPDU_BUF; buf_size = buf_size << sband->ht_cap.ampdu_factor; } /* examine state machine */ spin_lock_bh(&sta->lock); if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "unexpected AddBA Req from " "%pM on tid %u\n", mgmt->sa, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ goto end; } /* prepare A-MPDU MLME for Rx aggregation */ sta->ampdu_mlme.tid_rx[tid] = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); if (!sta->ampdu_mlme.tid_rx[tid]) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_ERR "allocate rx mlme to tid %d failed\n", tid); #endif goto end; } /* rx timer */ sta->ampdu_mlme.tid_rx[tid]->session_timer.function = sta_rx_agg_session_timer_expired; sta->ampdu_mlme.tid_rx[tid]->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; /* prepare reordering buffer */ tid_agg_rx->reorder_buf = kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); if (!tid_agg_rx->reorder_buf) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_ERR "can not allocate reordering buffer " "to tid %d\n", tid); #endif kfree(sta->ampdu_mlme.tid_rx[tid]); goto end; } memset(tid_agg_rx->reorder_buf, 0, buf_size * sizeof(struct sk_buff *)); if (local->ops->ampdu_action) ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, &sta->sta, tid, &start_seq_num); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (ret) { kfree(tid_agg_rx->reorder_buf); kfree(tid_agg_rx); sta->ampdu_mlme.tid_rx[tid] = NULL; goto end; } /* change state and send addba resp */ sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; tid_agg_rx->dialog_token = dialog_token; tid_agg_rx->ssn = start_seq_num; tid_agg_rx->head_seq_num = start_seq_num; tid_agg_rx->buf_size = buf_size; tid_agg_rx->timeout = timeout; tid_agg_rx->stored_mpdu_num = 0; status = WLAN_STATUS_SUCCESS; end: spin_unlock_bh(&sta->lock); end_no_lock: ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, dialog_token, status, 1, buf_size, timeout); } void ieee80211_process_addba_resp(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_hw *hw = &local->hw; u16 capab; u16 tid, start_seq_num; u8 *state; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; state = &sta->ampdu_mlme.tid_state_tx[tid]; spin_lock_bh(&sta->lock); if (!(*state & HT_ADDBA_REQUESTED_MSK)) { spin_unlock_bh(&sta->lock); return; } if (mgmt->u.action.u.addba_resp.dialog_token != sta->ampdu_mlme.tid_tx[tid]->dialog_token) { spin_unlock_bh(&sta->lock); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ return; } del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) == WLAN_STATUS_SUCCESS) { *state |= 
HT_ADDBA_RECEIVED_MSK; sta->ampdu_mlme.addba_req_num[tid] = 0; if (*state == HT_AGG_STATE_OPERATIONAL && local->hw.ampdu_queues) ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); if (local->ops->ampdu_action) { (void)local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_RESUME, &sta->sta, tid, &start_seq_num); } #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Resuming TX aggregation for tid %d\n", tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ spin_unlock_bh(&sta->lock); } else { sta->ampdu_mlme.addba_req_num[tid]++; /* this will allow the state check in stop_BA_session */ *state = HT_AGG_STATE_OPERATIONAL; spin_unlock_bh(&sta->lock); ieee80211_stop_tx_ba_session(hw, sta->sta.addr, tid, WLAN_BACK_INITIATOR); } } void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_local *local = sdata->local; u16 tid, params; u16 initiator; params = le16_to_cpu(mgmt->u.action.u.delba.params); tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n", mgmt->sa, initiator ? "initiator" : "recipient", tid, mgmt->u.action.u.delba.reason_code); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (initiator == WLAN_BACK_INITIATOR) ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid, WLAN_BACK_INITIATOR, 0); else { /* WLAN_BACK_RECIPIENT */ spin_lock_bh(&sta->lock); sta->ampdu_mlme.tid_state_tx[tid] = HT_AGG_STATE_OPERATIONAL; spin_unlock_bh(&sta->lock); ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid, WLAN_BACK_RECIPIENT); } }