author		Andreas Langer <an.langer@gmx.de>	2010-09-04 19:58:25 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-09-05 03:29:45 -0400
commit		e63760e59542ed872f7a5b1605a46e03b879d909 (patch)
tree		e93a45f0ad5224fa2213a6d04e9806422dac7e7a /drivers/staging
parent		24c76fc07b7680536fa372905f16b57a45c0d514 (diff)
Staging: batman-adv: layer2 unicast packet fragmentation
This patch implements a simple layer2 fragmentation to allow traffic exchange over network interfaces with an MTU smaller than 1500 bytes. The fragmentation splits the big packets into two parts and marks the frames accordingly. The receiving end buffers the packets to reassemble the original packet before passing it to the higher layers.

This feature makes it necessary to modify the batman-adv encapsulation for unicast packets by adding a sequence number, flags and the originator address. This modification is part of a separate packet type for fragmented packets to keep the original overhead as low as possible.

This patch enables the feature by default to ensure the data traffic can travel through the network, but it also prints a warning to notify the user about the performance implications.

Note: Fragmentation should be avoided at all costs since it has a dramatic impact on performance, especially on wifi networks. Instead of a single packet, 2 packets have to be sent! Not only is valuable airtime wasted, but packet loss also decreases the throughput. A link with 50% packet loss and fragmentation enabled is pretty much unusable.

Signed-off-by: Andreas Langer <an.langer@gmx.de>
[sven.eckelmann@gmx.de: Rework on top of current version]
Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
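For illustration only, here is a minimal userspace sketch of the split/reassembly idea described above. The structure and helper names (frag_hdr, split_payload, merge_payload) are hypothetical and not the driver's code, which operates on skbs and uses struct unicast_frag_packet (see unicast.c and packet.h in the diff below):

/*
 * Editor's illustration, not part of the patch: each half of an oversized
 * payload gets a small header carrying a flag that marks the first half and
 * a sequence number; the two halves use consecutive sequence numbers so the
 * receiver can pair them up and restore the original order.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define FRAG_HEAD 0x01			/* set on the first half only */

struct frag_hdr {
	uint8_t  flags;			/* FRAG_HEAD or 0 */
	uint16_t seqno;			/* consecutive for the two halves */
};

struct fragment {
	struct frag_hdr hdr;
	size_t len;
	uint8_t data[1500];
};

/* split one payload into two halves, each carrying its own small header */
static void split_payload(const uint8_t *payload, size_t len,
			  uint16_t seqno, struct fragment frag[2])
{
	size_t half = len / 2;

	frag[0].hdr.flags = FRAG_HEAD;
	frag[0].hdr.seqno = seqno;
	frag[0].len = half;
	memcpy(frag[0].data, payload, half);

	frag[1].hdr.flags = 0;
	frag[1].hdr.seqno = seqno + 1;
	frag[1].len = len - half;
	memcpy(frag[1].data, payload + half, len - half);
}

/* reassemble: the half carrying FRAG_HEAD goes first, the other follows */
static size_t merge_payload(const struct fragment *a,
			    const struct fragment *b, uint8_t *out)
{
	const struct fragment *head = (a->hdr.flags & FRAG_HEAD) ? a : b;
	const struct fragment *tail = (head == a) ? b : a;

	memcpy(out, head->data, head->len);
	memcpy(out + head->len, tail->data, tail->len);
	return head->len + tail->len;
}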
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/batman-adv/bat_sysfs.c       |  55
-rw-r--r--  drivers/staging/batman-adv/hard-interface.c  |  34
-rw-r--r--  drivers/staging/batman-adv/originator.c      |  10
-rw-r--r--  drivers/staging/batman-adv/packet.h          |  26
-rw-r--r--  drivers/staging/batman-adv/routing.c         | 115
-rw-r--r--  drivers/staging/batman-adv/routing.h         |   1
-rw-r--r--  drivers/staging/batman-adv/soft-interface.c  |   4
-rw-r--r--  drivers/staging/batman-adv/types.h           |  10
-rw-r--r--  drivers/staging/batman-adv/unicast.c         | 177
-rw-r--r--  drivers/staging/batman-adv/unicast.h         |  12
10 files changed, 416 insertions(+), 28 deletions(-)
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
index d49addeb0ad..b9fe391b641 100644
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@ -134,6 +134,58 @@ static ssize_t store_bond(struct kobject *kobj, struct attribute *attr,
 	return count;
 }
 
+static ssize_t show_frag(struct kobject *kobj, struct attribute *attr,
+			 char *buff)
+{
+	struct device *dev = to_dev(kobj->parent);
+	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
+	int frag_status = atomic_read(&bat_priv->frag_enabled);
+
+	return sprintf(buff, "%s\n",
+		       frag_status == 0 ? "disabled" : "enabled");
+}
+
+static ssize_t store_frag(struct kobject *kobj, struct attribute *attr,
+			  char *buff, size_t count)
+{
+	struct device *dev = to_dev(kobj->parent);
+	struct net_device *net_dev = to_net_dev(dev);
+	struct bat_priv *bat_priv = netdev_priv(net_dev);
+	int frag_enabled_tmp = -1;
+
+	if (((count == 2) && (buff[0] == '1')) ||
+	    (strncmp(buff, "enable", 6) == 0))
+		frag_enabled_tmp = 1;
+
+	if (((count == 2) && (buff[0] == '0')) ||
+	    (strncmp(buff, "disable", 7) == 0))
+		frag_enabled_tmp = 0;
+
+	if (frag_enabled_tmp < 0) {
+		if (buff[count - 1] == '\n')
+			buff[count - 1] = '\0';
+
+		bat_err(net_dev,
+			"Invalid parameter for 'fragmentation' setting on mesh"
+			"received: %s\n", buff);
+		return -EINVAL;
+	}
+
+	if (atomic_read(&bat_priv->frag_enabled) == frag_enabled_tmp)
+		return count;
+
+	bat_info(net_dev, "Changing fragmentation from: %s to: %s\n",
+		 atomic_read(&bat_priv->frag_enabled) == 1 ?
+		 "enabled" : "disabled",
+		 frag_enabled_tmp == 1 ? "enabled" : "disabled");
+
+	atomic_set(&bat_priv->frag_enabled, (unsigned)frag_enabled_tmp);
+
+	update_min_mtu();
+
+	return count;
+}
+
 static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
 			     char *buff)
 {
@@ -279,6 +331,7 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
 static BAT_ATTR(aggregated_ogms, S_IRUGO | S_IWUSR,
 		show_aggr_ogms, store_aggr_ogms);
 static BAT_ATTR(bonding, S_IRUGO | S_IWUSR, show_bond, store_bond);
+static BAT_ATTR(fragmentation, S_IRUGO | S_IWUSR, show_frag, store_frag);
 static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
 static BAT_ATTR(orig_interval, S_IRUGO | S_IWUSR,
 		show_orig_interval, store_orig_interval);
@@ -289,6 +342,7 @@ static BAT_ATTR(log_level, S_IRUGO | S_IWUSR, show_log_level, store_log_level);
 static struct bat_attribute *mesh_attrs[] = {
 	&bat_attr_aggregated_ogms,
 	&bat_attr_bonding,
+	&bat_attr_fragmentation,
 	&bat_attr_vis_mode,
 	&bat_attr_orig_interval,
 #ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -308,6 +362,7 @@ int sysfs_add_meshif(struct net_device *dev)
 	   routine as soon as we have it */
 	atomic_set(&bat_priv->aggregation_enabled, 1);
 	atomic_set(&bat_priv->bonding_enabled, 0);
+	atomic_set(&bat_priv->frag_enabled, 1);
 	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
 	atomic_set(&bat_priv->orig_interval, 1000);
 	atomic_set(&bat_priv->log_level, 0);
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 6e973a79aa2..82d3d554467 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -166,6 +166,11 @@ int hardif_min_mtu(void)
 	/* allow big frames if all devices are capable to do so
 	 * (have MTU > 1500 + BAT_HEADER_LEN) */
 	int min_mtu = ETH_DATA_LEN;
+	/* FIXME: each batman_if will be attached to a softif */
+	struct bat_priv *bat_priv = netdev_priv(soft_device);
+
+	if (atomic_read(&bat_priv->frag_enabled))
+		goto out;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(batman_if, &if_list, list) {
@@ -175,7 +180,7 @@ int hardif_min_mtu(void)
 			    min_mtu);
 	}
 	rcu_read_unlock();
-
+out:
 	return min_mtu;
 }
 
@@ -261,8 +266,30 @@ int hardif_enable_interface(struct batman_if *batman_if)
 	orig_hash_add_if(batman_if, bat_priv->num_ifaces);
 
 	atomic_set(&batman_if->seqno, 1);
+	atomic_set(&batman_if->frag_seqno, 1);
 	bat_info(soft_device, "Adding interface: %s\n", batman_if->dev);
 
+	if (atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
+		ETH_DATA_LEN + BAT_HEADER_LEN)
+		bat_info(soft_device,
+			"The MTU of interface %s is too small (%i) to handle "
+			"the transport of batman-adv packets. Packets going "
+			"over this interface will be fragmented on layer2 "
+			"which could impact the performance. Setting the MTU "
+			"to %zi would solve the problem.\n",
+			batman_if->dev, batman_if->net_dev->mtu,
+			ETH_DATA_LEN + BAT_HEADER_LEN);
+
+	if (!atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
+		ETH_DATA_LEN + BAT_HEADER_LEN)
+		bat_info(soft_device,
+			"The MTU of interface %s is too small (%i) to handle "
+			"the transport of batman-adv packets. If you experience"
+			" problems getting traffic through try increasing the "
+			"MTU to %zi.\n",
+			batman_if->dev, batman_if->net_dev->mtu,
+			ETH_DATA_LEN + BAT_HEADER_LEN);
+
 	if (hardif_is_iface_up(batman_if))
 		hardif_activate_interface(soft_device, bat_priv, batman_if);
 	else
@@ -495,6 +522,11 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		ret = recv_unicast_packet(skb, batman_if);
 		break;
 
+		/* fragmented unicast packet */
+	case BAT_UNICAST_FRAG:
+		ret = recv_ucast_frag_packet(skb, batman_if);
+		break;
+
 		/* broadcast packet */
 	case BAT_BCAST:
 		ret = recv_bcast_packet(skb);
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index de5a8c1a810..b51a13ef856 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -27,6 +27,7 @@
 #include "translation-table.h"
 #include "routing.h"
 #include "hard-interface.h"
+#include "unicast.h"
 
 static DECLARE_DELAYED_WORK(purge_orig_wq, purge_orig);
 
@@ -95,6 +96,7 @@ static void free_orig_node(void *data)
 		kfree(neigh_node);
 	}
 
+	frag_list_free(&orig_node->frag_list);
 	hna_global_del_orig(orig_node, "originator timed out");
 
 	kfree(orig_node->bcast_own);
@@ -157,6 +159,10 @@ struct orig_node *get_orig_node(uint8_t *addr)
 
 	size = bat_priv->num_ifaces * sizeof(uint8_t);
 	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
+
+	INIT_LIST_HEAD(&orig_node->frag_list);
+	orig_node->last_frag_packet = 0;
+
 	if (!orig_node->bcast_own_sum)
 		goto free_bcast_own;
 
@@ -271,6 +277,10 @@ void purge_orig(struct work_struct *work)
 			hash_remove_bucket(orig_hash, &hashit);
 			free_orig_node(orig_node);
 		}
+
+		if (time_after(jiffies, (orig_node->last_frag_packet +
+					msecs_to_jiffies(FRAG_TIMEOUT))))
+			frag_list_free(&orig_node->frag_list);
 	}
 
 	spin_unlock_irqrestore(&orig_hash_lock, flags);
diff --git a/drivers/staging/batman-adv/packet.h b/drivers/staging/batman-adv/packet.h
index abb5e460f23..44de1bfb0b0 100644
--- a/drivers/staging/batman-adv/packet.h
+++ b/drivers/staging/batman-adv/packet.h
@@ -24,14 +24,15 @@
 
 #define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
 
 #define BAT_PACKET 0x01
 #define BAT_ICMP 0x02
 #define BAT_UNICAST 0x03
 #define BAT_BCAST 0x04
 #define BAT_VIS 0x05
+#define BAT_UNICAST_FRAG 0x06
 
 /* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 11
+#define COMPAT_VERSION 13
 #define DIRECTLINK 0x40
 #define VIS_SERVER 0x20
 #define PRIMARIES_FIRST_HOP 0x10
@@ -47,6 +48,9 @@
 #define VIS_TYPE_SERVER_SYNC 0
 #define VIS_TYPE_CLIENT_UPDATE 1
 
+/* fragmentation defines */
+#define UNI_FRAG_HEAD 0x01
+
 struct batman_packet {
 	uint8_t packet_type;
 	uint8_t version;  /* batman version field */
@@ -96,6 +100,16 @@ struct unicast_packet {
 	uint8_t ttl;
 } __attribute__((packed));
 
+struct unicast_frag_packet {
+	uint8_t packet_type;
+	uint8_t version;  /* batman version field */
+	uint8_t dest[6];
+	uint8_t ttl;
+	uint8_t flags;
+	uint8_t orig[6];
+	uint16_t seqno;
+} __attribute__((packed));
+
 struct bcast_packet {
 	uint8_t packet_type;
 	uint8_t version;  /* batman version field */
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 032195e6de9..d8e77ac21c2 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -32,6 +32,7 @@
 #include "ring_buffer.h"
 #include "vis.h"
 #include "aggregation.h"
+#include "unicast.h"
 
 static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
 
@@ -1105,43 +1106,43 @@ struct neigh_node *find_router(struct orig_node *orig_node,
 	return router;
 }
 
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
 {
-	struct unicast_packet *unicast_packet;
-	struct orig_node *orig_node;
-	struct neigh_node *router;
 	struct ethhdr *ethhdr;
-	struct batman_if *batman_if;
-	struct sk_buff *skb_old;
-	uint8_t dstaddr[ETH_ALEN];
-	int hdr_size = sizeof(struct unicast_packet);
-	unsigned long flags;
 
 	/* drop packet if it has not necessary minimum size */
 	if (skb_headlen(skb) < hdr_size)
-		return NET_RX_DROP;
+		return -1;
 
 	ethhdr = (struct ethhdr *) skb_mac_header(skb);
 
 	/* packet with unicast indication but broadcast recipient */
 	if (is_bcast(ethhdr->h_dest))
-		return NET_RX_DROP;
+		return -1;
 
 	/* packet with broadcast sender address */
 	if (is_bcast(ethhdr->h_source))
-		return NET_RX_DROP;
+		return -1;
 
 	/* not for me */
 	if (!is_my_mac(ethhdr->h_dest))
-		return NET_RX_DROP;
+		return -1;
 
-	unicast_packet = (struct unicast_packet *) skb->data;
+	return 0;
+}
 
-	/* packet for me */
-	if (is_my_mac(unicast_packet->dest)) {
-		interface_rx(skb, hdr_size);
-		return NET_RX_SUCCESS;
-	}
+static int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
+				int hdr_size)
+{
+	struct orig_node *orig_node;
+	struct neigh_node *router;
+	struct batman_if *batman_if;
+	struct sk_buff *skb_old;
+	uint8_t dstaddr[ETH_ALEN];
+	unsigned long flags;
+	struct unicast_packet *unicast_packet =
+		(struct unicast_packet *) skb->data;
+	struct ethhdr *ethhdr = (struct ethhdr *) skb_mac_header(skb);
 
 	/* TTL exceeded */
 	if (unicast_packet->ttl < 2) {
@@ -1172,7 +1173,7 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
 	spin_unlock_irqrestore(&orig_hash_lock, flags);
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) {
+	if (!skb_clone_writable(skb, hdr_size)) {
 		skb_old = skb;
 		skb = skb_copy(skb, GFP_ATOMIC);
 		if (!skb)
@@ -1191,6 +1192,80 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
 	return NET_RX_SUCCESS;
 }
 
+int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+{
+	struct unicast_packet *unicast_packet;
+	int hdr_size = sizeof(struct unicast_packet);
+
+	if (check_unicast_packet(skb, hdr_size) < 0)
+		return NET_RX_DROP;
+
+	unicast_packet = (struct unicast_packet *) skb->data;
+
+	/* packet for me */
+	if (is_my_mac(unicast_packet->dest)) {
+		interface_rx(skb, hdr_size);
+		return NET_RX_SUCCESS;
+	}
+
+	return route_unicast_packet(skb, recv_if, hdr_size);
+}
+
+int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
+{
+	struct unicast_frag_packet *unicast_packet;
+	struct orig_node *orig_node;
+	struct frag_packet_list_entry *tmp_frag_entry;
+	int hdr_size = sizeof(struct unicast_frag_packet);
+	unsigned long flags;
+
+	if (check_unicast_packet(skb, hdr_size) < 0)
+		return NET_RX_DROP;
+
+	unicast_packet = (struct unicast_frag_packet *) skb->data;
+
+	/* packet for me */
+	if (is_my_mac(unicast_packet->dest)) {
+
+		spin_lock_irqsave(&orig_hash_lock, flags);
+		orig_node = ((struct orig_node *)
+			hash_find(orig_hash, unicast_packet->orig));
+
+		if (!orig_node) {
+			pr_warning("couldn't find orig node for "
+				   "fragmentation\n");
+			spin_unlock_irqrestore(&orig_hash_lock, flags);
+			return NET_RX_DROP;
+		}
+
+		orig_node->last_frag_packet = jiffies;
+
+		if (list_empty(&orig_node->frag_list))
+			create_frag_buffer(&orig_node->frag_list);
+
+		tmp_frag_entry =
+			search_frag_packet(&orig_node->frag_list,
+					   unicast_packet);
+
+		if (!tmp_frag_entry) {
+			create_frag_entry(&orig_node->frag_list, skb);
+			spin_unlock_irqrestore(&orig_hash_lock, flags);
+			return NET_RX_SUCCESS;
+		}
+
+		skb = merge_frag_packet(&orig_node->frag_list,
+					tmp_frag_entry, skb);
+		spin_unlock_irqrestore(&orig_hash_lock, flags);
+		if (!skb)
+			return NET_RX_DROP;
+
+		interface_rx(skb, hdr_size);
+		return NET_RX_SUCCESS;
+	}
+
+	return route_unicast_packet(skb, recv_if, hdr_size);
+}
+
 int recv_bcast_packet(struct sk_buff *skb)
 {
 	struct orig_node *orig_node;
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 3eac64e3cf9..81c684f7a64 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -34,6 +34,7 @@ void update_routes(struct orig_node *orig_node,
 		   unsigned char *hna_buff, int hna_buff_len);
 int recv_icmp_packet(struct sk_buff *skb);
 int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
+int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
 int recv_bcast_packet(struct sk_buff *skb);
 int recv_vis_packet(struct sk_buff *skb);
 int recv_bat_packet(struct sk_buff *skb,
diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
index 315fdeb38cf..d60b1a8ac7f 100644
--- a/drivers/staging/batman-adv/soft-interface.c
+++ b/drivers/staging/batman-adv/soft-interface.c
@@ -249,7 +249,9 @@ void interface_setup(struct net_device *dev)
 #endif
 	dev->destructor = free_netdev;
 
-	dev->mtu = hardif_min_mtu();
+	dev->mtu = ETH_DATA_LEN;	/* can't call min_mtu, because the
+					 * needed variables have not been
+					 * initialized yet */
 	dev->hard_header_len = BAT_HEADER_LEN;	/* reserve more space in the
 						 * skbuff for our header */
 
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index 67bf4db32d5..ac165753e84 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -41,6 +41,7 @@ struct batman_if {
 	char addr_str[ETH_STR_LEN];
 	struct net_device *net_dev;
 	atomic_t seqno;
+	atomic_t frag_seqno;
 	unsigned char *packet_buff;
 	int packet_len;
 	struct kobject *hardif_obj;
@@ -81,6 +82,8 @@ struct orig_node {
 	TYPE_OF_WORD bcast_bits[NUM_WORDS];
 	uint32_t last_bcast_seqno;
 	struct list_head neigh_list;
+	struct list_head frag_list;
+	unsigned long last_frag_packet;
 	struct {
 		uint8_t candidates;
 		struct neigh_node *selected;
@@ -110,6 +113,7 @@ struct bat_priv {
 	struct net_device_stats stats;
 	atomic_t aggregation_enabled;
 	atomic_t bonding_enabled;
+	atomic_t frag_enabled;
 	atomic_t vis_mode;
 	atomic_t orig_interval;
 	atomic_t log_level;
@@ -183,4 +187,10 @@ struct debug_log {
 	wait_queue_head_t queue_wait;
 };
 
+struct frag_packet_list_entry {
+	struct list_head list;
+	uint16_t seqno;
+	struct sk_buff *skb;
+};
+
 #endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
index 27c4abb2530..153914e2951 100644
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@ -28,6 +28,177 @@
 #include "routing.h"
 #include "hard-interface.h"
 
+
+struct sk_buff *merge_frag_packet(struct list_head *head,
+				  struct frag_packet_list_entry *tfp,
+				  struct sk_buff *skb)
+{
+	struct unicast_frag_packet *up =
+		(struct unicast_frag_packet *) skb->data;
+	struct sk_buff *tmp_skb;
+
+	/* set skb to the first part and tmp_skb to the second part */
+	if (up->flags & UNI_FRAG_HEAD) {
+		tmp_skb = tfp->skb;
+	} else {
+		tmp_skb = skb;
+		skb = tfp->skb;
+	}
+
+	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
+	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
+		/* free buffered skb, skb will be freed later */
+		kfree_skb(tfp->skb);
+		return NULL;
+	}
+
+	/* move free entry to end */
+	tfp->skb = NULL;
+	tfp->seqno = 0;
+	list_move_tail(&tfp->list, head);
+
+	memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
+	kfree_skb(tmp_skb);
+	return skb;
+}
+
+void create_frag_entry(struct list_head *head, struct sk_buff *skb)
+{
+	struct frag_packet_list_entry *tfp;
+	struct unicast_frag_packet *up =
+		(struct unicast_frag_packet *) skb->data;
+
+	/* free and oldest packets stand at the end */
+	tfp = list_entry((head)->prev, typeof(*tfp), list);
+	kfree_skb(tfp->skb);
+
+	tfp->seqno = ntohs(up->seqno);
+	tfp->skb = skb;
+	list_move(&tfp->list, head);
+	return;
+}
+
+void create_frag_buffer(struct list_head *head)
+{
+	int i;
+	struct frag_packet_list_entry *tfp;
+
+	for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
+		tfp = kmalloc(sizeof(struct frag_packet_list_entry),
+			GFP_ATOMIC);
+		tfp->skb = NULL;
+		tfp->seqno = 0;
+		INIT_LIST_HEAD(&tfp->list);
+		list_add(&tfp->list, head);
+	}
+
+	return;
+}
+
+struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
+					struct unicast_frag_packet *up)
+{
+	struct frag_packet_list_entry *tfp;
+	struct unicast_frag_packet *tmp_up = NULL;
+	uint16_t search_seqno;
+
+	if (up->flags & UNI_FRAG_HEAD)
+		search_seqno = ntohs(up->seqno)+1;
+	else
+		search_seqno = ntohs(up->seqno)-1;
+
+	list_for_each_entry(tfp, head, list) {
+
+		if (!tfp->skb)
+			continue;
+
+		if (tfp->seqno == ntohs(up->seqno))
+			goto mov_tail;
+
+		tmp_up = (struct unicast_frag_packet *) tfp->skb->data;
+
+		if (tfp->seqno == search_seqno) {
+
+			if ((tmp_up->flags & UNI_FRAG_HEAD) !=
+			    (up->flags & UNI_FRAG_HEAD))
+				return tfp;
+			else
+				goto mov_tail;
+		}
+	}
+	return NULL;
+
+mov_tail:
+	list_move_tail(&tfp->list, head);
+	return NULL;
+}
+
+void frag_list_free(struct list_head *head)
+{
+	struct frag_packet_list_entry *pf, *tmp_pf;
+
+	if (!list_empty(head)) {
+
+		list_for_each_entry_safe(pf, tmp_pf, head, list) {
+			kfree_skb(pf->skb);
+			list_del(&pf->list);
+			kfree(pf);
+		}
+	}
+	return;
+}
+
+static int unicast_send_frag_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
+				 struct batman_if *batman_if, uint8_t dstaddr[],
+				 struct orig_node *orig_node)
+{
+	struct unicast_frag_packet *ucast_frag1, *ucast_frag2;
+	int hdr_len = sizeof(struct unicast_frag_packet);
+	struct sk_buff *frag_skb;
+	int data_len = skb->len;
+
+	if (!bat_priv->primary_if)
+		goto dropped;
+
+	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + hdr_len);
+	skb_split(skb, frag_skb, data_len / 2);
+
+	if (my_skb_push(frag_skb, hdr_len) < 0 ||
+	    my_skb_push(skb, hdr_len) < 0)
+		goto drop_frag;
+
+	ucast_frag1 = (struct unicast_frag_packet *)skb->data;
+	ucast_frag2 = (struct unicast_frag_packet *)frag_skb->data;
+
+	ucast_frag1->version = COMPAT_VERSION;
+	ucast_frag1->packet_type = BAT_UNICAST_FRAG;
+	ucast_frag1->ttl = TTL;
+	memcpy(ucast_frag1->orig,
+	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(ucast_frag1->dest, orig_node->orig, ETH_ALEN);
+
+	memcpy(ucast_frag2, ucast_frag1, sizeof(struct unicast_frag_packet));
+
+	ucast_frag1->flags |= UNI_FRAG_HEAD;
+	ucast_frag2->flags &= ~UNI_FRAG_HEAD;
+
+	ucast_frag1->seqno = htons((uint16_t)atomic_inc_return(
+						&batman_if->frag_seqno));
+
+	ucast_frag2->seqno = htons((uint16_t)atomic_inc_return(
+						&batman_if->frag_seqno));
+
+	send_skb_packet(skb, batman_if, dstaddr);
+	send_skb_packet(frag_skb, batman_if, dstaddr);
+	return 0;
+
+drop_frag:
+	kfree_skb(frag_skb);
+dropped:
+	kfree_skb(skb);
+	return 1;
+}
+
 int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
@@ -35,6 +206,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 	struct orig_node *orig_node;
 	struct batman_if *batman_if;
 	struct neigh_node *router;
+	int data_len = skb->len;
 	uint8_t dstaddr[6];
 	unsigned long flags;
 
@@ -63,6 +235,11 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 	if (batman_if->if_status != IF_ACTIVE)
 		goto dropped;
 
+	if (atomic_read(&bat_priv->frag_enabled) &&
+	    data_len + sizeof(struct unicast_packet) > batman_if->net_dev->mtu)
+		return unicast_send_frag_skb(skb, bat_priv, batman_if,
+					     dstaddr, orig_node);
+
 	if (my_skb_push(skb, sizeof(struct unicast_packet)) < 0)
 		goto dropped;
 
diff --git a/drivers/staging/batman-adv/unicast.h b/drivers/staging/batman-adv/unicast.h
index dd00703103f..1d5cbeb6733 100644
--- a/drivers/staging/batman-adv/unicast.h
+++ b/drivers/staging/batman-adv/unicast.h
@@ -22,6 +22,18 @@
 #ifndef _NET_BATMAN_ADV_UNICAST_H_
 #define _NET_BATMAN_ADV_UNICAST_H_
 
+#define FRAG_TIMEOUT 10000	/* purge frag list entrys after time in ms */
+#define FRAG_BUFFER_SIZE 6	/* number of list elements in buffer */
+
+struct sk_buff *merge_frag_packet(struct list_head *head,
+				  struct frag_packet_list_entry *tfp,
+				  struct sk_buff *skb);
+
+void create_frag_entry(struct list_head *head, struct sk_buff *skb);
+void create_frag_buffer(struct list_head *head);
+struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
+					struct unicast_frag_packet *up);
+void frag_list_free(struct list_head *head);
 int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
 
 #endif /* _NET_BATMAN_ADV_UNICAST_H_ */