aboutsummaryrefslogblamecommitdiffstats
path: root/net/TUNABLE
blob: 9913211f07a7d7b7a3f2c0c4eb2a2e0b453916a0 (plain) (tree)

















































                                                                                 
The following parameters should be tunable at compile time. Some of them
exist as sysctls too.

This is far from complete

Item			Description
----------------------------------------------------------------------------
MAX_LINKS		Maximum number of netlink minor devices. (1-32)
RIF_TABLE_SIZE		Token ring RIF cache size (tunable)
AARP_HASH_SIZE		Size of Appletalk hash table (tunable)
AX25_DEF_T1		AX.25 parameters. These are all tunable via
AX25_DEF_T2		SIOCAX25SETPARMS
AX25_DEF_T3		T1-T3,N2 have the meanings in the specification
AX25_DEF_N2
AX25_DEF_AXDEFMODE	8 = normal 128 is PE1CHL extended
AX25_DEF_IPDEFMODE	'D' - datagram  'V' - virtual connection
AX25_DEF_BACKOFF	'E'xponential 'L'inear
AX25_DEF_NETROM		Allow netrom 1=Y
AX25_DF_TEXT		Allow PID=Text 1=Y
AX25_DEF_WINDOW		Window for normal mode
AX25_DEF_EWINDOW	Window for PE1CHL mode
AX25_DEF_DIGI		1 for inband 2 for cross band 3 for both
AX25_DEF_CONMODE	Allow connected modes 1=Yes
AX25_ROUTE_MAX		AX.25 route cache size - not currently tunable
Unnamed (16)		Number of protocol hash slots (tunable)
DEV_NUMBUFFS		Number of priority levels (not easily tunable)
Unnamed (300)		Maximum packet backlog queue (tunable)
MAX_IOVEC		Maximum number of iovecs in a message (tunable)
MIN_WINDOW		Offered minimum window (tunable)
MAX_WINDOW		Offered maximum window (tunable)
MAX_HEADER		Largest physical header (tunable)
MAX_ADDR_LEN		Largest physical address (tunable)
SOCK_ARRAY_SIZE		IP socket array hash size (tunable)
IP_MAX_MEMBERSHIPS	Largest number of groups per socket (BSD style) (tunable)
16			Hard coded constant for amount of room allowed for
			cache align and faster forwarding (tunable)
IP_FRAG_TIME		Time we hold a fragment for. (tunable)
PORT_MASQ_BEGIN		First port reserved for masquerade (tunable)
PORT_MASQ_END		Last port used for masquerade	(tunable)
MASQUERADE_EXPIRE_TCP_FIN	Time we keep a masquerade for after a FIN
MASQUERADE_EXPIRE_UDP	Time we keep a UDP masquerade for (tunable)
MAXVIFS			Maximum mrouted vifs (1-32)
MFC_LINES		Lines in the multicast router cache (tunable)

NetROM parameters are tunable via an ioctl passing a struct

4000			Size a Unix domain socket malloc falls back to
			(tunable); should be 8K minus a small amount of
			overhead on machines with 8K pages, such as the Alpha

/*
 * Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/spinlock.h>
#include <linux/poll.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>

#include <net/mac80211.h>
#include "rate.h"

#include "rc80211_pid.h"

/*
 * Append one event to the shared ring buffer and wake up any readers
 * sleeping on the buffer's waitqueue.
 *
 * The ring write happens under buf->lock with local interrupts disabled
 * (spin_lock_irqsave), so this may be called from both process and IRQ
 * context.  When the ring is full the oldest entry is silently
 * overwritten: next_entry simply wraps modulo RC_PID_EVENT_RING_SIZE.
 * Note that "status" correctly has type unsigned long here, as required
 * by spin_lock_irqsave() for the saved IRQ flags.
 */
static void rate_control_pid_event(struct rc_pid_event_buffer *buf,
				   enum rc_pid_event_type type,
				   union rc_pid_event_data *data)
{
	struct rc_pid_event *ev;
	unsigned long status;

	spin_lock_irqsave(&buf->lock, status);
	/* Claim the next slot and advance the writer cursor (wraps). */
	ev = &(buf->ring[buf->next_entry]);
	buf->next_entry = (buf->next_entry + 1) % RC_PID_EVENT_RING_SIZE;

	ev->timestamp = jiffies;
	ev->id = buf->ev_count++;	/* monotonically increasing event id */
	ev->type = type;
	ev->data = *data;

	spin_unlock_irqrestore(&buf->lock, status);

	/* Wake readers outside the lock; they re-check the cursors. */
	wake_up_all(&buf->waitqueue);
}

void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
					     struct ieee80211_tx_status *stat)
{
	union rc_pid_event_data evd;

	memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_status));
	rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd);
}

void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf,
					       int index, int rate)
{
	union rc_pid_event_data evd;

	evd.index = index;
	evd.rate = rate;
	rate_control_pid_event(buf, RC_PID_EVENT_TYPE_RATE_CHANGE, &evd);
}

void rate_control_pid_event_tx_rate(struct rc_pid_event_buffer *buf,
					   int index, int rate)
{
	union rc_pid_event_data evd;

	evd.index = index;
	evd.rate = rate;
	rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_RATE, &evd);
}

/*
 * Record one PID controller sample: the measured failure percentage and
 * the proportional, integral and derivative error terms.
 */
void rate_control_pid_event_pf_sample(struct rc_pid_event_buffer *buf,
					     s32 pf_sample, s32 prop_err,
					     s32 int_err, s32 der_err)
{
	union rc_pid_event_data payload;

	payload.pf_sample = pf_sample;
	payload.der_err = der_err;
	payload.int_err = int_err;
	payload.prop_err = prop_err;
	rate_control_pid_event(buf, RC_PID_EVENT_TYPE_PF_SAMPLE, &payload);
}

/*
 * debugfs open handler for the per-station "rc_pid_events" file.
 *
 * Allocates a per-open cursor (file_info) remembering where in the
 * shared event ring this reader is, so multiple readers can consume
 * events independently.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 *
 * Fix: "status" holds the IRQ flags saved by spin_lock_irqsave() and
 * must be unsigned long; the previous unsigned int truncates the flags
 * word on 64-bit architectures.
 */
static int rate_control_pid_events_open(struct inode *inode, struct file *file)
{
	struct rc_pid_sta_info *sinfo = inode->i_private;
	struct rc_pid_event_buffer *events = &sinfo->events;
	struct rc_pid_events_file_info *file_info;
	unsigned long status;

	/* Allocate a state struct */
	file_info = kmalloc(sizeof(*file_info), GFP_KERNEL);
	if (file_info == NULL)
		return -ENOMEM;

	/* Snapshot the writer's position under the lock so the new
	 * reader starts with an empty backlog. */
	spin_lock_irqsave(&events->lock, status);

	file_info->next_entry = events->next_entry;
	file_info->events = events;

	spin_unlock_irqrestore(&events->lock, status);

	file->private_data = file_info;

	return 0;
}

/*
 * debugfs release handler: free the per-open reader cursor that was
 * allocated in rate_control_pid_events_open().
 */
static int rate_control_pid_events_release(struct inode *inode,
					   struct file *file)
{
	kfree(file->private_data);
	return 0;
}

/*
 * debugfs poll handler.  Registers the caller on the event buffer's
 * waitqueue and always reports the file as readable.
 */
static unsigned int rate_control_pid_events_poll(struct file *file,
						 poll_table *wait)
{
	struct rc_pid_events_file_info *info = file->private_data;
	struct rc_pid_event_buffer *events = info->events;

	poll_wait(file, &events->waitqueue, wait);

	return POLLIN | POLLRDNORM;
}

#define RC_PID_PRINT_BUF_SIZE 64

/*
 * debugfs read handler: emit one formatted event per read() call.
 *
 * Blocks (unless O_NONBLOCK) until the writer has advanced past this
 * reader's cursor, then formats the next ring entry into a small stack
 * buffer and copies it to userspace.  Output is truncated to
 * min(length, RC_PID_PRINT_BUF_SIZE); userspace should supply buffers
 * large enough for a whole line.
 *
 * Fixes relative to the original:
 *  - "status" is unsigned long, as spin_lock_irqsave() requires for the
 *    saved IRQ flags (unsigned int truncates them on 64-bit arches);
 *  - a zero-length read returns 0 immediately;
 *  - p is clamped after each snprintf(): snprintf returns the would-be
 *    length, so on truncation p could exceed length, making the size_t
 *    expression "length - p" underflow and the next snprintf() write
 *    far past pb[].
 */
static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
					    size_t length, loff_t *offset)
{
	struct rc_pid_events_file_info *file_info = file->private_data;
	struct rc_pid_event_buffer *events = file_info->events;
	struct rc_pid_event *ev;
	char pb[RC_PID_PRINT_BUF_SIZE];
	int ret;
	int p;
	unsigned long status;

	/* Per read(2), a zero-byte read transfers nothing. */
	if (length == 0)
		return 0;

	/* Check if there is something to read. */
	if (events->next_entry == file_info->next_entry) {
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Wait */
		ret = wait_event_interruptible(events->waitqueue,
				events->next_entry != file_info->next_entry);

		if (ret)
			return ret;
	}

	/* Write out one event per call. I don't care whether it's a little
	 * inefficient, this is debugging code anyway. */
	spin_lock_irqsave(&events->lock, status);

	/* Get an event */
	ev = &(events->ring[file_info->next_entry]);
	file_info->next_entry = (file_info->next_entry + 1) %
				RC_PID_EVENT_RING_SIZE;

	/* Print information about the event. Note that userspace needs to
	 * provide large enough buffers. */
	length = length < RC_PID_PRINT_BUF_SIZE ?
		 length : RC_PID_PRINT_BUF_SIZE;
	p = snprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
	if ((size_t)p >= length)	/* truncated: point at the NUL */
		p = length - 1;
	switch (ev->type) {
	case RC_PID_EVENT_TYPE_TX_STATUS:
		p += snprintf(pb + p, length - p, "tx_status %u %u",
			      ev->data.tx_status.excessive_retries,
			      ev->data.tx_status.retry_count);
		break;
	case RC_PID_EVENT_TYPE_RATE_CHANGE:
		p += snprintf(pb + p, length - p, "rate_change %d %d",
			      ev->data.index, ev->data.rate);
		break;
	case RC_PID_EVENT_TYPE_TX_RATE:
		p += snprintf(pb + p, length - p, "tx_rate %d %d",
			      ev->data.index, ev->data.rate);
		break;
	case RC_PID_EVENT_TYPE_PF_SAMPLE:
		p += snprintf(pb + p, length - p,
			      "pf_sample %d %d %d %d",
			      ev->data.pf_sample, ev->data.prop_err,
			      ev->data.int_err, ev->data.der_err);
		break;
	}
	if ((size_t)p >= length)	/* truncated again: re-clamp */
		p = length - 1;
	p += snprintf(pb + p, length - p, "\n");
	if ((size_t)p >= length)	/* never hand copy_to_user() p > length */
		p = length - 1;

	spin_unlock_irqrestore(&events->lock, status);

	if (copy_to_user(buf, pb, p))
		return -EFAULT;

	return p;
}

#undef RC_PID_PRINT_BUF_SIZE

/*
 * File operations for the per-station "rc_pid_events" debugfs file.
 * Only read/poll/open/release are provided; everything else falls back
 * to the VFS defaults for debugfs files.
 */
static struct file_operations rc_pid_fop_events = {
	.owner = THIS_MODULE,
	.read = rate_control_pid_events_read,
	.poll = rate_control_pid_events_poll,
	.open = rate_control_pid_events_open,
	.release = rate_control_pid_events_release,
};

/*
 * Create the per-station "rc_pid_events" debugfs file (world-readable)
 * under @dir.  The per-station rate-control state is passed as the
 * file's private data so the open handler can locate its event buffer.
 *
 * NOTE(review): the dentry returned by debugfs_create_file() is stored
 * without an error check; confirm callers tolerate a NULL/error
 * events_entry before relying on it.
 */
void rate_control_pid_add_sta_debugfs(void *priv, void *priv_sta,
					     struct dentry *dir)
{
	struct rc_pid_sta_info *spinfo = priv_sta;

	spinfo->events_entry = debugfs_create_file("rc_pid_events", S_IRUGO,
						   dir, spinfo,
						   &rc_pid_fop_events);
}

/*
 * Remove the per-station "rc_pid_events" debugfs file created by
 * rate_control_pid_add_sta_debugfs().
 */
void rate_control_pid_remove_sta_debugfs(void *priv, void *priv_sta)
{
	struct rc_pid_sta_info *spinfo = priv_sta;

	debugfs_remove(spinfo->events_entry);
}