aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc/core.h
blob: 0207db04179a00feecf6f2cc04ded91f40f1db45 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
/*
 * net/tipc/core.h: Include file for TIPC global declarations
 *
 * Copyright (c) 2005-2006, Ericsson AB
 * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _TIPC_CORE_H
#define _TIPC_CORE_H

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tipc.h>
#include <linux/tipc_config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>


#define TIPC_MOD_VER "2.0.0"

#define ULTRA_STRING_MAX_LEN	32768
#define TIPC_MAX_SUBSCRIPTIONS	65535
#define TIPC_MAX_PUBLICATIONS	65535

struct tipc_msg;	/* msg.h */

int tipc_snprintf(char *buf, int len, const char *fmt, ...);

/*
 * TIPC-specific error codes
 */
#define ELINKCONG EAGAIN	/* link congestion <=> resource unavailable */

/*
 * Global configuration variables
 */
extern u32 tipc_own_addr __read_mostly;
extern int tipc_max_ports __read_mostly;
extern int tipc_net_id __read_mostly;
extern int tipc_remote_management __read_mostly;

/*
 * Other global variables
 */
extern int tipc_random __read_mostly;

/*
 * Routines available to privileged subsystems
 */
extern int tipc_core_start_net(unsigned long);
extern int  tipc_handler_start(void);
extern void tipc_handler_stop(void);
extern int  tipc_netlink_start(void);
extern void tipc_netlink_stop(void);
extern int  tipc_socket_init(void);
extern void tipc_socket_stop(void);

/*
 * TIPC timer and signal code
 */
typedef void (*Handler) (unsigned long);

u32 tipc_k_signal(Handler routine, unsigned long argument);

/**
 * k_init_timer - initialize a timer
 * @timer: pointer to timer structure
 * @routine: pointer to routine to invoke when timer expires
 * @argument: value to pass to routine when timer expires
 *
 * Timer must be initialized before use (and terminated when no longer needed).
 */
static inline void k_init_timer(struct timer_list *timer, Handler routine,
				unsigned long argument)
{
	/* Bind the expiry callback and its argument; does not arm the timer */
	setup_timer(timer, routine, argument);
}

/**
 * k_start_timer - start a timer
 * @timer: pointer to timer structure
 * @msec: time to delay (in ms)
 *
 * Schedules a previously initialized timer for later execution.
 * If timer is already running, the new timeout overrides the previous request.
 *
 * To ensure the timer doesn't expire before the specified delay elapses,
 * the delay is rounded up when converted to jiffies, and one extra jiffy
 * is added to compensate for starting partway through the current jiffy.
 */
static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
{
	unsigned long intv = msecs_to_jiffies(msec) + 1;

	mod_timer(timer, jiffies + intv);
}

/**
 * k_cancel_timer - cancel a timer
 * @timer: pointer to timer structure
 *
 * Cancels a previously initialized timer.
 * Can be called safely even if the timer is already inactive.
 *
 * WARNING: Must not be called when holding locks required by the timer's
 *          timeout routine, otherwise deadlock can occur on SMP systems!
 */
static inline void k_cancel_timer(struct timer_list *timer)
{
	/* del_timer_sync() also waits for a concurrently running handler
	 * on another CPU to finish -- hence the lock warning above.
	 */
	del_timer_sync(timer);
}

/**
 * k_term_timer - terminate a timer
 * @timer: pointer to timer structure
 *
 * Prevents further use of a previously initialized timer.
 *
 * WARNING: Caller must ensure timer isn't currently running.
 *
 * (Do not "enhance" this routine to automatically cancel an active timer,
 * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
 */
static inline void k_term_timer(struct timer_list *timer)
{
	/* Intentionally a no-op: kernel timers need no explicit teardown.
	 * Kept so callers have a symmetric init/term pairing.
	 */
}

/*
 * TIPC message buffer code
 *
 * TIPC message buffer headroom reserves space for the worst-case
 * link-level device header (in case the message is sent off-node).
 *
 * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
 *       are word aligned for quicker access
 */
#define BUF_HEADROOM LL_MAX_HEADER

/* Per-buffer TIPC state, stored in the sk_buff control buffer (skb->cb).
 * NOTE(review): 'handle' is an opaque cookie -- its meaning is set by the
 * code that queues the buffer, not visible in this header; confirm at users.
 */
struct tipc_skb_cb {
	void *handle;
};

/* Access a buffer's TIPC control-block area (overlays skb->cb) */
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))

/* buf_msg - return pointer to the TIPC message header at the start of
 * the buffer's data area
 */
static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
{
	void *hdr = skb->data;

	return (struct tipc_msg *)hdr;
}

extern struct sk_buff *tipc_buf_acquire(u32 size);

#endif
opt">, "SATA XOR-DMA" }, { BCMA_CORE_ETHERNET_GBIT, "GBit Ethernet" }, { BCMA_CORE_PCIE, "PCIe" }, { BCMA_CORE_PHY_N, "PHY N" }, { BCMA_CORE_SRAM_CTL, "SRAM Controller" }, { BCMA_CORE_MINI_MACPHY, "Mini MACPHY" }, { BCMA_CORE_PHY_LP, "PHY LP" }, { BCMA_CORE_PMU, "PMU" }, { BCMA_CORE_PHY_SSN, "PHY SSN" }, { BCMA_CORE_SDIO_DEV, "SDIO Device" }, { BCMA_CORE_PHY_HT, "PHY HT" }, { BCMA_CORE_MAC_GBIT, "GBit MAC" }, { BCMA_CORE_DDR12_MEM_CTL, "DDR1/DDR2 Memory Controller" }, { BCMA_CORE_PCIE_RC, "PCIe Root Complex" }, { BCMA_CORE_OCP_OCP_BRIDGE, "OCP to OCP Bridge" }, { BCMA_CORE_SHARED_COMMON, "Common Shared" }, { BCMA_CORE_OCP_AHB_BRIDGE, "OCP to AHB Bridge" }, { BCMA_CORE_SPI_HOST, "SPI Host" }, { BCMA_CORE_I2S, "I2S" }, { BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" }, { BCMA_CORE_SHIM, "SHIM" }, { BCMA_CORE_DEFAULT, "Default" }, }; static const struct bcma_device_id_name bcma_mips_device_names[] = { { BCMA_CORE_MIPS, "MIPS" }, { BCMA_CORE_MIPS_3302, "MIPS 3302" }, { BCMA_CORE_MIPS_74K, "MIPS 74K" }, }; static const char *bcma_device_name(const struct bcma_device_id *id) { const struct bcma_device_id_name *names; int size, i; /* search manufacturer specific names */ switch (id->manuf) { case BCMA_MANUF_ARM: names = bcma_arm_device_names; size = ARRAY_SIZE(bcma_arm_device_names); break; case BCMA_MANUF_BCM: names = bcma_bcm_device_names; size = ARRAY_SIZE(bcma_bcm_device_names); break; case BCMA_MANUF_MIPS: names = bcma_mips_device_names; size = ARRAY_SIZE(bcma_mips_device_names); break; default: return "UNKNOWN"; } for (i = 0; i < size; i++) { if (names[i].id == id->id) return names[i].name; } return "UNKNOWN"; } static u32 bcma_scan_read32(struct bcma_bus *bus, u8 current_coreidx, u16 offset) { return readl(bus->mmio + offset); } static void bcma_scan_switch_core(struct bcma_bus *bus, u32 addr) { if (bus->hosttype == BCMA_HOSTTYPE_PCI) pci_write_config_dword(bus->host_pci, BCMA_PCI_BAR0_WIN, addr); } static u32 bcma_erom_get_ent(struct bcma_bus *bus, u32 
**eromptr) { u32 ent = readl(*eromptr); (*eromptr)++; return ent; } static void bcma_erom_push_ent(u32 **eromptr) { (*eromptr)--; } static s32 bcma_erom_get_ci(struct bcma_bus *bus, u32 **eromptr) { u32 ent = bcma_erom_get_ent(bus, eromptr); if (!(ent & SCAN_ER_VALID)) return -ENOENT; if ((ent & SCAN_ER_TAG) != SCAN_ER_TAG_CI) return -ENOENT; return ent; } static bool bcma_erom_is_end(struct bcma_bus *bus, u32 **eromptr) { u32 ent = bcma_erom_get_ent(bus, eromptr); bcma_erom_push_ent(eromptr); return (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID)); } static bool bcma_erom_is_bridge(struct bcma_bus *bus, u32 **eromptr) { u32 ent = bcma_erom_get_ent(bus, eromptr); bcma_erom_push_ent(eromptr); return (((ent & SCAN_ER_VALID)) && ((ent & SCAN_ER_TAGX) == SCAN_ER_TAG_ADDR) && ((ent & SCAN_ADDR_TYPE) == SCAN_ADDR_TYPE_BRIDGE)); } static void bcma_erom_skip_component(struct bcma_bus *bus, u32 **eromptr) { u32 ent; while (1) { ent = bcma_erom_get_ent(bus, eromptr); if ((ent & SCAN_ER_VALID) && ((ent & SCAN_ER_TAG) == SCAN_ER_TAG_CI)) break; if (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID)) break; } bcma_erom_push_ent(eromptr); } static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 **eromptr) { u32 ent = bcma_erom_get_ent(bus, eromptr); if (!(ent & SCAN_ER_VALID)) return -ENOENT; if ((ent & SCAN_ER_TAG) != SCAN_ER_TAG_MP) return -ENOENT; return ent; } static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 **eromptr, u32 type, u8 port) { u32 addrl, addrh, sizel, sizeh = 0; u32 size; u32 ent = bcma_erom_get_ent(bus, eromptr); if ((!(ent & SCAN_ER_VALID)) || ((ent & SCAN_ER_TAGX) != SCAN_ER_TAG_ADDR) || ((ent & SCAN_ADDR_TYPE) != type) || (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) { bcma_erom_push_ent(eromptr); return -EINVAL; } addrl = ent & SCAN_ADDR_ADDR; if (ent & SCAN_ADDR_AG32) addrh = bcma_erom_get_ent(bus, eromptr); else addrh = 0; if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) { size = bcma_erom_get_ent(bus, eromptr); sizel = size & SCAN_SIZE_SZ; if 
(size & SCAN_SIZE_SG32) sizeh = bcma_erom_get_ent(bus, eromptr); } else sizel = SCAN_ADDR_SZ_BASE << ((ent & SCAN_ADDR_SZ) >> SCAN_ADDR_SZ_SHIFT); return addrl; } static struct bcma_device *bcma_find_core_by_index(struct bcma_bus *bus, u16 index) { struct bcma_device *core; list_for_each_entry(core, &bus->cores, list) { if (core->core_index == index) return core; } return NULL; } static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 coreid) { struct bcma_device *core; list_for_each_entry_reverse(core, &bus->cores, list) { if (core->id.id == coreid) return core; } return NULL; } static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, struct bcma_device_id *match, int core_num, struct bcma_device *core) { s32 tmp; u8 i, j; s32 cia, cib; u8 ports[2], wrappers[2]; /* get CIs */ cia = bcma_erom_get_ci(bus, eromptr); if (cia < 0) { bcma_erom_push_ent(eromptr); if (bcma_erom_is_end(bus, eromptr)) return -ESPIPE; return -EILSEQ; } cib = bcma_erom_get_ci(bus, eromptr); if (cib < 0) return -EILSEQ; /* parse CIs */ core->id.class = (cia & SCAN_CIA_CLASS) >> SCAN_CIA_CLASS_SHIFT; core->id.id = (cia & SCAN_CIA_ID) >> SCAN_CIA_ID_SHIFT; core->id.manuf = (cia & SCAN_CIA_MANUF) >> SCAN_CIA_MANUF_SHIFT; ports[0] = (cib & SCAN_CIB_NMP) >> SCAN_CIB_NMP_SHIFT; ports[1] = (cib & SCAN_CIB_NSP) >> SCAN_CIB_NSP_SHIFT; wrappers[0] = (cib & SCAN_CIB_NMW) >> SCAN_CIB_NMW_SHIFT; wrappers[1] = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT; core->id.rev = (cib & SCAN_CIB_REV) >> SCAN_CIB_REV_SHIFT; if (((core->id.manuf == BCMA_MANUF_ARM) && (core->id.id == 0xFFF)) || (ports[1] == 0)) { bcma_erom_skip_component(bus, eromptr); return -ENXIO; } /* check if component is a core at all */ if (wrappers[0] + wrappers[1] == 0) { /* Some specific cores don't need wrappers */ switch (core->id.id) { case BCMA_CORE_4706_MAC_GBIT_COMMON: /* Not used yet: case BCMA_CORE_OOB_ROUTER: */ break; default: bcma_erom_skip_component(bus, eromptr); return -ENXIO; } } if 
(bcma_erom_is_bridge(bus, eromptr)) { bcma_erom_skip_component(bus, eromptr); return -ENXIO; } if (bcma_find_core_by_index(bus, core_num)) { bcma_erom_skip_component(bus, eromptr); return -ENODEV; } if (match && ((match->manuf != BCMA_ANY_MANUF && match->manuf != core->id.manuf) || (match->id != BCMA_ANY_ID && match->id != core->id.id) || (match->rev != BCMA_ANY_REV && match->rev != core->id.rev) || (match->class != BCMA_ANY_CLASS && match->class != core->id.class) )) { bcma_erom_skip_component(bus, eromptr); return -ENODEV; } /* get & parse master ports */ for (i = 0; i < ports[0]; i++) { s32 mst_port_d = bcma_erom_get_mst_port(bus, eromptr); if (mst_port_d < 0) return -EILSEQ; } /* First Slave Address Descriptor should be port 0: * the main register space for the core */ tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0); if (tmp <= 0) { /* Try again to see if it is a bridge */ tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_BRIDGE, 0); if (tmp <= 0) { return -EILSEQ; } else { bcma_info(bus, "Bridge found\n"); return -ENXIO; } } core->addr = tmp; /* get & parse slave ports */ for (i = 0; i < ports[1]; i++) { for (j = 0; ; j++) { tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, i); if (tmp < 0) { /* no more entries for port _i_ */ /* pr_debug("erom: slave port %d " * "has %d descriptors\n", i, j); */ break; } else { if (i == 0 && j == 0) core->addr1 = tmp; } } } /* get & parse master wrappers */ for (i = 0; i < wrappers[0]; i++) { for (j = 0; ; j++) { tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_MWRAP, i); if (tmp < 0) { /* no more entries for port _i_ */ /* pr_debug("erom: master wrapper %d " * "has %d descriptors\n", i, j); */ break; } else { if (i == 0 && j == 0) core->wrap = tmp; } } } /* get & parse slave wrappers */ for (i = 0; i < wrappers[1]; i++) { u8 hack = (ports[1] == 1) ? 
0 : 1; for (j = 0; ; j++) { tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SWRAP, i + hack); if (tmp < 0) { /* no more entries for port _i_ */ /* pr_debug("erom: master wrapper %d " * has %d descriptors\n", i, j); */ break; } else { if (wrappers[0] == 0 && !i && !j) core->wrap = tmp; } } } if (bus->hosttype == BCMA_HOSTTYPE_SOC) { core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE); if (!core->io_addr) return -ENOMEM; core->io_wrap = ioremap_nocache(core->wrap, BCMA_CORE_SIZE); if (!core->io_wrap) { iounmap(core->io_addr); return -ENOMEM; } } return 0; } void bcma_init_bus(struct bcma_bus *bus) { s32 tmp; struct bcma_chipinfo *chipinfo = &(bus->chipinfo); if (bus->init_done) return; INIT_LIST_HEAD(&bus->cores); bus->nr_cores = 0; bcma_scan_switch_core(bus, BCMA_ADDR_BASE); tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID); chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT; chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT; chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT; bcma_info(bus, "Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n", chipinfo->id, chipinfo->rev, chipinfo->pkg); bus->init_done = true; } int bcma_bus_scan(struct bcma_bus *bus) { u32 erombase; u32 __iomem *eromptr, *eromend; int err, core_num = 0; bcma_init_bus(bus); erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM); if (bus->hosttype == BCMA_HOSTTYPE_SOC) { eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE); if (!eromptr) return -ENOMEM; } else { eromptr = bus->mmio; } eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32); bcma_scan_switch_core(bus, erombase); while (eromptr < eromend) { struct bcma_device *other_core; struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL); if (!core) return -ENOMEM; INIT_LIST_HEAD(&core->list); core->bus = bus; err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core); if (err < 0) { kfree(core); if (err == -ENODEV) { core_num++; continue; } else if (err == -ENXIO) { continue; } else if (err == 
-ESPIPE) { break; } return err; } core->core_index = core_num++; bus->nr_cores++; other_core = bcma_find_core_reverse(bus, core->id.id); core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1; bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n", core->core_index, bcma_device_name(&core->id), core->id.manuf, core->id.id, core->id.rev, core->id.class); list_add_tail(&core->list, &bus->cores); } if (bus->hosttype == BCMA_HOSTTYPE_SOC) iounmap(eromptr); return 0; } int __init bcma_bus_scan_early(struct bcma_bus *bus, struct bcma_device_id *match, struct bcma_device *core) { u32 erombase; u32 __iomem *eromptr, *eromend; int err = -ENODEV; int core_num = 0; erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM); if (bus->hosttype == BCMA_HOSTTYPE_SOC) { eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE); if (!eromptr) return -ENOMEM; } else { eromptr = bus->mmio; } eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32); bcma_scan_switch_core(bus, erombase); while (eromptr < eromend) { memset(core, 0, sizeof(*core)); INIT_LIST_HEAD(&core->list); core->bus = bus; err = bcma_get_next_core(bus, &eromptr, match, core_num, core); if (err == -ENODEV) { core_num++; continue; } else if (err == -ENXIO) continue; else if (err == -ESPIPE) break; else if (err < 0) return err; core->core_index = core_num++; bus->nr_cores++; bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n", core->core_index, bcma_device_name(&core->id), core->id.manuf, core->id.id, core->id.rev, core->id.class); list_add_tail(&core->list, &bus->cores); err = 0; break; } if (bus->hosttype == BCMA_HOSTTYPE_SOC) iounmap(eromptr); return err; }