author	Stephen Hemminger <shemminger@vyatta.com>	2008-03-22 20:59:58 -0400
committer	David S. Miller <davem@davemloft.net>	2008-03-22 20:59:58 -0400
commit	6440cc9e0f48ade57af7be28008cbfa6a991f287 (patch)
tree	233300e665ed862dab958087b8afa31baaf0ef5f /net
parent	69d1506731168d6845a76a303b2c45f7c05f3f2c (diff)
[IPV4] fib_trie: fix warning from rcu_assign_pointer
This gets rid of a warning caused by the test in rcu_assign_pointer. I tried
to fix rcu_assign_pointer, but that devolved into a long set of discussions
about doing it right that came to no real solution. Since the test in
rcu_assign_pointer for constant NULL would never succeed in fib_trie, just
open code instead.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
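For context: rcu_assign_pointer() at the time contained a test meant to skip
the memory barrier when a compile-time-constant NULL is assigned. The sketch
below only approximates its shape; it is an illustration, not a quote from
the tree.

	/* approximate shape of the contemporaneous macro, for illustration only */
	#define rcu_assign_pointer(p, v) \
		({ \
			if (!__builtin_constant_p(v) || ((v) != NULL)) \
				smp_wmb(); \
			(p) = (v); \
		})

Because node->parent in fib_trie is an unsigned long rather than a pointer,
the (v) != NULL comparison draws a compiler warning and the constant-NULL
fast path can never be taken, so the patch below open-codes smp_wmb()
followed by a plain store.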
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/fib_trie.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1ff446d0fa8b..f6cdc012eec5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -177,10 +177,13 @@ static inline struct tnode *node_parent_rcu(struct node *node)
 	return rcu_dereference(ret);
 }
 
+/* Same as rcu_assign_pointer
+ * but that macro() assumes that value is a pointer.
+ */
 static inline void node_set_parent(struct node *node, struct tnode *ptr)
 {
-	rcu_assign_pointer(node->parent,
-			   (unsigned long)ptr | NODE_TYPE(node));
+	smp_wmb();
+	node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }
 
 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
/*
 * Copyright 2006 Jake Moilanen <moilanen@austin.ibm.com>, IBM Corp.
 * Copyright 2006-2007 Michael Ellerman, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */

#include <linux/device.h>
#include <linux/irq.h>
#include <linux/msi.h>

#include <asm/rtas.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>

static int query_token, change_token;

#define RTAS_QUERY_FN		0
#define RTAS_CHANGE_FN		1
#define RTAS_RESET_FN		2
#define RTAS_CHANGE_MSI_FN	3
#define RTAS_CHANGE_MSIX_FN	4

static struct pci_dn *get_pdn(struct pci_dev *pdev)
{
	struct device_node *dn;
	struct pci_dn *pdn;

	dn = pci_device_to_OF_node(pdev);
	if (!dn) {
		dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
		return NULL;
	}

	pdn = PCI_DN(dn);
	if (!pdn) {
		dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
		return NULL;
	}

	return pdn;
}

/* RTAS Helpers */

static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
{
	u32 addr, seq_num, rtas_ret[3];
	unsigned long buid;
	int rc;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;

	seq_num = 1;
	do {
		if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN)
			rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
					BUID_HI(buid), BUID_LO(buid),
					func, num_irqs, seq_num);
		else
			rc = rtas_call(change_token, 6, 3, rtas_ret, addr,
					BUID_HI(buid), BUID_LO(buid),
					func, num_irqs, seq_num);

		seq_num = rtas_ret[1];
	} while (rtas_busy_delay(rc));

	/*
	 * If the RTAS call succeeded, return the number of irqs allocated.
	 * If not, make sure we return a negative error code.
	 */
	if (rc == 0)
		rc = rtas_ret[0];
	else if (rc > 0)
		rc = -rc;

	pr_debug("rtas_msi: ibm,change_msi(func=%d,num=%d), got %d rc = %d\n",
		 func, num_irqs, rtas_ret[0], rc);

	return rc;
}

static void rtas_disable_msi(struct pci_dev *pdev)
{
	struct pci_dn *pdn;

	pdn = get_pdn(pdev);
	if (!pdn)
		return;

	if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0)
		pr_debug("rtas_msi: Setting MSIs to 0 failed!\n");
}

static int rtas_query_irq_number(struct pci_dn *pdn, int offset)
{
	u32 addr, rtas_ret[2];
	unsigned long buid;
	int rc;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;

	do {
		rc = rtas_call(query_token, 4, 3, rtas_ret, addr,
			       BUID_HI(buid), BUID_LO(buid), offset);
	} while (rtas_busy_delay(rc));

	if (rc) {
		pr_debug("rtas_msi: error (%d) querying source number\n", rc);
		return rc;
	}

	return rtas_ret[0];
}

static void rtas_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		set_irq_msi(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}

	rtas_disable_msi(pdev);
}

static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	const u32 *req_msi;

	pdn = get_pdn(pdev);
	if (!pdn)
		return -ENODEV;

	dn = pdn->node;

	req_msi = of_get_property(dn, prop_name, NULL);
	if (!req_msi) {
		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
		return -ENOENT;
	}

	if (*req_msi < nvec) {
		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);

		if (*req_msi == 0) /* Be paranoid */
			return -ENOSPC;

		return *req_msi;
	}

	return 0;
}

static int check_req_msi(struct pci_dev *pdev, int nvec)
{
	return check_req(pdev, nvec, "ibm,req#msi");
}

static int check_req_msix(struct pci_dev *pdev, int nvec)
{
	return check_req(pdev, nvec, "ibm,req#msi-x");
}

/* Quota calculation */

static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
{
	struct device_node *dn;
	const u32 *p;

	dn = of_node_get(pci_device_to_OF_node(dev));
	while (dn) {
		p = of_get_property(dn, "ibm,pe-total-#msi", NULL);
		if (p) {
			pr_debug("rtas_msi: found prop on dn %s\n",
				dn->full_name);
			*total = *p;
			return dn;
		}

		dn = of_get_next_parent(dn);
	}

	return NULL;
}

static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
	struct device_node *dn;

	/* Found our PE and assume 8 at that point. */

	dn = pci_device_to_OF_node(dev);
	if (!dn)
		return NULL;

	dn = find_device_pe(dn);
	if (!dn)
		return NULL;

	/* We actually want the parent */
	dn = of_get_parent(dn);
	if (!dn)
		return NULL;

	/* Hardcode of 8 for old firmwares */
	*total = 8;
	pr_debug("rtas_msi: using PE dn %s\n", dn->full_name);

	return dn;
}

struct msi_counts {
	struct device_node *requestor;
	int num_devices;
	int request;
	int quota;
	int spare;
	int over_quota;
};

static void *count_non_bridge_devices(struct device_node *dn, void *data)
{
	struct msi_counts *counts = data;
	const u32 *p;
	u32 class;

	pr_debug("rtas_msi: counting %s\n", dn->full_name);

	p = of_get_property(dn, "class-code", NULL);
	class = p ? *p : 0;

	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
		counts->num_devices++;

	return NULL;
}

static void *count_spare_msis(struct device_node *dn, void *data)
{
	struct msi_counts *counts = data;
	const u32 *p;
	int req;

	if (dn == counts->requestor)
		req = counts->request;
	else {
		/* We don't know if a driver will try to use MSI or MSI-X,
		 * so we just have to punt and use the larger of the two. */
		req = 0;
		p = of_get_property(dn, "ibm,req#msi", NULL);
		if (p)
			req = *p;

		p = of_get_property(dn, "ibm,req#msi-x", NULL);
		if (p)
			req = max(req, (int)*p);
	}

	if (req < counts->quota)
		counts->spare += counts->quota - req;
	else if (req > counts->quota)
		counts->over_quota++;

	return NULL;
}

static int msi_quota_for_device(struct pci_dev *dev, int request)
{
	struct device_node *pe_dn;
	struct msi_counts counts;
	int total;

	pr_debug("rtas_msi: calc quota for %s, request %d\n", pci_name(dev),
		  request);

	pe_dn = find_pe_total_msi(dev, &total);
	if (!pe_dn)
		pe_dn = find_pe_dn(dev, &total);

	if (!pe_dn) {
		pr_err("rtas_msi: couldn't find PE for %s\n", pci_name(dev));
		goto out;
	}

	pr_debug("rtas_msi: found PE %s\n", pe_dn->full_name);

	memset(&counts, 0, sizeof(struct msi_counts));

	/* Work out how many devices we have below this PE */
	traverse_pci_devices(pe_dn, count_non_bridge_devices, &counts);

	if (counts.num_devices == 0) {
		pr_err("rtas_msi: found 0 devices under PE for %s\n",
			pci_name(dev));
		goto out;
	}

	counts.quota = total / counts.num_devices;
	if (request <= counts.quota)
		goto out;

	/* else, we have some more calculating to do */
	counts.requestor = pci_device_to_OF_node(dev);
	counts.request = request;
	traverse_pci_devices(pe_dn, count_spare_msis, &counts);

	/* If the quota isn't an integer multiple of the total, we can
	 * use the remainder as spare MSIs for anyone that wants them. */
	counts.spare += total % counts.num_devices;

	/* Divide any spare by the number of over-quota requestors */
	if (counts.over_quota)
		counts.quota += counts.spare / counts.over_quota;

	/* And finally clamp the request to the possibly adjusted quota */
	request = min(counts.quota, request);

	pr_debug("rtas_msi: request clamped to quota %d\n", request);
out:
	of_node_put(pe_dn);

	return request;
}
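
/*
 * Illustrative example of the arithmetic above, using hypothetical
 * numbers (not taken from any real machine): if the PE advertises
 * ibm,pe-total-#msi = 20 and holds 3 non-bridge devices, the initial
 * quota is 20 / 3 = 6.  A device requesting 16 is over quota, so
 * count_spare_msis() is run: if the other two devices request only
 * 2 MSIs each, they contribute 2 * (6 - 2) = 8 spares, plus the
 * remainder 20 % 3 = 2, i.e. 10 spares for the single over-quota
 * requestor.  Its quota grows to 6 + 10 = 16 and the request of 16
 * is granted in full.
 */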

static int rtas_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	int quota, rc;

	if (type == PCI_CAP_ID_MSIX)
		rc = check_req_msix(pdev, nvec);
	else
		rc = check_req_msi(pdev, nvec);

	if (rc)
		return rc;

	quota = msi_quota_for_device(pdev, nvec);

	if (quota && quota < nvec)
		return quota;

	return 0;
}

static int check_msix_entries(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	int expected;

	/* There's no way for us to express to firmware that we want
	 * a discontiguous, or non-zero based, range of MSI-X entries.
	 * So we must reject such requests. */

	expected = 0;
	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->msi_attrib.entry_nr != expected) {
			pr_debug("rtas_msi: bad MSI-X entries.\n");
			return -EINVAL;
		}
		expected++;
	}

	return 0;
}

static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_dn *pdn;
	int hwirq, virq, i, rc;
	struct msi_desc *entry;
	struct msi_msg msg;

	pdn = get_pdn(pdev);
	if (!pdn)
		return -ENODEV;

	if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev))
		return -EINVAL;

	/*
	 * Try the new more explicit firmware interface, if that fails fall
	 * back to the old interface. The old interface is known to never
	 * return MSI-Xs.
	 */
	if (type == PCI_CAP_ID_MSI) {
		rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);

		if (rc < 0) {
			pr_debug("rtas_msi: trying the old firmware call.\n");
			rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
		}
	} else
		rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);

	if (rc != nvec) {
		pr_debug("rtas_msi: rtas_change_msi() failed\n");
		return rc;
	}

	i = 0;
	list_for_each_entry(entry, &pdev->msi_list, list) {
		hwirq = rtas_query_irq_number(pdn, i++);
		if (hwirq < 0) {
			pr_debug("rtas_msi: error (%d) getting hwirq\n", rc);
			return hwirq;
		}

		virq = irq_create_mapping(NULL, hwirq);

		if (virq == NO_IRQ) {
			pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
			return -ENOSPC;
		}

		dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq);
		set_irq_msi(virq, entry);

		/* Read config space back so we can restore after reset */
		read_msi_msg(virq, &msg);
		entry->msg = msg;

		unmask_msi_irq(virq);
	}

	return 0;
}

static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev)
{
	/* No LSI -> leave MSIs (if any) configured */
	if (pdev->irq == NO_IRQ) {
		dev_dbg(&pdev->dev, "rtas_msi: no LSI, nothing to do.\n");
		return;
	}

	/* No MSI -> MSIs can't have been assigned by fw, leave LSI */
	if (check_req_msi(pdev, 1) && check_req_msix(pdev, 1)) {
		dev_dbg(&pdev->dev, "rtas_msi: no req#msi/x, nothing to do.\n");
		return;
	}

	dev_dbg(&pdev->dev, "rtas_msi: disabling existing MSI.\n");
	rtas_disable_msi(pdev);
}

static int rtas_msi_init(void)
{
	query_token  = rtas_token("ibm,query-interrupt-source-number");
	change_token = rtas_token("ibm,change-msi");

	if ((query_token == RTAS_UNKNOWN_SERVICE) ||
			(change_token == RTAS_UNKNOWN_SERVICE)) {
		pr_debug("rtas_msi: no RTAS tokens, no MSI support.\n");
		return -1;
	}

	pr_debug("rtas_msi: Registering RTAS MSI callbacks.\n");

	WARN_ON(ppc_md.setup_msi_irqs);
	ppc_md.setup_msi_irqs = rtas_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = rtas_teardown_msi_irqs;
	ppc_md.msi_check_device = rtas_msi_check_device;

	WARN_ON(ppc_md.pci_irq_fixup);
	ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup;

	return 0;
}
arch_initcall(rtas_msi_init);