path: root/net/ipv4/cipso_ipv4.c
author		Paul Moore <paul.moore@hp.com>	2006-09-25 18:56:09 -0400
committer	David S. Miller <davem@davemloft.net>	2006-09-25 18:56:09 -0400
commit		fcd48280643e92ec6cb29a04e9079dd7b6b5bfef (patch)
tree		c594e16a021262e97f8b41493529c95bd616529e /net/ipv4/cipso_ipv4.c
parent		4fe5d5c07ab615a52fd1b0ceba5aeed7c612821a (diff)
[NetLabel]: rework the Netlink attribute handling (part 1)

At the suggestion of Thomas Graf, rewrite NetLabel's use of Netlink
attributes to better follow the common Netlink attribute usage.

Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
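For readers less familiar with the pattern the patch moves toward: the "common Netlink attribute usage" is the nla_put_*()/nla_get_*() helper convention, where each value is emitted as a typed attribute identified by its own attribute ID, rather than written with generic type constants such as NLA_U32 as the old code below does. A minimal sketch of that convention follows; the attribute enum and function name are hypothetical illustrations, not part of this patch.

#include <net/netlink.h>

/* Hypothetical attribute IDs, for illustration only; the real NetLabel
 * attribute definitions live under net/netlabel/ and are not in this hunk. */
enum {
	EXAMPLE_A_UNSPEC,
	EXAMPLE_A_DOI,		/* u32: DOI value */
	EXAMPLE_A_MTYPE,	/* u32: DOI mapping type */
	__EXAMPLE_A_MAX,
};

/* Emit one DOI definition into @skb as two typed netlink attributes. */
static int example_put_doi(struct sk_buff *skb, u32 doi, u32 map_type)
{
	if (nla_put_u32(skb, EXAMPLE_A_DOI, doi) != 0)
		return -EMSGSIZE;
	if (nla_put_u32(skb, EXAMPLE_A_MTYPE, map_type) != 0)
		return -EMSGSIZE;
	return 0;
}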
Diffstat (limited to 'net/ipv4/cipso_ipv4.c')
-rw-r--r--	net/ipv4/cipso_ipv4.c	203
1 file changed, 24 insertions(+), 179 deletions(-)
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 87e71563335..e6ce0b3ba62 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -530,197 +530,42 @@ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
 }
 
 /**
- * cipso_v4_doi_dump_all - Dump all the CIPSO DOI definitions into a sk_buff
- * @headroom: the amount of headroom to allocate for the sk_buff
+ * cipso_v4_doi_walk - Iterate through the DOI definitions
+ * @skip_cnt: skip past this number of DOI definitions, updated
+ * @callback: callback for each DOI definition
+ * @cb_arg: argument for the callback function
  *
  * Description:
- * Dump a list of all the configured DOI values into a sk_buff.  The returned
- * sk_buff has room at the front of the sk_buff for @headroom bytes.  See
- * net/netlabel/netlabel_cipso_v4.h for the LISTALL message format.  This
- * function may fail if another process is changing the DOI list at the same
- * time.  Returns a pointer to a sk_buff on success, NULL on error.
+ * Iterate over the DOI definition list, skipping the first @skip_cnt entries.
+ * For each entry call @callback, if @callback returns a negative value stop
+ * 'walking' through the list and return.  Updates the value in @skip_cnt upon
+ * return.  Returns zero on success, negative values on failure.
  *
  */
-struct sk_buff *cipso_v4_doi_dump_all(size_t headroom)
+int cipso_v4_doi_walk(u32 *skip_cnt,
+		      int (*callback) (struct cipso_v4_doi *doi_def, void *arg),
+		      void *cb_arg)
 {
-	struct sk_buff *skb = NULL;
-	struct cipso_v4_doi *iter;
+	int ret_val = -ENOENT;
 	u32 doi_cnt = 0;
-	ssize_t buf_len;
-
-	buf_len = NETLBL_LEN_U32;
-	rcu_read_lock();
-	list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
-		if (iter->valid) {
-			doi_cnt += 1;
-			buf_len += 2 * NETLBL_LEN_U32;
-		}
-
-	skb = netlbl_netlink_alloc_skb(headroom, buf_len, GFP_ATOMIC);
-	if (skb == NULL)
-		goto doi_dump_all_failure;
-
-	if (nla_put_u32(skb, NLA_U32, doi_cnt) != 0)
-		goto doi_dump_all_failure;
-	buf_len -= NETLBL_LEN_U32;
-	list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
-		if (iter->valid) {
-			if (buf_len < 2 * NETLBL_LEN_U32)
-				goto doi_dump_all_failure;
-			if (nla_put_u32(skb, NLA_U32, iter->doi) != 0)
-				goto doi_dump_all_failure;
-			if (nla_put_u32(skb, NLA_U32, iter->type) != 0)
-				goto doi_dump_all_failure;
-			buf_len -= 2 * NETLBL_LEN_U32;
-		}
-	rcu_read_unlock();
-
-	return skb;
-
-doi_dump_all_failure:
-	rcu_read_unlock();
-	kfree(skb);
-	return NULL;
-}
-
-/**
- * cipso_v4_doi_dump - Dump a CIPSO DOI definition into a sk_buff
- * @doi: the DOI value
- * @headroom: the amount of headroom to allocate for the sk_buff
- *
- * Description:
- * Lookup the DOI definition matching @doi and dump it's contents into a
- * sk_buff.  The returned sk_buff has room at the front of the sk_buff for
- * @headroom bytes.  See net/netlabel/netlabel_cipso_v4.h for the LIST message
- * format.  This function may fail if another process is changing the DOI list
- * at the same time.  Returns a pointer to a sk_buff on success, NULL on error.
- *
- */
-struct sk_buff *cipso_v4_doi_dump(u32 doi, size_t headroom)
-{
-	struct sk_buff *skb = NULL;
-	struct cipso_v4_doi *iter;
-	u32 tag_cnt = 0;
-	u32 lvl_cnt = 0;
-	u32 cat_cnt = 0;
-	ssize_t buf_len;
-	ssize_t tmp;
+	struct cipso_v4_doi *iter_doi;
 
 	rcu_read_lock();
-	iter = cipso_v4_doi_getdef(doi);
-	if (iter == NULL)
-		goto doi_dump_failure;
-	buf_len = NETLBL_LEN_U32;
-	switch (iter->type) {
-	case CIPSO_V4_MAP_PASS:
-		buf_len += NETLBL_LEN_U32;
-		while(tag_cnt < CIPSO_V4_TAG_MAXCNT &&
-		      iter->tags[tag_cnt] != CIPSO_V4_TAG_INVALID) {
-			tag_cnt += 1;
-			buf_len += NETLBL_LEN_U8;
-		}
-		break;
-	case CIPSO_V4_MAP_STD:
-		buf_len += 3 * NETLBL_LEN_U32;
-		while (tag_cnt < CIPSO_V4_TAG_MAXCNT &&
-		       iter->tags[tag_cnt] != CIPSO_V4_TAG_INVALID) {
-			tag_cnt += 1;
-			buf_len += NETLBL_LEN_U8;
-		}
-		for (tmp = 0; tmp < iter->map.std->lvl.local_size; tmp++)
-			if (iter->map.std->lvl.local[tmp] !=
-			    CIPSO_V4_INV_LVL) {
-				lvl_cnt += 1;
-				buf_len += NETLBL_LEN_U32 + NETLBL_LEN_U8;
+	list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list)
+		if (iter_doi->valid) {
+			if (doi_cnt++ < *skip_cnt)
+				continue;
+			ret_val = callback(iter_doi, cb_arg);
+			if (ret_val < 0) {
+				doi_cnt--;
+				goto doi_walk_return;
 			}
-		for (tmp = 0; tmp < iter->map.std->cat.local_size; tmp++)
-			if (iter->map.std->cat.local[tmp] !=
-			    CIPSO_V4_INV_CAT) {
-				cat_cnt += 1;
-				buf_len += NETLBL_LEN_U32 + NETLBL_LEN_U16;
-			}
-		break;
-	}
-
-	skb = netlbl_netlink_alloc_skb(headroom, buf_len, GFP_ATOMIC);
-	if (skb == NULL)
-		goto doi_dump_failure;
-
-	if (nla_put_u32(skb, NLA_U32, iter->type) != 0)
-		goto doi_dump_failure;
-	buf_len -= NETLBL_LEN_U32;
-	if (iter != cipso_v4_doi_getdef(doi))
-		goto doi_dump_failure;
-	switch (iter->type) {
-	case CIPSO_V4_MAP_PASS:
-		if (nla_put_u32(skb, NLA_U32, tag_cnt) != 0)
-			goto doi_dump_failure;
-		buf_len -= NETLBL_LEN_U32;
-		for (tmp = 0;
-		     tmp < CIPSO_V4_TAG_MAXCNT &&
-		     iter->tags[tmp] != CIPSO_V4_TAG_INVALID;
-		     tmp++) {
-			if (buf_len < NETLBL_LEN_U8)
-				goto doi_dump_failure;
-			if (nla_put_u8(skb, NLA_U8, iter->tags[tmp]) != 0)
-				goto doi_dump_failure;
-			buf_len -= NETLBL_LEN_U8;
 		}
-		break;
-	case CIPSO_V4_MAP_STD:
-		if (nla_put_u32(skb, NLA_U32, tag_cnt) != 0)
-			goto doi_dump_failure;
-		if (nla_put_u32(skb, NLA_U32, lvl_cnt) != 0)
-			goto doi_dump_failure;
-		if (nla_put_u32(skb, NLA_U32, cat_cnt) != 0)
-			goto doi_dump_failure;
-		buf_len -= 3 * NETLBL_LEN_U32;
-		for (tmp = 0;
-		     tmp < CIPSO_V4_TAG_MAXCNT &&
-		     iter->tags[tmp] != CIPSO_V4_TAG_INVALID;
-		     tmp++) {
-			if (buf_len < NETLBL_LEN_U8)
-				goto doi_dump_failure;
-			if (nla_put_u8(skb, NLA_U8, iter->tags[tmp]) != 0)
-				goto doi_dump_failure;
-			buf_len -= NETLBL_LEN_U8;
-		}
-		for (tmp = 0; tmp < iter->map.std->lvl.local_size; tmp++)
-			if (iter->map.std->lvl.local[tmp] !=
-			    CIPSO_V4_INV_LVL) {
-				if (buf_len < NETLBL_LEN_U32 + NETLBL_LEN_U8)
-					goto doi_dump_failure;
-				if (nla_put_u32(skb, NLA_U32, tmp) != 0)
-					goto doi_dump_failure;
-				if (nla_put_u8(skb,
-					       NLA_U8,
-					       iter->map.std->lvl.local[tmp]) != 0)
-					goto doi_dump_failure;
-				buf_len -= NETLBL_LEN_U32 + NETLBL_LEN_U8;
-			}
-		for (tmp = 0; tmp < iter->map.std->cat.local_size; tmp++)
-			if (iter->map.std->cat.local[tmp] !=
-			    CIPSO_V4_INV_CAT) {
-				if (buf_len < NETLBL_LEN_U32 + NETLBL_LEN_U16)
-					goto doi_dump_failure;
-				if (nla_put_u32(skb, NLA_U32, tmp) != 0)
-					goto doi_dump_failure;
-				if (nla_put_u16(skb,
-						NLA_U16,
-						iter->map.std->cat.local[tmp]) != 0)
-					goto doi_dump_failure;
-				buf_len -= NETLBL_LEN_U32 + NETLBL_LEN_U16;
-			}
-		break;
-	}
-	rcu_read_unlock();
-
-	return skb;
 
-doi_dump_failure:
+doi_walk_return:
 	rcu_read_unlock();
-	kfree(skb);
-	return NULL;
+	*skip_cnt = doi_cnt;
+	return ret_val;
 }
 
 /**
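The cipso_v4_doi_walk() added above is a callback-driven iterator: @skip_cnt lets a caller resume a partially completed walk, which is the shape a Netlink dump handler needs when its output buffer fills up. A rough sketch of how a caller might drive it is shown below; the callback, argument struct, and wrapper are hypothetical and only illustrate the calling convention defined by the signature in this hunk, with the declaration assumed to be exported via include/net/cipso_ipv4.h.

#include <net/cipso_ipv4.h>

/* Hypothetical walk argument: count valid DOI definitions of one map type.
 * A real caller (e.g. a Netlink dump handler) would serialize each entry
 * and return a negative value once its message buffer is full. */
struct example_walk_arg {
	u32 want_type;
	u32 matches;
};

static int example_doi_count(struct cipso_v4_doi *doi_def, void *arg)
{
	struct example_walk_arg *warg = arg;

	if (doi_def->type == warg->want_type)
		warg->matches++;
	return 0;	/* keep walking; a value < 0 would stop the walk */
}

static u32 example_count_std_dois(void)
{
	struct example_walk_arg warg = { .want_type = CIPSO_V4_MAP_STD };
	u32 skip = 0;

	/* Walk the whole list in one pass; a resumable caller would save
	 * @skip between calls and invoke cipso_v4_doi_walk() again later. */
	cipso_v4_doi_walk(&skip, example_doi_count, &warg);
	return warg.matches;
}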