diff options
author | Jon Paul Maloy <jon.maloy@ericsson.com> | 2014-07-16 20:41:02 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-07-17 00:38:19 -0400 |
commit | c4116e10579c5bbbfc3cd2ad0324ee0d8691e531 (patch) | |
tree | 43f4306627b3f1c8e30e54ee2234603f7ddbcd28 /net/tipc/link.c | |
parent | 0abd8ff21f19adddc465538354e9baaca63df073 (diff) |
tipc: remove unreferenced functions
We can now remove a number of functions which have become obsolete
and unreferenced through this commit series. There are no functional
changes in this commit.
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/link.c')
-rw-r--r-- | net/tipc/link.c | 247 |
1 file changed, 0 insertions, 247 deletions
diff --git a/net/tipc/link.c b/net/tipc/link.c index d1255ba51216..28730ddf4b78 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -85,7 +85,6 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance); | |||
85 | static void link_state_event(struct tipc_link *l_ptr, u32 event); | 85 | static void link_state_event(struct tipc_link *l_ptr, u32 event); |
86 | static void link_reset_statistics(struct tipc_link *l_ptr); | 86 | static void link_reset_statistics(struct tipc_link *l_ptr); |
87 | static void link_print(struct tipc_link *l_ptr, const char *str); | 87 | static void link_print(struct tipc_link *l_ptr, const char *str); |
88 | static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf); | ||
89 | static void tipc_link_sync_xmit(struct tipc_link *l); | 88 | static void tipc_link_sync_xmit(struct tipc_link *l); |
90 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); | 89 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); |
91 | static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf); | 90 | static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf); |
@@ -679,180 +678,6 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
679 | } | 678 | } |
680 | } | 679 | } |
681 | 680 | ||
682 | /* | ||
683 | * link_bundle_buf(): Append contents of a buffer to | ||
684 | * the tail of an existing one. | ||
685 | */ | ||
686 | static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler, | ||
687 | struct sk_buff *buf) | ||
688 | { | ||
689 | struct tipc_msg *bundler_msg = buf_msg(bundler); | ||
690 | struct tipc_msg *msg = buf_msg(buf); | ||
691 | u32 size = msg_size(msg); | ||
692 | u32 bundle_size = msg_size(bundler_msg); | ||
693 | u32 to_pos = align(bundle_size); | ||
694 | u32 pad = to_pos - bundle_size; | ||
695 | |||
696 | if (msg_user(bundler_msg) != MSG_BUNDLER) | ||
697 | return 0; | ||
698 | if (msg_type(bundler_msg) != OPEN_MSG) | ||
699 | return 0; | ||
700 | if (skb_tailroom(bundler) < (pad + size)) | ||
701 | return 0; | ||
702 | if (l_ptr->max_pkt < (to_pos + size)) | ||
703 | return 0; | ||
704 | |||
705 | skb_put(bundler, pad + size); | ||
706 | skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size); | ||
707 | msg_set_size(bundler_msg, to_pos + size); | ||
708 | msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1); | ||
709 | kfree_skb(buf); | ||
710 | l_ptr->stats.sent_bundled++; | ||
711 | return 1; | ||
712 | } | ||
713 | |||
714 | static void link_add_to_outqueue(struct tipc_link *l_ptr, | ||
715 | struct sk_buff *buf, | ||
716 | struct tipc_msg *msg) | ||
717 | { | ||
718 | u32 ack = mod(l_ptr->next_in_no - 1); | ||
719 | u32 seqno = mod(l_ptr->next_out_no++); | ||
720 | |||
721 | msg_set_word(msg, 2, ((ack << 16) | seqno)); | ||
722 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | ||
723 | buf->next = NULL; | ||
724 | if (l_ptr->first_out) { | ||
725 | l_ptr->last_out->next = buf; | ||
726 | l_ptr->last_out = buf; | ||
727 | } else | ||
728 | l_ptr->first_out = l_ptr->last_out = buf; | ||
729 | |||
730 | l_ptr->out_queue_size++; | ||
731 | if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz) | ||
732 | l_ptr->stats.max_queue_sz = l_ptr->out_queue_size; | ||
733 | } | ||
734 | |||
735 | static void link_add_chain_to_outqueue(struct tipc_link *l_ptr, | ||
736 | struct sk_buff *buf_chain, | ||
737 | u32 long_msgno) | ||
738 | { | ||
739 | struct sk_buff *buf; | ||
740 | struct tipc_msg *msg; | ||
741 | |||
742 | if (!l_ptr->next_out) | ||
743 | l_ptr->next_out = buf_chain; | ||
744 | while (buf_chain) { | ||
745 | buf = buf_chain; | ||
746 | buf_chain = buf_chain->next; | ||
747 | |||
748 | msg = buf_msg(buf); | ||
749 | msg_set_long_msgno(msg, long_msgno); | ||
750 | link_add_to_outqueue(l_ptr, buf, msg); | ||
751 | } | ||
752 | } | ||
753 | |||
754 | /* | ||
755 | * tipc_link_xmit() is the 'full path' for messages, called from | ||
756 | * inside TIPC when the 'fast path' in tipc_send_xmit | ||
757 | * has failed, and from link_send() | ||
758 | */ | ||
759 | int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf) | ||
760 | { | ||
761 | struct tipc_msg *msg = buf_msg(buf); | ||
762 | u32 size = msg_size(msg); | ||
763 | u32 dsz = msg_data_sz(msg); | ||
764 | u32 queue_size = l_ptr->out_queue_size; | ||
765 | u32 imp = tipc_msg_tot_importance(msg); | ||
766 | u32 queue_limit = l_ptr->queue_limit[imp]; | ||
767 | u32 max_packet = l_ptr->max_pkt; | ||
768 | |||
769 | /* Match msg importance against queue limits: */ | ||
770 | if (unlikely(queue_size >= queue_limit)) { | ||
771 | if (imp <= TIPC_CRITICAL_IMPORTANCE) { | ||
772 | link_schedule_port(l_ptr, msg_origport(msg), size); | ||
773 | kfree_skb(buf); | ||
774 | return -ELINKCONG; | ||
775 | } | ||
776 | kfree_skb(buf); | ||
777 | if (imp > CONN_MANAGER) { | ||
778 | pr_warn("%s<%s>, send queue full", link_rst_msg, | ||
779 | l_ptr->name); | ||
780 | tipc_link_reset(l_ptr); | ||
781 | } | ||
782 | return dsz; | ||
783 | } | ||
784 | |||
785 | /* Fragmentation needed ? */ | ||
786 | if (size > max_packet) | ||
787 | return tipc_link_frag_xmit(l_ptr, buf); | ||
788 | |||
789 | /* Packet can be queued or sent. */ | ||
790 | if (likely(!link_congested(l_ptr))) { | ||
791 | link_add_to_outqueue(l_ptr, buf, msg); | ||
792 | |||
793 | tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); | ||
794 | l_ptr->unacked_window = 0; | ||
795 | return dsz; | ||
796 | } | ||
797 | /* Congestion: can message be bundled ? */ | ||
798 | if ((msg_user(msg) != CHANGEOVER_PROTOCOL) && | ||
799 | (msg_user(msg) != MSG_FRAGMENTER)) { | ||
800 | |||
801 | /* Try adding message to an existing bundle */ | ||
802 | if (l_ptr->next_out && | ||
803 | link_bundle_buf(l_ptr, l_ptr->last_out, buf)) | ||
804 | return dsz; | ||
805 | |||
806 | /* Try creating a new bundle */ | ||
807 | if (size <= max_packet * 2 / 3) { | ||
808 | struct sk_buff *bundler = tipc_buf_acquire(max_packet); | ||
809 | struct tipc_msg bundler_hdr; | ||
810 | |||
811 | if (bundler) { | ||
812 | tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, | ||
813 | INT_H_SIZE, l_ptr->addr); | ||
814 | skb_copy_to_linear_data(bundler, &bundler_hdr, | ||
815 | INT_H_SIZE); | ||
816 | skb_trim(bundler, INT_H_SIZE); | ||
817 | link_bundle_buf(l_ptr, bundler, buf); | ||
818 | buf = bundler; | ||
819 | msg = buf_msg(buf); | ||
820 | l_ptr->stats.sent_bundles++; | ||
821 | } | ||
822 | } | ||
823 | } | ||
824 | if (!l_ptr->next_out) | ||
825 | l_ptr->next_out = buf; | ||
826 | link_add_to_outqueue(l_ptr, buf, msg); | ||
827 | return dsz; | ||
828 | } | ||
829 | |||
830 | /* | ||
831 | * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use | ||
832 | * has not been selected yet, and the owner node is not locked | ||
833 | * Called by TIPC internal users, e.g. the name distributor | ||
834 | */ | ||
835 | int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector) | ||
836 | { | ||
837 | struct tipc_link *l_ptr; | ||
838 | struct tipc_node *n_ptr; | ||
839 | int res = -ELINKCONG; | ||
840 | |||
841 | n_ptr = tipc_node_find(dest); | ||
842 | if (n_ptr) { | ||
843 | tipc_node_lock(n_ptr); | ||
844 | l_ptr = n_ptr->active_links[selector & 1]; | ||
845 | if (l_ptr) | ||
846 | res = __tipc_link_xmit(l_ptr, buf); | ||
847 | else | ||
848 | kfree_skb(buf); | ||
849 | tipc_node_unlock(n_ptr); | ||
850 | } else { | ||
851 | kfree_skb(buf); | ||
852 | } | ||
853 | return res; | ||
854 | } | ||
855 | |||
856 | /* tipc_link_cong: determine return value and how to treat the | 681 | /* tipc_link_cong: determine return value and how to treat the |
857 | * sent buffer during link congestion. | 682 | * sent buffer during link congestion. |
858 | * - For plain, errorless user data messages we keep the buffer and | 683 | * - For plain, errorless user data messages we keep the buffer and |
@@ -2123,78 +1948,6 @@ void tipc_link_bundle_rcv(struct sk_buff *buf) | |||
2123 | kfree_skb(buf); | 1948 | kfree_skb(buf); |
2124 | } | 1949 | } |
2125 | 1950 | ||
2126 | /* | ||
2127 | * Fragmentation/defragmentation: | ||
2128 | */ | ||
2129 | |||
2130 | /* | ||
2131 | * tipc_link_frag_xmit: Entry for buffers needing fragmentation. | ||
2132 | * The buffer is complete, inclusive total message length. | ||
2133 | * Returns user data length. | ||
2134 | */ | ||
2135 | static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf) | ||
2136 | { | ||
2137 | struct sk_buff *buf_chain = NULL; | ||
2138 | struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain; | ||
2139 | struct tipc_msg *inmsg = buf_msg(buf); | ||
2140 | struct tipc_msg fragm_hdr; | ||
2141 | u32 insize = msg_size(inmsg); | ||
2142 | u32 dsz = msg_data_sz(inmsg); | ||
2143 | unchar *crs = buf->data; | ||
2144 | u32 rest = insize; | ||
2145 | u32 pack_sz = l_ptr->max_pkt; | ||
2146 | u32 fragm_sz = pack_sz - INT_H_SIZE; | ||
2147 | u32 fragm_no = 0; | ||
2148 | u32 destaddr; | ||
2149 | |||
2150 | if (msg_short(inmsg)) | ||
2151 | destaddr = l_ptr->addr; | ||
2152 | else | ||
2153 | destaddr = msg_destnode(inmsg); | ||
2154 | |||
2155 | /* Prepare reusable fragment header: */ | ||
2156 | tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, | ||
2157 | INT_H_SIZE, destaddr); | ||
2158 | |||
2159 | /* Chop up message: */ | ||
2160 | while (rest > 0) { | ||
2161 | struct sk_buff *fragm; | ||
2162 | |||
2163 | if (rest <= fragm_sz) { | ||
2164 | fragm_sz = rest; | ||
2165 | msg_set_type(&fragm_hdr, LAST_FRAGMENT); | ||
2166 | } | ||
2167 | fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); | ||
2168 | if (fragm == NULL) { | ||
2169 | kfree_skb(buf); | ||
2170 | kfree_skb_list(buf_chain); | ||
2171 | return -ENOMEM; | ||
2172 | } | ||
2173 | msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); | ||
2174 | fragm_no++; | ||
2175 | msg_set_fragm_no(&fragm_hdr, fragm_no); | ||
2176 | skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); | ||
2177 | skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, | ||
2178 | fragm_sz); | ||
2179 | buf_chain_tail->next = fragm; | ||
2180 | buf_chain_tail = fragm; | ||
2181 | |||
2182 | rest -= fragm_sz; | ||
2183 | crs += fragm_sz; | ||
2184 | msg_set_type(&fragm_hdr, FRAGMENT); | ||
2185 | } | ||
2186 | kfree_skb(buf); | ||
2187 | |||
2188 | /* Append chain of fragments to send queue & send them */ | ||
2189 | l_ptr->long_msg_seq_no++; | ||
2190 | link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); | ||
2191 | l_ptr->stats.sent_fragments += fragm_no; | ||
2192 | l_ptr->stats.sent_fragmented++; | ||
2193 | tipc_link_push_queue(l_ptr); | ||
2194 | |||
2195 | return dsz; | ||
2196 | } | ||
2197 | |||
2198 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) | 1951 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) |
2199 | { | 1952 | { |
2200 | if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) | 1953 | if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) |