-rw-r--r--  net/caif/caif_socket.c | 10
-rw-r--r--  net/caif/cfmuxl.c      |  3
2 files changed, 7 insertions, 6 deletions
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 732897d64f41..691a5710974e 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -60,7 +60,7 @@ struct debug_fs_counter {
 	atomic_t num_rx_flow_off;
 	atomic_t num_rx_flow_on;
 };
-struct debug_fs_counter cnt;
+static struct debug_fs_counter cnt;
 #define	dbfs_atomic_inc(v) atomic_inc(v)
 #define	dbfs_atomic_dec(v) atomic_dec(v)
 #else
@@ -128,13 +128,13 @@ static void caif_read_unlock(struct sock *sk)
 	mutex_unlock(&cf_sk->readlock);
 }
 
-int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
 {
 	/* A quarter of full buffer is used a low water mark */
 	return cf_sk->sk.sk_rcvbuf / 4;
 }
 
-void caif_flow_ctrl(struct sock *sk, int mode)
+static void caif_flow_ctrl(struct sock *sk, int mode)
 {
 	struct caifsock *cf_sk;
 	cf_sk = container_of(sk, struct caifsock, sk);
@@ -146,7 +146,7 @@ void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 	int skb_len;
@@ -1184,7 +1184,7 @@ static struct net_proto_family caif_family_ops = {
 	.owner = THIS_MODULE,
 };
 
-int af_caif_init(void)
+static int af_caif_init(void)
 {
 	int err = sock_register(&caif_family_ops);
 	if (!err)
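
Note: the caif_socket.c hunks above only add the "static" qualifier to symbols used within this file, giving them internal linkage so they neither clash with nor leak into the kernel's global symbol namespace. A minimal standalone sketch of the same idea, with hypothetical names rather than the CAIF code itself:

    /* file_local.c -- illustration only, not kernel code.
     * 'static' gives the counter and helper internal linkage, so other
     * translation units cannot reference (or collide with) them. */
    #include <stdio.h>

    static int pkt_count;                  /* file-local, not exported */

    static int low_watermark(int bufsize)  /* file-local helper */
    {
            /* same idea as sk_rcvbuf_lowwater(): a quarter of the buffer */
            return bufsize / 4;
    }

    int main(void)
    {
            pkt_count++;
            printf("low watermark: %d\n", low_watermark(4096));
            return 0;
    }
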
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 7372f27f1d32..80c8d332b258 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -174,10 +174,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
 	spin_lock(&muxl->receive_lock);
 	up = get_up(muxl, id);
 	if (up == NULL)
-		return NULL;
+		goto out;
 	memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
 	list_del(&up->node);
 	cfsrvl_put(up);
+out:
 	spin_unlock(&muxl->receive_lock);
 	return up;
 }
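
Note: the cfmuxl.c hunk replaces an early return that skipped spin_unlock() with a jump to a shared exit label, so the receive_lock is released on every path out of cfmuxl_remove_uplayer(). A minimal standalone sketch of that unlock-on-all-paths pattern, using a pthread mutex and hypothetical names rather than the CAIF types:

    /* unlock_on_exit.c -- illustration only, not kernel code. */
    #include <pthread.h>
    #include <stddef.h>

    #define TABLE_SIZE 16

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *table[TABLE_SIZE];

    /* Remove and return entry 'id', or NULL if absent. Every exit path
     * goes through the 'out' label, so the mutex is always released --
     * the same shape as the goto-out fix above. */
    static void *table_remove(unsigned int id)
    {
            void *entry = NULL;

            pthread_mutex_lock(&table_lock);
            if (id >= TABLE_SIZE || table[id] == NULL)
                    goto out;              /* early exit still unlocks */
            entry = table[id];
            table[id] = NULL;
    out:
            pthread_mutex_unlock(&table_lock);
            return entry;
    }

    int main(void)
    {
            table[3] = &table_lock;        /* arbitrary non-NULL entry */
            return table_remove(3) == NULL;  /* 0 on success */
    }
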
