Diffstat (limited to 'net/phonet/socket.c')
-rw-r--r--  net/phonet/socket.c | 332
 1 file changed, 296 insertions(+), 36 deletions(-)
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 6e9848bf0370..ab07711cf2f4 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -52,7 +52,7 @@ static int pn_socket_release(struct socket *sock)
 
 static struct {
 	struct hlist_head hlist[PN_HASHSIZE];
-	spinlock_t lock;
+	struct mutex lock;
 } pnsocks;
 
 void __init pn_sock_init(void)
@@ -61,7 +61,7 @@ void __init pn_sock_init(void)
 
 	for (i = 0; i < PN_HASHSIZE; i++)
 		INIT_HLIST_HEAD(pnsocks.hlist + i);
-	spin_lock_init(&pnsocks.lock);
+	mutex_init(&pnsocks.lock);
 }
 
 static struct hlist_head *pn_hash_list(u16 obj)
@@ -82,9 +82,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 	u8 res = spn->spn_resource;
 	struct hlist_head *hlist = pn_hash_list(obj);
 
-	spin_lock_bh(&pnsocks.lock);
-
-	sk_for_each(sknode, node, hlist) {
+	rcu_read_lock();
+	sk_for_each_rcu(sknode, node, hlist) {
 		struct pn_sock *pn = pn_sk(sknode);
 		BUG_ON(!pn->sobject); /* unbound socket */
 
@@ -107,8 +106,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 		sock_hold(sknode);
 		break;
 	}
-
-	spin_unlock_bh(&pnsocks.lock);
+	rcu_read_unlock();
 
 	return rval;
 }
@@ -119,7 +117,7 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
 	struct hlist_head *hlist = pnsocks.hlist;
 	unsigned h;
 
-	spin_lock(&pnsocks.lock);
+	rcu_read_lock();
 	for (h = 0; h < PN_HASHSIZE; h++) {
 		struct hlist_node *node;
 		struct sock *sknode;
@@ -140,24 +138,26 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
 		}
 		hlist++;
 	}
-	spin_unlock(&pnsocks.lock);
+	rcu_read_unlock();
 }
 
 void pn_sock_hash(struct sock *sk)
 {
 	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
 
-	spin_lock_bh(&pnsocks.lock);
-	sk_add_node(sk, hlist);
-	spin_unlock_bh(&pnsocks.lock);
+	mutex_lock(&pnsocks.lock);
+	sk_add_node_rcu(sk, hlist);
+	mutex_unlock(&pnsocks.lock);
 }
 EXPORT_SYMBOL(pn_sock_hash);
 
 void pn_sock_unhash(struct sock *sk)
 {
-	spin_lock_bh(&pnsocks.lock);
-	sk_del_node_init(sk);
-	spin_unlock_bh(&pnsocks.lock);
+	mutex_lock(&pnsocks.lock);
+	sk_del_node_init_rcu(sk);
+	mutex_unlock(&pnsocks.lock);
+	pn_sock_unbind_all_res(sk);
+	synchronize_rcu();
 }
 EXPORT_SYMBOL(pn_sock_unhash);
 
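For reference, the locking scheme the hunks above converge on is the usual RCU-protected hash list: lookups run locklessly under rcu_read_lock() and take a socket reference before leaving the read-side critical section, while updates are serialized on the pnsocks mutex and wait out a grace period before the final sock_put(). A condensed sketch of that pattern, not part of the patch; match() is a hypothetical stand-in for the address comparison done in pn_find_sock_by_sa():

/* Sketch only: reader side of the RCU-protected socket hash. */
static struct sock *pn_lookup_sketch(struct hlist_head *hlist)
{
	struct hlist_node *node;
	struct sock *sknode, *rval = NULL;

	rcu_read_lock();			/* readers never block writers */
	sk_for_each_rcu(sknode, node, hlist) {
		if (match(sknode)) {		/* hypothetical predicate */
			sock_hold(sknode);	/* pin before leaving the read side */
			rval = sknode;
			break;
		}
	}
	rcu_read_unlock();
	return rval;
}

/* Sketch only: writer side, mirroring pn_sock_unhash() above. */
static void pn_unhash_sketch(struct sock *sk)
{
	mutex_lock(&pnsocks.lock);		/* writers serialize on the mutex */
	sk_del_node_init_rcu(sk);
	mutex_unlock(&pnsocks.lock);
	synchronize_rcu();			/* grace period before the last sock_put() */
}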
@@ -224,6 +224,83 @@ static int pn_socket_autobind(struct socket *sock)
 	return 0; /* socket was already bound */
 }
 
+static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
+		int len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pn_sock *pn = pn_sk(sk);
+	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
+	struct task_struct *tsk = current;
+	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+	int err;
+
+	if (pn_socket_autobind(sock))
+		return -ENOBUFS;
+	if (len < sizeof(struct sockaddr_pn))
+		return -EINVAL;
+	if (spn->spn_family != AF_PHONET)
+		return -EAFNOSUPPORT;
+
+	lock_sock(sk);
+
+	switch (sock->state) {
+	case SS_UNCONNECTED:
+		if (sk->sk_state != TCP_CLOSE) {
+			err = -EISCONN;
+			goto out;
+		}
+		break;
+	case SS_CONNECTING:
+		err = -EALREADY;
+		goto out;
+	default:
+		err = -EISCONN;
+		goto out;
+	}
+
+	pn->dobject = pn_sockaddr_get_object(spn);
+	pn->resource = pn_sockaddr_get_resource(spn);
+	sock->state = SS_CONNECTING;
+
+	err = sk->sk_prot->connect(sk, addr, len);
+	if (err) {
+		sock->state = SS_UNCONNECTED;
+		pn->dobject = 0;
+		goto out;
+	}
+
+	while (sk->sk_state == TCP_SYN_SENT) {
+		DEFINE_WAIT(wait);
+
+		if (!timeo) {
+			err = -EINPROGRESS;
+			goto out;
+		}
+		if (signal_pending(tsk)) {
+			err = sock_intr_errno(timeo);
+			goto out;
+		}
+
+		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
+						TASK_INTERRUPTIBLE);
+		release_sock(sk);
+		timeo = schedule_timeout(timeo);
+		lock_sock(sk);
+		finish_wait(sk_sleep(sk), &wait);
+	}
+
+	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
+		err = 0;
+	else if (sk->sk_state == TCP_CLOSE_WAIT)
+		err = -ECONNRESET;
+	else
+		err = -ECONNREFUSED;
+	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
+out:
+	release_sock(sk);
+	return err;
+}
+
 static int pn_socket_accept(struct socket *sock, struct socket *newsock,
 		int flags)
 {
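With phonet_stream_ops.connect pointing at pn_socket_connect() (see the .connect change further down), a Phonet pipe socket can be connected with the ordinary BSD socket calls. A minimal user-space sketch, assuming AF_PHONET/PN_PROTO_PIPE from the kernel headers are available; the device address and resource values below are placeholders:

#include <sys/socket.h>
#include <linux/phonet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_pn spn;
	int fd = socket(AF_PHONET, SOCK_STREAM, PN_PROTO_PIPE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&spn, 0, sizeof(spn));
	spn.spn_family = AF_PHONET;
	spn.spn_dev = 0x60;		/* placeholder device address */
	spn.spn_resource = 0xD9;	/* placeholder resource */

	/* Blocking connect: the kernel waits in pn_socket_connect() until the
	 * pipe is established, reset (ECONNRESET) or refused.  On a
	 * non-blocking socket the call fails with errno == EINPROGRESS
	 * instead, and completion can be observed with poll(POLLOUT). */
	if (connect(fd, (struct sockaddr *)&spn, sizeof(spn)) < 0)
		perror("connect");

	close(fd);
	return 0;
}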
@@ -231,6 +308,9 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
 	struct sock *newsk;
 	int err;
 
+	if (unlikely(sk->sk_state != TCP_LISTEN))
+		return -EINVAL;
+
 	newsk = sk->sk_prot->accept(sk, flags, &err);
 	if (!newsk)
 		return err;
@@ -267,13 +347,8 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
 
 	poll_wait(file, sk_sleep(sk), wait);
 
-	switch (sk->sk_state) {
-	case TCP_LISTEN:
-		return hlist_empty(&pn->ackq) ? 0 : POLLIN;
-	case TCP_CLOSE:
+	if (sk->sk_state == TCP_CLOSE)
 		return POLLERR;
-	}
-
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
 	if (!skb_queue_empty(&pn->ctrlreq_queue))
@@ -281,7 +356,9 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
 	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
 		return POLLHUP;
 
-	if (sk->sk_state == TCP_ESTABLISHED && atomic_read(&pn->tx_credits))
+	if (sk->sk_state == TCP_ESTABLISHED &&
+	    atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+	    atomic_read(&pn->tx_credits))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 
 	return mask;
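With the extra condition above, POLLOUT on an established pipe socket now requires both peer flow-control credits and room in the send buffer, so a writer can poll for writability as on any other stream socket. A small user-space sketch; wait_writable() is a hypothetical helper, error handling trimmed:

#include <poll.h>
#include <stdio.h>

/* Returns 1 when fd is writable, 0 on timeout, -1 on error. */
static int wait_writable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	int n = poll(&pfd, 1, timeout_ms);

	if (n < 0) {
		perror("poll");
		return -1;
	}
	if (n == 0)
		return 0;	/* timed out: no credits or send buffer full */
	return (pfd.revents & POLLOUT) ? 1 : -1;
}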
@@ -330,19 +407,19 @@ static int pn_socket_listen(struct socket *sock, int backlog)
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	if (sock->state != SS_UNCONNECTED)
-		return -EINVAL;
 	if (pn_socket_autobind(sock))
 		return -ENOBUFS;
 
 	lock_sock(sk);
-	if (sk->sk_state != TCP_CLOSE) {
+	if (sock->state != SS_UNCONNECTED) {
 		err = -EINVAL;
 		goto out;
 	}
 
-	sk->sk_state = TCP_LISTEN;
-	sk->sk_ack_backlog = 0;
+	if (sk->sk_state != TCP_LISTEN) {
+		sk->sk_state = TCP_LISTEN;
+		sk->sk_ack_backlog = 0;
+	}
 	sk->sk_max_ack_backlog = backlog;
 out:
 	release_sock(sk);
@@ -390,7 +467,7 @@ const struct proto_ops phonet_stream_ops = {
 	.owner		= THIS_MODULE,
 	.release	= pn_socket_release,
 	.bind		= pn_socket_bind,
-	.connect	= sock_no_connect,
+	.connect	= pn_socket_connect,
 	.socketpair	= sock_no_socketpair,
 	.accept		= pn_socket_accept,
 	.getname	= pn_socket_getname,
@@ -470,7 +547,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
 	unsigned h;
 
 	for (h = 0; h < PN_HASHSIZE; h++) {
-		sk_for_each(sknode, node, hlist) {
+		sk_for_each_rcu(sknode, node, hlist) {
 			if (!net_eq(net, sock_net(sknode)))
 				continue;
 			if (!pos)
@@ -494,9 +571,9 @@ static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
 }
 
 static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(pnsocks.lock)
+	__acquires(rcu)
 {
-	spin_lock_bh(&pnsocks.lock);
+	rcu_read_lock();
 	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 
@@ -513,9 +590,9 @@ static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void pn_sock_seq_stop(struct seq_file *seq, void *v)
-	__releases(pnsocks.lock)
+	__releases(rcu)
 {
-	spin_unlock_bh(&pnsocks.lock);
+	rcu_read_unlock();
 }
 
 static int pn_sock_seq_show(struct seq_file *seq, void *v)
@@ -530,9 +607,9 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
 		struct pn_sock *pn = pn_sk(sk);
 
 		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
-			"%d %p %d%n",
-			sk->sk_protocol, pn->sobject, 0, pn->resource,
-			sk->sk_state,
+			"%d %pK %d%n",
+			sk->sk_protocol, pn->sobject, pn->dobject,
+			pn->resource, sk->sk_state,
 			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
 			sock_i_uid(sk), sock_i_ino(sk),
 			atomic_read(&sk->sk_refcnt), sk,
@@ -563,3 +640,186 @@ const struct file_operations pn_sock_seq_fops = {
 	.release = seq_release_net,
 };
 #endif
+
+static struct {
+	struct sock *sk[256];
+} pnres;
+
+/*
+ * Find and hold socket based on resource.
+ */
+struct sock *pn_find_sock_by_res(struct net *net, u8 res)
+{
+	struct sock *sk;
+
+	if (!net_eq(net, &init_net))
+		return NULL;
+
+	rcu_read_lock();
+	sk = rcu_dereference(pnres.sk[res]);
+	if (sk)
+		sock_hold(sk);
+	rcu_read_unlock();
+	return sk;
+}
+
+static DEFINE_MUTEX(resource_mutex);
+
+int pn_sock_bind_res(struct sock *sk, u8 res)
+{
+	int ret = -EADDRINUSE;
+
+	if (!net_eq(sock_net(sk), &init_net))
+		return -ENOIOCTLCMD;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (pn_socket_autobind(sk->sk_socket))
+		return -EAGAIN;
+
+	mutex_lock(&resource_mutex);
+	if (pnres.sk[res] == NULL) {
+		sock_hold(sk);
+		rcu_assign_pointer(pnres.sk[res], sk);
+		ret = 0;
+	}
+	mutex_unlock(&resource_mutex);
+	return ret;
+}
+
+int pn_sock_unbind_res(struct sock *sk, u8 res)
+{
+	int ret = -ENOENT;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	mutex_lock(&resource_mutex);
+	if (pnres.sk[res] == sk) {
+		rcu_assign_pointer(pnres.sk[res], NULL);
+		ret = 0;
+	}
+	mutex_unlock(&resource_mutex);
+
+	if (ret == 0) {
+		synchronize_rcu();
+		sock_put(sk);
+	}
+	return ret;
+}
+
+void pn_sock_unbind_all_res(struct sock *sk)
+{
+	unsigned res, match = 0;
+
+	mutex_lock(&resource_mutex);
+	for (res = 0; res < 256; res++) {
+		if (pnres.sk[res] == sk) {
+			rcu_assign_pointer(pnres.sk[res], NULL);
+			match++;
+		}
+	}
+	mutex_unlock(&resource_mutex);
+
+	while (match > 0) {
+		__sock_put(sk);
+		match--;
+	}
+	/* Caller is responsible for RCU sync before final sock_put() */
+}
+
+#ifdef CONFIG_PROC_FS
+static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct net *net = seq_file_net(seq);
+	unsigned i;
+
+	if (!net_eq(net, &init_net))
+		return NULL;
+
+	for (i = 0; i < 256; i++) {
+		if (pnres.sk[i] == NULL)
+			continue;
+		if (!pos)
+			return pnres.sk + i;
+		pos--;
+	}
+	return NULL;
+}
+
+static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
+{
+	struct net *net = seq_file_net(seq);
+	unsigned i;
+
+	BUG_ON(!net_eq(net, &init_net));
+
+	for (i = (sk - pnres.sk) + 1; i < 256; i++)
+		if (pnres.sk[i])
+			return pnres.sk + i;
+	return NULL;
+}
+
+static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(resource_mutex)
+{
+	mutex_lock(&resource_mutex);
+	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct sock **sk;
+
+	if (v == SEQ_START_TOKEN)
+		sk = pn_res_get_idx(seq, 0);
+	else
+		sk = pn_res_get_next(seq, v);
+	(*pos)++;
+	return sk;
+}
+
+static void pn_res_seq_stop(struct seq_file *seq, void *v)
+	__releases(resource_mutex)
+{
+	mutex_unlock(&resource_mutex);
+}
+
+static int pn_res_seq_show(struct seq_file *seq, void *v)
+{
+	int len;
+
+	if (v == SEQ_START_TOKEN)
+		seq_printf(seq, "%s%n", "rs   uid inode", &len);
+	else {
+		struct sock **psk = v;
+		struct sock *sk = *psk;
+
+		seq_printf(seq, "%02X %5d %lu%n",
+			   (int) (psk - pnres.sk), sock_i_uid(sk),
+			   sock_i_ino(sk), &len);
+	}
+	seq_printf(seq, "%*s\n", 63 - len, "");
+	return 0;
+}
+
+static const struct seq_operations pn_res_seq_ops = {
+	.start = pn_res_seq_start,
+	.next = pn_res_seq_next,
+	.stop = pn_res_seq_stop,
+	.show = pn_res_seq_show,
+};
+
+static int pn_res_open(struct inode *inode, struct file *file)
+{
+	return seq_open_net(inode, file, &pn_res_seq_ops,
+				sizeof(struct seq_net_private));
+}
+
+const struct file_operations pn_res_seq_fops = {
+	.owner = THIS_MODULE,
+	.open = pn_res_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_net,
+};
+#endif
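The pnres table gives the Phonet core a per-resource delivery target: pn_find_sock_by_res() returns the bound socket with a reference held, and the caller must drop that reference when it is done. The receive path that actually consumes this lives elsewhere in the Phonet stack; the sketch below only illustrates the lookup/hold/put contract, with pn_res_deliver_sketch() as a hypothetical caller:

/* Sketch only: deliver an skb to the socket bound to a given resource. */
static int pn_res_deliver_sketch(struct net *net, struct sk_buff *skb, u8 res)
{
	struct sock *sk = pn_find_sock_by_res(net, res);
	int err = -ENOENT;

	if (sk) {
		err = sock_queue_rcv_skb(sk, skb);	/* hand off to the owner */
		sock_put(sk);				/* drop the lookup reference */
	}
	if (err)
		kfree_skb(skb);				/* not delivered: free it */
	return err;
}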