author    Mike Christie <michaelc@cs.wisc.edu>	2014-10-16 02:50:19 -0400
committer Ilya Dryomov <idryomov@redhat.com>	2014-10-30 06:11:50 -0400
commit    89baaa570ab0b476db09408d209578cfed700e9f (patch)
tree      ce7d44eae139ca6b5c03026e8b471727f0df1fc1 /net
parent    f5ee37bd31678d6cb2313631f203794b5c25e862 (diff)
libceph: use memalloc flags for net IO
This patch makes ceph's lib code use the memalloc flags.

If the VM layer needs to write data out to free up memory to handle new
allocation requests, the block layer must be able to make forward progress. To
handle that requirement we use structs like mempools to reserve memory for
objects like bios and requests. The problem is that when we send/receive block
layer requests over the network layer, net skb allocations can fail and the
system can lock up. To solve this, the memalloc related flags were added. NBD,
iSCSI and NFS use these flags to tell the network/vm layer that it should use
memory reserves to fulfill allocation requests for structs like skbs.

I am running ceph in a bunch of VMs on my laptop, so this patch was not tested
very harshly.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Reviewed-by: Ilya Dryomov <idryomov@redhat.com>
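For context, a minimal sketch of the pattern the patch applies, assuming the
kernel APIs it relies on (sk_set_memalloc(), PF_MEMALLOC, tsk_restore_flags());
the helpers mark_sock_memalloc() and net_io_worker() below are illustrative
names, not part of this patch:

#include <linux/net.h>
#include <linux/sched.h>
#include <net/sock.h>

/* Illustrative helper: let a kernel socket's skb allocations dip into
 * the memory reserves when the system is under memory pressure. */
static void mark_sock_memalloc(struct socket *sock)
{
	sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
	sk_set_memalloc(sock->sk);
}

/* Illustrative worker: allow this task's allocations to use reserves
 * for the duration of the network I/O, then restore the original
 * PF_MEMALLOC state. */
static void net_io_worker(struct socket *sock)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;

	/* ... send/receive over sock ... */

	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}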
Diffstat (limited to 'net')
-rw-r--r--	net/ceph/messenger.c	10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 559c9f619c20..8d1653caffdb 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 			       IPPROTO_TCP, &sock);
 	if (ret)
 		return ret;
-	sock->sk->sk_allocation = GFP_NOFS;
+	sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
 
 #ifdef CONFIG_LOCKDEP
 	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
@@ -509,6 +509,9 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 
 		return ret;
 	}
+
+	sk_set_memalloc(sock->sk);
+
 	con->sock = sock;
 	return 0;
 }
@@ -2769,8 +2772,11 @@ static void con_work(struct work_struct *work)
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
+	unsigned long pflags = current->flags;
 	bool fault;
 
+	current->flags |= PF_MEMALLOC;
+
 	mutex_lock(&con->mutex);
 	while (true) {
 		int ret;
@@ -2824,6 +2830,8 @@ static void con_work(struct work_struct *work)
 	con_fault_finish(con);
 
 	con->ops->put(con);
+
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 }
 
 /*