Commit a90bfb0

3.4-stable patches
added patches:
    ceph-call-handle_cap_grant-for-cap-import-message.patch
    ceph-close-old-con-before-reopening-on-mds-reconnect.patch
    ceph-don-t-add-dirty-inode-to-dirty-list-if-caps-is-in-migration.patch
    ceph-don-t-reference-req-after-put.patch
    ceph-don-t-update-i_max_size-when-handling-non-auth-cap.patch
    ceph-fix-__ceph_do_pending_vmtruncate.patch
    ceph-fix-infinite-loop-in-__wake_requests.patch
    ceph-propagate-layout-error-on-osd-request-creation.patch
    ceph-tolerate-and-warn-on-extraneous-dentry-from-mds.patch
    libceph-always-reset-osds-when-kicking.patch
    libceph-avoid-using-freed-osd-in-__kick_osd_requests.patch
    libceph-don-t-use-rb_init_node-in-ceph_osdc_alloc_request.patch
    libceph-fix-osdmap-decode-error-paths.patch
    libceph-fix-protocol-feature-mismatch-failure-path.patch
    libceph-init-event-node-in-ceph_osdc_create_event.patch
    libceph-init-osd-o_node-in-create_osd.patch
    libceph-move-linger-requests-sooner-in-kick_requests.patch
    libceph-register-request-before-unregister-linger.patch
    libceph-remove-osdtimeout-option.patch
    libceph-report-connection-fault-with-warning.patch
    libceph-socket-can-close-in-any-connection-state.patch
    libceph-unlock-unprocessed-pages-in-start_read-error-path.patch
    libceph-warn-don-t-bug-on-unexpected-connection-states.patch
    rbd-add-read_only-rbd-map-option.patch
    rbd-bug-on-invalid-layout.patch
    rbd-do-not-allow-remove-of-mounted-on-image.patch
    rbd-drop-dev-reference-on-error-in-rbd_open.patch
    rbd-expose-the-correct-size-of-the-device-in-sysfs.patch
    rbd-fix-bug-in-rbd_dev_id_put.patch
    rbd-kill-create_snap-sysfs-entry.patch
    rbd-kill-notify_timeout-option.patch
    rbd-only-reset-capacity-when-pointing-to-head.patch
    rbd-remove-linger-unconditionally.patch
    rbd-return-errors-for-mapped-but-deleted-snapshot.patch
    rbd-send-header-version-when-notifying.patch
    rbd-set-image-size-when-header-is-updated.patch
    rbd-use-reference-counting-for-the-snap-context.patch
1 parent 1fb8fa8 commit a90bfb0

38 files changed

Lines changed: 2519 additions & 0 deletions


ceph-call-handle_cap_grant-for-cap-import-message.patch (49 additions, 0 deletions)

From bbe2e24262afd42a08e2fbc4fb1c134b94f64a57 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng" <[email protected]>
Date: Mon, 19 Nov 2012 10:49:09 +0800
Subject: ceph: call handle_cap_grant() for cap import message

From: "Yan, Zheng" <[email protected]>

If the client sends a cap message that requests a new max size while
caps are being exported, the exporting MDS will drop the message
quietly, so the client may wait forever for the reply that updates the
max size. Calling handle_cap_grant() for the cap import message avoids
this issue.

Signed-off-by: Yan, Zheng <[email protected]>
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit 0e5e1774a92e6fe9c511585de8f078b4c4c68dbb)
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
fs/ceph/caps.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2749,6 +2749,7 @@ static void handle_cap_import(struct cep

/* make sure we re-request max_size, if necessary */
spin_lock(&ci->i_ceph_lock);
+ ci->i_wanted_max_size = 0; /* reset */
ci->i_requested_max_size = 0;
spin_unlock(&ci->i_ceph_lock);
}
@@ -2844,8 +2845,6 @@ void ceph_handle_caps(struct ceph_mds_se
case CEPH_CAP_OP_IMPORT:
handle_cap_import(mdsc, inode, h, session,
snaptrace, snaptrace_len);
- ceph_check_caps(ceph_inode(inode), 0, session);
- goto done_unlocked;
}

/* the rest require a cap */
@@ -2862,6 +2861,7 @@ void ceph_handle_caps(struct ceph_mds_se
switch (op) {
case CEPH_CAP_OP_REVOKE:
case CEPH_CAP_OP_GRANT:
+ case CEPH_CAP_OP_IMPORT:
handle_cap_grant(inode, h, session, cap, msg->middle);
goto done_unlocked;
ceph-close-old-con-before-reopening-on-mds-reconnect.patch (31 additions, 0 deletions)

From c9a3f6ab3490925ecc0714a5e4fd4c8b0a110bc4 Mon Sep 17 00:00:00 2001
From: Sage Weil <[email protected]>
Date: Mon, 30 Jul 2012 16:21:17 -0700
Subject: ceph: close old con before reopening on mds reconnect

From: Sage Weil <[email protected]>

When we detect a mds session reset, close the old ceph_connection before
reopening it. This ensures we clean up the old socket properly and keep
the ceph_connection state correct.

Signed-off-by: Sage Weil <[email protected]>
Reviewed-by: Alex Elder <[email protected]>
Reviewed-by: Yehuda Sadeh <[email protected]>
(cherry picked from commit a53aab645c82f0146e35684b34692c69b5118121)
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
fs/ceph/mds_client.c | 1 +
1 file changed, 1 insertion(+)

--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2528,6 +2528,7 @@ static void send_mds_reconnect(struct ce
session->s_state = CEPH_MDS_SESSION_RECONNECTING;
session->s_seq = 0;

+ ceph_con_close(&session->s_con);
ceph_con_open(&session->s_con,
CEPH_ENTITY_TYPE_MDS, mds,
ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
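Note: the idea above is a generic one: before re-establishing a connection, tear the old one down so its socket and tracked state are not leaked. Below is a minimal userspace sketch of that close-before-reopen idiom using plain BSD sockets; it is an illustration only, not the kernel messenger code, and struct conn, conn_close() and conn_reconnect() are hypothetical names.

/* Minimal userspace sketch (assumed names, not kernel code): reset a
 * connection by closing the old socket before opening a new one, so the
 * old descriptor is not leaked and the tracked state stays consistent. */
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct conn {
	int fd;                   /* -1 while closed */
	struct sockaddr_in peer;  /* address we (re)connect to */
};

static void conn_close(struct conn *c)
{
	if (c->fd >= 0) {
		close(c->fd);     /* release the old socket first */
		c->fd = -1;
	}
}

static int conn_open(struct conn *c)
{
	c->fd = socket(AF_INET, SOCK_STREAM, 0);
	if (c->fd < 0)
		return -1;
	return connect(c->fd, (struct sockaddr *)&c->peer, sizeof(c->peer));
}

static int conn_reconnect(struct conn *c)
{
	conn_close(c);            /* mirrors ceph_con_close() before ceph_con_open() */
	return conn_open(c);
}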
ceph-don-t-add-dirty-inode-to-dirty-list-if-caps-is-in-migration.patch (40 additions, 0 deletions)

From 8677d84432bc48ae52d6fc07e4af459b8b6aaeb4 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng" <[email protected]>
Date: Mon, 19 Nov 2012 10:49:07 +0800
Subject: ceph: Don't add dirty inode to dirty list if caps is in migration

From: "Yan, Zheng" <[email protected]>

Add the dirty inode to the cap_dirty_migrating list instead; this
avoids ceph_flush_dirty_caps() entering an infinite loop.

Signed-off-by: Yan, Zheng <[email protected]>
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit 0685235ffd9dbdb9ccbda587f8a3c83ad1d5a921)
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
fs/ceph/caps.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)

--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1349,11 +1349,15 @@ int __ceph_mark_dirty_caps(struct ceph_i
if (!ci->i_head_snapc)
ci->i_head_snapc = ceph_get_snap_context(
ci->i_snap_realm->cached_context);
- dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
- ci->i_head_snapc);
+ dout(" inode %p now dirty snapc %p auth cap %p\n",
+ &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
- list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
+ if (ci->i_auth_cap)
+ list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
+ else
+ list_add(&ci->i_dirty_item,
+ &mdsc->cap_dirty_migrating);
spin_unlock(&mdsc->cap_dirty_lock);
if (ci->i_flushing_caps == 0) {
ihold(inode);
ceph-don-t-reference-req-after-put.patch (37 additions, 0 deletions)

From f54e923eff7ca2a1711023d39dcd40889f6407a4 Mon Sep 17 00:00:00 2001
From: Alex Elder <[email protected]>
Date: Thu, 29 Nov 2012 08:37:03 -0600
Subject: ceph: don't reference req after put

From: Alex Elder <[email protected]>

In __unregister_request(), there is a call to list_del_init()
referencing a request that was the subject of a call to
ceph_osdc_put_request() on the previous line. This is not
safe, because the request structure could have been freed
by the time we reach the list_del_init().

Fix this by reversing the order of these lines.

Signed-off-by: Alex Elder <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
(cherry picked from commit 7d5f24812bd182a2471cb69c1c2baf0648332e1f)
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
net/ceph/osd_client.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -871,9 +871,9 @@ static void __unregister_request(struct
req->r_osd = NULL;
}

+ list_del_init(&req->r_req_lru_item);
ceph_osdc_put_request(req);

- list_del_init(&req->r_req_lru_item);
if (osdc->num_requests == 0) {
dout(" no requests, canceling timeout\n");
__cancel_osd_timeout(osdc);
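Note: the rule this patch enforces is general for refcounted objects: finish every access, including list unlinking, before dropping the reference that may free the object. A minimal userspace sketch of that ordering follows; struct request, request_put() and unregister_request() are hypothetical stand-ins, not the libceph API.

/* Userspace sketch of the ordering rule (hypothetical names): unlink an
 * object from any list it is on before dropping the reference that may
 * free it; the reverse order risks a use-after-free. */
#include <stdlib.h>

struct node { struct node *prev, *next; };

struct request {
	struct node lru;          /* linkage on an LRU-style list */
	int refcount;
};

/* mirrors list_del_init() semantics: unlink and point the node at itself */
static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

static void request_put(struct request *req)
{
	if (--req->refcount == 0)
		free(req);        /* last reference: req is gone after this */
}

static void unregister_request(struct request *req)
{
	list_del_init(&req->lru); /* touch the object while it is still valid */
	request_put(req);         /* dereferencing req after this would be unsafe */
}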
ceph-don-t-update-i_max_size-when-handling-non-auth-cap.patch (29 additions, 0 deletions)

From 50c532cd7abb2054f5bb045244cd9a561b7e70ff Mon Sep 17 00:00:00 2001
From: "Yan, Zheng" <[email protected]>
Date: Mon, 19 Nov 2012 10:49:04 +0800
Subject: ceph: Don't update i_max_size when handling non-auth cap

From: "Yan, Zheng" <[email protected]>

The cap from non-auth mds doesn't have a meaningful max_size value.

Signed-off-by: Yan, Zheng <[email protected]>
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit 5e62ad30157d0da04cf40c6d1a2f4bc840948b9c)
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
fs/ceph/caps.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2388,7 +2388,7 @@ static void handle_cap_grant(struct inod
&atime);

/* max size increase? */
- if (max_size != ci->i_max_size) {
+ if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
ci->i_max_size = max_size;
if (max_size >= ci->i_wanted_max_size) {
ceph-fix-__ceph_do_pending_vmtruncate.patch (54 additions, 0 deletions)

From 379ad3e7100d3c1deebb150af4dc38b9f4e90006 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng" <[email protected]>
Date: Mon, 19 Nov 2012 10:49:08 +0800
Subject: ceph: Fix __ceph_do_pending_vmtruncate

From: "Yan, Zheng" <[email protected]>

We should set i_truncate_pending to 0 only after the page cache has
been truncated to i_truncate_size.

Signed-off-by: Yan, Zheng <[email protected]>
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit a85f50b6ef93fbbb2ae932ce9b2376509d172796)
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
fs/ceph/inode.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)

--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1466,7 +1466,7 @@ void __ceph_do_pending_vmtruncate(struct
{
struct ceph_inode_info *ci = ceph_inode(inode);
u64 to;
- int wrbuffer_refs, wake = 0;
+ int wrbuffer_refs, finish = 0;

retry:
spin_lock(&ci->i_ceph_lock);
@@ -1498,15 +1498,18 @@ retry:
truncate_inode_pages(inode->i_mapping, to);

spin_lock(&ci->i_ceph_lock);
- ci->i_truncate_pending--;
- if (ci->i_truncate_pending == 0)
- wake = 1;
+ if (to == ci->i_truncate_size) {
+ ci->i_truncate_pending = 0;
+ finish = 1;
+ }
spin_unlock(&ci->i_ceph_lock);
+ if (!finish)
+ goto retry;

if (wrbuffer_refs == 0)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
- if (wake)
- wake_up_all(&ci->i_cap_wq);
+
+ wake_up_all(&ci->i_cap_wq);
}
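Note: the reworked loop only clears the pending state when the page cache has been truncated to the size that is still current, and retries otherwise. The hypothetical userspace sketch below shows the same snapshot, work, re-check-under-lock pattern, with a pthread mutex standing in for i_ceph_lock; all names are illustrative.

/* Hypothetical userspace sketch of the retry pattern: snapshot the target
 * under the lock, do the slow work unlocked, then only mark the operation
 * finished if the target has not moved; otherwise go around again. */
#include <pthread.h>
#include <stdint.h>

struct trunc_state {
	pthread_mutex_t lock;     /* stands in for ci->i_ceph_lock */
	uint64_t truncate_size;   /* most recently requested size */
	int truncate_pending;     /* nonzero while a truncate is outstanding */
};

static void truncate_pagecache_to(uint64_t size) { (void)size; /* stand-in */ }

static void do_pending_truncate(struct trunc_state *s)
{
	uint64_t to;
	int finish;

retry:
	pthread_mutex_lock(&s->lock);
	to = s->truncate_size;             /* snapshot the current target */
	pthread_mutex_unlock(&s->lock);

	truncate_pagecache_to(to);         /* slow work, done without the lock */

	pthread_mutex_lock(&s->lock);
	finish = (to == s->truncate_size); /* did a newer truncate arrive meanwhile? */
	if (finish)
		s->truncate_pending = 0;   /* only now is the pending state cleared */
	pthread_mutex_unlock(&s->lock);

	if (!finish)
		goto retry;                /* redo the work against the new size */
}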
ceph-fix-infinite-loop-in-__wake_requests.patch (40 additions, 0 deletions)

From c9c3fd311561a922ebbd999f3ad00b5f907000c2 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng" <[email protected]>
Date: Mon, 19 Nov 2012 10:49:06 +0800
Subject: ceph: Fix infinite loop in __wake_requests

From: "Yan, Zheng" <[email protected]>

__wake_requests() will enter an infinite loop if we use it to wake
requests in the session->s_waiting list: __wake_requests() deletes
requests from the list while __do_request() adds requests back to
the list.

Signed-off-by: Yan, Zheng <[email protected]>
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit ed75ec2cd19b47efcd292b6e23f58e56f4c5bc34)
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
fs/ceph/mds_client.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)

--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1886,9 +1886,14 @@ finish:
static void __wake_requests(struct ceph_mds_client *mdsc,
struct list_head *head)
{
- struct ceph_mds_request *req, *nreq;
+ struct ceph_mds_request *req;
+ LIST_HEAD(tmp_list);

- list_for_each_entry_safe(req, nreq, head, r_wait) {
+ list_splice_init(head, &tmp_list);
+
+ while (!list_empty(&tmp_list)) {
+ req = list_entry(tmp_list.next,
+ struct ceph_mds_request, r_wait);
list_del_init(&req->r_wait);
__do_request(mdsc, req);
}
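Note: the fix works because the waiting list is first spliced onto a private list, so any request that __do_request() puts back on session->s_waiting is left for a later pass instead of being drained again immediately. Here is a self-contained userspace sketch of that splice-then-drain pattern; the tiny list implementation mirrors list_head/list_splice_init semantics but is written from scratch, and wake_all()/handle() are hypothetical names.

/* Self-contained userspace sketch of the splice-then-drain pattern
 * (hypothetical names; the helpers mirror list_head semantics). */
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

/* move every entry from 'from' onto the empty head 'to', emptying 'from' */
static void list_splice_init(struct list_head *from, struct list_head *to)
{
	if (!list_empty(from)) {
		to->next = from->next;
		from->next->prev = to;
		to->prev = from->prev;
		from->prev->next = to;
		from->next = from->prev = from;
	}
}

static void handle(struct list_head *entry) { (void)entry; /* may re-queue */ }

static void wake_all(struct list_head *head)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);

	list_splice_init(head, &tmp);  /* detach the current batch from 'head' */
	while (!list_empty(&tmp)) {
		struct list_head *e = tmp.next;

		list_del_init(e);
		handle(e);             /* even if this re-adds entries to 'head',
		                        * this pass only drains 'tmp' and ends */
	}
}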
ceph-propagate-layout-error-on-osd-request-creation.patch (106 additions, 0 deletions)

From 7dab35042aab340d087737d42c2fae34af0b5c78 Mon Sep 17 00:00:00 2001
From: Sage Weil <[email protected]>
Date: Mon, 24 Sep 2012 21:01:02 -0700
Subject: ceph: propagate layout error on osd request creation

From: Sage Weil <[email protected]>

If we are creating an osd request and get an invalid layout, return
an EINVAL to the caller. We switch up the return to have an error
code instead of NULL implying -ENOMEM.

Signed-off-by: Sage Weil <[email protected]>
Reviewed-by: Alex Elder <[email protected]>
(cherry picked from commit 6816282dab3a72efe8c0d182c1bc2960d87f4322)
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
fs/ceph/addr.c | 8 ++++----
fs/ceph/file.c | 4 ++--
net/ceph/osd_client.c | 15 +++++++++------
3 files changed, 15 insertions(+), 12 deletions(-)

--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -308,8 +308,8 @@ static int start_read(struct inode *inod
NULL, 0,
ci->i_truncate_seq, ci->i_truncate_size,
NULL, false, 1, 0);
- if (!req)
- return -ENOMEM;
+ if (IS_ERR(req))
+ return PTR_ERR(req);

/* build page vector */
nr_pages = len >> PAGE_CACHE_SHIFT;
@@ -831,8 +831,8 @@ get_more_pages:
ci->i_truncate_size,
&inode->i_mtime, true, 1, 0);

- if (!req) {
- rc = -ENOMEM;
+ if (IS_ERR(req)) {
+ rc = PTR_ERR(req);
unlock_page(page);
break;
}
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -529,8 +529,8 @@ more:
do_sync,
ci->i_truncate_seq, ci->i_truncate_size,
&mtime, false, 2, page_align);
- if (!req)
- return -ENOMEM;
+ if (IS_ERR(req))
+ return PTR_ERR(req);

if (file->f_flags & O_DIRECT) {
pages = ceph_get_direct_page_vector(data, num_pages, false);
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -461,6 +461,7 @@ struct ceph_osd_request *ceph_osdc_new_r
{
struct ceph_osd_req_op ops[3];
struct ceph_osd_request *req;
+ int r;

ops[0].op = opcode;
ops[0].extent.truncate_seq = truncate_seq;
@@ -479,10 +480,12 @@ struct ceph_osd_request *ceph_osdc_new_r
use_mempool,
GFP_NOFS, NULL, NULL);
if (!req)
- return NULL;
+ return ERR_PTR(-ENOMEM);

/* calculate max write size */
- calc_layout(osdc, vino, layout, off, plen, req, ops);
+ r = calc_layout(osdc, vino, layout, off, plen, req, ops);
+ if (r < 0)
+ return ERR_PTR(r);
req->r_file_layout = *layout; /* keep a copy */

/* in case it differs from natural (file) alignment that
@@ -1925,8 +1928,8 @@ int ceph_osdc_readpages(struct ceph_osd_
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, 0, truncate_seq, truncate_size, NULL,
false, 1, page_align);
- if (!req)
- return -ENOMEM;
+ if (IS_ERR(req))
+ return PTR_ERR(req);

/* it may be a short read due to an object boundary */
req->r_pages = pages;
@@ -1968,8 +1971,8 @@ int ceph_osdc_writepages(struct ceph_osd
snapc, do_sync,
truncate_seq, truncate_size, mtime,
nofail, 1, page_align);
- if (!req)
- return -ENOMEM;
+ if (IS_ERR(req))
+ return PTR_ERR(req);

/* it may be a short write due to an object boundary */
req->r_pages = pages;
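Note: this change moves ceph_osdc_new_request() from the "NULL means -ENOMEM" convention to the kernel's error-pointer convention (ERR_PTR()/IS_ERR()/PTR_ERR() from include/linux/err.h), so a layout failure can surface as -EINVAL rather than being misreported as an allocation failure. The sketch below re-creates that convention in userspace to show how callers distinguish the two cases; new_request(), calc_layout() and the stripe check are hypothetical stand-ins, not the libceph functions.

/* Userspace re-creation of the kernel's error-pointer idiom, for illustration
 * only; all request/layout names here are hypothetical. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct osd_request { int placeholder; };

/* stand-in for the layout validation that can now fail with -EINVAL */
static int calc_layout(unsigned int stripe_unit)
{
	return (stripe_unit && stripe_unit % 512 == 0) ? 0 : -EINVAL;
}

static struct osd_request *new_request(unsigned int stripe_unit)
{
	struct osd_request *req = calloc(1, sizeof(*req));
	int r;

	if (!req)
		return ERR_PTR(-ENOMEM);   /* previously the only reportable error */
	r = calc_layout(stripe_unit);
	if (r < 0) {
		free(req);
		return ERR_PTR(r);         /* propagate -EINVAL instead of NULL */
	}
	return req;
}

int main(void)
{
	struct osd_request *req = new_request(100); /* invalid stripe unit */

	if (IS_ERR(req)) {
		fprintf(stderr, "request failed: %ld\n", PTR_ERR(req));
		return 1;
	}
	free(req);
	return 0;
}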
