Skip to content

Commit 856e110

Browse files
committed
Start work on restore tests.
1 parent 3c82429 commit 856e110

File tree

3 files changed

+358
-2
lines changed

3 files changed

+358
-2
lines changed

tests/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import unittest
22

3-
from . import init_test, option_test, show_test, backup_test, delete_test
3+
from . import init_test, option_test, show_test, backup_test, delete_test, restore_test
44

55

66
def load_tests(loader, tests, pattern):
@@ -10,5 +10,6 @@ def load_tests(loader, tests, pattern):
1010
suite.addTests(loader.loadTestsFromModule(show_test))
1111
suite.addTests(loader.loadTestsFromModule(backup_test))
1212
suite.addTests(loader.loadTestsFromModule(delete_test))
13+
suite.addTests(loader.loadTestsFromModule(restore_test))
1314

1415
return suite

tests/pb_lib.py

Lines changed: 64 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,31 @@ def __init__(self, split_line):
3535
class ProbackupTest(object):
3636
def __init__(self, *args, **kwargs):
3737
super(ProbackupTest, self).__init__(*args, **kwargs)
38+
self.test_env = os.environ.copy()
39+
envs_list = [
40+
"LANGUAGE",
41+
"LC_ALL",
42+
"PGCONNECT_TIMEOUT",
43+
"PGDATA",
44+
"PGDATABASE",
45+
"PGHOSTADDR",
46+
"PGREQUIRESSL",
47+
"PGSERVICE",
48+
"PGSSLMODE",
49+
"PGUSER",
50+
"PGPORT",
51+
"PGHOST"
52+
]
53+
54+
for e in envs_list:
55+
try:
56+
del self.test_env[e]
57+
except:
58+
pass
59+
60+
self.test_env["LC_MESSAGES"] = "C"
61+
self.test_env["LC_TIME"] = "C"
62+
3863
self.dir_path = path.dirname(os.path.realpath(__file__))
3964
try:
4065
os.makedirs(path.join(self.dir_path, "tmp_dirs"))
@@ -73,7 +98,8 @@ def run_pb(self, command):
7398
try:
7499
return subprocess.check_output(
75100
[self.probackup_path] + command,
76-
stderr=subprocess.STDOUT
101+
stderr=subprocess.STDOUT,
102+
env=self.test_env
77103
)
78104
except subprocess.CalledProcessError as err:
79105
return err.output
@@ -102,6 +128,18 @@ def backup_pb(self, node, backup_type="full", options=[]):
102128
# print(cmd_list)
103129
return self.run_pb(cmd_list + options)
104130

131+
def restore_pb(self, node, id=None, options=None):
    """Run ``pg_probackup restore`` against *node*'s backup catalog.

    :param node: testgres node whose data_dir is the restore target.
    :param id: optional backup id (appended as a positional argument);
        the name shadows the ``id`` builtin but is kept for caller
        compatibility.
    :param options: extra command-line options appended after the
        built-in ones.
    :returns: the command's combined stdout/stderr (see run_pb).
    """
    cmd_list = [
        "-D", node.data_dir,
        "-B", self.backup_dir(node),
        "restore"
    ]
    if id:
        cmd_list.append(id)

    # `options or []` instead of a mutable default argument, which would
    # be shared across all calls of this method.
    # print(cmd_list)
    return self.run_pb(cmd_list + (options or []))
142+
105143
def show_pb(self, node, id=None, options=[], as_text=False):
106144
cmd_list = [
107145
"-B", self.backup_dir(node),
@@ -137,3 +175,28 @@ def delete_pb(self, node, id=None, options=[]):
137175

138176
# print(cmd_list)
139177
return self.run_pb(options + cmd_list)
178+
179+
def get_control_data(self, node):
    """Return pg_controldata output for *node* as a bytes->bytes dict.

    Runs the node's own ``pg_controldata`` binary against its data
    directory (with the sanitized test environment) and splits every
    output line on the first ``:`` into a key/value pair.
    """
    controldata_bin = node.get_bin_path("pg_controldata")
    raw_output = subprocess.check_output(
        [controldata_bin] + ["-D", node.data_dir],
        stderr=subprocess.STDOUT,
        env=self.test_env
    )
    # One "Key: value" pair per line; keys and values stay as bytes.
    return {
        key.strip(): value.strip()
        for key, value in (
            row.split(b":", 1) for row in raw_output.splitlines()
        )
    }
191+
192+
def get_recovery_conf(self, node):
    """Parse *node*'s recovery.conf into a dict of option -> value.

    Values are returned with surrounding single quotes stripped.
    Lines that are not ``key = value`` pairs (blank lines, comments)
    are skipped.
    """
    out_dict = {}
    with open(path.join(node.data_dir, "recovery.conf"), "r") as recovery_conf:
        for line in recovery_conf:
            try:
                # Split only on the FIRST "=" so that values which
                # themselves contain "=" (e.g. a restore_command with
                # --wal-file-path=%p) are kept instead of silently
                # dropped.
                key, value = line.split("=", 1)
            except ValueError:
                # Not a key = value line; ignore it.
                continue
            out_dict[key.strip()] = value.strip().strip("'")

    return out_dict

tests/restore_test.py

Lines changed: 292 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,292 @@
1+
import unittest
2+
from os import path
3+
import six
4+
from .pb_lib import ProbackupTest
5+
from testgres import stop_all
6+
import subprocess
7+
from datetime import datetime
8+
9+
10+
class RestoreTest(ProbackupTest, unittest.TestCase):
11+
12+
def __init__(self, *args, **kwargs):
    # Pure pass-through: all fixture setup (sanitized environment,
    # tmp dirs) happens in ProbackupTest.__init__.
    super(RestoreTest, self).__init__(*args, **kwargs)
14+
15+
@classmethod
def tearDownClass(cls):
    # Stop every PostgreSQL node testgres started during this class's
    # tests, so no server processes outlive the test run.
    stop_all()
18+
19+
def test_restore_to_latest_1(self):
    """recovery to latest from full backup"""
    node = self.make_bnode('restore_to_latest_1', base_dir="tmp_dirs/restore/restore_to_latest_1")
    node.start()
    self.assertEqual(self.init_pb(node), six.b(""))
    node.pgbench_init(scale=2)
    # Generate some WAL traffic before taking the backup.
    pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    pgbench.wait()
    pgbench.stdout.close()
    # Snapshot the table contents to compare against after restore.
    before = node.execute("postgres", "SELECT * FROM pgbench_branches")
    with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, options=["--verbose"]))

    # Immediate-mode stop: shut down without a final checkpoint so the
    # restore genuinely has to recover.
    node.pg_ctl("stop", {
        "-D": node.data_dir,
        "-w": None,
        "-m": "immediate"
    })

    with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
        restore_log.write(self.restore_pb(node, options=["-j", "4", "--verbose"]))

    node.pg_ctl("start", {
        "-D": node.data_dir,
        "-w": None,
        "-t": "600"
    })

    # Recovery to latest must reproduce the pre-stop data exactly.
    after = node.execute("postgres", "SELECT * FROM pgbench_branches")
    self.assertEqual(before, after)

    node.stop()
51+
52+
def test_restore_to_latest_2(self):
    """recovery to latest from full + page backups"""
    node = self.make_bnode('restore_to_latest_2', base_dir="tmp_dirs/restore/restore_to_latest_2")
    node.start()
    self.assertEqual(self.init_pb(node), six.b(""))
    node.pgbench_init(scale=2)

    # Full backup first ...
    with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, options=["--verbose"]))

    # ... then change data so the page backup has something to capture.
    pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    pgbench.wait()
    pgbench.stdout.close()

    with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))

    before = node.execute("postgres", "SELECT * FROM pgbench_branches")

    # Immediate-mode stop: no final checkpoint, forcing real recovery.
    node.pg_ctl("stop", {
        "-D": node.data_dir,
        "-w": None,
        "-m": "immediate"
    })

    with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
        restore_log.write(self.restore_pb(node, options=["-j", "4", "--verbose"]))

    node.pg_ctl("start", {
        "-D": node.data_dir,
        "-w": None,
        "-t": "600"
    })

    # Restoring full + page chain must reproduce the latest data.
    after = node.execute("postgres", "SELECT * FROM pgbench_branches")
    self.assertEqual(before, after)

    node.stop()
90+
91+
def test_restore_to_timeline_3(self):
    """recovery to target timeline"""
    node = self.make_bnode('restore_to_timeline_3', base_dir="tmp_dirs/restore/restore_to_timeline_3")
    node.start()
    self.assertEqual(self.init_pb(node), six.b(""))
    node.pgbench_init(scale=2)

    before = node.execute("postgres", "SELECT * FROM pgbench_branches")

    with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))

    # Remember the timeline of the first backup; pg_controldata keys are
    # bytes, hence six.b().
    target_tli = int(self.get_control_data(node)[six.b("Latest checkpoint's TimeLineID")])
    node.pg_ctl("stop", {
        "-D": node.data_dir,
        "-w": None,
        "-m": "immediate"
    })

    # First restore: recovery creates a NEW timeline, diverging from
    # target_tli.
    with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
        restore_log.write(self.restore_pb(node, options=["-j", "4", "--verbose"]))

    node.pg_ctl("start", {
        "-D": node.data_dir,
        "-w": None,
        "-t": "600"
    })

    # Mutate data on the new timeline and back it up, so restoring to
    # the OLD timeline is a real choice.
    pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    pgbench.wait()
    pgbench.stdout.close()

    with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))

    node.pg_ctl("stop", {
        "-D": node.data_dir,
        "-w": None,
        "-m": "immediate"
    })

    # Second restore explicitly targets the original timeline.
    with open(path.join(node.logs_dir, "restore_2.log"), "wb") as restore_log:
        restore_log.write(self.restore_pb(
            node,
            options=["-j", "4", "--verbose", "--timeline=%i" % target_tli]
        ))

    # The generated recovery.conf must carry the requested timeline.
    recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"]
    self.assertEqual(int(recovery_target_timeline), target_tli)

    node.pg_ctl("start", {
        "-D": node.data_dir,
        "-w": None,
        "-t": "600"
    })

    # Data must match the state captured BEFORE the timeline diverged.
    after = node.execute("postgres", "SELECT * FROM pgbench_branches")
    self.assertEqual(before, after)

    node.stop()
151+
152+
def test_restore_to_time_4(self):
    """recovery to target time"""
    node = self.make_bnode('restore_to_time_4', base_dir="tmp_dirs/restore/restore_to_time_4")
    node.start()
    self.assertEqual(self.init_pb(node), six.b(""))
    node.pgbench_init(scale=2)

    before = node.execute("postgres", "SELECT * FROM pgbench_branches")

    with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))

    # Capture the recovery target time AFTER the backup but BEFORE the
    # pgbench run, so restoring to it must discard the pgbench changes.
    target_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    pgbench.wait()
    pgbench.stdout.close()

    node.pg_ctl("stop", {
        "-D": node.data_dir,
        "-w": None,
        "-m": "immediate"
    })

    # NOTE(review): subprocess passes the list argv without a shell, so
    # the embedded double quotes become part of the --time value —
    # confirm pg_probackup strips/accepts them.
    with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
        restore_log.write(self.restore_pb(
            node,
            options=["-j", "4", "--verbose", '--time="%s"' % target_time]
        ))

    node.pg_ctl("start", {
        "-D": node.data_dir,
        "-w": None,
        "-t": "600"
    })

    # PITR to target_time must reproduce the pre-pgbench snapshot.
    after = node.execute("postgres", "SELECT * FROM pgbench_branches")
    self.assertEqual(before, after)

    node.stop()
191+
192+
def test_restore_to_xid_5(self):
    """recovery to target xid"""
    node = self.make_bnode('restore_to_xid_5', base_dir="tmp_dirs/restore/restore_to_xid_5")
    node.start()
    self.assertEqual(self.init_pb(node), six.b(""))
    node.pgbench_init(scale=2)
    with node.connect("postgres") as con:
        con.execute("CREATE TABLE tbl0005 (a text)")
        con.commit()

    with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))

    pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    pgbench.wait()
    pgbench.stdout.close()

    before = node.execute("postgres", "SELECT * FROM pgbench_branches")
    # The inserted row's xmin is the transaction id we will recover to.
    with node.connect("postgres") as con:
        res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
        con.commit()
        target_xid = res[0][0]

    # More changes AFTER the target xid, which PITR must discard.
    pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    pgbench.wait()
    pgbench.stdout.close()

    # Enforce segment to be archived to ensure that recovery goes up to the
    # wanted point. There is no way to ensure that all segments needed have
    # been archived up to the xmin point saved earlier without that.
    node.execute("postgres", "SELECT pg_switch_xlog()")

    node.pg_ctl("stop", {
        "-D": node.data_dir,
        "-w": None,
        "-m": "fast"
    })

    with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
        restore_log.write(self.restore_pb(
            node,
            options=["-j", "4", "--verbose", '--xid=%s' % target_xid]
        ))

    node.pg_ctl("start", {
        "-D": node.data_dir,
        "-w": None,
        "-t": "600"
    })

    # State at target_xid equals the snapshot taken just before it.
    after = node.execute("postgres", "SELECT * FROM pgbench_branches")
    self.assertEqual(before, after)

    node.stop()
246+
247+
def test_restore_full_ptrack_6(self):
    """recovery to latest from full + ptrack backups"""
    node = self.make_bnode('restore_full_ptrack_6', base_dir="tmp_dirs/restore/restore_full_ptrack_6")
    node.start()
    self.assertEqual(self.init_pb(node), six.b(""))
    node.pgbench_init(scale=2)
    # Skip on servers built without the ptrack patch (detected by the
    # presence of its pg_proc entry).
    is_ptrack = node.execute("postgres", "SELECT proname FROM pg_proc WHERE proname='pg_ptrack_clear'")
    if not is_ptrack:
        node.stop()
        self.skipTest("ptrack not supported")
        return

    node.append_conf("postgresql.conf", "ptrack_enable = on")
    node.restart()

    with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))

    # Changes between the full and the ptrack backup.
    pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    pgbench.wait()
    pgbench.stdout.close()

    with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
        backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"]))

    before = node.execute("postgres", "SELECT * FROM pgbench_branches")

    node.pg_ctl("stop", {
        "-D": node.data_dir,
        "-w": None,
        "-m": "immediate"
    })

    with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
        restore_log.write(self.restore_pb(node, options=["-j", "4", "--verbose"]))

    node.pg_ctl("start", {
        "-D": node.data_dir,
        "-w": None,
        "-t": "600"
    })

    # Restoring full + ptrack chain must reproduce the latest data.
    after = node.execute("postgres", "SELECT * FROM pgbench_branches")
    self.assertEqual(before, after)

    node.stop()

0 commit comments

Comments
 (0)
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy