
Commit 2b8da45

ptrack tests added
1 parent d474316 commit 2b8da45

21 files changed: +3053 additions, −1383 deletions

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -28,6 +28,7 @@
 /env
 /tests/__pycache__/
 /tests/tmp_dirs/
+/tests/*pyc

 # Extra files
 /datapagemap.c

tests/__init__.py

Lines changed: 25 additions & 12 deletions
@@ -1,19 +1,32 @@
 import unittest

 from . import init_test, option_test, show_test, \
-    backup_test, delete_test, restore_test, validate_test, \
-    retention_test
+    backup_test, delete_test, restore_test, validate_test, \
+    retention_test, ptrack_clean, ptrack_cluster, \
+    ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
+    ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
+    ptrack_vacuum_full, ptrack_vacuum_truncate
+


 def load_tests(loader, tests, pattern):
-    suite = unittest.TestSuite()
-    suite.addTests(loader.loadTestsFromModule(init_test))
-    suite.addTests(loader.loadTestsFromModule(option_test))
-    suite.addTests(loader.loadTestsFromModule(show_test))
-    suite.addTests(loader.loadTestsFromModule(backup_test))
-    suite.addTests(loader.loadTestsFromModule(delete_test))
-    suite.addTests(loader.loadTestsFromModule(restore_test))
-    suite.addTests(loader.loadTestsFromModule(validate_test))
-    suite.addTests(loader.loadTestsFromModule(retention_test))
+    suite = unittest.TestSuite()
+    suite.addTests(loader.loadTestsFromModule(init_test))
+    suite.addTests(loader.loadTestsFromModule(option_test))
+    suite.addTests(loader.loadTestsFromModule(show_test))
+    suite.addTests(loader.loadTestsFromModule(backup_test))
+    suite.addTests(loader.loadTestsFromModule(delete_test))
+    suite.addTests(loader.loadTestsFromModule(restore_test))
+    suite.addTests(loader.loadTestsFromModule(validate_test))
+    suite.addTests(loader.loadTestsFromModule(retention_test))
+    suite.addTests(loader.loadTestsFromModule(ptrack_clean))
+    suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
+    suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
+    suite.addTests(loader.loadTestsFromModule(ptrack_recovery))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_frozen))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_visibility))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_full))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_truncate))

-    return suite
+    return suite
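
With the ptrack modules wired into the package-level load_tests hook above, unittest can aggregate the whole suite through its standard load_tests protocol. A minimal sketch of driving it programmatically (assuming the repository root is the working directory so the tests package is importable; this runner is illustrative only and not part of the commit):

    # run_suite.py - illustrative sketch, not part of this commit
    import unittest
    import tests  # the package whose __init__.py defines load_tests

    loader = unittest.TestLoader()
    # loadTestsFromModule() invokes tests.load_tests(loader, ...) when it is defined
    suite = loader.loadTestsFromModule(tests)
    unittest.TextTestRunner(verbosity=2).run(suite)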

tests/backup_test.py

Lines changed: 161 additions & 145 deletions
@@ -1,153 +1,169 @@
 import unittest
 from os import path, listdir
 import six
-from .pb_lib import ProbackupTest
+from .ptrack_helpers import ProbackupTest, ProbackupException
 from testgres import stop_all


 class BackupTest(ProbackupTest, unittest.TestCase):

-    def __init__(self, *args, **kwargs):
-        super(BackupTest, self).__init__(*args, **kwargs)
-
-    @classmethod
-    def tearDownClass(cls):
-        stop_all()
-
-    def test_backup_modes_1(self):
-        """standart backup modes"""
-        node = self.make_bnode(base_dir="tmp_dirs/backup/backup_modes_1")
-        node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
-
-        # detect ptrack
-        is_ptrack = node.execute("postgres", "SELECT proname FROM pg_proc WHERE proname='pg_ptrack_clear'")
-        if len(is_ptrack):
-            node.append_conf("postgresql.conf", "ptrack_enable = on")
-            node.restart()
-
-        # full backup mode
-        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(node, options=["--verbose"]))
-
-        show_backup = self.show_pb(node)[0]
-        full_backup_id = show_backup.id
-        self.assertEqual(show_backup.status, six.b("OK"))
-        self.assertEqual(show_backup.mode, six.b("FULL"))
-
-        # postmaster.pid and postmaster.opts shouldn't be copied
-        excluded = True
-        backups_dir = path.join(self.backup_dir(node), "backups")
-        for backup in listdir(backups_dir):
-            db_dir = path.join(backups_dir, backup, "database")
-            for f in listdir(db_dir):
-                if path.isfile(path.join(db_dir, f)) and \
-                        (f == "postmaster.pid" or f == "postmaster.opts"):
-                    excluded = False
-        self.assertEqual(excluded, True)
-
-        # page backup mode
-        with open(path.join(node.logs_dir, "backup_page.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
-
-        show_backup = self.show_pb(node)[0]
-        self.assertEqual(show_backup.status, six.b("OK"))
-        self.assertEqual(show_backup.mode, six.b("PAGE"))
-
-        # Check parent backup
-        self.assertEqual(
-            full_backup_id,
-            self.show_pb(node, show_backup.id)[six.b("PARENT_BACKUP")].strip(six.b(" '"))
-        )
-
-        # ptrack backup mode
-        if len(is_ptrack):
-            with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
-                backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"]))
-
-            show_backup = self.show_pb(node)[0]
-            self.assertEqual(show_backup.status, six.b("OK"))
-            self.assertEqual(show_backup.mode, six.b("PTRACK"))
-
-        node.stop()
-
-    def test_smooth_checkpoint_2(self):
-        """full backup with smooth checkpoint"""
-        node = self.make_bnode(base_dir="tmp_dirs/backup/smooth_checkpoint_2")
-        node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
-
-        with open(path.join(node.logs_dir, "backup.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(node, options=["--verbose", "-C"]))
-
-        self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
-
-        node.stop()
-
-    def test_page_backup_without_full_3(self):
-        """page-level backup without validated full backup"""
-        node = self.make_bnode(base_dir="tmp_dirs/backup/without_full_3")
-        node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
-
-        with open(path.join(node.logs_dir, "backup.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
-
-        self.assertEqual(self.show_pb(node)[0].status, six.b("ERROR"))
-
-        node.stop()
-
-    def test_ptrack_threads_4(self):
-        """ptrack multi thread backup mode"""
-        node = self.make_bnode(
-            base_dir="tmp_dirs/backup/ptrack_threads_4",
-            options={"ptrack_enable": "on"}
-        )
-        node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
-
-        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose", "-j", "4"]))
-
-        self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
-
-        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "-j", "4"]))
-
-        self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
-
-        node.stop()
-
-    def test_ptrack_threads_stream_5(self):
-        """ptrack multi thread backup mode and stream"""
-        node = self.make_bnode(
-            base_dir="tmp_dirs/backup/ptrack_threads_stream_5",
-            options={
-                "ptrack_enable": "on",
-                "max_wal_senders": "5"
-            }
-        )
-        node.append_conf("pg_hba.conf", "local replication all trust")
-        node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
-        node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
-
-        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(
-                node,
-                backup_type="full",
-                options=["--verbose", "-j", "4", "--stream"]
-            ))
-
-        self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
-
-        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(
-                node,
-                backup_type="ptrack",
-                options=["--verbose", "-j", "4", "--stream"]
-            ))
-
-        self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
-
-        node.stop()
+    def __init__(self, *args, **kwargs):
+        super(BackupTest, self).__init__(*args, **kwargs)
+
+    # @classmethod
+    # def tearDownClass(cls):
+    #     stop_all()
+    # @unittest.skip("123")
+    def test_backup_modes_archive(self):
+        """standart backup modes with ARCHIVE WAL method"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
+            )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+
+        # full backup mode
+        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, options=["--verbose"]))
+
+        show_backup = self.show_pb(node)[0]
+        full_backup_id = show_backup['ID']
+        self.assertEqual(show_backup['Status'], six.b("OK"))
+        self.assertEqual(show_backup['Mode'], six.b("FULL"))
+
+        # postmaster.pid and postmaster.opts shouldn't be copied
+        excluded = True
+        backups_dir = path.join(self.backup_dir(node), "backups")
+        for backup in listdir(backups_dir):
+            db_dir = path.join(backups_dir, backup, "database")
+            for f in listdir(db_dir):
+                if path.isfile(path.join(db_dir, f)) and \
+                        (f == "postmaster.pid" or f == "postmaster.opts"):
+                    excluded = False
+        self.assertEqual(excluded, True)
+
+        # page backup mode
+        with open(path.join(node.logs_dir, "backup_page.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
+
+        print self.show_pb(node)
+        show_backup = self.show_pb(node)[1]
+        self.assertEqual(show_backup['Status'], six.b("OK"))
+        self.assertEqual(show_backup['Mode'], six.b("PAGE"))
+
+        # Check parent backup
+        self.assertEqual(
+            full_backup_id,
+            self.show_pb(node, id=show_backup['ID'])["parent-backup-id"])
+
+        # ptrack backup mode
+        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"]))
+
+        show_backup = self.show_pb(node)[2]
+        self.assertEqual(show_backup['Status'], six.b("OK"))
+        self.assertEqual(show_backup['Mode'], six.b("PTRACK"))
+
+        node.stop()
+
+    # @unittest.skip("123")
+    def test_smooth_checkpoint(self):
+        """full backup with smooth checkpoint"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+
+        with open(path.join(node.logs_dir, "backup.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, options=["--verbose", "-C"]))
+
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
+
+        node.stop()
+
+    # @unittest.skip("123")
+    def test_page_backup_without_full(self):
+        """page-level backup without validated full backup"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+
+        try:
+            self.backup_pb(node, backup_type="page", options=["--verbose"])
+        except ProbackupException, e:
+            pass
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("ERROR"))
+
+        node.stop()
+
+    # @unittest.skip("123")
+    def test_ptrack_threads(self):
+        """ptrack multi thread backup mode"""
+        node = self.make_bnode(
+            base_dir="tmp_dirs/backup/ptrack_threads_4",
+            options={"ptrack_enable": "on", 'max_wal_senders': '2'}
+        )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+
+        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose", "-j", "4"]))
+
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
+
+        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "-j", "4"]))
+
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
+
+        node.stop()
+
+    # @unittest.skip("123")
+    def test_ptrack_threads_stream(self):
+        """ptrack multi thread backup mode and stream"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
+            )
+        # node.append_conf("pg_hba.conf", "local replication all trust")
+        # node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+
+        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(
+                node,
+                backup_type="full",
+                options=["--verbose", "-j", "4", "--stream"]
+            ))
+
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
+
+        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(
+                node,
+                backup_type="ptrack",
+                options=["--verbose", "-j", "4", "--stream"]
+            ))
+
+        self.assertEqual(self.show_pb(node)[1]['Status'], six.b("OK"))
+
+        node.stop()
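
The renamed tests keep the ProbackupTest fixture (make_simple_node, init_pb, backup_pb and show_pb are helpers from this repository's tests/ptrack_helpers.py, not stock unittest). A single case can still be selected with the standard loader; a minimal sketch, assuming it is run from the repository root with pg_probackup and PostgreSQL binaries available as these tests require (illustrative only, not part of this commit):

    # select_one.py - illustrative sketch, not part of this commit
    import unittest

    # loadTestsFromName() resolves a dotted path to a module, class, or test method
    suite = unittest.defaultTestLoader.loadTestsFromName(
        "tests.backup_test.BackupTest.test_backup_modes_archive")
    unittest.TextTestRunner(verbosity=2).run(suite)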
