diff --git a/tests/conftest.py b/tests/conftest.py index 9e8ea368..ff3b3cb4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,6 +8,7 @@ import pathlib import math import datetime +import typing import _pytest.outcomes import _pytest.unittest @@ -105,71 +106,169 @@ class TEST_PROCESS_STATS: cXFailedTests: int = 0 cSkippedTests: int = 0 cNotXFailedTests: int = 0 + cWarningTests: int = 0 cUnexpectedTests: int = 0 cAchtungTests: int = 0 - FailedTests = list[str]() - XFailedTests = list[str]() + FailedTests = list[str, int]() + XFailedTests = list[str, int]() NotXFailedTests = list[str]() + WarningTests = list[str, int]() AchtungTests = list[str]() + cTotalDuration: datetime.timedelta = datetime.timedelta() + + cTotalErrors: int = 0 + cTotalWarnings: int = 0 + # -------------------------------------------------------------------- def incrementTotalTestCount() -> None: + assert type(__class__.cTotalTests) == int # noqa: E721 + assert __class__.cTotalTests >= 0 + __class__.cTotalTests += 1 + assert __class__.cTotalTests > 0 + # -------------------------------------------------------------------- def incrementNotExecutedTestCount() -> None: + assert type(__class__.cNotExecutedTests) == int # noqa: E721 + assert __class__.cNotExecutedTests >= 0 + __class__.cNotExecutedTests += 1 + assert __class__.cNotExecutedTests > 0 + # -------------------------------------------------------------------- def incrementExecutedTestCount() -> int: + assert type(__class__.cExecutedTests) == int # noqa: E721 + assert __class__.cExecutedTests >= 0 + __class__.cExecutedTests += 1 + + assert __class__.cExecutedTests > 0 return __class__.cExecutedTests # -------------------------------------------------------------------- def incrementPassedTestCount() -> None: + assert type(__class__.cPassedTests) == int # noqa: E721 + assert __class__.cPassedTests >= 0 + __class__.cPassedTests += 1 + assert __class__.cPassedTests > 0 + # 
-------------------------------------------------------------------- - def incrementFailedTestCount(testID: str) -> None: + def incrementFailedTestCount(testID: str, errCount: int) -> None: assert type(testID) == str # noqa: E721 + assert type(errCount) == int # noqa: E721 + assert errCount > 0 assert type(__class__.FailedTests) == list # noqa: E721 + assert type(__class__.cFailedTests) == int # noqa: E721 + assert __class__.cFailedTests >= 0 - __class__.FailedTests.append(testID) # raise? + __class__.FailedTests.append((testID, errCount)) # raise? __class__.cFailedTests += 1 + assert len(__class__.FailedTests) > 0 + assert __class__.cFailedTests > 0 + assert len(__class__.FailedTests) == __class__.cFailedTests + + # -------- + assert type(__class__.cTotalErrors) == int # noqa: E721 + assert __class__.cTotalErrors >= 0 + + __class__.cTotalErrors += errCount + + assert __class__.cTotalErrors > 0 + # -------------------------------------------------------------------- - def incrementXFailedTestCount(testID: str) -> None: + def incrementXFailedTestCount(testID: str, errCount: int) -> None: assert type(testID) == str # noqa: E721 + assert type(errCount) == int # noqa: E721 + assert errCount >= 0 assert type(__class__.XFailedTests) == list # noqa: E721 + assert type(__class__.cXFailedTests) == int # noqa: E721 + assert __class__.cXFailedTests >= 0 - __class__.XFailedTests.append(testID) # raise? + __class__.XFailedTests.append((testID, errCount)) # raise? 
__class__.cXFailedTests += 1 + assert len(__class__.XFailedTests) > 0 + assert __class__.cXFailedTests > 0 + assert len(__class__.XFailedTests) == __class__.cXFailedTests + # -------------------------------------------------------------------- def incrementSkippedTestCount() -> None: + assert type(__class__.cSkippedTests) == int # noqa: E721 + assert __class__.cSkippedTests >= 0 + __class__.cSkippedTests += 1 + assert __class__.cSkippedTests > 0 + # -------------------------------------------------------------------- def incrementNotXFailedTests(testID: str) -> None: assert type(testID) == str # noqa: E721 assert type(__class__.NotXFailedTests) == list # noqa: E721 + assert type(__class__.cNotXFailedTests) == int # noqa: E721 + assert __class__.cNotXFailedTests >= 0 __class__.NotXFailedTests.append(testID) # raise? __class__.cNotXFailedTests += 1 + assert len(__class__.NotXFailedTests) > 0 + assert __class__.cNotXFailedTests > 0 + assert len(__class__.NotXFailedTests) == __class__.cNotXFailedTests + + # -------------------------------------------------------------------- + def incrementWarningTestCount(testID: str, warningCount: int) -> None: + assert type(testID) == str # noqa: E721 + assert type(warningCount) == int # noqa: E721 + assert testID != "" + assert warningCount > 0 + assert type(__class__.WarningTests) == list # noqa: E721 + assert type(__class__.cWarningTests) == int # noqa: E721 + assert __class__.cWarningTests >= 0 + + __class__.WarningTests.append((testID, warningCount)) # raise? 
+ __class__.cWarningTests += 1 + + assert len(__class__.WarningTests) > 0 + assert __class__.cWarningTests > 0 + assert len(__class__.WarningTests) == __class__.cWarningTests + + # -------- + assert type(__class__.cTotalWarnings) == int # noqa: E721 + assert __class__.cTotalWarnings >= 0 + + __class__.cTotalWarnings += warningCount + + assert __class__.cTotalWarnings > 0 + # -------------------------------------------------------------------- def incrementUnexpectedTests() -> None: + assert type(__class__.cUnexpectedTests) == int # noqa: E721 + assert __class__.cUnexpectedTests >= 0 + __class__.cUnexpectedTests += 1 + assert __class__.cUnexpectedTests > 0 + # -------------------------------------------------------------------- def incrementAchtungTestCount(testID: str) -> None: assert type(testID) == str # noqa: E721 assert type(__class__.AchtungTests) == list # noqa: E721 + assert type(__class__.cAchtungTests) == int # noqa: E721 + assert __class__.cAchtungTests >= 0 __class__.AchtungTests.append(testID) # raise? 
__class__.cAchtungTests += 1 + assert len(__class__.AchtungTests) > 0 + assert __class__.cAchtungTests > 0 + assert len(__class__.AchtungTests) == __class__.cAchtungTests + # ///////////////////////////////////////////////////////////////////////////// @@ -212,6 +311,12 @@ def helper__build_test_id(item: pytest.Function) -> str: return testID + +# ///////////////////////////////////////////////////////////////////////////// + +g_error_msg_count_key = pytest.StashKey[int]() +g_warning_msg_count_key = pytest.StashKey[int]() + # ///////////////////////////////////////////////////////////////////////////// @@ -285,6 +390,16 @@ def helper__makereport__call( assert type(call) == pytest.CallInfo # noqa: E721 assert type(outcome) == pluggy.Result # noqa: E721 + # -------- + item_error_msg_count = item.stash.get(g_error_msg_count_key, 0) + assert type(item_error_msg_count) == int # noqa: E721 + assert item_error_msg_count >= 0 + + item_warning_msg_count = item.stash.get(g_warning_msg_count_key, 0) + assert type(item_warning_msg_count) == int # noqa: E721 + assert item_warning_msg_count >= 0 + + # -------- rep = outcome.get_result() assert rep is not None assert type(rep) == pytest.TestReport # noqa: E721 @@ -312,30 +427,35 @@ def helper__makereport__call( if type(call.excinfo.value) == _pytest.outcomes.Skipped: # noqa: E721 assert not hasattr(rep, "wasxfail") - TEST_PROCESS_STATS.incrementSkippedTestCount() - exitStatus = "SKIPPED" reasonText = str(call.excinfo.value) reasonMsgTempl = "SKIP REASON: {0}" - elif type(call.excinfo.value) == _pytest.outcomes.XFailed: # noqa: E721 - TEST_PROCESS_STATS.incrementXFailedTestCount(testID) + TEST_PROCESS_STATS.incrementSkippedTestCount() + elif type(call.excinfo.value) == _pytest.outcomes.XFailed: # noqa: E721 exitStatus = "XFAILED" reasonText = str(call.excinfo.value) reasonMsgTempl = "XFAIL REASON: {0}" + + TEST_PROCESS_STATS.incrementXFailedTestCount(testID, item_error_msg_count) + else: exitStatus = "XFAILED" assert hasattr(rep, 
"wasxfail") assert rep.wasxfail is not None assert type(rep.wasxfail) == str # noqa: E721 - TEST_PROCESS_STATS.incrementXFailedTestCount(testID) - reasonText = rep.wasxfail reasonMsgTempl = "XFAIL REASON: {0}" - logging.error(call.excinfo.value) + if type(call.excinfo.value) == SIGNAL_EXCEPTION: # noqa: E721 + pass + else: + logging.error(call.excinfo.value) + item_error_msg_count += 1 + + TEST_PROCESS_STATS.incrementXFailedTestCount(testID, item_error_msg_count) assert type(reasonText) == str # noqa: E721 @@ -348,9 +468,16 @@ def helper__makereport__call( assert call.excinfo is not None assert call.excinfo.value is not None - TEST_PROCESS_STATS.incrementFailedTestCount(testID) + if type(call.excinfo.value) == SIGNAL_EXCEPTION: # noqa: E721 + assert item_error_msg_count > 0 + pass + else: + logging.error(call.excinfo.value) + item_error_msg_count += 1 + + assert item_error_msg_count > 0 + TEST_PROCESS_STATS.incrementFailedTestCount(testID, item_error_msg_count) - logging.error(call.excinfo.value) exitStatus = "FAILED" elif rep.outcome == "passed": assert call.excinfo is None @@ -360,12 +487,12 @@ def helper__makereport__call( TEST_PROCESS_STATS.incrementNotXFailedTests(testID) - warnMsg = "Test is marked as xfail" + warnMsg = "NOTE: Test is marked as xfail" if rep.wasxfail != "": warnMsg += " [" + rep.wasxfail + "]" - logging.warning(warnMsg) + logging.info(warnMsg) exitStatus = "NOT XFAILED" else: assert not hasattr(rep, "wasxfail") @@ -378,11 +505,25 @@ def helper__makereport__call( # [2025-03-28] It may create a useless problem in new environment. 
# assert False + # -------- + if item_warning_msg_count > 0: + TEST_PROCESS_STATS.incrementWarningTestCount(testID, item_warning_msg_count) + + # -------- + assert type(TEST_PROCESS_STATS.cTotalDuration) == datetime.timedelta # noqa: E721 + assert type(testDurration) == datetime.timedelta # noqa: E721 + + TEST_PROCESS_STATS.cTotalDuration += testDurration + + assert testDurration <= TEST_PROCESS_STATS.cTotalDuration + # -------- logging.info("*") - logging.info("* DURATION : {0}".format(timedelta_to_human_text(testDurration))) + logging.info("* DURATION : {0}".format(timedelta_to_human_text(testDurration))) logging.info("*") - logging.info("* EXIT STATUS : {0}".format(exitStatus)) + logging.info("* EXIT STATUS : {0}".format(exitStatus)) + logging.info("* ERROR COUNT : {0}".format(item_error_msg_count)) + logging.info("* WARNING COUNT: {0}".format(item_warning_msg_count)) logging.info("*") logging.info("* STOP TEST {0}".format(testID)) logging.info("*") @@ -437,6 +578,186 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): # ///////////////////////////////////////////////////////////////////////////// +class LogErrorWrapper2: + _old_method: any + _counter: typing.Optional[int] + + # -------------------------------------------------------------------- + def __init__(self): + self._old_method = None + self._counter = None + + # -------------------------------------------------------------------- + def __enter__(self): + assert self._old_method is None + assert self._counter is None + + self._old_method = logging.error + self._counter = 0 + + logging.error = self + return self + + # -------------------------------------------------------------------- + def __exit__(self, exc_type, exc_val, exc_tb): + assert self._old_method is not None + assert self._counter is not None + + assert logging.error is self + + logging.error = self._old_method + + self._old_method = None + self._counter = None + return False + + # 
-------------------------------------------------------------------- + def __call__(self, *args, **kwargs): + assert self._old_method is not None + assert self._counter is not None + + assert type(self._counter) == int # noqa: E721 + assert self._counter >= 0 + + r = self._old_method(*args, **kwargs) + + self._counter += 1 + assert self._counter > 0 + + return r + + +# ///////////////////////////////////////////////////////////////////////////// + + +class LogWarningWrapper2: + _old_method: any + _counter: typing.Optional[int] + + # -------------------------------------------------------------------- + def __init__(self): + self._old_method = None + self._counter = None + + # -------------------------------------------------------------------- + def __enter__(self): + assert self._old_method is None + assert self._counter is None + + self._old_method = logging.warning + self._counter = 0 + + logging.warning = self + return self + + # -------------------------------------------------------------------- + def __exit__(self, exc_type, exc_val, exc_tb): + assert self._old_method is not None + assert self._counter is not None + + assert logging.warning is self + + logging.warning = self._old_method + + self._old_method = None + self._counter = None + return False + + # -------------------------------------------------------------------- + def __call__(self, *args, **kwargs): + assert self._old_method is not None + assert self._counter is not None + + assert type(self._counter) == int # noqa: E721 + assert self._counter >= 0 + + r = self._old_method(*args, **kwargs) + + self._counter += 1 + assert self._counter > 0 + + return r + + +# ///////////////////////////////////////////////////////////////////////////// + + +class SIGNAL_EXCEPTION(Exception): + def __init__(self): + pass + + +# ///////////////////////////////////////////////////////////////////////////// + + +@pytest.hookimpl(hookwrapper=True) +def pytest_pyfunc_call(pyfuncitem: pytest.Function): + assert 
pyfuncitem is not None + assert isinstance(pyfuncitem, pytest.Function) + + debug__log_error_method = logging.error + assert debug__log_error_method is not None + + debug__log_warning_method = logging.warning + assert debug__log_warning_method is not None + + pyfuncitem.stash[g_error_msg_count_key] = 0 + pyfuncitem.stash[g_warning_msg_count_key] = 0 + + try: + with LogErrorWrapper2() as logErrorWrapper, LogWarningWrapper2() as logWarningWrapper: + assert type(logErrorWrapper) == LogErrorWrapper2 # noqa: E721 + assert logErrorWrapper._old_method is not None + assert type(logErrorWrapper._counter) == int # noqa: E721 + assert logErrorWrapper._counter == 0 + assert logging.error is logErrorWrapper + + assert type(logWarningWrapper) == LogWarningWrapper2 # noqa: E721 + assert logWarningWrapper._old_method is not None + assert type(logWarningWrapper._counter) == int # noqa: E721 + assert logWarningWrapper._counter == 0 + assert logging.warning is logWarningWrapper + + r: pluggy.Result = yield + + assert r is not None + assert type(r) == pluggy.Result # noqa: E721 + + assert logErrorWrapper._old_method is not None + assert type(logErrorWrapper._counter) == int # noqa: E721 + assert logErrorWrapper._counter >= 0 + assert logging.error is logErrorWrapper + + assert logWarningWrapper._old_method is not None + assert type(logWarningWrapper._counter) == int # noqa: E721 + assert logWarningWrapper._counter >= 0 + assert logging.warning is logWarningWrapper + + assert g_error_msg_count_key in pyfuncitem.stash + assert g_warning_msg_count_key in pyfuncitem.stash + + assert pyfuncitem.stash[g_error_msg_count_key] == 0 + assert pyfuncitem.stash[g_warning_msg_count_key] == 0 + + pyfuncitem.stash[g_error_msg_count_key] = logErrorWrapper._counter + pyfuncitem.stash[g_warning_msg_count_key] = logWarningWrapper._counter + + if r.exception is not None: + pass + elif logErrorWrapper._counter == 0: + pass + else: + assert logErrorWrapper._counter > 0 + 
r.force_exception(SIGNAL_EXCEPTION()) + finally: + assert logging.error is debug__log_error_method + assert logging.warning is debug__log_warning_method + pass + + +# ///////////////////////////////////////////////////////////////////////////// + + def helper__calc_W(n: int) -> int: assert n > 0 @@ -467,11 +788,42 @@ def helper__print_test_list(tests: list[str]) -> None: nTest = 0 - while nTest < len(tests): - testID = tests[nTest] - assert type(testID) == str # noqa: E721 + for t in tests: + assert type(t) == str # noqa: E721 + assert t != "" nTest += 1 - logging.info(templateLine.format(nTest, testID)) + logging.info(templateLine.format(nTest, t)) + + +# ------------------------------------------------------------------------ +def helper__print_test_list2(tests: list[str, int]) -> None: + assert type(tests) == list # noqa: E721 + + assert helper__calc_W(9) == 1 + assert helper__calc_W(10) == 2 + assert helper__calc_W(11) == 2 + assert helper__calc_W(99) == 2 + assert helper__calc_W(100) == 3 + assert helper__calc_W(101) == 3 + assert helper__calc_W(999) == 3 + assert helper__calc_W(1000) == 4 + assert helper__calc_W(1001) == 4 + + W = helper__calc_W(len(tests)) + + templateLine = "{0:0" + str(W) + "d}. 
{1} ({2})" + + nTest = 0 + + for t in tests: + assert type(t) == tuple # noqa: E721 + assert len(t) == 2 + assert type(t[0]) == str # noqa: E721 + assert type(t[1]) == int # noqa: E721 + assert t[0] != "" + assert t[1] >= 0 + nTest += 1 + logging.info(templateLine.format(nTest, t[0], t[1])) # ///////////////////////////////////////////////////////////////////////////// @@ -505,6 +857,22 @@ def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): helper__print_test_list(test_list) logging.info("") + def LOCAL__print_test_list2( + header: str, test_count: int, test_list: list[str, int] + ): + assert type(header) == str # noqa: E721 + assert type(test_count) == int # noqa: E721 + assert type(test_list) == list # noqa: E721 + assert header != "" + assert test_count >= 0 + assert len(test_list) == test_count + + LOCAL__print_line1_with_header(header) + logging.info("") + if len(test_list) > 0: + helper__print_test_list2(test_list) + logging.info("") + # fmt: off LOCAL__print_test_list( "ACHTUNG TESTS", @@ -512,13 +880,13 @@ def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): TEST_PROCESS_STATS.AchtungTests, ) - LOCAL__print_test_list( + LOCAL__print_test_list2( "FAILED TESTS", TEST_PROCESS_STATS.cFailedTests, TEST_PROCESS_STATS.FailedTests ) - LOCAL__print_test_list( + LOCAL__print_test_list2( "XFAILED TESTS", TEST_PROCESS_STATS.cXFailedTests, TEST_PROCESS_STATS.XFailedTests, @@ -529,22 +897,46 @@ def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): TEST_PROCESS_STATS.cNotXFailedTests, TEST_PROCESS_STATS.NotXFailedTests, ) + + LOCAL__print_test_list2( + "WARNING TESTS", + TEST_PROCESS_STATS.cWarningTests, + TEST_PROCESS_STATS.WarningTests, + ) # fmt: on LOCAL__print_line1_with_header("SUMMARY STATISTICS") logging.info("") logging.info("[TESTS]") - logging.info(" TOTAL : {0}".format(TEST_PROCESS_STATS.cTotalTests)) - logging.info(" EXECUTED : {0}".format(TEST_PROCESS_STATS.cExecutedTests)) - 
logging.info(" NOT EXECUTED: {0}".format(TEST_PROCESS_STATS.cNotExecutedTests)) - logging.info(" ACHTUNG : {0}".format(TEST_PROCESS_STATS.cAchtungTests)) + logging.info(" TOTAL : {0}".format(TEST_PROCESS_STATS.cTotalTests)) + logging.info(" EXECUTED : {0}".format(TEST_PROCESS_STATS.cExecutedTests)) + logging.info(" NOT EXECUTED : {0}".format(TEST_PROCESS_STATS.cNotExecutedTests)) + logging.info(" ACHTUNG : {0}".format(TEST_PROCESS_STATS.cAchtungTests)) + logging.info("") + logging.info(" PASSED : {0}".format(TEST_PROCESS_STATS.cPassedTests)) + logging.info(" FAILED : {0}".format(TEST_PROCESS_STATS.cFailedTests)) + logging.info(" XFAILED : {0}".format(TEST_PROCESS_STATS.cXFailedTests)) + logging.info(" NOT XFAILED : {0}".format(TEST_PROCESS_STATS.cNotXFailedTests)) + logging.info(" SKIPPED : {0}".format(TEST_PROCESS_STATS.cSkippedTests)) + logging.info(" WITH WARNINGS: {0}".format(TEST_PROCESS_STATS.cWarningTests)) + logging.info(" UNEXPECTED : {0}".format(TEST_PROCESS_STATS.cUnexpectedTests)) + logging.info("") + + assert type(TEST_PROCESS_STATS.cTotalDuration) == datetime.timedelta # noqa: E721 + + LOCAL__print_line1_with_header("TIME") + logging.info("") + logging.info( + " TOTAL DURATION: {0}".format( + timedelta_to_human_text(TEST_PROCESS_STATS.cTotalDuration) + ) + ) + logging.info("") + + LOCAL__print_line1_with_header("TOTAL INFORMATION") logging.info("") - logging.info(" PASSED : {0}".format(TEST_PROCESS_STATS.cPassedTests)) - logging.info(" FAILED : {0}".format(TEST_PROCESS_STATS.cFailedTests)) - logging.info(" XFAILED : {0}".format(TEST_PROCESS_STATS.cXFailedTests)) - logging.info(" NOT XFAILED : {0}".format(TEST_PROCESS_STATS.cNotXFailedTests)) - logging.info(" SKIPPED : {0}".format(TEST_PROCESS_STATS.cSkippedTests)) - logging.info(" UNEXPECTED : {0}".format(TEST_PROCESS_STATS.cUnexpectedTests)) + logging.info(" TOTAL ERROR COUNT : {0}".format(TEST_PROCESS_STATS.cTotalErrors)) + logging.info(" TOTAL WARNING COUNT: 
{0}".format(TEST_PROCESS_STATS.cTotalWarnings)) logging.info("") diff --git a/tests/test_conftest.py--devel b/tests/test_conftest.py--devel new file mode 100644 index 00000000..67c1dafe --- /dev/null +++ b/tests/test_conftest.py--devel @@ -0,0 +1,80 @@ +import pytest +import logging + + +class TestConfest: + def test_failed(self): + raise Exception("TEST EXCEPTION!") + + def test_ok(self): + pass + + @pytest.mark.skip() + def test_mark_skip__no_reason(self): + pass + + @pytest.mark.xfail() + def test_mark_xfail__no_reason(self): + raise Exception("XFAIL EXCEPTION") + + @pytest.mark.xfail() + def test_mark_xfail__no_reason___no_error(self): + pass + + @pytest.mark.skip(reason="reason") + def test_mark_skip__with_reason(self): + pass + + @pytest.mark.xfail(reason="reason") + def test_mark_xfail__with_reason(self): + raise Exception("XFAIL EXCEPTION") + + @pytest.mark.xfail(reason="reason") + def test_mark_xfail__with_reason___no_error(self): + pass + + def test_exc_skip__no_reason(self): + pytest.skip() + + def test_exc_xfail__no_reason(self): + pytest.xfail() + + def test_exc_skip__with_reason(self): + pytest.skip(reason="SKIP REASON") + + def test_exc_xfail__with_reason(self): + pytest.xfail(reason="XFAIL EXCEPTION") + + def test_log_error(self): + logging.error("IT IS A LOG ERROR!") + + def test_log_error_and_exc(self): + logging.error("IT IS A LOG ERROR!") + + raise Exception("TEST EXCEPTION!") + + def test_log_error_and_warning(self): + logging.error("IT IS A LOG ERROR!") + logging.warning("IT IS A LOG WARNING!") + logging.error("IT IS THE SECOND LOG ERROR!") + logging.warning("IT IS THE SECOND LOG WARNING!") + + @pytest.mark.xfail() + def test_log_error_and_xfail_mark_without_reason(self): + logging.error("IT IS A LOG ERROR!") + + @pytest.mark.xfail(reason="It is a reason message") + def test_log_error_and_xfail_mark_with_reason(self): + logging.error("IT IS A LOG ERROR!") + + @pytest.mark.xfail() + def test_two_log_error_and_xfail_mark_without_reason(self): + 
logging.error("IT IS THE FIRST LOG ERROR!") + logging.info("----------") + logging.error("IT IS THE SECOND LOG ERROR!") + + @pytest.mark.xfail(reason="It is a reason message") + def test_two_log_error_and_xfail_mark_with_reason(self): + logging.error("IT IS THE FIRST LOG ERROR!") + logging.info("----------") + logging.error("IT IS THE SECOND LOG ERROR!")