diff --git a/acmc/phen.py b/acmc/phen.py
index 14dc58348d5116658aa2eb908b411524ef01185b..49eefe1fded351b4e7982c84e7f9a56a70d8bae0 100644
--- a/acmc/phen.py
+++ b/acmc/phen.py
@@ -232,7 +232,9 @@ def validate(phen_dir):
     logger.info(f"Validating phenotype: {phen_dir}")
     phen_path = Path(phen_dir)
     if not phen_path.is_dir():
-        raise NotADirectoryError(f"Error: '{str(phen_path.resolve())}' is not a directory")
+        raise NotADirectoryError(
+            f"Error: '{str(phen_path.resolve())}' is not a directory"
+        )
 
     config_path = phen_path / CONFIG_FILE
     if not config_path.is_file():
@@ -676,8 +678,8 @@ def map(phen_dir, target_code_type):
     if len(code_errors) > 0:
         logger.error(f"The map processing has {len(code_errors)} errors")
         error_path = phen_path / MAP_DIR / "errors"
-        error_path.mkdir(parents=True, exist_ok=True)
-        error_filename = f"{target_code_type}-code-errors.csv"
+        error_path.mkdir(parents=True, exist_ok=True)
+        error_filename = f"{target_code_type}-code-errors.csv"
         write_code_errors(code_errors, error_path / error_filename)
 
     # Check there is output from processing
@@ -944,11 +946,11 @@ def diff(phen_dir, phen_old_dir):
         new_output = new_map_path / file
 
         logger.debug(f"Old ouptput: {str(old_output.resolve())}")
-        logger.debug(f"New ouptput: {str(new_output.resolve())}")
+        logger.debug(f"New ouptput: {str(new_output.resolve())}")
 
         df1 = pd.read_csv(old_output)
         df1 = df1[["CONCEPT", "CONCEPT_SET"]].groupby("CONCEPT_SET").count()
-        df2 = pd.read_csv(new_output)
+        df2 = pd.read_csv(new_output)
         df2 = df2[["CONCEPT", "CONCEPT_SET"]].groupby("CONCEPT_SET").count()
 
         # Check for added and removed concepts
diff --git a/tests/test_acmc.py b/tests/test_acmc.py
index a89f0e6e5c68757e56c60aaac1f3609f9bcb28f9..3548a1d14362295d5a41d88e6181051a2c52ea06 100644
--- a/tests/test_acmc.py
+++ b/tests/test_acmc.py
@@ -46,14 +46,16 @@ def test_phen_init_local_specified(tmp_dir, monkeypatch, caplog):
 
 # TODO: This test will need to be refactored so that the expected outputs match the config files
 # right now it just tests that it runs successfully and does not check the contents of the output
-@pytest.mark.parametrize("config_file", [
-    ("config1.yaml"), # config.yaml test case
-    ("config2.yaml"), # config.yaml test case
-
-])
+@pytest.mark.parametrize(
+    "config_file",
+    [
+        ("config1.yaml"),  # config.yaml test case
+        ("config2.yaml"),  # config.yaml test case
+    ],
+)
 def test_phen_workflow(tmp_dir, monkeypatch, caplog, config_file):
     print(f"Temporary directory: {tmp_dir}")  # Prints path for debugging
-    
+
     with caplog.at_level(logging.DEBUG):
         phen_path = tmp_dir / "phen"
         phen_path = phen_path.resolve()
@@ -79,7 +81,7 @@ def test_phen_workflow(tmp_dir, monkeypatch, caplog, config_file):
             shutil.copy(source, destination)
 
         # copy the test file to configuration
-        shutil.copy(phen_path / config_file, phen_path / "config.yaml")
+        shutil.copy(phen_path / config_file, phen_path / "config.yaml")
 
         monkeypatch.setattr(
             sys, "argv", ["main.py", "phen", "validate", "-d", str(phen_path.resolve())]