tests: add fixture for making test project directory

Sarah Hoffmann
2021-11-30 18:01:46 +01:00
parent 37afa2180b
commit c8958a22d2
9 changed files with 237 additions and 241 deletions

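The change collapses the per-module test_config fixtures shown below into one shared project_env fixture. The conftest.py hunk that defines it is among the nine changed files but is not reproduced on this page; judging from how the tests consume it, the fixture plausibly looks like the following sketch (an assumption, not the committed code):

    import pytest

    @pytest.fixture
    def project_env(def_config, tmp_path):
        # Assumed shape: point the default test configuration at a real,
        # writable project directory inside pytest's per-test tmp_path.
        project_dir = tmp_path / 'project'
        project_dir.mkdir()
        def_config.project_dir = project_dir
        return def_config
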
View File

@@ -8,68 +8,68 @@ from nominatim.tokenizer import factory
 from nominatim.errors import UsageError
 from dummy_tokenizer import DummyTokenizer


-@pytest.fixture
-def test_config(def_config, tmp_path, property_table, tokenizer_mock):
-    def_config.project_dir = tmp_path
-    return def_config
-
-
-def test_setup_dummy_tokenizer(temp_db_conn, test_config):
-    tokenizer = factory.create_tokenizer(test_config)
-
-    assert isinstance(tokenizer, DummyTokenizer)
-    assert tokenizer.init_state == "new"
-    assert (test_config.project_dir / 'tokenizer').is_dir()
-
-    assert properties.get_property(temp_db_conn, 'tokenizer') == 'dummy'
-
-
-def test_setup_tokenizer_dir_exists(test_config):
-    (test_config.project_dir / 'tokenizer').mkdir()
-
-    tokenizer = factory.create_tokenizer(test_config)
-
-    assert isinstance(tokenizer, DummyTokenizer)
-    assert tokenizer.init_state == "new"
-
-
-def test_setup_tokenizer_dir_failure(test_config):
-    (test_config.project_dir / 'tokenizer').write_text("foo")
-
-    with pytest.raises(UsageError):
-        factory.create_tokenizer(test_config)
-
-
-def test_setup_bad_tokenizer_name(def_config, tmp_path, monkeypatch):
-    def_config.project_dir = tmp_path
+def test_setup_bad_tokenizer_name(project_env, monkeypatch):
     monkeypatch.setenv('NOMINATIM_TOKENIZER', 'dummy')

     with pytest.raises(UsageError):
-        factory.create_tokenizer(def_config)
-
-
-def test_load_tokenizer(test_config):
-    factory.create_tokenizer(test_config)
-
-    tokenizer = factory.get_tokenizer_for_db(test_config)
-
-    assert isinstance(tokenizer, DummyTokenizer)
-    assert tokenizer.init_state == "loaded"
-
-
-def test_load_no_tokenizer_dir(test_config):
-    factory.create_tokenizer(test_config)
-
-    test_config.project_dir = test_config.project_dir / 'foo'
-
-    with pytest.raises(UsageError):
-        factory.get_tokenizer_for_db(test_config)
-
-
-def test_load_missing_propoerty(temp_db_cursor, test_config):
-    factory.create_tokenizer(test_config)
-
-    temp_db_cursor.execute("TRUNCATE TABLE nominatim_properties")
-
-    with pytest.raises(UsageError):
-        factory.get_tokenizer_for_db(test_config)
+        factory.create_tokenizer(project_env)
+
+
+class TestFactory:
+
+    @pytest.fixture(autouse=True)
+    def init_env(self, project_env, property_table, tokenizer_mock):
+        self.config = project_env
+
+
+    def test_setup_dummy_tokenizer(self, temp_db_conn):
+        tokenizer = factory.create_tokenizer(self.config)
+
+        assert isinstance(tokenizer, DummyTokenizer)
+        assert tokenizer.init_state == "new"
+        assert (self.config.project_dir / 'tokenizer').is_dir()
+
+        assert properties.get_property(temp_db_conn, 'tokenizer') == 'dummy'
+
+
+    def test_setup_tokenizer_dir_exists(self):
+        (self.config.project_dir / 'tokenizer').mkdir()
+
+        tokenizer = factory.create_tokenizer(self.config)
+
+        assert isinstance(tokenizer, DummyTokenizer)
+        assert tokenizer.init_state == "new"
+
+
+    def test_setup_tokenizer_dir_failure(self):
+        (self.config.project_dir / 'tokenizer').write_text("foo")
+
+        with pytest.raises(UsageError):
+            factory.create_tokenizer(self.config)
+
+
+    def test_load_tokenizer(self):
+        factory.create_tokenizer(self.config)
+
+        tokenizer = factory.get_tokenizer_for_db(self.config)
+
+        assert isinstance(tokenizer, DummyTokenizer)
+        assert tokenizer.init_state == "loaded"
+
+
+    def test_load_no_tokenizer_dir(self):
+        factory.create_tokenizer(self.config)
+
+        self.config.project_dir = self.config.project_dir / 'foo'
+
+        with pytest.raises(UsageError):
+            factory.get_tokenizer_for_db(self.config)
+
+
+    def test_load_missing_property(self, temp_db_cursor):
+        factory.create_tokenizer(self.config)
+
+        temp_db_cursor.execute("TRUNCATE TABLE nominatim_properties")
+
+        with pytest.raises(UsageError):
+            factory.get_tokenizer_for_db(self.config)

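The conversion above uses a standard pytest idiom: an autouse fixture on a test class runs before every test method and stores shared state on self, so the individual tests no longer repeat fixture arguments in their signatures. A minimal, self-contained illustration of the idiom (names are illustrative, not from the commit):

    import pytest

    class TestWithProjectDir:

        @pytest.fixture(autouse=True)
        def init_env(self, tmp_path):
            # Runs automatically before each test method of this class.
            self.project_dir = tmp_path

        def test_starts_empty(self):
            # tmp_path is a fresh, empty directory for every test.
            assert list(self.project_dir.iterdir()) == []
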
View File

@@ -20,20 +20,17 @@ def word_table(temp_db_conn):

 @pytest.fixture
-def test_config(def_config, tmp_path):
-    def_config.project_dir = tmp_path / 'project'
-    def_config.project_dir.mkdir()
-
+def test_config(project_env, tmp_path):
     sqldir = tmp_path / 'sql'
     sqldir.mkdir()
     (sqldir / 'tokenizer').mkdir()
     (sqldir / 'tokenizer' / 'icu_tokenizer.sql').write_text("SELECT 'a'")
-    shutil.copy(str(def_config.lib_dir.sql / 'tokenizer' / 'icu_tokenizer_tables.sql'),
+    shutil.copy(str(project_env.lib_dir.sql / 'tokenizer' / 'icu_tokenizer_tables.sql'),
                 str(sqldir / 'tokenizer' / 'icu_tokenizer_tables.sql'))

-    def_config.lib_dir.sql = sqldir
+    project_env.lib_dir.sql = sqldir

-    return def_config
+    return project_env


 @pytest.fixture

View File

@@ -11,18 +11,20 @@ from nominatim.errors import UsageError

 from icu import Transliterator


-@pytest.fixture
-def test_config(def_config, tmp_path):
-    project_dir = tmp_path / 'project_dir'
-    project_dir.mkdir()
-    def_config.project_dir = project_dir
-
-    return def_config
-
-
-@pytest.fixture
-def cfgrules(test_config):
-    def _create_config(*variants, **kwargs):
-        content = dedent("""\
+CONFIG_SECTIONS = ('normalization', 'transliteration', 'token-analysis')
+
+
+class TestIcuRuleLoader:
+
+    @pytest.fixture(autouse=True)
+    def init_env(self, project_env):
+        self.project_env = project_env
+
+
+    def write_config(self, content):
+        (self.project_env.project_dir / 'icu_tokenizer.yaml').write_text(dedent(content))
+
+
+    def config_rules(self, *variants):
+        content = dedent("""\
         normalization:
             - ":: NFD ()"
@@ -33,122 +35,116 @@ def cfgrules(test_config):
         transliteration:
             - ":: Latin ()"
             - "[[:Punctuation:][:Space:]]+ > ' '"
-        """)
-        content += "token-analysis:\n  - analyzer: generic\n    variants:\n      - words:\n"
-        content += '\n'.join(("          - " + s for s in variants)) + '\n'
-        for k, v in kwargs:
-            content += "        {}: {}\n".format(k, v)
-        (test_config.project_dir / 'icu_tokenizer.yaml').write_text(content)
-
-        return test_config
-
-    return _create_config
-
-
-def test_empty_rule_set(test_config):
-    (test_config.project_dir / 'icu_tokenizer.yaml').write_text(dedent("""\
-        normalization:
-        transliteration:
-        token-analysis:
-            - analyzer: generic
-              variants:
-        """))
-    rules = ICURuleLoader(test_config)
-
-    assert rules.get_search_rules() == ''
-    assert rules.get_normalization_rules() == ''
-    assert rules.get_transliteration_rules() == ''
-
-
-CONFIG_SECTIONS = ('normalization', 'transliteration', 'token-analysis')
-
-
-@pytest.mark.parametrize("section", CONFIG_SECTIONS)
-def test_missing_section(section, test_config):
-    rule_cfg = { s: [] for s in CONFIG_SECTIONS if s != section}
-    (test_config.project_dir / 'icu_tokenizer.yaml').write_text(yaml.dump(rule_cfg))
-
-    with pytest.raises(UsageError):
-        ICURuleLoader(test_config)
-
-
-def test_get_search_rules(cfgrules):
-    loader = ICURuleLoader(cfgrules())
-    rules = loader.get_search_rules()
-
-    trans = Transliterator.createFromRules("test", rules)
-
-    assert trans.transliterate(" Baum straße ") == " baum straße "
-    assert trans.transliterate(" Baumstraße ") == " baumstraße "
-    assert trans.transliterate(" Baumstrasse ") == " baumstrasse "
-    assert trans.transliterate(" Baumstr ") == " baumstr "
-    assert trans.transliterate(" Baumwegstr ") == " baumwegstr "
-    assert trans.transliterate(" Αθήνα ") == " athēna "
-    assert trans.transliterate(" проспект ") == " prospekt "
-
-
-def test_get_normalization_rules(cfgrules):
-    loader = ICURuleLoader(cfgrules())
-    rules = loader.get_normalization_rules()
-    trans = Transliterator.createFromRules("test", rules)
-
-    assert trans.transliterate(" проспект-Prospekt ") == " проспект prospekt "
-
-
-def test_get_transliteration_rules(cfgrules):
-    loader = ICURuleLoader(cfgrules())
-    rules = loader.get_transliteration_rules()
-    trans = Transliterator.createFromRules("test", rules)
-
-    assert trans.transliterate(" проспект-Prospekt ") == " prospekt Prospekt "
-
-
-def test_transliteration_rules_from_file(test_config):
-    cfgpath = test_config.project_dir / ('icu_tokenizer.yaml')
-    cfgpath.write_text(dedent("""\
-        normalization:
-        transliteration:
-            - "'ax' > 'b'"
-            - !include transliteration.yaml
-        token-analysis:
-            - analyzer: generic
-              variants:
-        """))
-    transpath = test_config.project_dir / ('transliteration.yaml')
-    transpath.write_text('- "x > y"')
-
-    loader = ICURuleLoader(test_config)
-    rules = loader.get_transliteration_rules()
-    trans = Transliterator.createFromRules("test", rules)
-
-    assert trans.transliterate(" axxt ") == " byt "
-
-
-def test_search_rules(cfgrules):
-    config = cfgrules('~street => s,st', 'master => mstr')
-    proc = ICURuleLoader(config).make_token_analysis()
-
-    assert proc.search.transliterate('Master Street').strip() == 'master street'
-    assert proc.search.transliterate('Earnes St').strip() == 'earnes st'
-    assert proc.search.transliterate('Nostreet').strip() == 'nostreet'
-
-
-class TestGetReplacements:
-
-    @pytest.fixture(autouse=True)
-    def setup_cfg(self, cfgrules):
-        self.cfgrules = cfgrules
+        token-analysis:
+            - analyzer: generic
+              variants:
+                  - words:
+        """)
+        content += '\n'.join(("             - " + s for s in variants)) + '\n'
+        self.write_config(content)


     def get_replacements(self, *variants):
-        loader = ICURuleLoader(self.cfgrules(*variants))
+        self.config_rules(*variants)
+        loader = ICURuleLoader(self.project_env)
         rules = loader.analysis[None].config['replacements']

         return sorted((k, sorted(v)) for k,v in rules)
+
+
+    def test_empty_rule_set(self):
+        self.write_config("""\
+            normalization:
+            transliteration:
+            token-analysis:
+                - analyzer: generic
+                  variants:
+            """)
+        rules = ICURuleLoader(self.project_env)
+
+        assert rules.get_search_rules() == ''
+        assert rules.get_normalization_rules() == ''
+        assert rules.get_transliteration_rules() == ''
+
+
+    @pytest.mark.parametrize("section", CONFIG_SECTIONS)
+    def test_missing_section(self, section):
+        rule_cfg = { s: [] for s in CONFIG_SECTIONS if s != section}
+        self.write_config(yaml.dump(rule_cfg))
+
+        with pytest.raises(UsageError):
+            ICURuleLoader(self.project_env)
+
+
+    def test_get_search_rules(self):
+        self.config_rules()
+        loader = ICURuleLoader(self.project_env)
+        rules = loader.get_search_rules()
+
+        trans = Transliterator.createFromRules("test", rules)
+
+        assert trans.transliterate(" Baum straße ") == " baum straße "
+        assert trans.transliterate(" Baumstraße ") == " baumstraße "
+        assert trans.transliterate(" Baumstrasse ") == " baumstrasse "
+        assert trans.transliterate(" Baumstr ") == " baumstr "
+        assert trans.transliterate(" Baumwegstr ") == " baumwegstr "
+        assert trans.transliterate(" Αθήνα ") == " athēna "
+        assert trans.transliterate(" проспект ") == " prospekt "
+
+
+    def test_get_normalization_rules(self):
+        self.config_rules()
+        loader = ICURuleLoader(self.project_env)
+        rules = loader.get_normalization_rules()
+        trans = Transliterator.createFromRules("test", rules)
+
+        assert trans.transliterate(" проспект-Prospekt ") == " проспект prospekt "
+
+
+    def test_get_transliteration_rules(self):
+        self.config_rules()
+        loader = ICURuleLoader(self.project_env)
+        rules = loader.get_transliteration_rules()
+        trans = Transliterator.createFromRules("test", rules)
+
+        assert trans.transliterate(" проспект-Prospekt ") == " prospekt Prospekt "
+
+
+    def test_transliteration_rules_from_file(self):
+        self.write_config("""\
+            normalization:
+            transliteration:
+                - "'ax' > 'b'"
+                - !include transliteration.yaml
+            token-analysis:
+                - analyzer: generic
+                  variants:
+            """)
+        transpath = self.project_env.project_dir / ('transliteration.yaml')
+        transpath.write_text('- "x > y"')
+
+        loader = ICURuleLoader(self.project_env)
+        rules = loader.get_transliteration_rules()
+        trans = Transliterator.createFromRules("test", rules)
+
+        assert trans.transliterate(" axxt ") == " byt "
+
+
+    def test_search_rules(self):
+        self.config_rules('~street => s,st', 'master => mstr')
+        proc = ICURuleLoader(self.project_env).make_token_analysis()
+
+        assert proc.search.transliterate('Master Street').strip() == 'master street'
+        assert proc.search.transliterate('Earnes St').strip() == 'earnes st'
+        assert proc.search.transliterate('Nostreet').strip() == 'nostreet'


     @pytest.mark.parametrize("variant", ['foo > bar', 'foo -> bar -> bar',
                                          '~foo~ -> bar', 'fo~ o -> bar'])
     def test_invalid_variant_description(self, variant):
+        self.config_rules(variant)
         with pytest.raises(UsageError):
-            ICURuleLoader(self.cfgrules(variant))
+            ICURuleLoader(self.project_env)

     def test_add_full(self):
         repl = self.get_replacements("foo -> bar")

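To make the new config_rules helper concrete: after dedent strips the common indentation, a call such as self.config_rules('~street => s,st') writes an icu_tokenizer.yaml whose token-analysis section ends up roughly as below (a reconstruction derived from the helper body above; the fixed normalization and transliteration rules precede it in the file):

    # Reconstruction of the token-analysis part that
    # config_rules('~street => s,st') writes (assumed, derived from the diff):
    token_analysis = (
        "token-analysis:\n"
        "    - analyzer: generic\n"
        "      variants:\n"
        "          - words:\n"
        "             - ~street => s,st\n"
    )
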
View File

@@ -11,28 +11,25 @@ from nominatim.db import properties
 from nominatim.errors import UsageError

 @pytest.fixture
-def test_config(def_config, tmp_path):
-    def_config.project_dir = tmp_path / 'project'
-    def_config.project_dir.mkdir()
-
+def test_config(project_env, tmp_path):
     module_dir = tmp_path / 'module_src'
     module_dir.mkdir()
     (module_dir / 'nominatim.so').write_text('TEST nomiantim.so')

-    def_config.lib_dir.module = module_dir
+    project_env.lib_dir.module = module_dir

     sqldir = tmp_path / 'sql'
     sqldir.mkdir()
     (sqldir / 'tokenizer').mkdir()
     (sqldir / 'tokenizer' / 'legacy_tokenizer.sql').write_text("SELECT 'a'")
     (sqldir / 'words.sql').write_text("SELECT 'a'")
-    shutil.copy(str(def_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_tables.sql'),
+    shutil.copy(str(project_env.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_tables.sql'),
                 str(sqldir / 'tokenizer' / 'legacy_tokenizer_tables.sql'))

-    def_config.lib_dir.sql = sqldir
-    def_config.lib_dir.data = sqldir
+    project_env.lib_dir.sql = sqldir
+    project_env.lib_dir.data = sqldir

-    return def_config
+    return project_env


 @pytest.fixture
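
As with the ICU variant, the legacy-tokenizer test_config now builds on project_env instead of creating its own project directory, adding only the stub module and SQL directories that the legacy tokenizer needs. A hypothetical test using the fixture would find the stubs in place:

    # Illustrative only, not part of the commit. Shows what the fixture
    # above guarantees to tests that request it.
    def test_stub_dirs_in_place(test_config):
        assert (test_config.lib_dir.module / 'nominatim.so').exists()
        assert (test_config.lib_dir.sql / 'words.sql').read_text() == "SELECT 'a'"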