more unit tests for tokenizers
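In brief: both word-table mocks gain an add_full_word() helper, the ICU and legacy tokenizer tests gain coverage for finalize_import(), check_database() and update_statistics(), and the legacy place-processing tests move into TestPlaceNames/TestPlaceAddress classes that verify real tokens instead of the stubbed make_keywords fixture.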
@@ -17,6 +17,14 @@ class MockIcuWordTable:
         conn.commit()

+    def add_full_word(self, word_id, word, word_token=None):
+        with self.conn.cursor() as cur:
+            cur.execute("""INSERT INTO word (word_id, word_token, type, word, info)
+                           VALUES(%s, %s, 'W', %s, '{}'::jsonb)""",
+                        (word_id, word_token or word, word))
+        self.conn.commit()
+
+
     def add_special(self, word_token, word, cls, typ, oper):
         with self.conn.cursor() as cur:
             cur.execute("""INSERT INTO word (word_token, type, word, info)
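The new helper seeds full-word entries (type 'W') directly, so statistics tests can run against a hand-built word table instead of a full import. A minimal usage sketch; word_table is the fixture wrapping MockIcuWordTable, and the one-argument form matches how test_update_statistics calls it later in this diff (the second call is hypothetical):

    word_table.add_full_word(1000, 'hello')                 # word_token defaults to the word
    word_table.add_full_word(1001, 'new york', 'newyork')   # explicit word_token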
@@ -20,6 +20,14 @@ class MockLegacyWordTable:
         conn.commit()

+    def add_full_word(self, word_id, word, word_token=None):
+        with self.conn.cursor() as cur:
+            cur.execute("""INSERT INTO word (word_id, word_token, word)
+                           VALUES (%s, %s, %s)
+                        """, (word_id, ' ' + (word_token or word), word))
+        self.conn.commit()
+
+
     def add_special(self, word_token, word, cls, typ, oper):
         with self.conn.cursor() as cur:
             cur.execute("""INSERT INTO word (word_token, word, class, type, operator)
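Note the legacy convention baked into the INSERT: full-word tokens are stored with a leading blank (' ' + ...), which is exactly what the legacy statistics test later matches with word_token like ' %'. For example:

    word_table.add_full_word(1000, 'hello')   # stored with word_token = ' hello'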
@@ -1,5 +1,5 @@
 """
-Tests for Legacy ICU tokenizer.
+Tests for ICU tokenizer.
 """
 import shutil
 import yaml
@@ -141,12 +141,6 @@ LANGUAGE plpgsql;
                              """)


-@pytest.fixture
-def getorcreate_hnr_id(temp_db_cursor):
-    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_hnr_id(lookup_term TEXT)
-                              RETURNS INTEGER AS $$
-                                SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")
-

 def test_init_new(tokenizer_factory, test_config, db_prop):
     tok = tokenizer_factory()
@@ -194,6 +188,47 @@ def test_update_sql_functions(db_prop, temp_db_cursor,
     assert test_content == set((('1133', ), ))


+def test_finalize_import(tokenizer_factory, temp_db_conn,
+                         temp_db_cursor, test_config, sql_preprocessor_cfg):
+    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_indices.sql'
+    func_file.write_text("""CREATE FUNCTION test() RETURNS TEXT
+                            AS $$ SELECT 'b' $$ LANGUAGE SQL""")
+
+    tok = tokenizer_factory()
+    tok.init_new_db(test_config)
+
+    tok.finalize_import(test_config)
+
+    assert temp_db_cursor.scalar('SELECT test()') == 'b'
+
+
+def test_check_database(test_config, tokenizer_factory,
+                        temp_db_cursor, sql_preprocessor_cfg):
+    tok = tokenizer_factory()
+    tok.init_new_db(test_config)
+
+    assert tok.check_database(test_config) is None
+
+
+def test_update_statistics_reverse_only(word_table, tokenizer_factory):
+    tok = tokenizer_factory()
+    tok.update_statistics()
+
+
+def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_factory):
+    word_table.add_full_word(1000, 'hello')
+    table_factory('search_name',
+                  'place_id BIGINT, name_vector INT[]',
+                  [(12, [1000])])
+    tok = tokenizer_factory()
+
+    tok.update_statistics()
+
+    assert temp_db_cursor.scalar("""SELECT count(*) FROM word
+                                    WHERE type = 'W' and
+                                          (info->>'count')::int > 0""") > 0
+
+
 def test_normalize_postcode(analyzer):
     with analyzer() as anl:
         assert anl.normalize_postcode('123') == '123'
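The fixture data ties full word 1000 to a single search_name row, so update_statistics() must leave a positive count in that word's JSON info column. A sketch of the specific fact the broader count(*) assertion implies (illustrative only, not part of the test):

    assert temp_db_cursor.scalar(
        """SELECT (info->>'count')::int FROM word
           WHERE word_id = 1000""") >= 1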
@@ -364,6 +399,13 @@ class TestPlaceAddress:
         yield anl


+    @pytest.fixture
+    def getorcreate_hnr_id(self, temp_db_cursor):
+        temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_hnr_id(lookup_term TEXT)
+                                  RETURNS INTEGER AS $$
+                                    SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")
+
+
     def process_address(self, **kwargs):
         return self.analyzer.process_place(PlaceInfo({'address': kwargs}))
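Because the stub returns -nextval(), each test sees predictable negative token ids: the first housenumber term created gets -1, the next -2, and so on. That is what allows literal assertions like the following (mirroring the housenumber tests added to the legacy file at the end of this diff), e.g. inside TestPlaceAddress:

    info = self.process_address(housenumber='134', streetnumber='99A')
    assert info['hnr_tokens'] == "{-1,-2}"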
@@ -2,6 +2,7 @@
 Test for legacy tokenizer.
 """
 import shutil
+import re

 import pytest
@@ -10,6 +11,14 @@ from nominatim.tokenizer import legacy_tokenizer
 from nominatim.db import properties
 from nominatim.errors import UsageError

+from mock_legacy_word_table import MockLegacyWordTable
+
+# Force use of legacy word table
+@pytest.fixture
+def word_table(temp_db_conn):
+    return MockLegacyWordTable(temp_db_conn)
+
+
 @pytest.fixture
 def test_config(project_env, tmp_path):
     module_dir = tmp_path / 'module_src'
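pytest resolves fixtures by the nearest definition, so this module-level word_table shadows the project-wide fixture from conftest.py and pins every test in the file to the legacy table layout. The same pattern works for any fixture; a hypothetical illustration:

    # conftest.py
    @pytest.fixture
    def db_layout():
        return 'icu'

    # test_tokenizer_legacy.py -- the nearer definition wins for this module
    @pytest.fixture
    def db_layout():
        return 'legacy'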
@@ -21,8 +30,23 @@ def test_config(project_env, tmp_path):
     sqldir = tmp_path / 'sql'
     sqldir.mkdir()
     (sqldir / 'tokenizer').mkdir()
-    (sqldir / 'tokenizer' / 'legacy_tokenizer.sql').write_text("SELECT 'a'")
+
+    # Get the original SQL but replace make_standard_name to avoid module use.
+    init_sql = (project_env.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer.sql').read_text()
+    for fn in ('transliteration', 'gettokenstring'):
+        init_sql = re.sub(f'CREATE OR REPLACE FUNCTION {fn}[^;]*;',
+                          '', init_sql, flags=re.DOTALL)
+    init_sql += """
+        CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
+        RETURNS TEXT AS $$ SELECT lower(name); $$ LANGUAGE SQL;
+
+    """
+    # Also load util functions. Some are needed by the tokenizer.
+    init_sql += (project_env.lib_dir.sql / 'functions' / 'utils.sql').read_text()
+    (sqldir / 'tokenizer' / 'legacy_tokenizer.sql').write_text(init_sql)

     (sqldir / 'words.sql').write_text("SELECT 'a'")

     shutil.copy(str(project_env.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_tables.sql'),
                 str(sqldir / 'tokenizer' / 'legacy_tokenizer_tables.sql'))
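The rewritten fixture keeps the real tokenizer SQL but strips the two functions that require the compiled PostgreSQL module and swaps in a pure-SQL make_standard_name, so the suite runs without the module library. With the stub installed, normalization degenerates to lowercasing, which a test could probe directly (illustrative use of the scalar() helper seen elsewhere in this diff):

    assert temp_db_cursor.scalar("SELECT make_standard_name('Grand Road')") == 'grand road'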
@@ -52,12 +76,6 @@ def tokenizer_setup(tokenizer_factory, test_config, monkeypatch, sql_preprocessor):
 @pytest.fixture
 def analyzer(tokenizer_factory, test_config, monkeypatch, sql_preprocessor,
              word_table, temp_db_with_extensions, tmp_path):
-    sql = tmp_path / 'sql' / 'tokenizer' / 'legacy_tokenizer.sql'
-    sql.write_text("""
-        CREATE OR REPLACE FUNCTION getorcreate_housenumber_id(lookup_word TEXT)
-        RETURNS INTEGER AS $$ SELECT 342; $$ LANGUAGE SQL;
-        """)
-
     monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
     monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
     tok = tokenizer_factory()
@@ -84,12 +102,6 @@ def create_postcode_id(temp_db_cursor):
                              $$ LANGUAGE SQL""")


-@pytest.fixture
-def make_keywords(temp_db_cursor, temp_db_with_extensions):
-    temp_db_cursor.execute(
-        """CREATE OR REPLACE FUNCTION make_keywords(names HSTORE)
-           RETURNS INTEGER[] AS $$ SELECT ARRAY[1, 2, 3] $$ LANGUAGE SQL""")
-
 def test_init_new(tokenizer_factory, test_config, monkeypatch,
                   temp_db_conn, sql_preprocessor):
     monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', 'xxvv')
@@ -160,6 +172,23 @@ def test_update_sql_functions(sql_preprocessor, temp_db_conn,
     assert test_content == set((('1133', ), (str(test_config.project_dir / 'module'), )))


+def test_finalize_import(tokenizer_factory, temp_db_conn,
+                         temp_db_cursor, test_config, monkeypatch,
+                         sql_preprocessor_cfg):
+    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
+
+    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_indices.sql'
+    func_file.write_text("""CREATE FUNCTION test() RETURNS TEXT
+                            AS $$ SELECT 'b' $$ LANGUAGE SQL""")
+
+    tok = tokenizer_factory()
+    tok.init_new_db(test_config)
+
+    tok.finalize_import(test_config)
+
+    assert temp_db_cursor.scalar('SELECT test()') == 'b'
+
+
 def test_migrate_database(tokenizer_factory, test_config, temp_db_conn, monkeypatch):
     monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
     tok = tokenizer_factory()
@@ -175,6 +204,53 @@ def test_migrate_database(tokenizer_factory, test_config, temp_db_conn, monkeypatch):
     assert outfile.stat().st_mode == 33261


+def test_check_database(test_config, tokenizer_factory, monkeypatch,
+                        temp_db_cursor, sql_preprocessor_cfg):
+    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
+    tok = tokenizer_factory()
+    tok.init_new_db(test_config)
+
+    assert tok.check_database(False) is None
+
+
+def test_check_database_no_tokenizer(test_config, tokenizer_factory):
+    tok = tokenizer_factory()
+
+    assert tok.check_database(False) is not None
+
+
+def test_check_database_bad_setup(test_config, tokenizer_factory, monkeypatch,
+                                  temp_db_cursor, sql_preprocessor_cfg):
+    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
+    tok = tokenizer_factory()
+    tok.init_new_db(test_config)
+
+    # Inject a bad transliteration.
+    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
+                              RETURNS TEXT AS $$ SELECT 'garbage'; $$ LANGUAGE SQL""")
+
+    assert tok.check_database(False) is not None
+
+
+def test_update_statistics_reverse_only(word_table, tokenizer_factory):
+    tok = tokenizer_factory()
+    tok.update_statistics()
+
+
+def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_factory):
+    word_table.add_full_word(1000, 'hello')
+    table_factory('search_name',
+                  'place_id BIGINT, name_vector INT[]',
+                  [(12, [1000])])
+    tok = tokenizer_factory()
+
+    tok.update_statistics()
+
+    assert temp_db_cursor.scalar("""SELECT count(*) FROM word
+                                    WHERE word_token like ' %' and
+                                          search_name_count > 0""") > 0
+
+
 def test_normalize(analyzer):
     assert analyzer.normalize('TEsT') == 'test'
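The legacy assertion mirrors the ICU one earlier in this diff but targets the older schema: full words are identified by their leading-blank word_token instead of type = 'W', and the occurrence count lives in a dedicated search_name_count column instead of the info JSON:

    # ICU word table:    WHERE type = 'W' and (info->>'count')::int > 0
    # legacy word table: WHERE word_token like ' %' and search_name_count > 0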
@@ -186,7 +262,6 @@ def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table,

     analyzer.update_postcodes_from_db()

-    assert word_table.count() == 3
     assert word_table.get_postcodes() == {'1234', '12 34', 'AB23'}
@@ -199,7 +274,6 @@ def test_update_postcodes_from_db_add_and_remove(analyzer, table_factory, word_table,

     analyzer.update_postcodes_from_db()

-    assert word_table.count() == 3
     assert word_table.get_postcodes() == {'1234', '45BC', 'XX45'}
@@ -281,12 +355,6 @@ def test_add_more_country_names(analyzer, word_table, make_standard_name):
                              ('it', ' #it#')}


-def test_process_place_names(analyzer, make_keywords):
-    info = analyzer.process_place(PlaceInfo({'name' : {'name' : 'Soft bAr', 'ref': '34'}}))
-
-    assert info['names'] == '{1,2,3}'
-
-
 @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
 def test_process_place_postcode(analyzer, create_postcode_id, word_table, pcode):
     analyzer.process_place(PlaceInfo({'address': {'postcode' : pcode}}))
@@ -337,3 +405,174 @@ class TestHousenumberName:
                                                   'streetnumber' : '99a'}}))

     assert set(info['hnr'].split(';')) == set(('134', '99a'))
+
+
+class TestPlaceNames:
+
+    @pytest.fixture(autouse=True)
+    def setup(self, analyzer):
+        self.analyzer = analyzer
+
+
+    def expect_name_terms(self, info, *expected_terms):
+        tokens = self.analyzer.get_word_token_info(list(expected_terms))
+        for token in tokens:
+            assert token[2] is not None, "No token for {0}".format(token)
+
+        assert eval(info['names']) == set((t[2] for t in tokens)),\
+               f"Expected: {tokens}\nGot: {info['names']}"
+
+
+    def process_named_place(self, names):
+        return self.analyzer.process_place(PlaceInfo({'name': names}))
+
+
+    def test_simple_names(self):
+        info = self.process_named_place({'name': 'Soft bAr', 'ref': '34'})
+
+        self.expect_name_terms(info, '#Soft bAr', '#34', 'Soft', 'bAr', '34')
+
+
+    @pytest.mark.parametrize('sep', [',', ';'])
+    def test_names_with_separator(self, sep):
+        info = self.process_named_place({'name': sep.join(('New York', 'Big Apple'))})
+
+        self.expect_name_terms(info, '#New York', '#Big Apple',
+                               'new', 'york', 'big', 'apple')
+
+
+    def test_full_names_with_bracket(self):
+        info = self.process_named_place({'name': 'Houseboat (left)'})
+
+        self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat',
+                               'houseboat', '(left)')
+
+
+    def test_country_name(self, word_table):
+        place = PlaceInfo({'name' : {'name': 'Norge'},
+                           'country_code': 'no',
+                           'rank_address': 4,
+                           'class': 'boundary',
+                           'type': 'administrative'})
+
+        info = self.analyzer.process_place(place)
+
+        self.expect_name_terms(info, '#norge', 'norge')
+        assert word_table.get_country() == {('no', ' norge')}
+
+
+class TestPlaceAddress:
+
+    @pytest.fixture(autouse=True)
+    def setup(self, analyzer):
+        self.analyzer = analyzer
+
+
+    @pytest.fixture
+    def getorcreate_hnr_id(self, temp_db_cursor):
+        temp_db_cursor.execute("""CREATE SEQUENCE seq_hnr start 1;
+                                  CREATE OR REPLACE FUNCTION getorcreate_housenumber_id(lookup_word TEXT)
+                                  RETURNS INTEGER AS $$
+                                    SELECT -nextval('seq_hnr')::INTEGER; $$ LANGUAGE SQL""")
+
+
+    def process_address(self, **kwargs):
+        return self.analyzer.process_place(PlaceInfo({'address': kwargs}))
+
+
+    def name_token_set(self, *expected_terms):
+        tokens = self.analyzer.get_word_token_info(list(expected_terms))
+        for token in tokens:
+            assert token[2] is not None, "No token for {0}".format(token)
+
+        return set((t[2] for t in tokens))
+
+
+    @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
+    def test_process_place_postcode(self, word_table, pcode):
+        self.process_address(postcode=pcode)
+
+        assert word_table.get_postcodes() == {pcode, }
+
+
+    @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
+    def test_process_place_bad_postcode(self, word_table, pcode):
+        self.process_address(postcode=pcode)
+
+        assert not word_table.get_postcodes()
+
+
+    @pytest.mark.parametrize('hnr', ['123a', '0', '101'])
+    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
+        info = self.process_address(housenumber=hnr)
+
+        assert info['hnr'] == hnr.lower()
+        assert info['hnr_tokens'] == "{-1}"
+
+
+    def test_process_place_housenumbers_lists(self, getorcreate_hnr_id):
+        info = self.process_address(conscriptionnumber='1; 2;3')
+
+        assert set(info['hnr'].split(';')) == set(('1', '2', '3'))
+        assert info['hnr_tokens'] == "{-1,-2,-3}"
+
+
+    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
+        info = self.process_address(housenumber='134',
+                                    conscriptionnumber='134',
+                                    streetnumber='99A')
+
+        assert set(info['hnr'].split(';')) == set(('134', '99a'))
+        assert info['hnr_tokens'] == "{-1,-2}"
+
+
+    def test_process_place_street(self):
+        # legacy tokenizer only indexes known names
+        self.analyzer.process_place(PlaceInfo({'name': {'name' : 'Grand Road'}}))
+        info = self.process_address(street='Grand Road')
+
+        assert eval(info['street']) == self.name_token_set('#Grand Road')
+
+
+    def test_process_place_street_empty(self):
+        info = self.process_address(street='🜵')
+
+        assert 'street' not in info
+
+
+    def test_process_place_place(self):
+        self.analyzer.process_place(PlaceInfo({'name': {'name' : 'Honu Lulu'}}))
+        info = self.process_address(place='Honu Lulu')
+
+        assert eval(info['place_search']) == self.name_token_set('#Honu Lulu',
+                                                                 'Honu', 'Lulu')
+        assert eval(info['place_match']) == self.name_token_set('#Honu Lulu')
+
+
+    def test_process_place_place_empty(self):
+        info = self.process_address(place='🜵')
+
+        assert 'place' not in info
+
+
+    def test_process_place_address_terms(self):
+        for name in ('Zwickau', 'Haupstraße', 'Sachsen'):
+            self.analyzer.process_place(PlaceInfo({'name': {'name' : name}}))
+        info = self.process_address(country='de', city='Zwickau', state='Sachsen',
+                                    suburb='Zwickau', street='Hauptstr',
+                                    full='right behind the church')
+
+        city = self.name_token_set('ZWICKAU')
+        state = self.name_token_set('SACHSEN')
+
+        result = {k: eval(v[0]) for k, v in info['addr'].items()}
+
+        assert result == {'city': city, 'suburb': city, 'state': state}
+
+
+    def test_process_place_address_terms_empty(self):
+        info = self.process_address(country='de', city=' ', street='Hauptstr',
+                                    full='right behind the church')
+
+        assert 'addr' not in info
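A closing note on the eval() calls used throughout these classes: the analyzer returns Postgres array literals, and a literal like '{1,2,3}' is also valid Python set syntax, so eval() converts it straight into a set for comparison:

    assert eval('{1,2,3}') == {1, 2, 3}   # PG array literal doubles as a Python set literal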