Merge pull request #2589 from lonvia/clean-housenumbers
Add command for cleaning up word table
Changed files include: .github/workflows/ci-tests.yml (vendored)
@@ -309,12 +309,20 @@ jobs:
           NOMINATIM_REPLICATION_MAX_DIFF=1 nominatim replication --once
         working-directory: /home/nominatim/nominatim-project

+      - name: Clean up database
+        run: nominatim refresh --postcodes --word-tokens
+        working-directory: /home/nominatim/nominatim-project
+
       - name: Run reverse-only import
         run : |
           echo 'NOMINATIM_DATABASE_DSN="pgsql:dbname=reverse"' >> .env
           nominatim import --osm-file ../test.pbf --reverse-only --no-updates
         working-directory: /home/nominatim/data-env-reverse

-      - name: Check reverse import
+      - name: Check reverse-only import
         run: nominatim admin --check-database
         working-directory: /home/nominatim/data-env-reverse
+
+      - name: Clean up database (reverse-only import)
+        run: nominatim refresh --postcodes --word-tokens
+        working-directory: /home/nominatim/nominatim-project
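The two new CI steps run the clean-up right after the update and reverse-only imports. Outside of CI, the same clean-up can be driven from a maintenance script; a minimal sketch, assuming the `nominatim` command is on PATH and `project_dir` points at an existing project directory (the helper itself is hypothetical, not part of this PR):

import subprocess

def cleanup_word_table(project_dir: str) -> None:
    # --postcodes refreshes the postcode centroid table; --word-tokens runs
    # the new tokenizer house-keeping added by this PR.
    subprocess.run(['nominatim', 'refresh', '--postcodes', '--word-tokens'],
                   cwd=project_dir, check=True)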
@@ -39,6 +39,8 @@ class UpdateRefresh:
         group = parser.add_argument_group('Data arguments')
         group.add_argument('--postcodes', action='store_true',
                            help='Update postcode centroid table')
+        group.add_argument('--word-tokens', action='store_true',
+                           help='Clean up search terms')
         group.add_argument('--word-counts', action='store_true',
                            help='Compute frequency of full-word search terms')
         group.add_argument('--address-levels', action='store_true',
@@ -76,6 +78,11 @@ class UpdateRefresh:
                 LOG.error("The place table doesn't exist. "
                           "Postcode updates on a frozen database is not possible.")

+        if args.word_tokens:
+            LOG.warning('Updating word tokens')
+            tokenizer = self._get_tokenizer(args.config)
+            tokenizer.update_word_tokens()
+
         if args.word_counts:
            LOG.warning('Recompute word statistics')
            self._get_tokenizer(args.config).update_statistics()
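With the argument wired up, `nominatim refresh --word-tokens` loads the tokenizer configured for the project and delegates the work to its `update_word_tokens()` method; the flag can be combined with other refresh options, as the CI job above does with `--postcodes`.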
@@ -209,6 +209,13 @@ class AbstractTokenizer(ABC):
         """


+    @abstractmethod
+    def update_word_tokens(self) -> None:
+        """ Do house-keeping on the tokenizers internal data structures.
+            Remove unused word tokens, resort data etc.
+        """
+
+
     @abstractmethod
     def name_analyzer(self) -> AbstractAnalyzer:
         """ Create a new analyzer for tokenizing names and queries
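Both concrete tokenizers gain an implementation of this new abstract hook further down in the diff: the ICU tokenizer performs the actual house-number clean-up, while the legacy tokenizer provides a logging-only no-op.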
@@ -112,6 +112,47 @@ class LegacyICUTokenizer(AbstractTokenizer):
             conn.commit()


+    def _cleanup_housenumbers(self):
+        """ Remove unused house numbers.
+        """
+        with connect(self.dsn) as conn:
+            if not conn.table_exists('search_name'):
+                return
+            with conn.cursor(name="hnr_counter") as cur:
+                cur.execute("""SELECT word_id, word_token FROM word
+                               WHERE type = 'H'
+                                 AND NOT EXISTS(SELECT * FROM search_name
+                                                WHERE ARRAY[word.word_id] && name_vector)
+                                 AND (char_length(word_token) > 6
+                                      OR word_token not similar to '\\d+')
+                            """)
+                candidates = {token: wid for wid, token in cur}
+            with conn.cursor(name="hnr_counter") as cur:
+                cur.execute("""SELECT housenumber FROM placex
+                               WHERE housenumber is not null
+                                     AND (char_length(housenumber) > 6
+                                          OR housenumber not similar to '\\d+')
+                            """)
+                for row in cur:
+                    for hnr in row[0].split(';'):
+                        candidates.pop(hnr, None)
+            LOG.info("There are %s outdated housenumbers.", len(candidates))
+            if candidates:
+                with conn.cursor() as cur:
+                    cur.execute("""DELETE FROM word WHERE word_id = any(%s)""",
+                                (list(candidates.values()), ))
+                conn.commit()
+
+
+    def update_word_tokens(self):
+        """ Remove unused tokens.
+        """
+        LOG.warning("Cleaning up housenumber tokens.")
+        self._cleanup_housenumbers()
+        LOG.warning("Tokenizer house-keeping done.")
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
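To summarise the clean-up above: the first server-side cursor collects house-number tokens (type 'H') that no search_name entry references and that are either longer than six characters or not purely numeric; the second pass removes from that candidate set every house number still present in placex, splitting semicolon-separated lists; whatever remains is deleted from the word table. A toy illustration of the candidate pruning with made-up tokens (plain Python, no database involved):

# Toy illustration only: candidate house-number tokens are those that passed
# the SQL filter above, i.e. unreferenced in search_name and either longer
# than six characters or not purely numeric.
candidates = {'34 5': 101, '1234567': 102, '5432a': 103}

# House numbers still present in placex; semicolon-separated lists are split.
placex_housenumbers = ['5432a;5432b']
for row in placex_housenumbers:
    for hnr in row.split(';'):
        candidates.pop(hnr, None)

# Only the genuinely unused tokens remain and would be deleted from word.
assert sorted(candidates.values()) == [101, 102]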
@@ -211,6 +211,13 @@ class LegacyTokenizer(AbstractTokenizer):
                 cur.drop_table("word_frequencies")
             conn.commit()


+    def update_word_tokens(self):
+        """ No house-keeping implemented for the legacy tokenizer.
+        """
+        LOG.info("No tokenizer clean-up available.")
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
@@ -30,6 +30,7 @@ class DummyTokenizer:
         self.update_sql_functions_called = False
         self.finalize_import_called = False
         self.update_statistics_called = False
+        self.update_word_tokens_called = False

     def update_sql_functions(self, *args):
         self.update_sql_functions_called = True
@@ -40,6 +41,9 @@ class DummyTokenizer:
     def update_statistics(self):
         self.update_statistics_called = True

+    def update_word_tokens(self):
+        self.update_word_tokens_called = True
+

 @pytest.fixture
 def cli_call(src_dir):
@@ -39,6 +39,11 @@ class TestRefresh:
         assert self.tokenizer_mock.update_statistics_called


+    def test_refresh_word_tokens(self):
+        assert self.call_nominatim('refresh', '--word-tokens') == 0
+        assert self.tokenizer_mock.update_word_tokens_called
+
+
     def test_refresh_postcodes(self, mock_func_factory, place_table):
         func_mock = mock_func_factory(nominatim.tools.postcodes, 'update_postcodes')
         idx_mock = mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_postcodes')
@@ -58,6 +58,14 @@ class MockIcuWordTable:
         self.conn.commit()


+    def add_housenumber(self, word_id, word_token):
+        with self.conn.cursor() as cur:
+            cur.execute("""INSERT INTO word (word_id, word_token, type)
+                           VALUES (%s, %s, 'H')
+                        """, (word_id, word_token))
+        self.conn.commit()
+
+
     def count(self):
         with self.conn.cursor() as cur:
             return cur.scalar("SELECT count(*) FROM word")
@@ -68,6 +76,11 @@ class MockIcuWordTable:
             return cur.scalar("SELECT count(*) FROM word WHERE type = 'S'")


+    def count_housenumbers(self):
+        with self.conn.cursor() as cur:
+            return cur.scalar("SELECT count(*) FROM word WHERE type = 'H'")
+
+
     def get_special(self):
         with self.conn.cursor() as cur:
             cur.execute("SELECT word_token, info, word FROM word WHERE type = 'S'")
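These two mock helpers mirror the ICU word table layout, where house-number tokens are stored with type 'H'. They let the new tests below seed the table and count the remaining house-number tokens around a call to update_word_tokens().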
@@ -9,6 +9,7 @@ Tests for ICU tokenizer.
 """
 import shutil
 import yaml
+import itertools

 import pytest

@@ -554,3 +555,69 @@ class TestPlaceAddress:

         assert 'addr' not in info

+
+class TestUpdateWordTokens:
+
+    @pytest.fixture(autouse=True)
+    def setup(self, tokenizer_factory, table_factory, placex_table, word_table):
+        table_factory('search_name', 'place_id BIGINT, name_vector INT[]')
+        self.tok = tokenizer_factory()
+
+
+    @pytest.fixture
+    def search_entry(self, temp_db_cursor):
+        place_id = itertools.count(1000)
+
+        def _insert(*args):
+            temp_db_cursor.execute("INSERT INTO search_name VALUES (%s, %s)",
+                                   (next(place_id), list(args)))
+
+        return _insert
+
+
+    @pytest.mark.parametrize('hnr', ('1a', '1234567', '34 5'))
+    def test_remove_unused_housenumbers(self, word_table, hnr):
+        word_table.add_housenumber(1000, hnr)
+
+        assert word_table.count_housenumbers() == 1
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 0
+
+
+    def test_keep_unused_numeral_housenumbers(self, word_table):
+        word_table.add_housenumber(1000, '5432')
+
+        assert word_table.count_housenumbers() == 1
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_search_name_table(self, word_table, search_entry):
+        word_table.add_housenumber(9999, '5432a')
+        word_table.add_housenumber(9991, '9 a')
+        search_entry(123, 9999, 34)
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_placex_table(self, word_table, placex_table):
+        word_table.add_housenumber(9999, '5432a')
+        word_table.add_housenumber(9990, '34z')
+        placex_table.add(housenumber='34z')
+        placex_table.add(housenumber='25432a')
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_placex_table_hnr_list(self, word_table, placex_table):
+        word_table.add_housenumber(9991, '9 b')
+        word_table.add_housenumber(9990, '34z')
+        placex_table.add(housenumber='9 a;9 b;9 c')
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
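Taken together, these tests pin down the clean-up heuristic: purely numeric house numbers of up to six characters are never deleted, and longer or non-numeric house-number tokens survive as long as they are still referenced from search_name or appear in a placex house number, including as part of a semicolon-separated list.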
@@ -257,6 +257,13 @@ def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_
                                              search_name_count > 0""") > 0


+def test_update_word_tokens(tokenizer_factory):
+    tok = tokenizer_factory()
+
+    # This is a noop and should just pass.
+    tok.update_word_tokens()
+
+
 def test_normalize(analyzer):
     assert analyzer.normalize('TEsT') == 'test'
