avoid lookup via partials on frequent words

Drops expensive searches via partials on terms like 'rue de'.

See #2979.
This commit is contained in:
Sarah Hoffmann
2023-07-05 14:07:11 +02:00
parent 3266daa8fd
commit cc45930ef9
2 changed files with 28 additions and 14 deletions

View File

@@ -235,19 +235,21 @@ class SearchBuilder:
                 yield penalty, sum(t.count for t in rare_names), lookup

         # To catch remaining results, lookup by name and address
-        if all(t.is_indexed for t in name_partials):
-            lookup = [dbf.FieldLookup('name_vector',
-                                      [t.token for t in name_partials], 'lookup_all')]
-        else:
-            # we don't have the partials, try with the non-rare names
-            non_rare_names = [t.token for t in name_fulls if t.count >= 1000]
-            if not non_rare_names:
-                return
-            lookup = [dbf.FieldLookup('name_vector', non_rare_names, 'lookup_any')]
-        if addr_tokens:
-            lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all'))
-        yield penalty + 0.1 * max(0, 5 - len(name_partials) - len(addr_tokens)),\
-              min(exp_name_count, exp_addr_count), lookup
+        # We only do this if there is a reasonable number of results expected.
+        if min(exp_name_count, exp_addr_count) < 10000:
+            if all(t.is_indexed for t in name_partials):
+                lookup = [dbf.FieldLookup('name_vector',
+                                          [t.token for t in name_partials], 'lookup_all')]
+            else:
+                # we don't have the partials, try with the non-rare names
+                non_rare_names = [t.token for t in name_fulls if t.count >= 1000]
+                if not non_rare_names:
+                    return
+                lookup = [dbf.FieldLookup('name_vector', non_rare_names, 'lookup_any')]
+            if addr_tokens:
+                lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all'))
+            yield penalty + 0.1 * max(0, 5 - len(name_partials) - len(addr_tokens)),\
+                  min(exp_name_count, exp_addr_count), lookup

     def get_name_ranking(self, trange: TokenRange) -> dbf.FieldRanking:

View File

@@ -382,7 +382,7 @@ def test_frequent_partials_in_name_but_not_in_address():

 def test_frequent_partials_in_name_and_address():
-    searches = make_counted_searches(10000, 1, 10000, 1)
+    searches = make_counted_searches(9999, 1, 9999, 1)

     assert len(searches) == 2
@@ -393,3 +393,15 @@ def test_frequent_partials_in_name_and_address():
            {('name_vector', 'lookup_any'), ('nameaddress_vector', 'restrict')}
     assert set((l.column, l.lookup_type) for l in searches[1].lookups) == \
            {('nameaddress_vector', 'lookup_all'), ('name_vector', 'lookup_all')}
+
+
+def test_too_frequent_partials_in_name_and_address():
+    searches = make_counted_searches(10000, 1, 10000, 1)
+
+    assert len(searches) == 1
+
+    assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
+    searches.sort(key=lambda s: s.penalty)
+
+    assert set((l.column, l.lookup_type) for l in searches[0].lookups) == \
+           {('name_vector', 'lookup_any'), ('nameaddress_vector', 'restrict')}