remove support for unindexed tokens

This was a special feature of the legacy tokenizer, which would not
index very frequent tokens.
Sarah Hoffmann
2024-09-22 10:39:10 +02:00
parent 290c22a153
commit a690605a96
6 changed files with 23 additions and 30 deletions
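The flag removed here is the trailing is_indexed field of the search Token
type. As a minimal sketch, assuming the dataclass shape implied by the
constructor calls in the test fixtures below (the field order and the base
class are inferred, not taken from this diff), the change amounts to:

    import dataclasses
    from abc import ABC

    @dataclasses.dataclass
    class Token(ABC):
        # Fields as implied by MyToken(penalty=..., token=..., count=...,
        # addr_count=..., lookup_word=...) in the tests below.
        penalty: float
        token: int
        count: int
        addr_count: int
        lookup_word: str
        # Removed: is_indexed: bool -- the legacy tokenizer set this to
        # False for very frequent tokens that were never indexed.

Callers, including the positional constructor calls in the tests, simply
drop the final argument.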

@@ -19,7 +19,7 @@ class MyToken(query.Token):
 def mktoken(tid: int):
     return MyToken(penalty=3.0, token=tid, count=1, addr_count=1,
-                   lookup_word='foo', is_indexed=True)
+                   lookup_word='foo')
 @pytest.mark.parametrize('ptype,ttype', [('NONE', 'WORD'),

@@ -33,7 +33,7 @@ def make_query(*args):
         q.add_token(TokenRange(start, end), ttype,
                     MyToken(penalty=0.5 if ttype == TokenType.PARTIAL else 0.0,
                             token=tid, count=1, addr_count=1,
-                            lookup_word=word, is_indexed=True))
+                            lookup_word=word))
     return q
@@ -397,14 +397,14 @@ def make_counted_searches(name_part, name_full, address_part, address_full,
     q.add_node(BreakType.END, PhraseType.NONE)
     q.add_token(TokenRange(0, 1), TokenType.PARTIAL,
-                MyToken(0.5, 1, name_part, 1, 'name_part', True))
+                MyToken(0.5, 1, name_part, 1, 'name_part'))
     q.add_token(TokenRange(0, 1), TokenType.WORD,
-                MyToken(0, 101, name_full, 1, 'name_full', True))
+                MyToken(0, 101, name_full, 1, 'name_full'))
     for i in range(num_address_parts):
         q.add_token(TokenRange(i + 1, i + 2), TokenType.PARTIAL,
-                    MyToken(0.5, 2, address_part, 1, 'address_part', True))
+                    MyToken(0.5, 2, address_part, 1, 'address_part'))
         q.add_token(TokenRange(i + 1, i + 2), TokenType.WORD,
-                    MyToken(0, 102, address_full, 1, 'address_full', True))
+                    MyToken(0, 102, address_full, 1, 'address_full'))
     builder = SearchBuilder(q, SearchDetails())

@@ -20,7 +20,7 @@ class MyToken(Token):
 def make_query(*args):
     q = QueryStruct([Phrase(args[0][1], '')])
     dummy = MyToken(penalty=3.0, token=45, count=1, addr_count=1,
-                    lookup_word='foo', is_indexed=True)
+                    lookup_word='foo')
     for btype, ptype, _ in args[1:]:
         q.add_node(btype, ptype)