Compare commits

...

93 Commits

Author SHA1 Message Date
Sarah Hoffmann
76b8b07f16 adapt docs for release 2025-04-01 11:24:32 +02:00
Sarah Hoffmann
fce279226f prepare release 5.1.0 2025-04-01 10:16:35 +02:00
Sarah Hoffmann
54d895c4ce Merge pull request #3695 from TuringVerified/doc-dependencies
[Small fix] Add documentation to install extras for mkdocstrings
2025-04-01 09:34:08 +02:00
TuringVerified
896a1c9d12 Add mkdocstrings extra 2025-04-01 11:06:46 +05:30
Sarah Hoffmann
32728d6c89 Merge pull request #3693 from lonvia/remove-unused-sql
Remove SQL function for address lookup
2025-03-31 17:11:39 +02:00
Sarah Hoffmann
bfd1c83cb0 Merge pull request #3692 from lonvia/word-lookup-variants
Avoid matching penalty for abbreviated search terms
2025-03-31 16:38:31 +02:00
Sarah Hoffmann
bbadc62371 remove SQL function for address lookup
This is now done in Python.
2025-03-31 15:09:40 +02:00
Sarah Hoffmann
5c9d3ca8d2 Merge pull request #3691 from lonvia/more-search-tweaks
More tweaks to search weights
2025-03-31 15:06:09 +02:00
Sarah Hoffmann
be4ba370ef adapt tests to extended results 2025-03-31 14:52:50 +02:00
Sarah Hoffmann
3cb183ffb0 add lookup word to variants in word table 2025-03-31 14:52:50 +02:00
Sarah Hoffmann
58ef032a2b do not write any word counts on initial word insert 2025-03-31 14:52:50 +02:00
Sarah Hoffmann
1705bb5f57 do not save word counts of 1
This is the default setting, which will be assumed when the count is
missing.
2025-03-31 14:52:50 +02:00
Sarah Hoffmann
f2aa15778f always use lookup when requested
Doesn't seem to cause any issues in production.
2025-03-31 11:38:21 +02:00
Sarah Hoffmann
efe65c3e49 increase allowable address counts 2025-03-31 11:38:21 +02:00
Sarah Hoffmann
51847ebfeb more aggressively reduce expected count for multi-word terms
Improves searching of non-latin scripts with forced token spaces.
2025-03-31 11:18:22 +02:00
Sarah Hoffmann
46579f08e4 Merge pull request #3690 from lonvia/fix-signature
Fix function signature for newer SQLAlchemy
2025-03-31 11:17:03 +02:00
Sarah Hoffmann
d4994a152b fix function signature for newer SQLAlchemy 2025-03-31 09:42:29 +02:00
Sarah Hoffmann
00b3ace3cf Merge pull request #3684 from lonvia/compact-en-variants
Clean up English variants
2025-03-24 15:15:13 +01:00
Sarah Hoffmann
522bc942cf restrict some English variants to end of word 2025-03-21 21:22:38 +01:00
Sarah Hoffmann
d6e749d621 make English variant list more compact 2025-03-21 21:13:34 +01:00
Sarah Hoffmann
13cfb7efe2 Merge pull request #3682 from lonvia/fix-postcode-case
Fix case issues when parsing postcodes
2025-03-21 11:41:24 +01:00
Sarah Hoffmann
35baf77b18 make query upper-case when parsing postcodes
The postcode patterns expect upper-case letters.
2025-03-21 09:44:15 +01:00
Sarah Hoffmann
7e68613cc7 Merge pull request #3679 from lonvia/output-fixes
Minor fixes for v1 frontend code
2025-03-19 21:56:28 +01:00
Sarah Hoffmann
b1fc721f4b fix layer setting for structured search 2025-03-19 17:31:43 +01:00
Sarah Hoffmann
d400fd5f76 fix debug output for lookup type 2025-03-19 17:31:18 +01:00
Sarah Hoffmann
e4295dba10 Merge pull request #3678 from lonvia/search-tweaks
Some minor tweaks to postcode parsing in query
2025-03-19 16:00:52 +01:00
Sarah Hoffmann
9419c5adb2 penalize postcode searches with multiple name qualifiers 2025-03-19 10:05:36 +01:00
Sarah Hoffmann
2c61fe08a0 use word_token length when penalizing against postcodes 2025-03-19 09:52:40 +01:00
Sarah Hoffmann
7b3c725f2a postcode token should have transliterated term in word_token 2025-03-19 09:52:40 +01:00
Sarah Hoffmann
edc5ada625 improve handling of leading postcodes
Setting the direction of the query while yielding assignments is
a bad idea because it may override a direction already set.
2025-03-19 09:52:40 +01:00
Sarah Hoffmann
72d3360fa2 Merge pull request #3673 from otbutz/parallel_safe
Mark functions as PARALLEL SAFE
2025-03-18 21:46:53 +01:00
Sarah Hoffmann
0ffe384c57 Merge pull request #3676 from lonvia/adjust-place-levels-sa
Adjust place ranks for Saudi-Arabia
2025-03-18 18:31:48 +01:00
Sarah Hoffmann
9dad5edeb6 adjust for special use of province and municipality in Saudi-Arabia 2025-03-18 16:38:10 +01:00
Thomas Butz
d86d491f2e Mark functions as PARALLEL SAFE 2025-03-13 10:53:11 +01:00
Sarah Hoffmann
3026c333ca adapt typing for latest SQLAlchemy version 2025-03-13 10:49:08 +01:00
Sarah Hoffmann
ad84bbdec7 Merge pull request #3671 from lonvia/remove-osm2pgsql-libdir
Remove code for setting osm2pgsql location via config.lib_dir
2025-03-11 11:22:46 +01:00
Sarah Hoffmann
f5755a7a82 remove code for setting osm2pgsql via config.lib_dir
With the internal osm2pgsql gone, configuration of the binary location
via settings is the only option left that makes sense.
2025-03-11 09:04:05 +01:00
Sarah Hoffmann
cd08956c61 Merge pull request #3670 from lonvia/flake-for-tests
Extend linting with flake to tests
2025-03-10 09:35:24 +01:00
Sarah Hoffmann
12f5719184 remove unused bdd util functions 2025-03-09 17:34:40 +01:00
Sarah Hoffmann
78f839fbd3 enable flake for bdd test code 2025-03-09 17:34:04 +01:00
Sarah Hoffmann
c70dfccaca also enable flake for tests in github actions 2025-03-09 16:03:02 +01:00
Sarah Hoffmann
4cc788f69e enable flake for Python tests 2025-03-09 15:33:24 +01:00
Sarah Hoffmann
5a245e33e0 Merge pull request #3667 from eumiro/simplify-int-float
Simplify  int/float manipulation
2025-03-09 09:44:15 +01:00
Miroslav Šedivý
6ff51712fe Simplify int/float manipulation 2025-03-06 19:26:56 +01:00
Sarah Hoffmann
c431e0e45d Merge pull request #3666 from eumiro/math-isclose
Replace custom Almost with stdlib math.isclose
2025-03-06 17:53:01 +01:00
Sarah Hoffmann
c2d62a59cb Merge pull request #3664 from eumiro/consolidate-random
Consolidate usage of random module
2025-03-06 17:52:19 +01:00
Miroslav Šedivý
cd64788a58 Replace custom Almost with stdlib math.isclose 2025-03-05 20:35:01 +01:00
Miroslav Šedivý
800a41721a Consolidate usage of random module 2025-03-05 19:38:28 +01:00
Sarah Hoffmann
1b44fe2555 Merge pull request #3665 from lonvia/pattern-matching-postcodes
Add full parsing of postcodes in query
2025-03-05 16:02:03 +01:00
Sarah Hoffmann
6b0d58d9fd restrict postcode parsing in typed phrases
Postcodes can only appear in postcode-type phrases and must then
cover the full phrase
2025-03-05 10:09:33 +01:00
Sarah Hoffmann
afb89f9c7a add unit tests for postcode parser 2025-03-04 16:25:00 +01:00
Sarah Hoffmann
6712627d5e adapt BDD tests to new postcode handling 2025-03-04 15:18:46 +01:00
Sarah Hoffmann
434fbbfd18 add support for country prefixes in postcodes 2025-03-04 15:18:27 +01:00
Sarah Hoffmann
921db8bb2f cache all info of ICUQueryAnalyser in a single object 2025-03-04 08:58:57 +01:00
Sarah Hoffmann
a574b98e4a remove postcode computation for word table during import 2025-03-04 08:57:59 +01:00
Sarah Hoffmann
b2af358f66 reenable ZIP+ test 2025-03-04 08:57:59 +01:00
Sarah Hoffmann
e67ae701ac show token begin and end in debug output 2025-03-04 08:57:59 +01:00
Sarah Hoffmann
fc1c6261ed add postcode parser 2025-03-04 08:57:37 +01:00
Sarah Hoffmann
6759edfb5d make word generation from query a class method 2025-03-04 08:57:37 +01:00
Sarah Hoffmann
e362a965e1 search: merge QueryPart array with QueryNodes
The basic information on terms is pretty much always used together
with the node information. Merging them together saves some
allocation while making lookup easier at the same time.
2025-03-04 08:57:37 +01:00
Sarah Hoffmann
eff60ba6be enable parsing of US ZIP+ codes
The four-digit part of these postcodes will simply be ignored.
2025-02-25 20:29:06 +01:00
Sarah Hoffmann
157414a053 Merge pull request #3659 from lonvia/custom-datrie-structure
Replace datrie library with a simple custom Python implementation
2025-02-24 16:49:42 +01:00
Sarah Hoffmann
18d4996bec remove datrie dependency 2025-02-24 10:24:21 +01:00
Sarah Hoffmann
13db4c9731 replace datrie library with a more simple pure-Python class 2025-02-24 10:24:21 +01:00
Sarah Hoffmann
f567ea89cc Merge pull request #3658 from lonvia/minor-query-parsing-optimisations
Minor query parsing optimisations
2025-02-24 10:16:47 +01:00
Sarah Hoffmann
3e718e40d9 adapt documentation for PhraseType type 2025-02-21 17:16:42 +01:00
Sarah Hoffmann
49bd18b048 replace PhraseType enum with simple int constants 2025-02-21 16:44:12 +01:00
Sarah Hoffmann
31412e0674 replace TokenType enum with simple char constants 2025-02-21 10:23:41 +01:00
Sarah Hoffmann
4577669213 replace BreakType enum with simple char constants 2025-02-21 09:57:48 +01:00
Sarah Hoffmann
9bf1428d81 consistently use query module as qmod 2025-02-21 09:31:21 +01:00
Sarah Hoffmann
b56edf3d0a avoid yielding when extracting words from query 2025-02-20 23:32:39 +01:00
Sarah Hoffmann
abc911079e remove word_number counting for phrases
We can just examine the break types to know if we are dealing
with a partial token.
2025-02-20 17:36:50 +01:00
Sarah Hoffmann
adabfee3be Merge pull request #3655 from lonvia/remove-name-ranking-in-postcode-search
Tweak penalties for postcode searches
2025-02-20 14:32:43 +01:00
Sarah Hoffmann
46c4446dc2 remove address penalty for postcode search
Searches of the form <postcode> <city> are in fact quite common.
2025-02-20 11:11:45 +01:00
Sarah Hoffmann
add9244a2f do not rerank address by full match in postcode search
The reranking result will not be completely correct because
the address of a postcode refer to the address _and_ name
of the parent and reranking was only done against the
address. We assume here that the postcode is precise enough
as to not require a penalty to partial matches.
2025-02-20 10:29:03 +01:00
Sarah Hoffmann
96d7a8e8f6 Merge pull request #3653 from lonvia/trailing-spaces-in-normalization
Strip leading and trailing space markers during normalization
2025-02-19 17:25:59 +01:00
Sarah Hoffmann
55c3176957 strip normalisation results of normal and special spaces 2025-02-19 14:40:35 +01:00
Sarah Hoffmann
e29823e28f add test for structured query with leading spaces 2025-02-19 10:31:36 +01:00
Sarah Hoffmann
97ed168996 Merge pull request #3652 from lonvia/update-variants
Cleanup and updates of tokenizer variant configuration
2025-02-18 19:47:45 +01:00
Sarah Hoffmann
9b8ef97d4b Merge pull request #3649 from lonvia/actions-move-to-ubuntu22
Move Github actions to Ubuntu-22 image
2025-02-18 13:21:09 +01:00
Sarah Hoffmann
4f3c88f0c1 remove e-ë mutation, this is taken care of by transliteration 2025-02-18 10:31:44 +01:00
mhsr21
7781186f3c Add USPS Standard Suffix Abbreviation 2025-02-18 09:28:13 +01:00
Sarah Hoffmann
f78686edb8 fix Norwegian variants
More cases of 'no' being interpreted as false by yaml.
2025-02-18 09:28:13 +01:00
Sarah Hoffmann
e330cd3162 remove ineffective and duplicate variants 2025-02-18 09:28:13 +01:00
Sarah Hoffmann
671af4cff2 Merge pull request #3555 from IvanShift/patch-1
Fixed Russian abbreviation list
2025-02-17 18:44:11 +01:00
Sarah Hoffmann
e612b7d550 actions: use Debians's script for adding the Postgres apt repo 2025-02-17 17:56:23 +01:00
Sarah Hoffmann
0b49d01703 actions: move tests to Ubuntu-20 2025-02-17 17:54:49 +01:00
Sarah Hoffmann
f6bc8e153f Merge pull request #3648 from lonvia/extratags-for-geocodejson
Enable output of extratags for geocodejson format
2025-02-17 11:14:52 +01:00
Sarah Hoffmann
f143ecaf1c add documentation for new extra field 2025-02-17 10:04:23 +01:00
Sarah Hoffmann
6730c8bac8 add optional output of extratags to geocodejson 2025-02-16 10:16:40 +01:00
IvanShift
bea9249e38 Added "дом" and fixed order "школа" 2024-10-06 17:59:59 +03:00
Alexander Sapozhnikov
1e4677b668 Expand Russian abbreviation list 2022-11-01 04:01:27 +05:00
Alexander Sapozhnikov
7f909dbbd8 Add replacement for Russian 2022-11-01 02:54:07 +05:00
171 changed files with 3035 additions and 3243 deletions

View File

@@ -6,3 +6,6 @@ extend-ignore =
E711 E711
per-file-ignores = per-file-ignores =
__init__.py: F401 __init__.py: F401
test/python/utils/test_json_writer.py: E131
test/python/conftest.py: E402
test/bdd/*: F821

View File

@@ -11,10 +11,8 @@ runs:
steps: steps:
- name: Remove existing PostgreSQL - name: Remove existing PostgreSQL
run: | run: |
sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y
sudo apt-get purge -yq postgresql* sudo apt-get purge -yq postgresql*
sudo apt install curl ca-certificates gnupg
curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg >/dev/null
sudo sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
sudo apt-get update -qq sudo apt-get update -qq
shell: bash shell: bash

View File

@@ -37,10 +37,10 @@ jobs:
needs: create-archive needs: create-archive
strategy: strategy:
matrix: matrix:
flavour: ["ubuntu-20", "ubuntu-24"] flavour: ["ubuntu-22", "ubuntu-24"]
include: include:
- flavour: ubuntu-20 - flavour: ubuntu-22
ubuntu: 20 ubuntu: 22
postgresql: 12 postgresql: 12
lua: '5.1' lua: '5.1'
dependencies: pip dependencies: pip
@@ -81,7 +81,7 @@ jobs:
sudo make install sudo make install
cd ../.. cd ../..
rm -rf osm2pgsql-build rm -rf osm2pgsql-build
if: matrix.ubuntu == '20' if: matrix.ubuntu == '22'
env: env:
LUA_VERSION: ${{ matrix.lua }} LUA_VERSION: ${{ matrix.lua }}
@@ -100,7 +100,7 @@ jobs:
run: ./venv/bin/pip install -U flake8 run: ./venv/bin/pip install -U flake8
- name: Python linting - name: Python linting
run: ../venv/bin/python -m flake8 src run: ../venv/bin/python -m flake8 src test/python test/bdd
working-directory: Nominatim working-directory: Nominatim
- name: Install mypy and typechecking info - name: Install mypy and typechecking info

View File

@@ -1,3 +1,22 @@
5.1.0
* replace datrie with simple internal trie implementation
* add pattern-based postcode parser for queries,
postcodes no longer need to be present in OSM to be found
* take variants into account when computing token similarity
* add extratags output to geocodejson format
* fix default layer setting used for structured queries
* update abbreviation lists for Russian and English
(thanks @shoorick, @IvanShift, @mhsrn21)
* fix variant generation for Norwegian
* fix normalization around space-like characters
* improve postcode search and handling of postcodes in queries
* reorganise internal query structure and get rid of slow enums
* enable code linting for tests
* various code modernisations in test code (thanks @eumiro)
* remove setting osm2pgsql location via config.lib_dir
* make SQL functions parallel safe as far as possible (thanks @otbutz)
* various fixes and improvements to documentation (thanks @TuringVerified)
5.0.0 5.0.0
* increase required versions for PostgreSQL (12+), PostGIS (3.0+) * increase required versions for PostgreSQL (12+), PostGIS (3.0+)
* remove installation via cmake and debundle osm2pgsql * remove installation via cmake and debundle osm2pgsql

View File

@@ -24,7 +24,7 @@ pytest:
pytest test/python pytest test/python
lint: lint:
flake8 src flake8 src test/python test/bdd
bdd: bdd:
cd test/bdd; behave -DREMOVE_TEMPLATE=1 cd test/bdd; behave -DREMOVE_TEMPLATE=1

View File

@@ -9,7 +9,8 @@ versions.
| Version | End of support for security updates | | Version | End of support for security updates |
| ------- | ----------------------------------- | | ------- | ----------------------------------- |
| 5.0.x | 2027-02-06 | 5.1.x | 2027-04-01 |
| 5.0.x | 2027-02-06 |
| 4.5.x | 2026-09-12 | | 4.5.x | 2026-09-12 |
| 4.4.x | 2026-03-07 | | 4.4.x | 2026-03-07 |
| 4.3.x | 2025-09-07 | | 4.3.x | 2025-09-07 |

View File

@@ -37,7 +37,6 @@ Furthermore the following Python libraries are required:
* [Jinja2](https://palletsprojects.com/p/jinja/) * [Jinja2](https://palletsprojects.com/p/jinja/)
* [PyICU](https://pypi.org/project/PyICU/) * [PyICU](https://pypi.org/project/PyICU/)
* [PyYaml](https://pyyaml.org/) (5.1+) * [PyYaml](https://pyyaml.org/) (5.1+)
* [datrie](https://github.com/pytries/datrie)
These will be installed automatically when using pip installation. These will be installed automatically when using pip installation.
@@ -111,14 +110,17 @@ Then you can install Nominatim with:
pip install nominatim-db nominatim-api pip install nominatim-db nominatim-api
## Downloading and building Nominatim ## Downloading and building Nominatim from source
### Downloading the latest release The following instructions are only relevant, if you want to build and
install Nominatim **from source**.
### Downloading the source for the latest release
You can download the [latest release from nominatim.org](https://nominatim.org/downloads/). You can download the [latest release from nominatim.org](https://nominatim.org/downloads/).
The release contains all necessary files. Just unpack it. The release contains all necessary files. Just unpack it.
### Downloading the latest development version ### Downloading the source for the latest development version
If you want to install latest development version from github: If you want to install latest development version from github:
@@ -132,7 +134,7 @@ The development version does not include the country grid. Download it separatel
wget -O Nominatim/data/country_osm_grid.sql.gz https://nominatim.org/data/country_grid.sql.gz wget -O Nominatim/data/country_osm_grid.sql.gz https://nominatim.org/data/country_grid.sql.gz
``` ```
### Building Nominatim ### Building Nominatim from source
Nominatim is easiest to run from its own virtual environment. To create one, run: Nominatim is easiest to run from its own virtual environment. To create one, run:

View File

@@ -106,8 +106,11 @@ The following feature attributes are implemented:
* `name` - localised name of the place * `name` - localised name of the place
* `housenumber`, `street`, `locality`, `district`, `postcode`, `city`, * `housenumber`, `street`, `locality`, `district`, `postcode`, `city`,
`county`, `state`, `country` - `county`, `state`, `country` -
provided when it can be determined from the address provided when it can be determined from the address (only with `addressdetails=1`)
* `admin` - list of localised names of administrative boundaries (only with `addressdetails=1`) * `admin` - list of localised names of administrative boundaries (only with `addressdetails=1`)
* `extra` - dictionary with additional useful tags like `website` or `maxspeed`
(only with `extratags=1`)
Use `polygon_geojson` to output the full geometry of the object instead Use `polygon_geojson` to output the full geometry of the object instead
of the centroid. of the centroid.

View File

@@ -69,9 +69,9 @@ To set up the virtual environment with all necessary packages run:
```sh ```sh
virtualenv ~/nominatim-dev-venv virtualenv ~/nominatim-dev-venv
~/nominatim-dev-venv/bin/pip install\ ~/nominatim-dev-venv/bin/pip install\
psutil psycopg[binary] PyICU SQLAlchemy \ psutil 'psycopg[binary]' PyICU SQLAlchemy \
python-dotenv jinja2 pyYAML datrie behave \ python-dotenv jinja2 pyYAML behave \
mkdocs mkdocstrings mkdocs-gen-files pytest pytest-asyncio flake8 \ mkdocs 'mkdocstrings[python]' mkdocs-gen-files pytest pytest-asyncio flake8 \
types-jinja2 types-markupsafe types-psutil types-psycopg2 \ types-jinja2 types-markupsafe types-psutil types-psycopg2 \
types-pygments types-pyyaml types-requests types-ujson \ types-pygments types-pyyaml types-requests types-ujson \
types-urllib3 typing-extensions unicorn falcon starlette \ types-urllib3 typing-extensions unicorn falcon starlette \

View File

@@ -60,13 +60,19 @@ The order of phrases matters to Nominatim when doing further processing.
Thus, while you may split or join phrases, you should not reorder them Thus, while you may split or join phrases, you should not reorder them
unless you really know what you are doing. unless you really know what you are doing.
Phrase types (`nominatim_api.search.PhraseType`) can further help narrowing Phrase types can further help narrowing down how the tokens in the phrase
down how the tokens in the phrase are interpreted. The following phrase types are interpreted. The following phrase types are known:
are known:
::: nominatim_api.search.PhraseType | Name | Description |
options: |----------------|-------------|
heading_level: 6 | PHRASE_ANY | No specific designation (i.e. source is free-form query) |
| PHRASE_AMENITY | Contains name or type of a POI |
| PHRASE_STREET | Contains a street name optionally with a housenumber |
| PHRASE_CITY | Contains the postal city |
| PHRASE_COUNTY | Contains the equivalent of a county |
| PHRASE_STATE | Contains a state or province |
| PHRASE_POSTCODE| Contains a postal code |
| PHRASE_COUNTRY | Contains the country name or code |
## Custom sanitizer modules ## Custom sanitizer modules

View File

@@ -8,7 +8,6 @@
{% include('functions/utils.sql') %} {% include('functions/utils.sql') %}
{% include('functions/ranking.sql') %} {% include('functions/ranking.sql') %}
{% include('functions/importance.sql') %} {% include('functions/importance.sql') %}
{% include('functions/address_lookup.sql') %}
{% include('functions/interpolation.sql') %} {% include('functions/interpolation.sql') %}
{% if 'place' in db.tables %} {% if 'place' in db.tables %}

View File

@@ -1,334 +0,0 @@
-- SPDX-License-Identifier: GPL-2.0-only
--
-- This file is part of Nominatim. (https://nominatim.org)
--
-- Copyright (C) 2022 by the Nominatim developer community.
-- For a full list of authors see the git log.
-- Functions for returning address information for a place.
DROP TYPE IF EXISTS addressline CASCADE;
CREATE TYPE addressline as (
place_id BIGINT,
osm_type CHAR(1),
osm_id BIGINT,
name HSTORE,
class TEXT,
type TEXT,
place_type TEXT,
admin_level INTEGER,
fromarea BOOLEAN,
isaddress BOOLEAN,
rank_address INTEGER,
distance FLOAT
);
CREATE OR REPLACE FUNCTION get_name_by_language(name hstore, languagepref TEXT[])
RETURNS TEXT
AS $$
DECLARE
result TEXT;
BEGIN
IF name is null THEN
RETURN null;
END IF;
FOR j IN 1..array_upper(languagepref,1) LOOP
IF name ? languagepref[j] THEN
result := trim(name->languagepref[j]);
IF result != '' THEN
return result;
END IF;
END IF;
END LOOP;
-- as a fallback - take the last element since it is the default name
RETURN trim((avals(name))[array_length(avals(name), 1)]);
END;
$$
LANGUAGE plpgsql IMMUTABLE;
--housenumber only needed for tiger data
CREATE OR REPLACE FUNCTION get_address_by_language(for_place_id BIGINT,
housenumber INTEGER,
languagepref TEXT[])
RETURNS TEXT
AS $$
DECLARE
result TEXT[];
currresult TEXT;
prevresult TEXT;
location RECORD;
BEGIN
result := '{}';
prevresult := '';
FOR location IN
SELECT name,
CASE WHEN place_id = for_place_id THEN 99 ELSE rank_address END as rank_address
FROM get_addressdata(for_place_id, housenumber)
WHERE isaddress order by rank_address desc
LOOP
currresult := trim(get_name_by_language(location.name, languagepref));
IF currresult != prevresult AND currresult IS NOT NULL
AND result[(100 - location.rank_address)] IS NULL
THEN
result[(100 - location.rank_address)] := currresult;
prevresult := currresult;
END IF;
END LOOP;
RETURN array_to_string(result,', ');
END;
$$
LANGUAGE plpgsql STABLE;
DROP TYPE IF EXISTS addressdata_place;
CREATE TYPE addressdata_place AS (
place_id BIGINT,
country_code VARCHAR(2),
housenumber TEXT,
postcode TEXT,
class TEXT,
type TEXT,
name HSTORE,
address HSTORE,
centroid GEOMETRY
);
-- Compute the list of address parts for the given place.
--
-- If in_housenumber is greater than or equal to 0, look for an interpolation.
CREATE OR REPLACE FUNCTION get_addressdata(in_place_id BIGINT, in_housenumber INTEGER)
RETURNS setof addressline
AS $$
DECLARE
place addressdata_place;
location RECORD;
country RECORD;
current_rank_address INTEGER;
location_isaddress BOOLEAN;
BEGIN
-- The place in question might not have a direct entry in place_addressline.
-- Look for the parent of such places then and save it in place.
-- first query osmline (interpolation lines)
IF in_housenumber >= 0 THEN
SELECT parent_place_id as place_id, country_code,
in_housenumber as housenumber, postcode,
'place' as class, 'house' as type,
null as name, null as address,
ST_Centroid(linegeo) as centroid
INTO place
FROM location_property_osmline
WHERE place_id = in_place_id
AND in_housenumber between startnumber and endnumber;
END IF;
--then query tiger data
{% if config.get_bool('USE_US_TIGER_DATA') %}
IF place IS NULL AND in_housenumber >= 0 THEN
SELECT parent_place_id as place_id, 'us' as country_code,
in_housenumber as housenumber, postcode,
'place' as class, 'house' as type,
null as name, null as address,
ST_Centroid(linegeo) as centroid
INTO place
FROM location_property_tiger
WHERE place_id = in_place_id
AND in_housenumber between startnumber and endnumber;
END IF;
{% endif %}
-- postcode table
IF place IS NULL THEN
SELECT parent_place_id as place_id, country_code,
null::text as housenumber, postcode,
'place' as class, 'postcode' as type,
null as name, null as address,
null as centroid
INTO place
FROM location_postcode
WHERE place_id = in_place_id;
END IF;
-- POI objects in the placex table
IF place IS NULL THEN
SELECT parent_place_id as place_id, country_code,
coalesce(address->'housenumber',
address->'streetnumber',
address->'conscriptionnumber')::text as housenumber,
postcode,
class, type,
name, address,
centroid
INTO place
FROM placex
WHERE place_id = in_place_id and rank_search > 27;
END IF;
-- If place is still NULL at this point then the object has its own
-- entry in place_address line. However, still check if there is not linked
-- place we should be using instead.
IF place IS NULL THEN
select coalesce(linked_place_id, place_id) as place_id, country_code,
null::text as housenumber, postcode,
class, type,
null as name, address,
null as centroid
INTO place
FROM placex where place_id = in_place_id;
END IF;
--RAISE WARNING '% % % %',searchcountrycode, searchhousenumber, searchpostcode;
-- --- Return the record for the base entry.
current_rank_address := 1000;
FOR location IN
SELECT placex.place_id, osm_type, osm_id, name,
coalesce(extratags->'linked_place', extratags->'place') as place_type,
class, type, admin_level,
CASE WHEN rank_address = 0 THEN 100
WHEN rank_address = 11 THEN 5
ELSE rank_address END as rank_address,
country_code
FROM placex
WHERE place_id = place.place_id
LOOP
--RAISE WARNING '%',location;
-- mix in default names for countries
IF location.rank_address = 4 and place.country_code is not NULL THEN
FOR country IN
SELECT coalesce(name, ''::hstore) as name FROM country_name
WHERE country_code = place.country_code LIMIT 1
LOOP
place.name := country.name || place.name;
END LOOP;
END IF;
IF location.rank_address < 4 THEN
-- no country locations for ranks higher than country
place.country_code := NULL::varchar(2);
ELSEIF place.country_code IS NULL AND location.country_code IS NOT NULL THEN
place.country_code := location.country_code;
END IF;
RETURN NEXT ROW(location.place_id, location.osm_type, location.osm_id,
location.name, location.class, location.type,
location.place_type,
location.admin_level, true,
location.type not in ('postcode', 'postal_code'),
location.rank_address, 0)::addressline;
current_rank_address := location.rank_address;
END LOOP;
-- --- Return records for address parts.
FOR location IN
SELECT placex.place_id, osm_type, osm_id, name, class, type,
coalesce(extratags->'linked_place', extratags->'place') as place_type,
admin_level, fromarea, isaddress,
CASE WHEN rank_address = 11 THEN 5 ELSE rank_address END as rank_address,
distance, country_code, postcode
FROM place_addressline join placex on (address_place_id = placex.place_id)
WHERE place_addressline.place_id IN (place.place_id, in_place_id)
AND linked_place_id is null
AND (placex.country_code IS NULL OR place.country_code IS NULL
OR placex.country_code = place.country_code)
ORDER BY rank_address desc,
(place_addressline.place_id = in_place_id) desc,
(CASE WHEN coalesce((avals(name) && avals(place.address)), False) THEN 2
WHEN isaddress THEN 0
WHEN fromarea
and place.centroid is not null
and ST_Contains(geometry, place.centroid) THEN 1
ELSE -1 END) desc,
fromarea desc, distance asc, rank_search desc
LOOP
-- RAISE WARNING '%',location;
location_isaddress := location.rank_address != current_rank_address;
IF place.country_code IS NULL AND location.country_code IS NOT NULL THEN
place.country_code := location.country_code;
END IF;
IF location.type in ('postcode', 'postal_code')
AND place.postcode is not null
THEN
-- If the place had a postcode assigned, take this one only
-- into consideration when it is an area and the place does not have
-- a postcode itself.
IF location.fromarea AND location_isaddress
AND (place.address is null or not place.address ? 'postcode')
THEN
place.postcode := null; -- remove the less exact postcode
ELSE
location_isaddress := false;
END IF;
END IF;
RETURN NEXT ROW(location.place_id, location.osm_type, location.osm_id,
location.name, location.class, location.type,
location.place_type,
location.admin_level, location.fromarea,
location_isaddress,
location.rank_address,
location.distance)::addressline;
current_rank_address := location.rank_address;
END LOOP;
-- If no country was included yet, add the name information from country_name.
IF current_rank_address > 4 THEN
FOR location IN
SELECT name || coalesce(derived_name, ''::hstore) as name FROM country_name
WHERE country_code = place.country_code LIMIT 1
LOOP
--RAISE WARNING '% % %',current_rank_address,searchcountrycode,countryname;
RETURN NEXT ROW(null, null, null, location.name, 'place', 'country', NULL,
null, true, true, 4, 0)::addressline;
END LOOP;
END IF;
-- Finally add some artificial rows.
IF place.country_code IS NOT NULL THEN
location := ROW(null, null, null, hstore('ref', place.country_code),
'place', 'country_code', null, null, true, false, 4, 0)::addressline;
RETURN NEXT location;
END IF;
IF place.name IS NOT NULL THEN
location := ROW(in_place_id, null, null, place.name, place.class,
place.type, null, null, true, true, 29, 0)::addressline;
RETURN NEXT location;
END IF;
IF place.housenumber IS NOT NULL THEN
location := ROW(null, null, null, hstore('ref', place.housenumber),
'place', 'house_number', null, null, true, true, 28, 0)::addressline;
RETURN NEXT location;
END IF;
IF place.address is not null and place.address ? '_unlisted_place' THEN
RETURN NEXT ROW(null, null, null, hstore('name', place.address->'_unlisted_place'),
'place', 'locality', null, null, true, true, 25, 0)::addressline;
END IF;
IF place.postcode is not null THEN
location := ROW(null, null, null, hstore('ref', place.postcode), 'place',
'postcode', null, null, false, true, 5, 0)::addressline;
RETURN NEXT location;
ELSEIF place.address is not null and place.address ? 'postcode'
and not place.address->'postcode' SIMILAR TO '%(,|;)%' THEN
location := ROW(null, null, null, hstore('ref', place.address->'postcode'), 'place',
'postcode', null, null, false, true, 5, 0)::addressline;
RETURN NEXT location;
END IF;
RETURN;
END;
$$
LANGUAGE plpgsql STABLE;

View File

@@ -65,7 +65,7 @@ BEGIN
RETURN NULL; RETURN NULL;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
{% else %} {% else %}
@@ -78,7 +78,7 @@ SELECT convert_from(CAST(E'\\x' || array_to_string(ARRAY(
FROM regexp_matches($1, '%[0-9a-f][0-9a-f]|.', 'gi') AS r(m) FROM regexp_matches($1, '%[0-9a-f][0-9a-f]|.', 'gi') AS r(m)
), '') AS bytea), 'UTF8'); ), '') AS bytea), 'UTF8');
$$ $$
LANGUAGE SQL IMMUTABLE STRICT; LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION catch_decode_url_part(p varchar) CREATE OR REPLACE FUNCTION catch_decode_url_part(p varchar)
@@ -91,7 +91,7 @@ EXCEPTION
WHEN others THEN return null; WHEN others THEN return null;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE STRICT; LANGUAGE plpgsql IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_wikipedia_match(extratags HSTORE, country_code varchar(2)) CREATE OR REPLACE FUNCTION get_wikipedia_match(extratags HSTORE, country_code varchar(2))
@@ -139,7 +139,7 @@ BEGIN
RETURN NULL; RETURN NULL;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
{% endif %} {% endif %}
@@ -203,5 +203,5 @@ BEGIN
RETURN result; RETURN result;
END; END;
$$ $$
LANGUAGE plpgsql; LANGUAGE plpgsql PARALLEL SAFE;

View File

@@ -34,7 +34,7 @@ BEGIN
RETURN in_address; RETURN in_address;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
@@ -70,7 +70,7 @@ BEGIN
RETURN parent_place_id; RETURN parent_place_id;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION reinsert_interpolation(way_id BIGINT, addr HSTORE, CREATE OR REPLACE FUNCTION reinsert_interpolation(way_id BIGINT, addr HSTORE,

View File

@@ -58,7 +58,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition; RAISE EXCEPTION 'Unknown partition %', in_partition;
END END
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_address_place(in_partition SMALLINT, feature GEOMETRY, CREATE OR REPLACE FUNCTION get_address_place(in_partition SMALLINT, feature GEOMETRY,
@@ -87,7 +87,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition; RAISE EXCEPTION 'Unknown partition %', in_partition;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
create or replace function deleteLocationArea(in_partition INTEGER, in_place_id BIGINT, in_rank_search INTEGER) RETURNS BOOLEAN AS $$ create or replace function deleteLocationArea(in_partition INTEGER, in_place_id BIGINT, in_rank_search INTEGER) RETURNS BOOLEAN AS $$
@@ -172,7 +172,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition; RAISE EXCEPTION 'Unknown partition %', in_partition;
END END
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION getNearestNamedPlacePlaceId(in_partition INTEGER, CREATE OR REPLACE FUNCTION getNearestNamedPlacePlaceId(in_partition INTEGER,
point GEOMETRY, point GEOMETRY,
@@ -202,7 +202,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition; RAISE EXCEPTION 'Unknown partition %', in_partition;
END END
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
create or replace function insertSearchName( create or replace function insertSearchName(
in_partition INTEGER, in_place_id BIGINT, in_name_vector INTEGER[], in_partition INTEGER, in_place_id BIGINT, in_name_vector INTEGER[],
@@ -310,7 +310,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition; RAISE EXCEPTION 'Unknown partition %', in_partition;
END END
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION getNearestParallelRoadFeature(in_partition INTEGER, CREATE OR REPLACE FUNCTION getNearestParallelRoadFeature(in_partition INTEGER,
line GEOMETRY) line GEOMETRY)
@@ -354,4 +354,4 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition; RAISE EXCEPTION 'Unknown partition %', in_partition;
END END
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;

View File

@@ -109,7 +109,7 @@ BEGIN
RETURN result; RETURN result;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION find_associated_street(poi_osm_type CHAR(1), CREATE OR REPLACE FUNCTION find_associated_street(poi_osm_type CHAR(1),
@@ -200,7 +200,7 @@ BEGIN
RETURN result; RETURN result;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
-- Find the parent road of a POI. -- Find the parent road of a POI.
@@ -286,7 +286,7 @@ BEGIN
RETURN parent_place_id; RETURN parent_place_id;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
-- Try to find a linked place for the given object. -- Try to find a linked place for the given object.
CREATE OR REPLACE FUNCTION find_linked_place(bnd placex) CREATE OR REPLACE FUNCTION find_linked_place(bnd placex)
@@ -404,7 +404,7 @@ BEGIN
RETURN NULL; RETURN NULL;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION create_poi_search_terms(obj_place_id BIGINT, CREATE OR REPLACE FUNCTION create_poi_search_terms(obj_place_id BIGINT,

View File

@@ -29,7 +29,7 @@ BEGIN
RETURN 0.02; RETURN 0.02;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Return an approximate update radius according to the search rank. -- Return an approximate update radius according to the search rank.
@@ -60,7 +60,7 @@ BEGIN
RETURN 0; RETURN 0;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Compute a base address rank from the extent of the given geometry. -- Compute a base address rank from the extent of the given geometry.
-- --
@@ -107,7 +107,7 @@ BEGIN
RETURN 23; RETURN 23;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Guess a ranking for postcodes from country and postcode format. -- Guess a ranking for postcodes from country and postcode format.
@@ -167,7 +167,7 @@ BEGIN
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Get standard search and address rank for an object. -- Get standard search and address rank for an object.
@@ -236,7 +236,7 @@ BEGIN
END IF; END IF;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_addr_tag_rank(key TEXT, country TEXT, CREATE OR REPLACE FUNCTION get_addr_tag_rank(key TEXT, country TEXT,
OUT from_rank SMALLINT, OUT from_rank SMALLINT,
@@ -283,7 +283,7 @@ BEGIN
END LOOP; END LOOP;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION weigh_search(search_vector INT[], CREATE OR REPLACE FUNCTION weigh_search(search_vector INT[],
@@ -304,4 +304,4 @@ BEGIN
RETURN def_weight; RETURN def_weight;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;

View File

@@ -24,7 +24,7 @@ BEGIN
RETURN ST_PointOnSurface(place); RETURN ST_PointOnSurface(place);
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION geometry_sector(partition INTEGER, place GEOMETRY) CREATE OR REPLACE FUNCTION geometry_sector(partition INTEGER, place GEOMETRY)
@@ -34,7 +34,7 @@ BEGIN
RETURN (partition*1000000) + (500-ST_X(place)::INTEGER)*1000 + (500-ST_Y(place)::INTEGER); RETURN (partition*1000000) + (500-ST_X(place)::INTEGER)*1000 + (500-ST_Y(place)::INTEGER);
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
@@ -60,7 +60,7 @@ BEGIN
RETURN r; RETURN r;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Return the node members with a given label from a relation member list -- Return the node members with a given label from a relation member list
-- as a set. -- as a set.
@@ -88,7 +88,7 @@ BEGIN
RETURN; RETURN;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_rel_node_members(members JSONB, memberLabels TEXT[]) CREATE OR REPLACE FUNCTION get_rel_node_members(members JSONB, memberLabels TEXT[])
@@ -107,7 +107,7 @@ BEGIN
RETURN; RETURN;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Copy 'name' to or from the default language. -- Copy 'name' to or from the default language.
@@ -136,7 +136,7 @@ BEGIN
END IF; END IF;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Find the nearest artificial postcode for the given geometry. -- Find the nearest artificial postcode for the given geometry.
@@ -172,7 +172,7 @@ BEGIN
RETURN outcode; RETURN outcode;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_country_code(place geometry) CREATE OR REPLACE FUNCTION get_country_code(place geometry)
@@ -233,7 +233,7 @@ BEGIN
RETURN NULL; RETURN NULL;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_country_language_code(search_country_code VARCHAR(2)) CREATE OR REPLACE FUNCTION get_country_language_code(search_country_code VARCHAR(2))
@@ -251,7 +251,7 @@ BEGIN
RETURN NULL; RETURN NULL;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_partition(in_country_code VARCHAR(10)) CREATE OR REPLACE FUNCTION get_partition(in_country_code VARCHAR(10))
@@ -268,7 +268,7 @@ BEGIN
RETURN 0; RETURN 0;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
-- Find the parent of an address with addr:street/addr:place tag. -- Find the parent of an address with addr:street/addr:place tag.
@@ -299,7 +299,7 @@ BEGIN
RETURN parent_place_id; RETURN parent_place_id;
END; END;
$$ $$
LANGUAGE plpgsql STABLE; LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION delete_location(OLD_place_id BIGINT) CREATE OR REPLACE FUNCTION delete_location(OLD_place_id BIGINT)
@@ -337,7 +337,7 @@ BEGIN
ST_Project(geom::geography, radius, 3.9269908)::geometry)); ST_Project(geom::geography, radius, 3.9269908)::geometry));
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION add_location(place_id BIGINT, country_code varchar(2), CREATE OR REPLACE FUNCTION add_location(place_id BIGINT, country_code varchar(2),
@@ -455,7 +455,7 @@ BEGIN
RETURN; RETURN;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION split_geometry(geometry GEOMETRY) CREATE OR REPLACE FUNCTION split_geometry(geometry GEOMETRY)
@@ -483,7 +483,7 @@ BEGIN
RETURN; RETURN;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION simplify_large_polygons(geometry GEOMETRY) CREATE OR REPLACE FUNCTION simplify_large_polygons(geometry GEOMETRY)
RETURNS GEOMETRY RETURNS GEOMETRY
@@ -497,7 +497,7 @@ BEGIN
RETURN geometry; RETURN geometry;
END; END;
$$ $$
LANGUAGE plpgsql IMMUTABLE; LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION place_force_delete(placeid BIGINT) CREATE OR REPLACE FUNCTION place_force_delete(placeid BIGINT)

View File

@@ -12,7 +12,7 @@ CREATE OR REPLACE FUNCTION token_get_name_search_tokens(info JSONB)
RETURNS INTEGER[] RETURNS INTEGER[]
AS $$ AS $$
SELECT (info->>'names')::INTEGER[] SELECT (info->>'names')::INTEGER[]
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
-- Get tokens for matching the place name against others. -- Get tokens for matching the place name against others.
@@ -22,7 +22,7 @@ CREATE OR REPLACE FUNCTION token_get_name_match_tokens(info JSONB)
RETURNS INTEGER[] RETURNS INTEGER[]
AS $$ AS $$
SELECT (info->>'names')::INTEGER[] SELECT (info->>'names')::INTEGER[]
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
-- Return the housenumber tokens applicable for the place. -- Return the housenumber tokens applicable for the place.
@@ -30,7 +30,7 @@ CREATE OR REPLACE FUNCTION token_get_housenumber_search_tokens(info JSONB)
RETURNS INTEGER[] RETURNS INTEGER[]
AS $$ AS $$
SELECT (info->>'hnr_tokens')::INTEGER[] SELECT (info->>'hnr_tokens')::INTEGER[]
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
-- Return the housenumber in the form that it can be matched during search. -- Return the housenumber in the form that it can be matched during search.
@@ -38,77 +38,77 @@ CREATE OR REPLACE FUNCTION token_normalized_housenumber(info JSONB)
RETURNS TEXT RETURNS TEXT
AS $$ AS $$
SELECT info->>'hnr'; SELECT info->>'hnr';
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_is_street_address(info JSONB) CREATE OR REPLACE FUNCTION token_is_street_address(info JSONB)
RETURNS BOOLEAN RETURNS BOOLEAN
AS $$ AS $$
SELECT info->>'street' is not null or info->>'place' is null; SELECT info->>'street' is not null or info->>'place' is null;
$$ LANGUAGE SQL IMMUTABLE; $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_has_addr_street(info JSONB) CREATE OR REPLACE FUNCTION token_has_addr_street(info JSONB)
RETURNS BOOLEAN RETURNS BOOLEAN
AS $$ AS $$
SELECT info->>'street' is not null and info->>'street' != '{}'; SELECT info->>'street' is not null and info->>'street' != '{}';
$$ LANGUAGE SQL IMMUTABLE; $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_has_addr_place(info JSONB) CREATE OR REPLACE FUNCTION token_has_addr_place(info JSONB)
RETURNS BOOLEAN RETURNS BOOLEAN
AS $$ AS $$
SELECT info->>'place' is not null; SELECT info->>'place' is not null;
$$ LANGUAGE SQL IMMUTABLE; $$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_matches_street(info JSONB, street_tokens INTEGER[]) CREATE OR REPLACE FUNCTION token_matches_street(info JSONB, street_tokens INTEGER[])
RETURNS BOOLEAN RETURNS BOOLEAN
AS $$ AS $$
SELECT (info->>'street')::INTEGER[] && street_tokens SELECT (info->>'street')::INTEGER[] && street_tokens
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_matches_place(info JSONB, place_tokens INTEGER[]) CREATE OR REPLACE FUNCTION token_matches_place(info JSONB, place_tokens INTEGER[])
RETURNS BOOLEAN RETURNS BOOLEAN
AS $$ AS $$
SELECT (info->>'place')::INTEGER[] <@ place_tokens SELECT (info->>'place')::INTEGER[] <@ place_tokens
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_addr_place_search_tokens(info JSONB) CREATE OR REPLACE FUNCTION token_addr_place_search_tokens(info JSONB)
RETURNS INTEGER[] RETURNS INTEGER[]
AS $$ AS $$
SELECT (info->>'place')::INTEGER[] SELECT (info->>'place')::INTEGER[]
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_get_address_keys(info JSONB) CREATE OR REPLACE FUNCTION token_get_address_keys(info JSONB)
RETURNS SETOF TEXT RETURNS SETOF TEXT
AS $$ AS $$
SELECT * FROM jsonb_object_keys(info->'addr'); SELECT * FROM jsonb_object_keys(info->'addr');
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_get_address_search_tokens(info JSONB, key TEXT) CREATE OR REPLACE FUNCTION token_get_address_search_tokens(info JSONB, key TEXT)
RETURNS INTEGER[] RETURNS INTEGER[]
AS $$ AS $$
SELECT (info->'addr'->>key)::INTEGER[]; SELECT (info->'addr'->>key)::INTEGER[];
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_matches_address(info JSONB, key TEXT, tokens INTEGER[]) CREATE OR REPLACE FUNCTION token_matches_address(info JSONB, key TEXT, tokens INTEGER[])
RETURNS BOOLEAN RETURNS BOOLEAN
AS $$ AS $$
SELECT (info->'addr'->>key)::INTEGER[] <@ tokens; SELECT (info->'addr'->>key)::INTEGER[] <@ tokens;
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_get_postcode(info JSONB) CREATE OR REPLACE FUNCTION token_get_postcode(info JSONB)
RETURNS TEXT RETURNS TEXT
AS $$ AS $$
SELECT info->>'postcode'; SELECT info->>'postcode';
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
-- Return token info that should be saved permanently in the database. -- Return token info that should be saved permanently in the database.
@@ -116,7 +116,7 @@ CREATE OR REPLACE FUNCTION token_strip_info(info JSONB)
RETURNS JSONB RETURNS JSONB
AS $$ AS $$
SELECT NULL::JSONB; SELECT NULL::JSONB;
$$ LANGUAGE SQL IMMUTABLE STRICT; $$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
--------------- private functions ---------------------------------------------- --------------- private functions ----------------------------------------------
@@ -128,16 +128,14 @@ DECLARE
partial_terms TEXT[] = '{}'::TEXT[]; partial_terms TEXT[] = '{}'::TEXT[];
term TEXT; term TEXT;
term_id INTEGER; term_id INTEGER;
term_count INTEGER;
BEGIN BEGIN
SELECT min(word_id) INTO full_token SELECT min(word_id) INTO full_token
FROM word WHERE word = norm_term and type = 'W'; FROM word WHERE word = norm_term and type = 'W';
IF full_token IS NULL THEN IF full_token IS NULL THEN
full_token := nextval('seq_word'); full_token := nextval('seq_word');
INSERT INTO word (word_id, word_token, type, word, info) INSERT INTO word (word_id, word_token, type, word)
SELECT full_token, lookup_term, 'W', norm_term, SELECT full_token, lookup_term, 'W', norm_term
json_build_object('count', 0)
FROM unnest(lookup_terms) as lookup_term; FROM unnest(lookup_terms) as lookup_term;
END IF; END IF;
@@ -150,14 +148,67 @@ BEGIN
partial_tokens := '{}'::INT[]; partial_tokens := '{}'::INT[];
FOR term IN SELECT unnest(partial_terms) LOOP FOR term IN SELECT unnest(partial_terms) LOOP
SELECT min(word_id), max(info->>'count') INTO term_id, term_count SELECT min(word_id) INTO term_id
FROM word WHERE word_token = term and type = 'w'; FROM word WHERE word_token = term and type = 'w';
IF term_id IS NULL THEN IF term_id IS NULL THEN
term_id := nextval('seq_word'); term_id := nextval('seq_word');
term_count := 0; INSERT INTO word (word_id, word_token, type)
INSERT INTO word (word_id, word_token, type, info) VALUES (term_id, term, 'w');
VALUES (term_id, term, 'w', json_build_object('count', term_count)); END IF;
partial_tokens := array_merge(partial_tokens, ARRAY[term_id]);
END LOOP;
END;
$$
LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION getorcreate_full_word(norm_term TEXT,
lookup_terms TEXT[],
lookup_norm_terms TEXT[],
OUT full_token INT,
OUT partial_tokens INT[])
AS $$
DECLARE
partial_terms TEXT[] = '{}'::TEXT[];
term TEXT;
term_id INTEGER;
BEGIN
SELECT min(word_id) INTO full_token
FROM word WHERE word = norm_term and type = 'W';
IF full_token IS NULL THEN
full_token := nextval('seq_word');
IF lookup_norm_terms IS NULL THEN
INSERT INTO word (word_id, word_token, type, word)
SELECT full_token, lookup_term, 'W', norm_term
FROM unnest(lookup_terms) as lookup_term;
ELSE
INSERT INTO word (word_id, word_token, type, word, info)
SELECT full_token, t.lookup, 'W', norm_term,
CASE WHEN norm_term = t.norm THEN null
ELSE json_build_object('lookup', t.norm) END
FROM unnest(lookup_terms, lookup_norm_terms) as t(lookup, norm);
END IF;
END IF;
FOR term IN SELECT unnest(string_to_array(unnest(lookup_terms), ' ')) LOOP
term := trim(term);
IF NOT (ARRAY[term] <@ partial_terms) THEN
partial_terms := partial_terms || term;
END IF;
END LOOP;
partial_tokens := '{}'::INT[];
FOR term IN SELECT unnest(partial_terms) LOOP
SELECT min(word_id) INTO term_id
FROM word WHERE word_token = term and type = 'w';
IF term_id IS NULL THEN
term_id := nextval('seq_word');
INSERT INTO word (word_id, word_token, type)
VALUES (term_id, term, 'w');
END IF; END IF;
partial_tokens := array_merge(partial_tokens, ARRAY[term_id]); partial_tokens := array_merge(partial_tokens, ARRAY[term_id]);

View File

@@ -1,4 +1,4 @@
site_name: Nominatim Manual site_name: Nominatim 5.1.0 Manual
theme: theme:
font: false font: false
name: material name: material

View File

@@ -3,7 +3,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Helper script for development to run nominatim from the source directory. Helper script for development to run nominatim from the source directory.
@@ -15,4 +15,4 @@ sys.path.insert(1, str((Path(__file__) / '..' / 'src').resolve()))
from nominatim_db import cli from nominatim_db import cli
exit(cli.nominatim(module_dir=None, osm2pgsql_path=None)) exit(cli.nominatim())

View File

@@ -19,7 +19,6 @@ dependencies = [
"python-dotenv", "python-dotenv",
"jinja2", "jinja2",
"pyYAML>=5.1", "pyYAML>=5.1",
"datrie",
"psutil", "psutil",
"PyICU" "PyICU"
] ]

View File

@@ -2,4 +2,4 @@
from nominatim_db import cli from nominatim_db import cli
exit(cli.nominatim(osm2pgsql_path=None)) exit(cli.nominatim())

View File

@@ -216,6 +216,14 @@
} }
} }
}, },
{ "countries" : ["sa"],
"tags" : {
"place" : {
"province" : 12,
"municipality" : 18
}
}
},
{ "countries" : ["sk"], { "countries" : ["sk"],
"tags" : { "tags" : {
"boundary" : { "boundary" : {

View File

@@ -1809,7 +1809,8 @@ us:
languages: en languages: en
names: !include country-names/us.yaml names: !include country-names/us.yaml
postcode: postcode:
pattern: "ddddd" pattern: "(ddddd)(?:-dddd)?"
output: \1
# Uruguay (Uruguay) # Uruguay (Uruguay)

View File

@@ -4,7 +4,7 @@
- aparcament -> aparc - aparcament -> aparc
- apartament -> apmt - apartament -> apmt
- apartat -> apt - apartat -> apt
- àtic -> àt - àtic -> àt
- autopista -> auto - autopista -> auto
- autopista -> autop - autopista -> autop
- autovia -> autov - autovia -> autov
@@ -19,7 +19,6 @@
- biblioteca -> bibl - biblioteca -> bibl
- bloc -> bl - bloc -> bl
- carrer -> c - carrer -> c
- carrer -> c/
- carreró -> cró - carreró -> cró
- carretera -> ctra - carretera -> ctra
- cantonada -> cant - cantonada -> cant
@@ -58,7 +57,6 @@
- número -> n - número -> n
- sense número -> s/n - sense número -> s/n
- parada -> par - parada -> par
- parcel·la -> parc
- passadís -> pdís - passadís -> pdís
- passatge -> ptge - passatge -> ptge
- passeig -> pg - passeig -> pg

View File

@@ -1,438 +1,393 @@
# Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#English # Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#English
# Source: https://pe.usps.com/text/pub28/28apc_002.htm
- lang: en - lang: en
words: words:
- Access -> Accs - Access -> Accs
- Air Force Base -> AFB - Air Force Base -> AFB
- Air National Guard Base -> ANGB - Air National Guard Base -> ANGB
- Airport -> Aprt - Airport -> Aprt
- Alley -> Al - Alley -> Al,All,Ally,Aly
- Alley -> All
- Alley -> Ally
- Alley -> Aly
- Alleyway -> Alwy - Alleyway -> Alwy
- Amble -> Ambl - Amble -> Ambl
- Anex -> Anx
- Apartments -> Apts - Apartments -> Apts
- Approach -> Apch - Approach -> Apch,App
- Approach -> App
- Arcade -> Arc - Arcade -> Arc
- Arterial -> Artl - Arterial -> Artl
- Artery -> Arty - Artery -> Arty
- Avenue -> Av - Avenue -> Av,Ave
- Avenue -> Ave
- Back -> Bk - Back -> Bk
- Banan -> Ba - Banan -> Ba
- Basin -> Basn - Basin -> Basn,Bsn
- Basin -> Bsn - Bayou -> Byu
- Beach -> Bch - Beach -> Bch
- Bend -> Bend
- Bend -> Bnd - Bend -> Bnd
- Block -> Blk - Block -> Blk
- Bluff -> Blf
- Bluffs -> Blfs
- Boardwalk -> Bwlk - Boardwalk -> Bwlk
- Boulevard -> Blvd - Bottom -> Btm
- Boulevard -> Bvd - Boulevard -> Blvd,Bvd
- Boundary -> Bdy - Boundary -> Bdy
- Bowl -> Bl - Bowl -> Bl
- Brace -> Br - Brace -> Br
- Brae -> Br - Brae -> Br
- Brae -> Brae - Branch -> Br
- Break -> Brk - Break -> Brk
- Bridge -> Bdge - Bridge$ -> Bdge,Br,Brdg,Brg,Bri
- Bridge -> Br - Broadway -> Bdwy,Bway,Bwy
- Bridge -> Brdg
- Bridge -> Bri
- Broadway -> Bdwy
- Broadway -> Bway
- Broadway -> Bwy
- Brook -> Brk - Brook -> Brk
- Brooks -> Brks
- Brow -> Brw - Brow -> Brw
- Brow -> Brow - Buildings -> Bldgs,Bldngs
- Buildings -> Bldgs
- Buildings -> Bldngs
- Business -> Bus - Business -> Bus
- Bypass -> Bps - Burg -> Bg
- Bypass -> Byp - Burgs -> Bgs
- Bypass -> Bypa - Bypass -> Bps,Byp,Bypa
- Byway -> Bywy - Byway -> Bywy
- Camp -> Cp
- Canyon -> Cyn
- Cape -> Cpe
- Caravan -> Cvn - Caravan -> Cvn
- Causeway -> Caus - Causeway -> Caus,Cswy,Cway
- Causeway -> Cswy - Center,Centre -> Cen,Ctr
- Causeway -> Cway - Centers -> Ctrs
- Center -> Cen
- Center -> Ctr
- Central -> Ctrl - Central -> Ctrl
- Centre -> Cen
- Centre -> Ctr
- Centreway -> Cnwy - Centreway -> Cnwy
- Chase -> Ch - Chase -> Ch
- Church -> Ch - Church -> Ch
- Circle -> Cir - Circle -> Cir
- Circuit -> Cct - Circles -> Cirs
- Circuit -> Ci - Circuit -> Cct,Ci
- Circus -> Crc - Circus -> Crc,Crcs
- Circus -> Crcs
- City -> Cty - City -> Cty
- Cliff -> Clf
- Cliffs -> Clfs
- Close -> Cl - Close -> Cl
- Common -> Cmn - Club -> Clb
- Common -> Comm - Common -> Cmn,Comm
- Commons -> Cmns
- Community -> Comm - Community -> Comm
- Concourse -> Cnc - Concourse -> Cnc
- Concourse -> Con - Concourse -> Con
- Copse -> Cps - Copse -> Cps
- Corner -> Cnr - Corner -> Cor,Cnr,Crn
- Corner -> Crn - Corners -> Cors
- Corso -> Cso - Corso -> Cso
- Cottages -> Cotts - Cottages -> Cotts
- County -> Co - County -> Co
- County Road -> CR - County Road -> CR
- County Route -> CR - County Route -> CR
- Court -> Crt - Course -> Crse
- Court -> Ct - Court -> Crt,Ct
- Courts -> Cts
- Courtyard -> Cyd - Courtyard -> Cyd
- Courtyard -> Ctyd - Courtyard -> Ctyd
- Cove -> Ce - Cove$ -> Ce,Cov,Cv
- Cove -> Cov - Coves -> Cvs
- Cove -> Cove - Creek$ -> Ck,Cr,Crk
- Cove -> Cv
- Creek -> Ck
- Creek -> Cr
- Creek -> Crk
- Crescent -> Cr - Crescent -> Cr
- Crescent -> Cres - Crescent -> Cres
- Crest -> Crst - Crest -> Crst,Cst
- Crest -> Cst
- Croft -> Cft - Croft -> Cft
- Cross -> Cs - Cross -> Cs,Crss
- Cross -> Crss - Crossing -> Crsg,Csg,Xing
- Crossing -> Crsg - Crossroad -> Crd,Xrd
- Crossing -> Csg - Crossroads -> Xrds
- Crossing -> Xing
- Crossroad -> Crd
- Crossway -> Cowy - Crossway -> Cowy
- Cul-de-sac -> Cds - Cul-de-sac -> Cds,Csac
- Cul-de-sac -> Csac - Curve -> Cve,Curv
- Curve -> Cve
- Cutting -> Cutt - Cutting -> Cutt
- Dale -> Dle - Dale -> Dle
- Dale -> Dale - Dam -> Dm
- Deviation -> Devn - Deviation -> Devn
- Dip -> Dip
- Distributor -> Dstr - Distributor -> Dstr
- Divide -> Dv
- Down -> Dn - Down -> Dn
- Downs -> Dn - Downs -> Dn
- Drive -> Dr - Drive -> Dr,Drv,Dv
- Drive -> Drv - Drives -> Drs
- Drive -> Dv
- Drive-In => Drive-In # prevent abbreviation here - Drive-In => Drive-In # prevent abbreviation here
- Driveway -> Drwy - Driveway -> Drwy,Dvwy,Dwy
- Driveway -> Dvwy
- Driveway -> Dwy
- East -> E - East -> E
- Edge -> Edg - Edge -> Edg
- Edge -> Edge
- Elbow -> Elb - Elbow -> Elb
- End -> End
- Entrance -> Ent - Entrance -> Ent
- Esplanade -> Esp - Esplanade -> Esp
- Estate -> Est - Estate -> Est
- Expressway -> Exp - Estates -> Ests
- Expressway -> Expy - Expressway -> Exp,Expy,Expwy,Xway
- Expressway -> Expwy
- Expressway -> Xway
- Extension -> Ex - Extension -> Ex
- Fairway -> Fawy - Extensions -> Exts
- Fairway -> Fy - Fairway -> Fawy,Fy
- Falls -> Fls
- Father -> Fr - Father -> Fr
- Ferry -> Fy - Ferry -> Fy,Fry
- Field -> Fd - Field -> Fd,Fld
- Fields -> Flds
- Fire Track -> Ftrk - Fire Track -> Ftrk
- Firetrail -> Fit - Firetrail -> Fit
- Flat -> Fl - Flat -> Fl,Flt
- Flat -> Flat - Flats -> Flts
- Follow -> Folw - Follow -> Folw
- Footway -> Ftwy - Footway -> Ftwy
- Ford -> Frd
- Fords -> Frds
- Foreshore -> Fshr - Foreshore -> Fshr
- Forest -> Frst
- Forest Service Road -> FSR - Forest Service Road -> FSR
- Forge -> Frg
- Forges -> Frgs
- Formation -> Form - Formation -> Form
- Fork -> Frk
- Forks -> Frks
- Fort -> Ft - Fort -> Ft
- Freeway -> Frwy - Freeway -> Frwy,Fwy
- Freeway -> Fwy
- Front -> Frnt - Front -> Frnt
- Frontage -> Fr - Frontage -> Fr,Frtg
- Frontage -> Frtg
- Gap -> Gap
- Garden -> Gdn - Garden -> Gdn
- Gardens -> Gdn - Gardens -> Gdn,Gdns
- Gardens -> Gdns - Gate,Gates -> Ga,Gte
- Gate -> Ga - Gateway -> Gwy,Gtwy
- Gate -> Gte
- Gates -> Ga
- Gates -> Gte
- Gateway -> Gwy
- George -> Geo - George -> Geo
- Glade -> Gl - Glade$ -> Gl,Gld,Glde
- Glade -> Gld
- Glade -> Glde
- Glen -> Gln - Glen -> Gln
- Glen -> Glen - Glens -> Glns
- Grange -> Gra - Grange -> Gra
- Green -> Gn - Green -> Gn,Grn
- Green -> Grn - Greens -> Grns
- Ground -> Grnd - Ground -> Grnd
- Grove -> Gr - Grove$ -> Gr,Gro,Grv
- Grove -> Gro - Groves -> Grvs
- Grovet -> Gr - Grovet -> Gr
- Gully -> Gly - Gully -> Gly
- Harbor -> Hbr - Harbor -> Hbr,Harbour
- Harbour -> Hbr - Harbors -> Hbrs
- Harbour -> Hbr,Harbor
- Haven -> Hvn - Haven -> Hvn
- Head -> Hd - Head -> Hd
- Heads -> Hd - Heads -> Hd
- Heights -> Hgts - Heights -> Hgts,Ht,Hts
- Heights -> Ht
- Heights -> Hts
- High School -> HS - High School -> HS
- Highroad -> Hird - Highroad -> Hird,Hrd
- Highroad -> Hrd
- Highway -> Hwy - Highway -> Hwy
- Hill -> Hill
- Hill -> Hl - Hill -> Hl
- Hills -> Hl - Hills -> Hl,Hls
- Hills -> Hls - Hollow -> Holw
- Hospital -> Hosp - Hospital -> Hosp
- House -> Ho - House -> Ho,Hse
- House -> Hse
- Industrial -> Ind - Industrial -> Ind
- Inlet -> Inlt
- Interchange -> Intg - Interchange -> Intg
- International -> Intl - International -> Intl
- Island -> I - Island -> I,Is
- Island -> Is - Islands -> Iss
- Junction -> Jctn - Junction -> Jct,Jctn,Jnc
- Junction -> Jnc - Junctions -> Jcts
- Junior -> Jr - Junior -> Jr
- Key -> Key - Key -> Ky
- Keys -> Kys
- Knoll -> Knl
- Knolls -> Knls
- Lagoon -> Lgn - Lagoon -> Lgn
- Lakes -> L - Lake -> Lk
- Landing -> Ldg - Lakes -> L,Lks
- Lane -> La - Landing -> Ldg,Lndg
- Lane -> Lane - Lane -> La,Ln
- Lane -> Ln
- Laneway -> Lnwy - Laneway -> Lnwy
- Line -> Line - Light -> Lgt
- Lights -> Lgts
- Line -> Ln - Line -> Ln
- Link -> Link
- Link -> Lk - Link -> Lk
- Little -> Lit - Little -> Lit,Lt
- Little -> Lt - Loaf -> Lf
- Lock -> Lck
- Locks -> Lcks
- Lodge -> Ldg - Lodge -> Ldg
- Lookout -> Lkt - Lookout -> Lkt
- Loop -> Loop
- Loop -> Lp - Loop -> Lp
- Lower -> Low - Lower -> Low,Lr,Lwr
- Lower -> Lr
- Lower -> Lwr
- Mall -> Mall
- Mall -> Ml - Mall -> Ml
- Manor -> Mnr - Manor -> Mnr
- Manors -> Mnrs
- Mansions -> Mans - Mansions -> Mans
- Market -> Mkt - Market -> Mkt
- Meadow -> Mdw - Meadow -> Mdw
- Meadows -> Mdw - Meadows -> Mdw,Mdws
- Meadows -> Mdws
- Mead -> Md - Mead -> Md
- Meander -> Mdr - Meander -> Mdr,Mndr,Mr
- Meander -> Mndr
- Meander -> Mr
- Medical -> Med - Medical -> Med
- Memorial -> Mem - Memorial -> Mem
- Mews -> Mews
- Mews -> Mw - Mews -> Mw
- Middle -> Mid - Middle -> Mid
- Middle School -> MS - Middle School -> MS
- Mile -> Mi - Mile -> Mi
- Military -> Mil - Military -> Mil
- Motorway -> Mtwy - Mill -> Ml
- Motorway -> Mwy - Mills -> Mls
- Mission -> Msn
- Motorway -> Mtwy,Mwy
- Mount -> Mt - Mount -> Mt
- Mountain -> Mtn - Mountain -> Mtn
- Mountains -> Mtn - Mountains$ -> Mtn,Mtns
- Municipal -> Mun - Municipal -> Mun
- Museum -> Mus - Museum -> Mus
- National Park -> NP - National Park -> NP
- National Recreation Area -> NRA - National Recreation Area -> NRA
- National Wildlife Refuge Area -> NWRA - National Wildlife Refuge Area -> NWRA
- Neck -> Nck
- Nook -> Nk - Nook -> Nk
- Nook -> Nook
- North -> N - North -> N
- Northeast -> NE - Northeast -> NE
- Northwest -> NW - Northwest -> NW
- Outlook -> Out - Orchard -> Orch
- Outlook -> Otlk - Outlook -> Out,Otlk
- Overpass -> Opas
- Parade -> Pde - Parade -> Pde
- Paradise -> Pdse - Paradise -> Pdse
- Park -> Park
- Park -> Pk - Park -> Pk
- Parklands -> Pkld - Parklands -> Pkld
- Parkway -> Pkwy - Parkway -> Pkwy,Pky,Pwy
- Parkway -> Pky - Parkways -> Pkwy
- Parkway -> Pwy
- Pass -> Pass
- Pass -> Ps - Pass -> Ps
- Passage -> Psge - Passage -> Psge
- Path -> Path - Pathway -> Phwy,Pway,Pwy
- Pathway -> Phwy
- Pathway -> Pway
- Pathway -> Pwy
- Piazza -> Piaz - Piazza -> Piaz
- Pike -> Pk - Pike -> Pk
- Pine -> Pne
- Pines -> Pnes
- Place -> Pl - Place -> Pl
- Plain -> Pl - Plain -> Pl,Pln
- Plains -> Pl - Plains -> Pl,Plns
- Plateau -> Plat - Plateau -> Plat
- Plaza -> Pl - Plaza -> Pl,Plz,Plza
- Plaza -> Plz
- Plaza -> Plza
- Pocket -> Pkt - Pocket -> Pkt
- Point -> Pnt - Point -> Pnt,Pt
- Point -> Pt - Points -> Pts
- Port -> Port - Port -> Prt,Pt
- Port -> Pt - Ports -> Prts
- Post Office -> PO - Post Office -> PO
- Prairie -> Pr
- Precinct -> Pct - Precinct -> Pct
- Promenade -> Prm - Promenade -> Prm,Prom
- Promenade -> Prom
- Quad -> Quad
- Quadrangle -> Qdgl - Quadrangle -> Qdgl
- Quadrant -> Qdrt - Quadrant -> Qdrt,Qd
- Quadrant -> Qd
- Quay -> Qy - Quay -> Qy
- Quays -> Qy - Quays -> Qy
- Quays -> Qys - Quays -> Qys
- Radial -> Radl
- Ramble -> Ra - Ramble -> Ra
- Ramble -> Rmbl - Ramble -> Rmbl
- Range -> Rge - Ranch -> Rnch
- Range -> Rnge - Range -> Rge,Rnge
- Rapid -> Rpd
- Rapids -> Rpds
- Reach -> Rch - Reach -> Rch
- Reservation -> Res - Reservation -> Res
- Reserve -> Res - Reserve -> Res
- Reservoir -> Res - Reservoir -> Res
- Rest -> Rest
- Rest -> Rst - Rest -> Rst
- Retreat -> Rt - Retreat -> Rt,Rtt
- Retreat -> Rtt
- Return -> Rtn - Return -> Rtn
- Ridge -> Rdg - Ridge -> Rdg,Rdge
- Ridge -> Rdge - Ridges -> Rdgs
- Ridgeway -> Rgwy - Ridgeway -> Rgwy
- Right of Way -> Rowy - Right of Way -> Rowy
- Rise -> Ri - Rise -> Ri
- Rise -> Rise - ^River -> R,Riv,Rvr
- River -> R - River$ -> R,Riv,Rvr
- River -> Riv
- River -> Rvr
- Riverway -> Rvwy - Riverway -> Rvwy
- Riviera -> Rvra - Riviera -> Rvra
- Road -> Rd - Road -> Rd
- Roads -> Rds - Roads -> Rds
- Roadside -> Rdsd - Roadside -> Rdsd
- Roadway -> Rdwy - Roadway -> Rdwy,Rdy
- Roadway -> Rdy
- Robert -> Robt
- Rocks -> Rks - Rocks -> Rks
- Ronde -> Rnde - Ronde -> Rnde
- Rosebowl -> Rsbl - Rosebowl -> Rsbl
- Rotary -> Rty - Rotary -> Rty
- Round -> Rnd - Round -> Rnd
- Route -> Rt - Route -> Rt,Rte
- Route -> Rte
- Row -> Row
- Rue -> Rue
- Run -> Run
- Saint -> St - Saint -> St
- Saints -> SS - Saints -> SS
- Senior -> Sr - Senior -> Sr
- Serviceway -> Swy - Serviceway -> Swy,Svwy
- Serviceway -> Svwy - Shoal -> Shl
- Shore -> Shr
- Shores -> Shrs
- Shunt -> Shun - Shunt -> Shun
- Siding -> Sdng - Siding -> Sdng
- Sister -> Sr - Sister -> Sr
- Skyway -> Skwy
- Slope -> Slpe - Slope -> Slpe
- Sound -> Snd - Sound -> Snd
- South -> S - South -> S,Sth
- South -> Sth
- Southeast -> SE - Southeast -> SE
- Southwest -> SW - Southwest -> SW
- Spur -> Spur - Spring -> Spg
- Springs -> Spgs
- Spurs -> Spur
- Square -> Sq - Square -> Sq
- Squares -> Sqs
- Stairway -> Strwy - Stairway -> Strwy
- State Highway -> SH - State Highway -> SH,SHwy
- State Highway -> SHwy
- State Route -> SR - State Route -> SR
- Station -> Sta - Station -> Sta,Stn
- Station -> Stn - Strand -> Sd,Stra
- Strand -> Sd - Stravenue -> Stra
- Strand -> Stra - Stream -> Strm
- Street -> St - Street -> St
- Streets -> Sts
- Strip -> Strp - Strip -> Strp
- Subway -> Sbwy - Subway -> Sbwy
- Summit -> Smt
- Tarn -> Tn - Tarn -> Tn
- Tarn -> Tarn
- Terminal -> Term - Terminal -> Term
- Terrace -> Tce - Terrace -> Tce,Ter,Terr
- Terrace -> Ter - Thoroughfare -> Thfr,Thor
- Terrace -> Terr - Throughway -> Trwy
- Thoroughfare -> Thfr - Tollway -> Tlwy,Twy
- Thoroughfare -> Thor
- Tollway -> Tlwy
- Tollway -> Twy
- Top -> Top
- Tor -> Tor
- Towers -> Twrs - Towers -> Twrs
- Township -> Twp - Township -> Twp
- Trace -> Trce - Trace -> Trce
- Track -> Tr - Track -> Tr,Trak,Trk
- Track -> Trk - Trafficway -> Trfy
- Trail -> Trl - Trail -> Trl
- Trailer -> Trlr - Trailer -> Trlr
- Triangle -> Tri - Triangle -> Tri
- Trunkway -> Tkwy - Trunkway -> Tkwy
- Tunnel -> Tun - Tunnel -> Tun,Tunl
- Turn -> Tn - Turn -> Tn,Trn
- Turn -> Trn - Turnpike -> Tpk,Tpke
- Turn -> Turn - Underpass -> Upas,Ups
- Turnpike -> Tpk - Union -> Un
- Turnpike -> Tpke - Unions -> Uns
- Underpass -> Upas - University -> Uni,Univ
- Underpass -> Ups
- University -> Uni
- University -> Univ
- Upper -> Up - Upper -> Up
- Upper -> Upr - Upper -> Upr
- Vale -> Va - Vale -> Va
- Vale -> Vale - Valley -> Vly
- Valley -> Vy - Valley -> Vy
- Viaduct -> Vdct - Valleys -> Vlys
- Viaduct -> Via - Viaduct$ -> Vdct,Via,Viad
- Viaduct -> Viad
- View -> Vw - View -> Vw
- View -> View - Views -> Vws
- Village -> Vill - Village -> Vill,Vlg
- Villages -> Vlgs
- Villas -> Vlls - Villas -> Vlls
- Vista -> Vst - Ville -> Vl
- Vista -> Vsta - Vista -> Vis,Vst,Vsta
- Walk -> Walk - Walk -> Wk,Wlk
- Walk -> Wk - Walks -> Walk
- Walk -> Wlk - Walkway -> Wkwy,Wky
- Walkway -> Wkwy
- Walkway -> Wky
- Waters -> Wtr - Waters -> Wtr
- Way -> Way
- Way -> Wy - Way -> Wy
- Well -> Wl
- Wells -> Wls
- West -> W - West -> W
- Wharf -> Whrf - Wharf -> Whrf
- William -> Wm - William -> Wm
- Wynd -> Wyn - Wynd -> Wyn
- Wynd -> Wynd
- Yard -> Yard
- Yard -> Yd - Yard -> Yd
- lang: en - lang: en
country: ca country: ca

View File

@@ -30,7 +30,6 @@
- Bloque -> Blq - Bloque -> Blq
- Bulevar -> Blvr - Bulevar -> Blvr
- Boulevard -> Blvd - Boulevard -> Blvd
- Calle -> C/
- Calle -> C - Calle -> C
- Calle -> Cl - Calle -> Cl
- Calleja -> Cllja - Calleja -> Cllja

View File

@@ -3,20 +3,16 @@
words: words:
- Abbaye -> ABE - Abbaye -> ABE
- Agglomération -> AGL - Agglomération -> AGL
- Aire -> AIRE
- Aires -> AIRE - Aires -> AIRE
- Allée -> ALL - Allée -> ALL
- Allée -> All
- Allées -> ALL - Allées -> ALL
- Ancien chemin -> ACH - Ancien chemin -> ACH
- Ancienne route -> ART - Ancienne route -> ART
- Anciennes routes -> ART - Anciennes routes -> ART
- Anse -> ANSE
- Arcade -> ARC - Arcade -> ARC
- Arcades -> ARC - Arcades -> ARC
- Autoroute -> AUT - Autoroute -> AUT
- Avenue -> AV - Avenue -> AV
- Avenue -> Av
- Barrière -> BRE - Barrière -> BRE
- Barrières -> BRE - Barrières -> BRE
- Bas chemin -> BCH - Bas chemin -> BCH
@@ -28,16 +24,11 @@
- Berges -> BER - Berges -> BER
- Bois -> BOIS - Bois -> BOIS
- Boucle -> BCLE - Boucle -> BCLE
- Boulevard -> Bd
- Boulevard -> BD - Boulevard -> BD
- Bourg -> BRG - Bourg -> BRG
- Butte -> BUT - Butte -> BUT
- Cité -> CITE
- Cités -> CITE - Cités -> CITE
- Côte -> COTE
- Côteau -> COTE - Côteau -> COTE
- Cale -> CALE
- Camp -> CAMP
- Campagne -> CGNE - Campagne -> CGNE
- Camping -> CPG - Camping -> CPG
- Carreau -> CAU - Carreau -> CAU
@@ -56,17 +47,13 @@
- Chaussées -> CHS - Chaussées -> CHS
- Chemin -> Ch - Chemin -> Ch
- Chemin -> CHE - Chemin -> CHE
- Chemin -> Che
- Chemin vicinal -> CHV - Chemin vicinal -> CHV
- Cheminement -> CHEM - Cheminement -> CHEM
- Cheminements -> CHEM - Cheminements -> CHEM
- Chemins -> CHE - Chemins -> CHE
- Chemins vicinaux -> CHV - Chemins vicinaux -> CHV
- Chez -> CHEZ
- Château -> CHT - Château -> CHT
- Cloître -> CLOI - Cloître -> CLOI
- Clos -> CLOS
- Col -> COL
- Colline -> COLI - Colline -> COLI
- Collines -> COLI - Collines -> COLI
- Contour -> CTR - Contour -> CTR
@@ -74,9 +61,7 @@
- Corniches -> COR - Corniches -> COR
- Cottage -> COTT - Cottage -> COTT
- Cottages -> COTT - Cottages -> COTT
- Cour -> COUR
- Cours -> CRS - Cours -> CRS
- Cours -> Crs
- Darse -> DARS - Darse -> DARS
- Degré -> DEG - Degré -> DEG
- Degrés -> DEG - Degrés -> DEG
@@ -87,11 +72,8 @@
- Domaine -> DOM - Domaine -> DOM
- Domaines -> DOM - Domaines -> DOM
- Écluse -> ECL - Écluse -> ECL
- Écluse -> ÉCL
- Écluses -> ECL - Écluses -> ECL
- Écluses -> ÉCL
- Église -> EGL - Église -> EGL
- Église -> ÉGL
- Enceinte -> EN - Enceinte -> EN
- Enclave -> ENV - Enclave -> ENV
- Enclos -> ENC - Enclos -> ENC
@@ -100,21 +82,16 @@
- Espace -> ESPA - Espace -> ESPA
- Esplanade -> ESP - Esplanade -> ESP
- Esplanades -> ESP - Esplanades -> ESP
- Étang -> ETANG
- Étang -> ÉTANG
- Faubourg -> FG - Faubourg -> FG
- Faubourg -> Fg
- Ferme -> FRM - Ferme -> FRM
- Fermes -> FRM - Fermes -> FRM
- Fontaine -> FON - Fontaine -> FON
- Fort -> FORT
- Forum -> FORM - Forum -> FORM
- Fosse -> FOS - Fosse -> FOS
- Fosses -> FOS - Fosses -> FOS
- Foyer -> FOYR - Foyer -> FOYR
- Galerie -> GAL - Galerie -> GAL
- Galeries -> GAL - Galeries -> GAL
- Gare -> GARE
- Garenne -> GARN - Garenne -> GARN
- Grand boulevard -> GBD - Grand boulevard -> GBD
- Grand ensemble -> GDEN - Grand ensemble -> GDEN
@@ -134,13 +111,9 @@
- Haut chemin -> HCH - Haut chemin -> HCH
- Hauts chemins -> HCH - Hauts chemins -> HCH
- Hippodrome -> HIP - Hippodrome -> HIP
- HLM -> HLM
- Île -> ILE
- Île -> ÎLE
- Immeuble -> IMM - Immeuble -> IMM
- Immeubles -> IMM - Immeubles -> IMM
- Impasse -> IMP - Impasse -> IMP
- Impasse -> Imp
- Impasses -> IMP - Impasses -> IMP
- Jardin -> JARD - Jardin -> JARD
- Jardins -> JARD - Jardins -> JARD
@@ -150,13 +123,11 @@
- Lieu-dit -> LD - Lieu-dit -> LD
- Lotissement -> LOT - Lotissement -> LOT
- Lotissements -> LOT - Lotissements -> LOT
- Mail -> MAIL
- Maison forestière -> MF - Maison forestière -> MF
- Manoir -> MAN - Manoir -> MAN
- Marche -> MAR - Marche -> MAR
- Marches -> MAR - Marches -> MAR
- Maréchal -> MAL - Maréchal -> MAL
- Mas -> MAS
- Monseigneur -> Mgr - Monseigneur -> Mgr
- Mont -> Mt - Mont -> Mt
- Montée -> MTE - Montée -> MTE
@@ -168,13 +139,9 @@
- Métro -> MÉT - Métro -> MÉT
- Nouvelle route -> NTE - Nouvelle route -> NTE
- Palais -> PAL - Palais -> PAL
- Parc -> PARC
- Parcs -> PARC
- Parking -> PKG - Parking -> PKG
- Parvis -> PRV - Parvis -> PRV
- Passage -> PAS - Passage -> PAS
- Passage -> Pas
- Passage -> Pass
- Passage à niveau -> PN - Passage à niveau -> PN
- Passe -> PASS - Passe -> PASS
- Passerelle -> PLE - Passerelle -> PLE
@@ -191,19 +158,14 @@
- Petite rue -> PTR - Petite rue -> PTR
- Petites allées -> PTA - Petites allées -> PTA
- Place -> PL - Place -> PL
- Place -> Pl
- Placis -> PLCI - Placis -> PLCI
- Plage -> PLAG - Plage -> PLAG
- Plages -> PLAG - Plages -> PLAG
- Plaine -> PLN - Plaine -> PLN
- Plan -> PLAN
- Plateau -> PLT - Plateau -> PLT
- Plateaux -> PLT - Plateaux -> PLT
- Pointe -> PNT - Pointe -> PNT
- Pont -> PONT
- Ponts -> PONT
- Porche -> PCH - Porche -> PCH
- Port -> PORT
- Porte -> PTE - Porte -> PTE
- Portique -> PORQ - Portique -> PORQ
- Portiques -> PORQ - Portiques -> PORQ
@@ -211,25 +173,19 @@
- Pourtour -> POUR - Pourtour -> POUR
- Presquîle -> PRQ - Presquîle -> PRQ
- Promenade -> PROM - Promenade -> PROM
- Promenade -> Prom
- Pré -> PRE
- Pré -> PRÉ
- Périphérique -> PERI - Périphérique -> PERI
- Péristyle -> PSTY - Péristyle -> PSTY
- Quai -> QU - Quai -> QU
- Quai -> Qu
- Quartier -> QUA - Quartier -> QUA
- Raccourci -> RAC - Raccourci -> RAC
- Raidillon -> RAID - Raidillon -> RAID
- Rampe -> RPE - Rampe -> RPE
- Rempart -> REM - Rempart -> REM
- Roc -> ROC
- Rocade -> ROC - Rocade -> ROC
- Rond point -> RPT - Rond point -> RPT
- Roquet -> ROQT - Roquet -> ROQT
- Rotonde -> RTD - Rotonde -> RTD
- Route -> RTE - Route -> RTE
- Route -> Rte
- Routes -> RTE - Routes -> RTE
- Rue -> R - Rue -> R
- Rue -> R - Rue -> R
@@ -245,7 +201,6 @@
- Sentier -> SEN - Sentier -> SEN
- Sentiers -> SEN - Sentiers -> SEN
- Square -> SQ - Square -> SQ
- Square -> Sq
- Stade -> STDE - Stade -> STDE
- Station -> STA - Station -> STA
- Terrain -> TRN - Terrain -> TRN
@@ -254,13 +209,11 @@
- Terre plein -> TPL - Terre plein -> TPL
- Tertre -> TRT - Tertre -> TRT
- Tertres -> TRT - Tertres -> TRT
- Tour -> TOUR
- Traverse -> TRA - Traverse -> TRA
- Vallon -> VAL - Vallon -> VAL
- Vallée -> VAL - Vallée -> VAL
- Venelle -> VEN - Venelle -> VEN
- Venelles -> VEN - Venelles -> VEN
- Via -> VIA
- Vieille route -> VTE - Vieille route -> VTE
- Vieux chemin -> VCHE - Vieux chemin -> VCHE
- Villa -> VLA - Villa -> VLA
@@ -269,7 +222,6 @@
- Villas -> VLA - Villas -> VLA
- Voie -> VOI - Voie -> VOI
- Voies -> VOI - Voies -> VOI
- Zone -> ZONE
- Zone artisanale -> ZA - Zone artisanale -> ZA
- Zone d'aménagement concerté -> ZAC - Zone d'aménagement concerté -> ZAC
- Zone d'aménagement différé -> ZAD - Zone d'aménagement différé -> ZAD
@@ -289,7 +241,6 @@
- Esplanade -> ESPL - Esplanade -> ESPL
- Passage -> PASS - Passage -> PASS
- Plateau -> PLAT - Plateau -> PLAT
- Rang -> RANG
- Rond-point -> RDPT - Rond-point -> RDPT
- Sentier -> SENT - Sentier -> SENT
- Subdivision -> SUBDIV - Subdivision -> SUBDIV

View File

@@ -29,7 +29,6 @@
- Prima -> I - Prima -> I
- Primo -> I - Primo -> I
- Primo -> 1 - Primo -> 1
- Primo -> 1°
- Quarta -> IV - Quarta -> IV
- Quarto -> IV - Quarto -> IV
- Quattro -> IV - Quattro -> IV

View File

@@ -1,11 +1,10 @@
# Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#Norsk_-_Norwegian # Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#Norsk_-_Norwegian
- lang: no - lang: "no"
words: words:
# convert between Nynorsk and Bookmal here # convert between Nynorsk and Bookmal here
- vei, veg => v,vn,vei,veg - ~vei, ~veg -> v,vei,veg
- veien, vegen -> v,vn,veien,vegen - ~veien, ~vegen -> vn,veien,vegen
- gate -> g,gt
# convert between the two female forms # convert between the two female forms
- gaten, gata => g,gt,gaten,gata - gate, gaten, gata -> g,gt
- plass, plassen -> pl - plass, plassen -> pl
- sving, svingen -> sv - sving, svingen -> sv

View File

@@ -1,14 +1,128 @@
# Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#.D0.A0.D1.83.D1.81.D1.81.D0.BA.D0.B8.D0.B9_-_Russian # Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#.D0.A0.D1.83.D1.81.D1.81.D0.BA.D0.B8.D0.B9_-_Russian
# Source: https://www.plantarium.ru/page/help/topic/abbreviations.html
# Source: https://dic.academic.ru/dic.nsf/ruwiki/1871310
- lang: ru - lang: ru
words: words:
- Академик, Академика -> Ак
- акционерное общество -> АО
- аллея -> ал - аллея -> ал
- архипелаг -> арх
- атомная электростанция -> АЭС
- аэродром -> аэрд
- аэропорт -> аэрп
- Башкирский, Башкирская, Башкирское, Башкирские -> Баш, Башк, Башкир
- Белый, Белая, Белое. Белые -> Бел
- болото -> бол
- больница -> больн
- Большой, Большая, Большое, Большие -> Б, Бол
- брод -> бр
- бульвар -> бул - бульвар -> бул
- бухта -> бух
- бывший, бывшая, бывшее, бывшие -> бывш
- Великий, Великая, Великое, Великие -> Вел
- Верхний, Верхняя, Верхнее, Верхние -> В, Верх
- водокачка -> вдкч
- водопад -> вдп
- водохранилище -> вдхр
- вокзал -> вкз, вокз
- Восточный, Восточная, Восточное, Восточные -> В, Вост
- вулкан -> влк
- гидроэлектростанция -> ГЭС
- гора -> г
- город -> г
- дворец культуры, дом культуры -> ДК
- дворец спорта -> ДС
- деревня -> д, дер
- детский оздоровительный лагерь -> ДОЛ
- дом -> д
- дом отдыха -> Д О
- железная дорога -> ж д
- железнодорожный, железнодорожная, железнодорожное -> ж-д
- железобетонных изделий -> ЖБИ
- жилой комплекс -> ЖК
- завод -> з-д
- закрытое административно-территориальное образование -> ЗАТО
- залив -> зал
- Западный, Западная, Западное, Западные -> З, Зап, Запад
- заповедник -> запов
- имени -> им
- институт -> инст
- исправительная колония -> ИК
- километр -> км
- Красный, Красная, Красное, Красные -> Кр, Крас
- лагерь -> лаг
- Левый, Левая,Левое, Левые -> Л, Лев
- ледник -> ледн
- лесничество -> леснич
- лесной, лесная, лесное -> лес
- линия электропередачи -> ЛЭП
- Малый, Малая, Малое, Малые -> М, Мал
- Мордовский, Мордовская, Мордовское, Мордовские -> Мордов
- морской, морская, морское -> мор
- Московский, Московская, Московское, Московские -> Мос, Моск
- мыс -> м
- набережная -> наб - набережная -> наб
- Нижний, Нижняя, Нижнее, Нижние -> Ниж, Н
- Новый, Новая, Новое, Новые -> Нов, Н
- обгонный пункт -> обг п
- область -> обл
- озеро -> оз
- особо охраняемая природная территория -> ООПТ
- остановочный пункт -> о п
- остров -> о
- острова -> о-ва
- парк культуры и отдыха -> ПКиО
- перевал -> пер
- переулок -> пер - переулок -> пер
- пещера -> пещ
- пионерский лагерь -> пионерлаг
- платформа -> пл, платф
- площадь -> пл - площадь -> пл
- подсобное хозяйство -> подсоб хоз
- полуостров -> п-ов
- посёлок -> пос, п
- посёлок городского типа -> п г т, пгт
- Правый, Правая, Правое, Правые -> П, Пр, Прав
- проезд -> пр - проезд -> пр
- проспект -> просп - проспект -> просп
- шоссе -> ш - пруд -> пр
- пустыня -> пуст
- разъезд -> рзд
- район -> р
- резинотехнических изделий -> РТИ
- река -> р
- речной, речная, речное -> реч, речн
- Российский, Российская, Российское, Российские -> Рос
- Русский, Русская, Русское, Русские -> Рус, Русск
- ручей -> руч
- садовое некоммерческое товарищество -> СНТ
- садовые участки -> сад уч
- санаторий -> сан
- сарай -> сар
- Северный, Северная, Северное, Северные -> С, Сев
- село -> с
- Сибирский, Сибирская, Сибирское, Сибирские -> Сиб
- Советский, Советская, Советское, Советские -> Сов
- совхоз -> свх
- Сортировочный, Сортировочная, Сортировочное, Сортировочные -> Сорт
- станция -> ст
- Старый, Старая, Среднее, Средние -> Ср
- Татарский, Татарская, Татарское, Татарские -> Тат, Татар
- теплоэлекстростанция -> ТЭС
- теплоэлектроцентраль -> ТЭЦ
- техникум -> техн
- тоннель, туннель -> тун
- тупик -> туп - тупик -> туп
- улица -> ул - улица -> ул
- область -> обл - Уральский, Уральская, Уральское, Уральские -> Ур, Урал
- урочище -> ур
- хозяйство -> хоз, хоз-во
- хребет -> хр
- хутор -> хут
- Чёрный, Чёрная, Чёрное, Чёрные -> Черн
- Чувашский, Чувашская, Чувашское, Чувашские -> Чуваш
- шахта -> шах
- школа -> шк
- шоссе -> ш
- элеватор -> элев
- Южный, Южная, Южное, Южные -> Ю, Юж, Южн

View File

@@ -46,7 +46,7 @@ sanitizers:
- step: strip-brace-terms - step: strip-brace-terms
- step: tag-analyzer-by-language - step: tag-analyzer-by-language
filter-kind: [".*name.*"] filter-kind: [".*name.*"]
whitelist: [bg,ca,cs,da,de,el,en,es,et,eu,fi,fr,gl,hu,it,ja,mg,ms,nl,no,pl,pt,ro,ru,sk,sl,sv,tr,uk,vi] whitelist: [bg,ca,cs,da,de,el,en,es,et,eu,fi,fr,gl,hu,it,ja,mg,ms,nl,"no",pl,pt,ro,ru,sk,sl,sv,tr,uk,vi]
use-defaults: all use-defaults: all
mode: append mode: append
- step: tag-japanese - step: tag-japanese
@@ -158,7 +158,7 @@ token-analysis:
mode: variant-only mode: variant-only
variants: variants:
- !include icu-rules/variants-nl.yaml - !include icu-rules/variants-nl.yaml
- id: no - id: "no"
analyzer: generic analyzer: generic
mode: variant-only mode: variant-only
variants: variants:

View File

@@ -26,7 +26,7 @@ from .connection import SearchConnection
from .status import get_status, StatusResult from .status import get_status, StatusResult
from .lookup import get_places, get_detailed_place from .lookup import get_places, get_detailed_place
from .reverse import ReverseGeocoder from .reverse import ReverseGeocoder
from .search import ForwardGeocoder, Phrase, PhraseType, make_query_analyzer from . import search as nsearch
from . import types as ntyp from . import types as ntyp
from .results import DetailedResult, ReverseResult, SearchResults from .results import DetailedResult, ReverseResult, SearchResults
@@ -207,7 +207,7 @@ class NominatimAPIAsync:
async with self.begin() as conn: async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout) conn.set_query_timeout(self.query_timeout)
if details.keywords: if details.keywords:
await make_query_analyzer(conn) await nsearch.make_query_analyzer(conn)
return await get_detailed_place(conn, place, details) return await get_detailed_place(conn, place, details)
async def lookup(self, places: Sequence[ntyp.PlaceRef], **params: Any) -> SearchResults: async def lookup(self, places: Sequence[ntyp.PlaceRef], **params: Any) -> SearchResults:
@@ -219,7 +219,7 @@ class NominatimAPIAsync:
async with self.begin() as conn: async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout) conn.set_query_timeout(self.query_timeout)
if details.keywords: if details.keywords:
await make_query_analyzer(conn) await nsearch.make_query_analyzer(conn)
return await get_places(conn, places, details) return await get_places(conn, places, details)
async def reverse(self, coord: ntyp.AnyPoint, **params: Any) -> Optional[ReverseResult]: async def reverse(self, coord: ntyp.AnyPoint, **params: Any) -> Optional[ReverseResult]:
@@ -237,7 +237,7 @@ class NominatimAPIAsync:
async with self.begin() as conn: async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout) conn.set_query_timeout(self.query_timeout)
if details.keywords: if details.keywords:
await make_query_analyzer(conn) await nsearch.make_query_analyzer(conn)
geocoder = ReverseGeocoder(conn, details, geocoder = ReverseGeocoder(conn, details,
self.reverse_restrict_to_country_area) self.reverse_restrict_to_country_area)
return await geocoder.lookup(coord) return await geocoder.lookup(coord)
@@ -251,10 +251,10 @@ class NominatimAPIAsync:
async with self.begin() as conn: async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout) conn.set_query_timeout(self.query_timeout)
geocoder = ForwardGeocoder(conn, ntyp.SearchDetails.from_kwargs(params), geocoder = nsearch.ForwardGeocoder(conn, ntyp.SearchDetails.from_kwargs(params),
self.config.get_int('REQUEST_TIMEOUT') self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None) if self.config.REQUEST_TIMEOUT else None)
phrases = [Phrase(PhraseType.NONE, p.strip()) for p in query.split(',')] phrases = [nsearch.Phrase(nsearch.PHRASE_ANY, p.strip()) for p in query.split(',')]
return await geocoder.lookup(phrases) return await geocoder.lookup(phrases)
async def search_address(self, amenity: Optional[str] = None, async def search_address(self, amenity: Optional[str] = None,
@@ -271,22 +271,22 @@ class NominatimAPIAsync:
conn.set_query_timeout(self.query_timeout) conn.set_query_timeout(self.query_timeout)
details = ntyp.SearchDetails.from_kwargs(params) details = ntyp.SearchDetails.from_kwargs(params)
phrases: List[Phrase] = [] phrases: List[nsearch.Phrase] = []
if amenity: if amenity:
phrases.append(Phrase(PhraseType.AMENITY, amenity)) phrases.append(nsearch.Phrase(nsearch.PHRASE_AMENITY, amenity))
if street: if street:
phrases.append(Phrase(PhraseType.STREET, street)) phrases.append(nsearch.Phrase(nsearch.PHRASE_STREET, street))
if city: if city:
phrases.append(Phrase(PhraseType.CITY, city)) phrases.append(nsearch.Phrase(nsearch.PHRASE_CITY, city))
if county: if county:
phrases.append(Phrase(PhraseType.COUNTY, county)) phrases.append(nsearch.Phrase(nsearch.PHRASE_COUNTY, county))
if state: if state:
phrases.append(Phrase(PhraseType.STATE, state)) phrases.append(nsearch.Phrase(nsearch.PHRASE_STATE, state))
if postalcode: if postalcode:
phrases.append(Phrase(PhraseType.POSTCODE, postalcode)) phrases.append(nsearch.Phrase(nsearch.PHRASE_POSTCODE, postalcode))
if country: if country:
phrases.append(Phrase(PhraseType.COUNTRY, country)) phrases.append(nsearch.Phrase(nsearch.PHRASE_COUNTRY, country))
if not phrases: if not phrases:
raise UsageError('Nothing to search for.') raise UsageError('Nothing to search for.')
@@ -304,14 +304,14 @@ class NominatimAPIAsync:
else: else:
details.restrict_min_max_rank(4, 4) details.restrict_min_max_rank(4, 4)
if 'layers' not in params: if details.layers is None:
details.layers = ntyp.DataLayer.ADDRESS details.layers = ntyp.DataLayer.ADDRESS
if amenity: if amenity:
details.layers |= ntyp.DataLayer.POI details.layers |= ntyp.DataLayer.POI
geocoder = ForwardGeocoder(conn, details, geocoder = nsearch.ForwardGeocoder(conn, details,
self.config.get_int('REQUEST_TIMEOUT') self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None) if self.config.REQUEST_TIMEOUT else None)
return await geocoder.lookup(phrases) return await geocoder.lookup(phrases)
async def search_category(self, categories: List[Tuple[str, str]], async def search_category(self, categories: List[Tuple[str, str]],
@@ -328,15 +328,15 @@ class NominatimAPIAsync:
async with self.begin() as conn: async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout) conn.set_query_timeout(self.query_timeout)
if near_query: if near_query:
phrases = [Phrase(PhraseType.NONE, p) for p in near_query.split(',')] phrases = [nsearch.Phrase(nsearch.PHRASE_ANY, p) for p in near_query.split(',')]
else: else:
phrases = [] phrases = []
if details.keywords: if details.keywords:
await make_query_analyzer(conn) await nsearch.make_query_analyzer(conn)
geocoder = ForwardGeocoder(conn, details, geocoder = nsearch.ForwardGeocoder(conn, details,
self.config.get_int('REQUEST_TIMEOUT') self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None) if self.config.REQUEST_TIMEOUT else None)
return await geocoder.lookup_pois(categories, phrases) return await geocoder.lookup_pois(categories, phrases)

View File

@@ -27,5 +27,5 @@ def create(config: QueryConfig) -> QueryProcessingFunc:
return lambda phrases: list( return lambda phrases: list(
filter(lambda p: p.text, filter(lambda p: p.text,
(Phrase(p.ptype, cast(str, normalizer.transliterate(p.text))) (Phrase(p.ptype, cast(str, normalizer.transliterate(p.text)).strip('-: '))
for p in phrases))) for p in phrases)))

View File

@@ -9,5 +9,12 @@ Module for forward search.
""" """
from .geocoder import (ForwardGeocoder as ForwardGeocoder) from .geocoder import (ForwardGeocoder as ForwardGeocoder)
from .query import (Phrase as Phrase, from .query import (Phrase as Phrase,
PhraseType as PhraseType) PHRASE_ANY as PHRASE_ANY,
PHRASE_AMENITY as PHRASE_AMENITY,
PHRASE_STREET as PHRASE_STREET,
PHRASE_CITY as PHRASE_CITY,
PHRASE_COUNTY as PHRASE_COUNTY,
PHRASE_STATE as PHRASE_STATE,
PHRASE_POSTCODE as PHRASE_POSTCODE,
PHRASE_COUNTRY as PHRASE_COUNTRY)
from .query_analyzer_factory import (make_query_analyzer as make_query_analyzer) from .query_analyzer_factory import (make_query_analyzer as make_query_analyzer)

View File

@@ -11,7 +11,7 @@ from typing import Optional, List, Tuple, Iterator, Dict
import heapq import heapq
from ..types import SearchDetails, DataLayer from ..types import SearchDetails, DataLayer
from .query import QueryStruct, Token, TokenType, TokenRange, BreakType from . import query as qmod
from .token_assignment import TokenAssignment from .token_assignment import TokenAssignment
from . import db_search_fields as dbf from . import db_search_fields as dbf
from . import db_searches as dbs from . import db_searches as dbs
@@ -51,7 +51,7 @@ class SearchBuilder:
""" Build the abstract search queries from token assignments. """ Build the abstract search queries from token assignments.
""" """
def __init__(self, query: QueryStruct, details: SearchDetails) -> None: def __init__(self, query: qmod.QueryStruct, details: SearchDetails) -> None:
self.query = query self.query = query
self.details = details self.details = details
@@ -97,7 +97,7 @@ class SearchBuilder:
builder = self.build_poi_search(sdata) builder = self.build_poi_search(sdata)
elif assignment.housenumber: elif assignment.housenumber:
hnr_tokens = self.query.get_tokens(assignment.housenumber, hnr_tokens = self.query.get_tokens(assignment.housenumber,
TokenType.HOUSENUMBER) qmod.TOKEN_HOUSENUMBER)
builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address) builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address)
else: else:
builder = self.build_special_search(sdata, assignment.address, builder = self.build_special_search(sdata, assignment.address,
@@ -128,7 +128,7 @@ class SearchBuilder:
yield dbs.PoiSearch(sdata) yield dbs.PoiSearch(sdata)
def build_special_search(self, sdata: dbf.SearchData, def build_special_search(self, sdata: dbf.SearchData,
address: List[TokenRange], address: List[qmod.TokenRange],
is_category: bool) -> Iterator[dbs.AbstractSearch]: is_category: bool) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search queries for searches that do not involve """ Build abstract search queries for searches that do not involve
a named place. a named place.
@@ -148,11 +148,10 @@ class SearchBuilder:
[t.token for r in address [t.token for r in address
for t in self.query.get_partials_list(r)], for t in self.query.get_partials_list(r)],
lookups.Restrict)] lookups.Restrict)]
penalty += 0.2
yield dbs.PostcodeSearch(penalty, sdata) yield dbs.PostcodeSearch(penalty, sdata)
def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[Token], def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[qmod.Token],
address: List[TokenRange]) -> Iterator[dbs.AbstractSearch]: address: List[qmod.TokenRange]) -> Iterator[dbs.AbstractSearch]:
""" Build a simple address search for special entries where the """ Build a simple address search for special entries where the
housenumber is the main name token. housenumber is the main name token.
""" """
@@ -174,7 +173,7 @@ class SearchBuilder:
list(partials), lookups.LookupAll)) list(partials), lookups.LookupAll))
else: else:
addr_fulls = [t.token for t addr_fulls = [t.token for t
in self.query.get_tokens(address[0], TokenType.WORD)] in self.query.get_tokens(address[0], qmod.TOKEN_WORD)]
if len(addr_fulls) > 5: if len(addr_fulls) > 5:
return return
sdata.lookups.append( sdata.lookups.append(
@@ -184,7 +183,7 @@ class SearchBuilder:
yield dbs.PlaceSearch(0.05, sdata, expected_count) yield dbs.PlaceSearch(0.05, sdata, expected_count)
def build_name_search(self, sdata: dbf.SearchData, def build_name_search(self, sdata: dbf.SearchData,
name: TokenRange, address: List[TokenRange], name: qmod.TokenRange, address: List[qmod.TokenRange],
is_category: bool) -> Iterator[dbs.AbstractSearch]: is_category: bool) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search queries for simple name or address searches. """ Build abstract search queries for simple name or address searches.
""" """
@@ -197,7 +196,7 @@ class SearchBuilder:
sdata.lookups = lookup sdata.lookups = lookup
yield dbs.PlaceSearch(penalty + name_penalty, sdata, count) yield dbs.PlaceSearch(penalty + name_penalty, sdata, count)
def yield_lookups(self, name: TokenRange, address: List[TokenRange] def yield_lookups(self, name: qmod.TokenRange, address: List[qmod.TokenRange]
) -> Iterator[Tuple[float, int, List[dbf.FieldLookup]]]: ) -> Iterator[Tuple[float, int, List[dbf.FieldLookup]]]:
""" Yield all variants how the given name and address should best """ Yield all variants how the given name and address should best
be searched for. This takes into account how frequent the terms be searched for. This takes into account how frequent the terms
@@ -209,26 +208,26 @@ class SearchBuilder:
addr_partials = [t for r in address for t in self.query.get_partials_list(r)] addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
addr_tokens = list({t.token for t in addr_partials}) addr_tokens = list({t.token for t in addr_partials})
exp_count = min(t.count for t in name_partials.values()) / (2**(len(name_partials) - 1)) exp_count = min(t.count for t in name_partials.values()) / (3**(len(name_partials) - 1))
if (len(name_partials) > 3 or exp_count < 8000): if (len(name_partials) > 3 or exp_count < 8000):
yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens) yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
return return
addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 30000 addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 50000
# Partial term to frequent. Try looking up by rare full names first. # Partial term to frequent. Try looking up by rare full names first.
name_fulls = self.query.get_tokens(name, TokenType.WORD) name_fulls = self.query.get_tokens(name, qmod.TOKEN_WORD)
if name_fulls: if name_fulls:
fulls_count = sum(t.count for t in name_fulls) fulls_count = sum(t.count for t in name_fulls)
if fulls_count < 50000 or addr_count < 30000: if fulls_count < 50000 or addr_count < 50000:
yield penalty, fulls_count / (2**len(addr_tokens)), \ yield penalty, fulls_count / (2**len(addr_tokens)), \
self.get_full_name_ranking(name_fulls, addr_partials, self.get_full_name_ranking(name_fulls, addr_partials,
fulls_count > 30000 / max(1, len(addr_tokens))) fulls_count > 30000 / max(1, len(addr_tokens)))
# To catch remaining results, lookup by name and address # To catch remaining results, lookup by name and address
# We only do this if there is a reasonable number of results expected. # We only do this if there is a reasonable number of results expected.
exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count exp_count /= 2**len(addr_tokens)
if exp_count < 10000 and addr_count < 20000: if exp_count < 10000 and addr_count < 20000:
penalty += 0.35 * max(1 if name_fulls else 0.1, penalty += 0.35 * max(1 if name_fulls else 0.1,
5 - len(name_partials) - len(addr_tokens)) 5 - len(name_partials) - len(addr_tokens))
@@ -236,7 +235,7 @@ class SearchBuilder:
self.get_name_address_ranking(list(name_partials.keys()), addr_partials) self.get_name_address_ranking(list(name_partials.keys()), addr_partials)
def get_name_address_ranking(self, name_tokens: List[int], def get_name_address_ranking(self, name_tokens: List[int],
addr_partials: List[Token]) -> List[dbf.FieldLookup]: addr_partials: List[qmod.Token]) -> List[dbf.FieldLookup]:
""" Create a ranking expression looking up by name and address. """ Create a ranking expression looking up by name and address.
""" """
lookup = [dbf.FieldLookup('name_vector', name_tokens, lookups.LookupAll)] lookup = [dbf.FieldLookup('name_vector', name_tokens, lookups.LookupAll)]
@@ -258,23 +257,16 @@ class SearchBuilder:
return lookup return lookup
def get_full_name_ranking(self, name_fulls: List[Token], addr_partials: List[Token], def get_full_name_ranking(self, name_fulls: List[qmod.Token], addr_partials: List[qmod.Token],
use_lookup: bool) -> List[dbf.FieldLookup]: use_lookup: bool) -> List[dbf.FieldLookup]:
""" Create a ranking expression with full name terms and """ Create a ranking expression with full name terms and
additional address lookup. When 'use_lookup' is true, then additional address lookup. When 'use_lookup' is true, then
address lookups will use the index, when the occurrences are not address lookups will use the index, when the occurrences are not
too many. too many.
""" """
# At this point drop unindexed partials from the address.
# This might yield wrong results, nothing we can do about that.
if use_lookup: if use_lookup:
addr_restrict_tokens = [] addr_restrict_tokens = []
addr_lookup_tokens = [] addr_lookup_tokens = [t.token for t in addr_partials]
for t in addr_partials:
if t.addr_count > 20000:
addr_restrict_tokens.append(t.token)
else:
addr_lookup_tokens.append(t.token)
else: else:
addr_restrict_tokens = [t.token for t in addr_partials] addr_restrict_tokens = [t.token for t in addr_partials]
addr_lookup_tokens = [] addr_lookup_tokens = []
@@ -282,11 +274,11 @@ class SearchBuilder:
return dbf.lookup_by_any_name([t.token for t in name_fulls], return dbf.lookup_by_any_name([t.token for t in name_fulls],
addr_restrict_tokens, addr_lookup_tokens) addr_restrict_tokens, addr_lookup_tokens)
def get_name_ranking(self, trange: TokenRange, def get_name_ranking(self, trange: qmod.TokenRange,
db_field: str = 'name_vector') -> dbf.FieldRanking: db_field: str = 'name_vector') -> dbf.FieldRanking:
""" Create a ranking expression for a name term in the given range. """ Create a ranking expression for a name term in the given range.
""" """
name_fulls = self.query.get_tokens(trange, TokenType.WORD) name_fulls = self.query.get_tokens(trange, qmod.TOKEN_WORD)
ranks = [dbf.RankedTokens(t.penalty, [t.token]) for t in name_fulls] ranks = [dbf.RankedTokens(t.penalty, [t.token]) for t in name_fulls]
ranks.sort(key=lambda r: r.penalty) ranks.sort(key=lambda r: r.penalty)
# Fallback, sum of penalty for partials # Fallback, sum of penalty for partials
@@ -294,7 +286,7 @@ class SearchBuilder:
default = sum(t.penalty for t in name_partials) + 0.2 default = sum(t.penalty for t in name_partials) + 0.2
return dbf.FieldRanking(db_field, default, ranks) return dbf.FieldRanking(db_field, default, ranks)
def get_addr_ranking(self, trange: TokenRange) -> dbf.FieldRanking: def get_addr_ranking(self, trange: qmod.TokenRange) -> dbf.FieldRanking:
""" Create a list of ranking expressions for an address term """ Create a list of ranking expressions for an address term
for the given ranges. for the given ranges.
""" """
@@ -305,10 +297,10 @@ class SearchBuilder:
while todo: while todo:
neglen, pos, rank = heapq.heappop(todo) neglen, pos, rank = heapq.heappop(todo)
for tlist in self.query.nodes[pos].starting: for tlist in self.query.nodes[pos].starting:
if tlist.ttype in (TokenType.PARTIAL, TokenType.WORD): if tlist.ttype in (qmod.TOKEN_PARTIAL, qmod.TOKEN_WORD):
if tlist.end < trange.end: if tlist.end < trange.end:
chgpenalty = PENALTY_WORDCHANGE[self.query.nodes[tlist.end].btype] chgpenalty = PENALTY_WORDCHANGE[self.query.nodes[tlist.end].btype]
if tlist.ttype == TokenType.PARTIAL: if tlist.ttype == qmod.TOKEN_PARTIAL:
penalty = rank.penalty + chgpenalty \ penalty = rank.penalty + chgpenalty \
+ max(t.penalty for t in tlist.tokens) + max(t.penalty for t in tlist.tokens)
heapq.heappush(todo, (neglen - 1, tlist.end, heapq.heappush(todo, (neglen - 1, tlist.end,
@@ -318,7 +310,7 @@ class SearchBuilder:
heapq.heappush(todo, (neglen - 1, tlist.end, heapq.heappush(todo, (neglen - 1, tlist.end,
rank.with_token(t, chgpenalty))) rank.with_token(t, chgpenalty)))
elif tlist.end == trange.end: elif tlist.end == trange.end:
if tlist.ttype == TokenType.PARTIAL: if tlist.ttype == qmod.TOKEN_PARTIAL:
ranks.append(dbf.RankedTokens(rank.penalty ranks.append(dbf.RankedTokens(rank.penalty
+ max(t.penalty for t in tlist.tokens), + max(t.penalty for t in tlist.tokens),
rank.tokens)) rank.tokens))
@@ -358,11 +350,11 @@ class SearchBuilder:
if assignment.housenumber: if assignment.housenumber:
sdata.set_strings('housenumbers', sdata.set_strings('housenumbers',
self.query.get_tokens(assignment.housenumber, self.query.get_tokens(assignment.housenumber,
TokenType.HOUSENUMBER)) qmod.TOKEN_HOUSENUMBER))
if assignment.postcode: if assignment.postcode:
sdata.set_strings('postcodes', sdata.set_strings('postcodes',
self.query.get_tokens(assignment.postcode, self.query.get_tokens(assignment.postcode,
TokenType.POSTCODE)) qmod.TOKEN_POSTCODE))
if assignment.qualifier: if assignment.qualifier:
tokens = self.get_qualifier_tokens(assignment.qualifier) tokens = self.get_qualifier_tokens(assignment.qualifier)
if not tokens: if not tokens:
@@ -387,23 +379,23 @@ class SearchBuilder:
return sdata return sdata
def get_country_tokens(self, trange: TokenRange) -> List[Token]: def get_country_tokens(self, trange: qmod.TokenRange) -> List[qmod.Token]:
""" Return the list of country tokens for the given range, """ Return the list of country tokens for the given range,
optionally filtered by the country list from the details optionally filtered by the country list from the details
parameters. parameters.
""" """
tokens = self.query.get_tokens(trange, TokenType.COUNTRY) tokens = self.query.get_tokens(trange, qmod.TOKEN_COUNTRY)
if self.details.countries: if self.details.countries:
tokens = [t for t in tokens if t.lookup_word in self.details.countries] tokens = [t for t in tokens if t.lookup_word in self.details.countries]
return tokens return tokens
def get_qualifier_tokens(self, trange: TokenRange) -> List[Token]: def get_qualifier_tokens(self, trange: qmod.TokenRange) -> List[qmod.Token]:
""" Return the list of qualifier tokens for the given range, """ Return the list of qualifier tokens for the given range,
optionally filtered by the qualifier list from the details optionally filtered by the qualifier list from the details
parameters. parameters.
""" """
tokens = self.query.get_tokens(trange, TokenType.QUALIFIER) tokens = self.query.get_tokens(trange, qmod.TOKEN_QUALIFIER)
if self.details.categories: if self.details.categories:
tokens = [t for t in tokens if t.get_category() in self.details.categories] tokens = [t for t in tokens if t.get_category() in self.details.categories]
@@ -416,7 +408,7 @@ class SearchBuilder:
""" """
if assignment.near_item: if assignment.near_item:
tokens: Dict[Tuple[str, str], float] = {} tokens: Dict[Tuple[str, str], float] = {}
for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM): for t in self.query.get_tokens(assignment.near_item, qmod.TOKEN_NEAR_ITEM):
cat = t.get_category() cat = t.get_category()
# The category of a near search will be that of near_item. # The category of a near search will be that of near_item.
# Thus, if search is restricted to a category parameter, # Thus, if search is restricted to a category parameter,
@@ -430,11 +422,11 @@ class SearchBuilder:
PENALTY_WORDCHANGE = { PENALTY_WORDCHANGE = {
BreakType.START: 0.0, qmod.BREAK_START: 0.0,
BreakType.END: 0.0, qmod.BREAK_END: 0.0,
BreakType.PHRASE: 0.0, qmod.BREAK_PHRASE: 0.0,
BreakType.SOFT_PHRASE: 0.0, qmod.BREAK_SOFT_PHRASE: 0.0,
BreakType.WORD: 0.1, qmod.BREAK_WORD: 0.1,
BreakType.PART: 0.2, qmod.BREAK_PART: 0.2,
BreakType.TOKEN: 0.4 qmod.BREAK_TOKEN: 0.4
} }

View File

@@ -581,9 +581,13 @@ class PostcodeSearch(AbstractSearch):
.where((tsearch.c.name_vector + tsearch.c.nameaddress_vector) .where((tsearch.c.name_vector + tsearch.c.nameaddress_vector)
.contains(sa.type_coerce(self.lookups[0].tokens, .contains(sa.type_coerce(self.lookups[0].tokens,
IntArray))) IntArray)))
# Do NOT add rerank penalties based on the address terms.
# The standard rerank penalty only checks the address vector
# while terms may appear in name and address vector. This would
# lead to overly high penalties.
# We assume that a postcode is precise enough to not require
# additional full name matches.
for ranking in self.rankings:
penalty += ranking.sql_penalty(conn.t.search_name)
penalty += sa.case(*((t.c.postcode == v, p) for v, p in self.postcodes), penalty += sa.case(*((t.c.postcode == v, p) for v, p in self.postcodes),
else_=1.0) else_=1.0)

View File

@@ -238,7 +238,7 @@ def _dump_searches(searches: List[AbstractSearch], query: QueryStruct,
if not lk: if not lk:
return '' return ''
return f"{lk.lookup_type}({lk.column}{tk(lk.tokens)})" return f"{lk.lookup_type.__name__}({lk.column}{tk(lk.tokens)})"
def fmt_cstr(c: Any) -> str: def fmt_cstr(c: Any) -> str:
if not c: if not c:

View File

@@ -8,7 +8,6 @@
Implementation of query analysis for the ICU tokenizer. Implementation of query analysis for the ICU tokenizer.
""" """
from typing import Tuple, Dict, List, Optional, Iterator, Any, cast from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
from collections import defaultdict
import dataclasses import dataclasses
import difflib import difflib
import re import re
@@ -25,62 +24,30 @@ from ..connection import SearchConnection
from ..logging import log from ..logging import log
from . import query as qmod from . import query as qmod
from ..query_preprocessing.config import QueryConfig from ..query_preprocessing.config import QueryConfig
from ..query_preprocessing.base import QueryProcessingFunc
from .query_analyzer_factory import AbstractQueryAnalyzer from .query_analyzer_factory import AbstractQueryAnalyzer
from .postcode_parser import PostcodeParser
DB_TO_TOKEN_TYPE = { DB_TO_TOKEN_TYPE = {
'W': qmod.TokenType.WORD, 'W': qmod.TOKEN_WORD,
'w': qmod.TokenType.PARTIAL, 'w': qmod.TOKEN_PARTIAL,
'H': qmod.TokenType.HOUSENUMBER, 'H': qmod.TOKEN_HOUSENUMBER,
'P': qmod.TokenType.POSTCODE, 'P': qmod.TOKEN_POSTCODE,
'C': qmod.TokenType.COUNTRY 'C': qmod.TOKEN_COUNTRY
} }
PENALTY_IN_TOKEN_BREAK = { PENALTY_IN_TOKEN_BREAK = {
qmod.BreakType.START: 0.5, qmod.BREAK_START: 0.5,
qmod.BreakType.END: 0.5, qmod.BREAK_END: 0.5,
qmod.BreakType.PHRASE: 0.5, qmod.BREAK_PHRASE: 0.5,
qmod.BreakType.SOFT_PHRASE: 0.5, qmod.BREAK_SOFT_PHRASE: 0.5,
qmod.BreakType.WORD: 0.1, qmod.BREAK_WORD: 0.1,
qmod.BreakType.PART: 0.0, qmod.BREAK_PART: 0.0,
qmod.BreakType.TOKEN: 0.0 qmod.BREAK_TOKEN: 0.0
} }
@dataclasses.dataclass
class QueryPart:
""" Normalized and transliterated form of a single term in the query.
When the term came out of a split during the transliteration,
the normalized string is the full word before transliteration.
The word number keeps track of the word before transliteration
and can be used to identify partial transliterated terms.
Penalty is the break penalty for the break following the token.
"""
token: str
normalized: str
word_number: int
penalty: float
QueryParts = List[QueryPart]
WordDict = Dict[str, List[qmod.TokenRange]]
def yield_words(terms: List[QueryPart], start: int) -> Iterator[Tuple[str, qmod.TokenRange]]:
""" Return all combinations of words in the terms list after the
given position.
"""
total = len(terms)
for first in range(start, total):
word = terms[first].token
penalty = PENALTY_IN_TOKEN_BREAK[qmod.BreakType.WORD]
yield word, qmod.TokenRange(first, first + 1, penalty=penalty)
for last in range(first + 1, min(first + 20, total)):
word = ' '.join((word, terms[last].token))
penalty += terms[last - 1].penalty
yield word, qmod.TokenRange(first, last + 1, penalty=penalty)
@dataclasses.dataclass @dataclasses.dataclass
class ICUToken(qmod.Token): class ICUToken(qmod.Token):
""" Specialised token for ICU tokenizer. """ Specialised token for ICU tokenizer.
@@ -146,60 +113,51 @@ class ICUToken(qmod.Token):
addr_count=max(1, addr_count)) addr_count=max(1, addr_count))
class ICUQueryAnalyzer(AbstractQueryAnalyzer): @dataclasses.dataclass
""" Converter for query strings into a tokenized query class ICUAnalyzerConfig:
using the tokens created by a ICU tokenizer. postcode_parser: PostcodeParser
""" normalizer: Transliterator
def __init__(self, conn: SearchConnection) -> None: transliterator: Transliterator
self.conn = conn preprocessors: List[QueryProcessingFunc]
async def setup(self) -> None: @staticmethod
""" Set up static data structures needed for the analysis. async def create(conn: SearchConnection) -> 'ICUAnalyzerConfig':
""" rules = await conn.get_property('tokenizer_import_normalisation')
async def _make_normalizer() -> Any: normalizer = Transliterator.createFromRules("normalization", rules)
rules = await self.conn.get_property('tokenizer_import_normalisation')
return Transliterator.createFromRules("normalization", rules)
self.normalizer = await self.conn.get_cached_value('ICUTOK', 'normalizer', rules = await conn.get_property('tokenizer_import_transliteration')
_make_normalizer) transliterator = Transliterator.createFromRules("transliteration", rules)
async def _make_transliterator() -> Any: preprocessing_rules = conn.config.load_sub_configuration('icu_tokenizer.yaml',
rules = await self.conn.get_property('tokenizer_import_transliteration') config='TOKENIZER_CONFIG')\
return Transliterator.createFromRules("transliteration", rules) .get('query-preprocessing', [])
self.transliterator = await self.conn.get_cached_value('ICUTOK', 'transliterator',
_make_transliterator)
await self._setup_preprocessing()
if 'word' not in self.conn.t.meta.tables:
sa.Table('word', self.conn.t.meta,
sa.Column('word_id', sa.Integer),
sa.Column('word_token', sa.Text, nullable=False),
sa.Column('type', sa.Text, nullable=False),
sa.Column('word', sa.Text),
sa.Column('info', Json))
async def _setup_preprocessing(self) -> None:
""" Load the rules for preprocessing and set up the handlers.
"""
rules = self.conn.config.load_sub_configuration('icu_tokenizer.yaml',
config='TOKENIZER_CONFIG')
preprocessing_rules = rules.get('query-preprocessing', [])
self.preprocessors = []
preprocessors: List[QueryProcessingFunc] = []
for func in preprocessing_rules: for func in preprocessing_rules:
if 'step' not in func: if 'step' not in func:
raise UsageError("Preprocessing rule is missing the 'step' attribute.") raise UsageError("Preprocessing rule is missing the 'step' attribute.")
if not isinstance(func['step'], str): if not isinstance(func['step'], str):
raise UsageError("'step' attribute must be a simple string.") raise UsageError("'step' attribute must be a simple string.")
module = self.conn.config.load_plugin_module( module = conn.config.load_plugin_module(
func['step'], 'nominatim_api.query_preprocessing') func['step'], 'nominatim_api.query_preprocessing')
self.preprocessors.append( preprocessors.append(
module.create(QueryConfig(func).set_normalizer(self.normalizer))) module.create(QueryConfig(func).set_normalizer(normalizer)))
return ICUAnalyzerConfig(PostcodeParser(conn.config),
normalizer, transliterator, preprocessors)
class ICUQueryAnalyzer(AbstractQueryAnalyzer):
""" Converter for query strings into a tokenized query
using the tokens created by a ICU tokenizer.
"""
def __init__(self, conn: SearchConnection, config: ICUAnalyzerConfig) -> None:
self.conn = conn
self.postcode_parser = config.postcode_parser
self.normalizer = config.normalizer
self.transliterator = config.transliterator
self.preprocessors = config.preprocessors
async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct: async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
""" Analyze the given list of phrases and return the """ Analyze the given list of phrases and return the
@@ -214,8 +172,9 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
if not query.source: if not query.source:
return query return query
parts, words = self.split_query(query) self.split_query(query)
log().var_dump('Transliterated query', lambda: _dump_transliterated(query, parts)) log().var_dump('Transliterated query', lambda: query.get_transliterated_query())
words = query.extract_words(base_penalty=PENALTY_IN_TOKEN_BREAK[qmod.BREAK_WORD])
for row in await self.lookup_in_db(list(words.keys())): for row in await self.lookup_in_db(list(words.keys())):
for trange in words[row.word_token]: for trange in words[row.word_token]:
@@ -223,17 +182,24 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
if row.type == 'S': if row.type == 'S':
if row.info['op'] in ('in', 'near'): if row.info['op'] in ('in', 'near'):
if trange.start == 0: if trange.start == 0:
query.add_token(trange, qmod.TokenType.NEAR_ITEM, token) query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
else: else:
if trange.start == 0 and trange.end == query.num_token_slots(): if trange.start == 0 and trange.end == query.num_token_slots():
query.add_token(trange, qmod.TokenType.NEAR_ITEM, token) query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
else: else:
query.add_token(trange, qmod.TokenType.QUALIFIER, token) query.add_token(trange, qmod.TOKEN_QUALIFIER, token)
else: else:
query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token) query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)
self.add_extra_tokens(query, parts) self.add_extra_tokens(query)
self.rerank_tokens(query, parts) for start, end, pc in self.postcode_parser.parse(query):
term = ' '.join(n.term_lookup for n in query.nodes[start + 1:end + 1])
query.add_token(qmod.TokenRange(start, end),
qmod.TOKEN_POSTCODE,
ICUToken(penalty=0.1, token=0, count=1, addr_count=1,
lookup_word=pc, word_token=term,
info=None))
self.rerank_tokens(query)
log().table_dump('Word tokens', _dump_word_tokens(query)) log().table_dump('Word tokens', _dump_word_tokens(query))
@@ -244,19 +210,11 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
standardized form search will work with. All information removed standardized form search will work with. All information removed
at this stage is inevitably lost. at this stage is inevitably lost.
""" """
return cast(str, self.normalizer.transliterate(text)) return cast(str, self.normalizer.transliterate(text)).strip('-: ')
def split_query(self, query: qmod.QueryStruct) -> Tuple[QueryParts, WordDict]: def split_query(self, query: qmod.QueryStruct) -> None:
""" Transliterate the phrases and split them into tokens. """ Transliterate the phrases and split them into tokens.
Returns the list of transliterated tokens together with their
normalized form and a dictionary of words for lookup together
with their position.
""" """
parts: QueryParts = []
phrase_start = 0
words = defaultdict(list)
wordnr = 0
for phrase in query.source: for phrase in query.source:
query.nodes[-1].ptype = phrase.ptype query.nodes[-1].ptype = phrase.ptype
phrase_split = re.split('([ :-])', phrase.text) phrase_split = re.split('([ :-])', phrase.text)
@@ -271,78 +229,74 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
if trans: if trans:
for term in trans.split(' '): for term in trans.split(' '):
if term: if term:
parts.append(QueryPart(term, word, wordnr, query.add_node(qmod.BREAK_TOKEN, phrase.ptype,
PENALTY_IN_TOKEN_BREAK[qmod.BreakType.TOKEN])) PENALTY_IN_TOKEN_BREAK[qmod.BREAK_TOKEN],
query.add_node(qmod.BreakType.TOKEN, phrase.ptype) term, word)
query.nodes[-1].btype = qmod.BreakType(breakchar) query.nodes[-1].adjust_break(breakchar,
parts[-1].penalty = PENALTY_IN_TOKEN_BREAK[qmod.BreakType(breakchar)] PENALTY_IN_TOKEN_BREAK[breakchar])
wordnr += 1
for word, wrange in yield_words(parts, phrase_start): query.nodes[-1].adjust_break(qmod.BREAK_END, PENALTY_IN_TOKEN_BREAK[qmod.BREAK_END])
words[word].append(wrange)
phrase_start = len(parts)
query.nodes[-1].btype = qmod.BreakType.END
return parts, words
async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]': async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
""" Return the token information from the database for the """ Return the token information from the database for the
given word tokens. given word tokens.
This function excludes postcode tokens
""" """
t = self.conn.t.meta.tables['word'] t = self.conn.t.meta.tables['word']
return await self.conn.execute(t.select().where(t.c.word_token.in_(words))) return await self.conn.execute(t.select()
.where(t.c.word_token.in_(words))
.where(t.c.type != 'P'))
def add_extra_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None: def add_extra_tokens(self, query: qmod.QueryStruct) -> None:
""" Add tokens to query that are not saved in the database. """ Add tokens to query that are not saved in the database.
""" """
for part, node, i in zip(parts, query.nodes, range(1000)): need_hnr = False
if len(part.token) <= 4 and part.token.isdigit()\ for i, node in enumerate(query.nodes):
and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER): is_full_token = node.btype not in (qmod.BREAK_TOKEN, qmod.BREAK_PART)
query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER, if need_hnr and is_full_token \
and len(node.term_normalized) <= 4 and node.term_normalized.isdigit():
query.add_token(qmod.TokenRange(i-1, i), qmod.TOKEN_HOUSENUMBER,
ICUToken(penalty=0.5, token=0, ICUToken(penalty=0.5, token=0,
count=1, addr_count=1, lookup_word=part.token, count=1, addr_count=1,
word_token=part.token, info=None)) lookup_word=node.term_lookup,
word_token=node.term_lookup, info=None))
def rerank_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None: need_hnr = is_full_token and not node.has_tokens(i+1, qmod.TOKEN_HOUSENUMBER)
def rerank_tokens(self, query: qmod.QueryStruct) -> None:
""" Add penalties to tokens that depend on presence of other token. """ Add penalties to tokens that depend on presence of other token.
""" """
for i, node, tlist in query.iter_token_lists(): for i, node, tlist in query.iter_token_lists():
if tlist.ttype == qmod.TokenType.POSTCODE: if tlist.ttype == qmod.TOKEN_POSTCODE:
tlen = len(cast(ICUToken, tlist.tokens[0]).word_token)
for repl in node.starting: for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.POSTCODE \ if repl.end == tlist.end and repl.ttype != qmod.TOKEN_POSTCODE \
and (repl.ttype != qmod.TokenType.HOUSENUMBER and (repl.ttype != qmod.TOKEN_HOUSENUMBER or tlen > 4):
or len(tlist.tokens[0].lookup_word) > 4):
repl.add_penalty(0.39) repl.add_penalty(0.39)
elif (tlist.ttype == qmod.TokenType.HOUSENUMBER elif (tlist.ttype == qmod.TOKEN_HOUSENUMBER
and len(tlist.tokens[0].lookup_word) <= 3): and len(tlist.tokens[0].lookup_word) <= 3):
if any(c.isdigit() for c in tlist.tokens[0].lookup_word): if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
for repl in node.starting: for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.HOUSENUMBER: if repl.end == tlist.end and repl.ttype != qmod.TOKEN_HOUSENUMBER:
repl.add_penalty(0.5 - tlist.tokens[0].penalty) repl.add_penalty(0.5 - tlist.tokens[0].penalty)
elif tlist.ttype not in (qmod.TokenType.COUNTRY, qmod.TokenType.PARTIAL): elif tlist.ttype not in (qmod.TOKEN_COUNTRY, qmod.TOKEN_PARTIAL):
norm = parts[i].normalized norm = ' '.join(n.term_normalized for n in query.nodes[i + 1:tlist.end + 1]
for j in range(i + 1, tlist.end): if n.btype != qmod.BREAK_TOKEN)
if parts[j - 1].word_number != parts[j].word_number: if not norm:
norm += ' ' + parts[j].normalized # Can happen when the token only covers a partial term
norm = query.nodes[i + 1].term_normalized
for token in tlist.tokens: for token in tlist.tokens:
cast(ICUToken, token).rematch(norm) cast(ICUToken, token).rematch(norm)
def _dump_transliterated(query: qmod.QueryStruct, parts: QueryParts) -> str:
out = query.nodes[0].btype.value
for node, part in zip(query.nodes[1:], parts):
out += part.token + node.btype.value
return out
def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]: def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info'] yield ['type', 'from', 'to', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
for node in query.nodes: for i, node in enumerate(query.nodes):
for tlist in node.starting: for tlist in node.starting:
for token in tlist.tokens: for token in tlist.tokens:
t = cast(ICUToken, token) t = cast(ICUToken, token)
yield [tlist.ttype.name, t.token, t.word_token or '', yield [tlist.ttype, str(i), str(tlist.end), t.token, t.word_token or '',
t.lookup_word or '', t.penalty, t.count, t.info] t.lookup_word or '', t.penalty, t.count, t.info]
@@ -350,7 +304,17 @@ async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer
""" Create and set up a new query analyzer for a database based """ Create and set up a new query analyzer for a database based
on the ICU tokenizer. on the ICU tokenizer.
""" """
out = ICUQueryAnalyzer(conn) async def _get_config() -> ICUAnalyzerConfig:
await out.setup() if 'word' not in conn.t.meta.tables:
sa.Table('word', conn.t.meta,
sa.Column('word_id', sa.Integer),
sa.Column('word_token', sa.Text, nullable=False),
sa.Column('type', sa.Text, nullable=False),
sa.Column('word', sa.Text),
sa.Column('info', Json))
return out return await ICUAnalyzerConfig.create(conn)
config = await conn.get_cached_value('ICUTOK', 'config', _get_config)
return ICUQueryAnalyzer(conn, config)

View File

@@ -0,0 +1,104 @@
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Handling of arbitrary postcode tokens in tokenized query string.
"""
from typing import Tuple, Set, Dict, List
import re
from collections import defaultdict
import yaml
from ..config import Configuration
from . import query as qmod
class PostcodeParser:
""" Pattern-based parser for postcodes in tokenized queries.
The postcode patterns are read from the country configuration.
The parser does currently not return country restrictions.
"""
def __init__(self, config: Configuration) -> None:
# skip over includes here to avoid loading the complete country name data
yaml.add_constructor('!include', lambda loader, node: [],
Loader=yaml.SafeLoader)
cdata = yaml.safe_load(config.find_config_file('country_settings.yaml')
.read_text(encoding='utf-8'))
unique_patterns: Dict[str, Dict[str, List[str]]] = {}
for cc, data in cdata.items():
if data.get('postcode'):
pat = data['postcode']['pattern'].replace('d', '[0-9]').replace('l', '[A-Z]')
out = data['postcode'].get('output')
if pat not in unique_patterns:
unique_patterns[pat] = defaultdict(list)
unique_patterns[pat][out].append(cc.upper())
self.global_pattern = re.compile(
'(?:(?P<cc>[A-Z][A-Z])(?P<space>[ -]?))?(?P<pc>(?:(?:'
+ ')|(?:'.join(unique_patterns) + '))[:, >].*)')
self.local_patterns = [(re.compile(f"{pat}[:, >]"), list(info.items()))
for pat, info in unique_patterns.items()]
def parse(self, query: qmod.QueryStruct) -> Set[Tuple[int, int, str]]:
""" Parse postcodes in the given list of query tokens taking into
account the list of breaks from the nodes.
The result is a sequence of tuples with
[start node id, end node id, postcode token]
"""
nodes = query.nodes
outcodes: Set[Tuple[int, int, str]] = set()
terms = [n.term_normalized.upper() + n.btype for n in nodes]
for i in range(query.num_token_slots()):
if nodes[i].btype in '<,: ' and nodes[i + 1].btype != '`' \
and (i == 0 or nodes[i - 1].ptype != qmod.PHRASE_POSTCODE):
if nodes[i].ptype == qmod.PHRASE_ANY:
word = terms[i + 1]
if word[-1] in ' -' and nodes[i + 2].btype != '`' \
and nodes[i + 1].ptype == qmod.PHRASE_ANY:
word += terms[i + 2]
if word[-1] in ' -' and nodes[i + 3].btype != '`' \
and nodes[i + 2].ptype == qmod.PHRASE_ANY:
word += terms[i + 3]
self._match_word(word, i, False, outcodes)
elif nodes[i].ptype == qmod.PHRASE_POSTCODE:
word = terms[i + 1]
for j in range(i + 1, query.num_token_slots()):
if nodes[j].ptype != qmod.PHRASE_POSTCODE:
break
word += terms[j + 1]
self._match_word(word, i, True, outcodes)
return outcodes
def _match_word(self, word: str, pos: int, fullmatch: bool,
outcodes: Set[Tuple[int, int, str]]) -> None:
# Use global pattern to check for presence of any postcode.
m = self.global_pattern.fullmatch(word)
if m:
# If there was a match, check against each pattern separately
# because multiple patterns might be machting at the end.
cc = m.group('cc')
pc_word = m.group('pc')
cc_spaces = len(m.group('space') or '')
for pattern, info in self.local_patterns:
lm = pattern.fullmatch(pc_word) if fullmatch else pattern.match(pc_word)
if lm:
trange = (pos, pos + cc_spaces + sum(c in ' ,-:>' for c in lm.group(0)))
for out, out_ccs in info:
if cc is None or cc in out_ccs:
if out:
outcodes.add((*trange, lm.expand(out)))
else:
outcodes.add((*trange, lm.group(0)[:-1]))

View File

@@ -7,94 +7,95 @@
""" """
Datastructures for a tokenized query. Datastructures for a tokenized query.
""" """
from typing import List, Tuple, Optional, Iterator from typing import Dict, List, Tuple, Optional, Iterator
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections import defaultdict
import dataclasses import dataclasses
import enum
class BreakType(enum.Enum): BreakType = str
""" Type of break between tokens. """ Type of break between tokens.
""" """
START = '<' BREAK_START = '<'
""" Begin of the query. """ """ Begin of the query. """
END = '>' BREAK_END = '>'
""" End of the query. """ """ End of the query. """
PHRASE = ',' BREAK_PHRASE = ','
""" Hard break between two phrases. Address parts cannot cross hard """ Hard break between two phrases. Address parts cannot cross hard
phrase boundaries.""" phrase boundaries."""
SOFT_PHRASE = ':' BREAK_SOFT_PHRASE = ':'
""" Likely break between two phrases. Address parts should not cross soft """ Likely break between two phrases. Address parts should not cross soft
phrase boundaries. Soft breaks can be inserted by a preprocessor phrase boundaries. Soft breaks can be inserted by a preprocessor
that is analysing the input string. that is analysing the input string.
""" """
WORD = ' ' BREAK_WORD = ' '
""" Break between words. """ """ Break between words. """
PART = '-' BREAK_PART = '-'
""" Break inside a word, for example a hyphen or apostrophe. """ """ Break inside a word, for example a hyphen or apostrophe. """
TOKEN = '`' BREAK_TOKEN = '`'
""" Break created as a result of tokenization. """ Break created as a result of tokenization.
This may happen in languages without spaces between words. This may happen in languages without spaces between words.
"""
TokenType = str
""" Type of token.
"""
TOKEN_WORD = 'W'
""" Full name of a place. """
TOKEN_PARTIAL = 'w'
""" Word term without breaks, does not necessarily represent a full name. """
TOKEN_HOUSENUMBER = 'H'
""" Housenumber term. """
TOKEN_POSTCODE = 'P'
""" Postal code term. """
TOKEN_COUNTRY = 'C'
""" Country name or reference. """
TOKEN_QUALIFIER = 'Q'
""" Special term used together with name (e.g. _Hotel_ Bellevue). """
TOKEN_NEAR_ITEM = 'N'
""" Special term used as searchable object(e.g. supermarket in ...). """
PhraseType = int
""" Designation of a phrase.
"""
PHRASE_ANY = 0
""" No specific designation (i.e. source is free-form query). """
PHRASE_AMENITY = 1
""" Contains name or type of a POI. """
PHRASE_STREET = 2
""" Contains a street name optionally with a housenumber. """
PHRASE_CITY = 3
""" Contains the postal city. """
PHRASE_COUNTY = 4
""" Contains the equivalent of a county. """
PHRASE_STATE = 5
""" Contains a state or province. """
PHRASE_POSTCODE = 6
""" Contains a postal code. """
PHRASE_COUNTRY = 7
""" Contains the country name or code. """
def _phrase_compatible_with(ptype: PhraseType, ttype: TokenType,
is_full_phrase: bool) -> bool:
""" Check if the given token type can be used with the phrase type.
""" """
if ptype == PHRASE_ANY:
return not is_full_phrase or ttype != TOKEN_QUALIFIER
if ptype == PHRASE_AMENITY:
return ttype in (TOKEN_WORD, TOKEN_PARTIAL)\
or (is_full_phrase and ttype == TOKEN_NEAR_ITEM)\
or (not is_full_phrase and ttype == TOKEN_QUALIFIER)
if ptype == PHRASE_STREET:
return ttype in (TOKEN_WORD, TOKEN_PARTIAL, TOKEN_HOUSENUMBER)
if ptype == PHRASE_POSTCODE:
return ttype == TOKEN_POSTCODE
if ptype == PHRASE_COUNTRY:
return ttype == TOKEN_COUNTRY
return ttype in (TOKEN_WORD, TOKEN_PARTIAL)
class TokenType(enum.Enum):
""" Type of token.
"""
WORD = enum.auto()
""" Full name of a place. """
PARTIAL = enum.auto()
""" Word term without breaks, does not necessarily represent a full name. """
HOUSENUMBER = enum.auto()
""" Housenumber term. """
POSTCODE = enum.auto()
""" Postal code term. """
COUNTRY = enum.auto()
""" Country name or reference. """
QUALIFIER = enum.auto()
""" Special term used together with name (e.g. _Hotel_ Bellevue). """
NEAR_ITEM = enum.auto()
""" Special term used as searchable object(e.g. supermarket in ...). """
class PhraseType(enum.Enum):
""" Designation of a phrase.
"""
NONE = 0
""" No specific designation (i.e. source is free-form query). """
AMENITY = enum.auto()
""" Contains name or type of a POI. """
STREET = enum.auto()
""" Contains a street name optionally with a housenumber. """
CITY = enum.auto()
""" Contains the postal city. """
COUNTY = enum.auto()
""" Contains the equivalent of a county. """
STATE = enum.auto()
""" Contains a state or province. """
POSTCODE = enum.auto()
""" Contains a postal code. """
COUNTRY = enum.auto()
""" Contains the country name or code. """
def compatible_with(self, ttype: TokenType,
is_full_phrase: bool) -> bool:
""" Check if the given token type can be used with the phrase type.
"""
if self == PhraseType.NONE:
return not is_full_phrase or ttype != TokenType.QUALIFIER
if self == PhraseType.AMENITY:
return ttype in (TokenType.WORD, TokenType.PARTIAL)\
or (is_full_phrase and ttype == TokenType.NEAR_ITEM)\
or (not is_full_phrase and ttype == TokenType.QUALIFIER)
if self == PhraseType.STREET:
return ttype in (TokenType.WORD, TokenType.PARTIAL, TokenType.HOUSENUMBER)
if self == PhraseType.POSTCODE:
return ttype == TokenType.POSTCODE
if self == PhraseType.COUNTRY:
return ttype == TokenType.COUNTRY
return ttype in (TokenType.WORD, TokenType.PARTIAL)
@dataclasses.dataclass @dataclasses.dataclass
@@ -171,11 +172,33 @@ class TokenList:
@dataclasses.dataclass @dataclasses.dataclass
class QueryNode: class QueryNode:
""" A node of the query representing a break between terms. """ A node of the query representing a break between terms.
The node also contains information on the source term
ending at the node. The tokens are created from this information.
""" """
btype: BreakType btype: BreakType
ptype: PhraseType ptype: PhraseType
penalty: float
""" Penalty for the break at this node.
"""
term_lookup: str
""" Transliterated term following this node.
"""
term_normalized: str
""" Normalised form of term following this node.
When the token resulted from a split during transliteration,
then this string contains the complete source term.
"""
starting: List[TokenList] = dataclasses.field(default_factory=list) starting: List[TokenList] = dataclasses.field(default_factory=list)
def adjust_break(self, btype: BreakType, penalty: float) -> None:
""" Change the break type and penalty for this node.
"""
self.btype = btype
self.penalty = penalty
def has_tokens(self, end: int, *ttypes: TokenType) -> bool: def has_tokens(self, end: int, *ttypes: TokenType) -> bool:
""" Check if there are tokens of the given types ending at the """ Check if there are tokens of the given types ending at the
given node. given node.
@@ -218,19 +241,22 @@ class QueryStruct:
def __init__(self, source: List[Phrase]) -> None: def __init__(self, source: List[Phrase]) -> None:
self.source = source self.source = source
self.nodes: List[QueryNode] = \ self.nodes: List[QueryNode] = \
[QueryNode(BreakType.START, source[0].ptype if source else PhraseType.NONE)] [QueryNode(BREAK_START, source[0].ptype if source else PHRASE_ANY,
0.0, '', '')]
def num_token_slots(self) -> int: def num_token_slots(self) -> int:
""" Return the length of the query in vertice steps. """ Return the length of the query in vertice steps.
""" """
return len(self.nodes) - 1 return len(self.nodes) - 1
def add_node(self, btype: BreakType, ptype: PhraseType) -> None: def add_node(self, btype: BreakType, ptype: PhraseType,
break_penalty: float = 0.0,
term_lookup: str = '', term_normalized: str = '') -> None:
""" Append a new break node with the given break type. """ Append a new break node with the given break type.
The phrase type denotes the type for any tokens starting The phrase type denotes the type for any tokens starting
at the node. at the node.
""" """
self.nodes.append(QueryNode(btype, ptype)) self.nodes.append(QueryNode(btype, ptype, break_penalty, term_lookup, term_normalized))
def add_token(self, trange: TokenRange, ttype: TokenType, token: Token) -> None: def add_token(self, trange: TokenRange, ttype: TokenType, token: Token) -> None:
""" Add a token to the query. 'start' and 'end' are the indexes of the """ Add a token to the query. 'start' and 'end' are the indexes of the
@@ -243,9 +269,9 @@ class QueryStruct:
be added to, then the token is silently dropped. be added to, then the token is silently dropped.
""" """
snode = self.nodes[trange.start] snode = self.nodes[trange.start]
full_phrase = snode.btype in (BreakType.START, BreakType.PHRASE)\ full_phrase = snode.btype in (BREAK_START, BREAK_PHRASE)\
and self.nodes[trange.end].btype in (BreakType.PHRASE, BreakType.END) and self.nodes[trange.end].btype in (BREAK_PHRASE, BREAK_END)
if snode.ptype.compatible_with(ttype, full_phrase): if _phrase_compatible_with(snode.ptype, ttype, full_phrase):
tlist = snode.get_tokens(trange.end, ttype) tlist = snode.get_tokens(trange.end, ttype)
if tlist is None: if tlist is None:
snode.starting.append(TokenList(trange.end, ttype, [token])) snode.starting.append(TokenList(trange.end, ttype, [token]))
@@ -265,7 +291,7 @@ class QueryStruct:
going to the subsequent node. Such PARTIAL tokens are going to the subsequent node. Such PARTIAL tokens are
assumed to exist. assumed to exist.
""" """
return [next(iter(self.get_tokens(TokenRange(i, i+1), TokenType.PARTIAL))) return [next(iter(self.get_tokens(TokenRange(i, i+1), TOKEN_PARTIAL)))
for i in range(trange.start, trange.end)] for i in range(trange.start, trange.end)]
def iter_token_lists(self) -> Iterator[Tuple[int, QueryNode, TokenList]]: def iter_token_lists(self) -> Iterator[Tuple[int, QueryNode, TokenList]]:
@@ -285,5 +311,44 @@ class QueryStruct:
for tlist in node.starting: for tlist in node.starting:
for t in tlist.tokens: for t in tlist.tokens:
if t.token == token: if t.token == token:
return f"[{tlist.ttype.name[0]}]{t.lookup_word}" return f"[{tlist.ttype}]{t.lookup_word}"
return 'None' return 'None'
def get_transliterated_query(self) -> str:
""" Return a string representation of the transliterated query
with the character representation of the different break types.
For debugging purposes only.
"""
return ''.join(''.join((n.term_lookup, n.btype)) for n in self.nodes)
def extract_words(self, base_penalty: float = 0.0,
start: int = 0,
endpos: Optional[int] = None) -> Dict[str, List[TokenRange]]:
""" Add all combinations of words that can be formed from the terms
between the given start and endnode. The terms are joined with
spaces for each break. Words can never go across a BREAK_PHRASE.
The functions returns a dictionary of possible words with their
position within the query and a penalty. The penalty is computed
from the base_penalty plus the penalty for each node the word
crosses.
"""
if endpos is None:
endpos = len(self.nodes)
words: Dict[str, List[TokenRange]] = defaultdict(list)
for first in range(start, endpos - 1):
word = self.nodes[first + 1].term_lookup
penalty = base_penalty
words[word].append(TokenRange(first, first + 1, penalty=penalty))
if self.nodes[first + 1].btype != BREAK_PHRASE:
for last in range(first + 2, min(first + 20, endpos)):
word = ' '.join((word, self.nodes[last].term_lookup))
penalty += self.nodes[last - 1].penalty
words[word].append(TokenRange(first, last, penalty=penalty))
if self.nodes[last].btype == BREAK_PHRASE:
break
return words

View File

@@ -24,13 +24,13 @@ class TypedRange:
PENALTY_TOKENCHANGE = { PENALTY_TOKENCHANGE = {
qmod.BreakType.START: 0.0, qmod.BREAK_START: 0.0,
qmod.BreakType.END: 0.0, qmod.BREAK_END: 0.0,
qmod.BreakType.PHRASE: 0.0, qmod.BREAK_PHRASE: 0.0,
qmod.BreakType.SOFT_PHRASE: 0.0, qmod.BREAK_SOFT_PHRASE: 0.0,
qmod.BreakType.WORD: 0.1, qmod.BREAK_WORD: 0.1,
qmod.BreakType.PART: 0.2, qmod.BREAK_PART: 0.2,
qmod.BreakType.TOKEN: 0.4 qmod.BREAK_TOKEN: 0.4
} }
TypedRangeSeq = List[TypedRange] TypedRangeSeq = List[TypedRange]
@@ -56,17 +56,17 @@ class TokenAssignment:
""" """
out = TokenAssignment() out = TokenAssignment()
for token in ranges: for token in ranges:
if token.ttype == qmod.TokenType.PARTIAL: if token.ttype == qmod.TOKEN_PARTIAL:
out.address.append(token.trange) out.address.append(token.trange)
elif token.ttype == qmod.TokenType.HOUSENUMBER: elif token.ttype == qmod.TOKEN_HOUSENUMBER:
out.housenumber = token.trange out.housenumber = token.trange
elif token.ttype == qmod.TokenType.POSTCODE: elif token.ttype == qmod.TOKEN_POSTCODE:
out.postcode = token.trange out.postcode = token.trange
elif token.ttype == qmod.TokenType.COUNTRY: elif token.ttype == qmod.TOKEN_COUNTRY:
out.country = token.trange out.country = token.trange
elif token.ttype == qmod.TokenType.NEAR_ITEM: elif token.ttype == qmod.TOKEN_NEAR_ITEM:
out.near_item = token.trange out.near_item = token.trange
elif token.ttype == qmod.TokenType.QUALIFIER: elif token.ttype == qmod.TOKEN_QUALIFIER:
out.qualifier = token.trange out.qualifier = token.trange
return out return out
@@ -84,7 +84,7 @@ class _TokenSequence:
self.penalty = penalty self.penalty = penalty
def __str__(self) -> str: def __str__(self) -> str:
seq = ''.join(f'[{r.trange.start} - {r.trange.end}: {r.ttype.name}]' for r in self.seq) seq = ''.join(f'[{r.trange.start} - {r.trange.end}: {r.ttype}]' for r in self.seq)
return f'{seq} (dir: {self.direction}, penalty: {self.penalty})' return f'{seq} (dir: {self.direction}, penalty: {self.penalty})'
@property @property
@@ -105,7 +105,7 @@ class _TokenSequence:
""" """
# Country and category must be the final term for left-to-right # Country and category must be the final term for left-to-right
return len(self.seq) > 1 and \ return len(self.seq) > 1 and \
self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.NEAR_ITEM) self.seq[-1].ttype in (qmod.TOKEN_COUNTRY, qmod.TOKEN_NEAR_ITEM)
def appendable(self, ttype: qmod.TokenType) -> Optional[int]: def appendable(self, ttype: qmod.TokenType) -> Optional[int]:
""" Check if the give token type is appendable to the existing sequence. """ Check if the give token type is appendable to the existing sequence.
@@ -114,23 +114,23 @@ class _TokenSequence:
new direction of the sequence after adding such a type. The new direction of the sequence after adding such a type. The
token is not added. token is not added.
""" """
if ttype == qmod.TokenType.WORD: if ttype == qmod.TOKEN_WORD:
return None return None
if not self.seq: if not self.seq:
# Append unconditionally to the empty list # Append unconditionally to the empty list
if ttype == qmod.TokenType.COUNTRY: if ttype == qmod.TOKEN_COUNTRY:
return -1 return -1
if ttype in (qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER): if ttype in (qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
return 1 return 1
return self.direction return self.direction
# Name tokens are always acceptable and don't change direction # Name tokens are always acceptable and don't change direction
if ttype == qmod.TokenType.PARTIAL: if ttype == qmod.TOKEN_PARTIAL:
# qualifiers cannot appear in the middle of the query. They need # qualifiers cannot appear in the middle of the query. They need
# to be near the next phrase. # to be near the next phrase.
if self.direction == -1 \ if self.direction == -1 \
and any(t.ttype == qmod.TokenType.QUALIFIER for t in self.seq[:-1]): and any(t.ttype == qmod.TOKEN_QUALIFIER for t in self.seq[:-1]):
return None return None
return self.direction return self.direction
@@ -138,54 +138,54 @@ class _TokenSequence:
if self.has_types(ttype): if self.has_types(ttype):
return None return None
if ttype == qmod.TokenType.HOUSENUMBER: if ttype == qmod.TOKEN_HOUSENUMBER:
if self.direction == 1: if self.direction == 1:
if len(self.seq) == 1 and self.seq[0].ttype == qmod.TokenType.QUALIFIER: if len(self.seq) == 1 and self.seq[0].ttype == qmod.TOKEN_QUALIFIER:
return None return None
if len(self.seq) > 2 \ if len(self.seq) > 2 \
or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY): or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY):
return None # direction left-to-right: housenumber must come before anything return None # direction left-to-right: housenumber must come before anything
elif (self.direction == -1 elif (self.direction == -1
or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY)): or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY)):
return -1 # force direction right-to-left if after other terms return -1 # force direction right-to-left if after other terms
return self.direction return self.direction
if ttype == qmod.TokenType.POSTCODE: if ttype == qmod.TOKEN_POSTCODE:
if self.direction == -1: if self.direction == -1:
if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER): if self.has_types(qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
return None return None
return -1 return -1
if self.direction == 1: if self.direction == 1:
return None if self.has_types(qmod.TokenType.COUNTRY) else 1 return None if self.has_types(qmod.TOKEN_COUNTRY) else 1
if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER): if self.has_types(qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
return 1 return 1
return self.direction return self.direction
if ttype == qmod.TokenType.COUNTRY: if ttype == qmod.TOKEN_COUNTRY:
return None if self.direction == -1 else 1 return None if self.direction == -1 else 1
if ttype == qmod.TokenType.NEAR_ITEM: if ttype == qmod.TOKEN_NEAR_ITEM:
return self.direction return self.direction
if ttype == qmod.TokenType.QUALIFIER: if ttype == qmod.TOKEN_QUALIFIER:
if self.direction == 1: if self.direction == 1:
if (len(self.seq) == 1 if (len(self.seq) == 1
and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.NEAR_ITEM)) \ and self.seq[0].ttype in (qmod.TOKEN_PARTIAL, qmod.TOKEN_NEAR_ITEM)) \
or (len(self.seq) == 2 or (len(self.seq) == 2
and self.seq[0].ttype == qmod.TokenType.NEAR_ITEM and self.seq[0].ttype == qmod.TOKEN_NEAR_ITEM
and self.seq[1].ttype == qmod.TokenType.PARTIAL): and self.seq[1].ttype == qmod.TOKEN_PARTIAL):
return 1 return 1
return None return None
if self.direction == -1: if self.direction == -1:
return -1 return -1
tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.NEAR_ITEM else self.seq tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TOKEN_NEAR_ITEM else self.seq
if len(tempseq) == 0: if len(tempseq) == 0:
return 1 return 1
if len(tempseq) == 1 and self.seq[0].ttype == qmod.TokenType.HOUSENUMBER: if len(tempseq) == 1 and self.seq[0].ttype == qmod.TOKEN_HOUSENUMBER:
return None return None
if len(tempseq) > 1 or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY): if len(tempseq) > 1 or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY):
return -1 return -1
return 0 return 0
@@ -205,7 +205,7 @@ class _TokenSequence:
new_penalty = 0.0 new_penalty = 0.0
else: else:
last = self.seq[-1] last = self.seq[-1]
if btype != qmod.BreakType.PHRASE and last.ttype == ttype: if btype != qmod.BREAK_PHRASE and last.ttype == ttype:
# extend the existing range # extend the existing range
newseq = self.seq[:-1] + [TypedRange(ttype, last.trange.replace_end(end_pos))] newseq = self.seq[:-1] + [TypedRange(ttype, last.trange.replace_end(end_pos))]
new_penalty = 0.0 new_penalty = 0.0
@@ -240,18 +240,18 @@ class _TokenSequence:
# housenumbers may not be further than 2 words from the beginning. # housenumbers may not be further than 2 words from the beginning.
# If there are two words in front, give it a penalty. # If there are two words in front, give it a penalty.
hnrpos = next((i for i, tr in enumerate(self.seq) hnrpos = next((i for i, tr in enumerate(self.seq)
if tr.ttype == qmod.TokenType.HOUSENUMBER), if tr.ttype == qmod.TOKEN_HOUSENUMBER),
None) None)
if hnrpos is not None: if hnrpos is not None:
if self.direction != -1: if self.direction != -1:
priors = sum(1 for t in self.seq[:hnrpos] if t.ttype == qmod.TokenType.PARTIAL) priors = sum(1 for t in self.seq[:hnrpos] if t.ttype == qmod.TOKEN_PARTIAL)
if not self._adapt_penalty_from_priors(priors, -1): if not self._adapt_penalty_from_priors(priors, -1):
return False return False
if self.direction != 1: if self.direction != 1:
priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TokenType.PARTIAL) priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TOKEN_PARTIAL)
if not self._adapt_penalty_from_priors(priors, 1): if not self._adapt_penalty_from_priors(priors, 1):
return False return False
if any(t.ttype == qmod.TokenType.NEAR_ITEM for t in self.seq): if any(t.ttype == qmod.TOKEN_NEAR_ITEM for t in self.seq):
self.penalty += 1.0 self.penalty += 1.0
return True return True
@@ -269,10 +269,9 @@ class _TokenSequence:
# <address>,<postcode> should give preference to address search # <address>,<postcode> should give preference to address search
if base.postcode.start == 0: if base.postcode.start == 0:
penalty = self.penalty penalty = self.penalty
self.direction = -1 # name searches are only possible backwards
else: else:
penalty = self.penalty + 0.1 penalty = self.penalty + 0.1
self.direction = 1 # name searches are only possible forwards penalty += 0.1 * max(0, len(base.address) - 1)
yield dataclasses.replace(base, penalty=penalty) yield dataclasses.replace(base, penalty=penalty)
def _get_assignments_address_forward(self, base: TokenAssignment, def _get_assignments_address_forward(self, base: TokenAssignment,
@@ -282,6 +281,11 @@ class _TokenSequence:
""" """
first = base.address[0] first = base.address[0]
# The postcode must come after the name.
if base.postcode and base.postcode < first:
log().var_dump('skip forward', (base.postcode, first))
return
log().comment('first word = name') log().comment('first word = name')
yield dataclasses.replace(base, penalty=self.penalty, yield dataclasses.replace(base, penalty=self.penalty,
name=first, address=base.address[1:]) name=first, address=base.address[1:])
@@ -293,7 +297,7 @@ class _TokenSequence:
# * the containing phrase is strictly typed # * the containing phrase is strictly typed
if (base.housenumber and first.end < base.housenumber.start)\ if (base.housenumber and first.end < base.housenumber.start)\
or (base.qualifier and base.qualifier > first)\ or (base.qualifier and base.qualifier > first)\
or (query.nodes[first.start].ptype != qmod.PhraseType.NONE): or (query.nodes[first.start].ptype != qmod.PHRASE_ANY):
return return
penalty = self.penalty penalty = self.penalty
@@ -317,7 +321,12 @@ class _TokenSequence:
""" """
last = base.address[-1] last = base.address[-1]
if self.direction == -1 or len(base.address) > 1: # The postcode must come before the name for backward direction.
if base.postcode and base.postcode > last:
log().var_dump('skip backward', (base.postcode, last))
return
if self.direction == -1 or len(base.address) > 1 or base.postcode:
log().comment('last word = name') log().comment('last word = name')
yield dataclasses.replace(base, penalty=self.penalty, yield dataclasses.replace(base, penalty=self.penalty,
name=last, address=base.address[:-1]) name=last, address=base.address[:-1])
@@ -329,7 +338,7 @@ class _TokenSequence:
# * the containing phrase is strictly typed # * the containing phrase is strictly typed
if (base.housenumber and last.start > base.housenumber.end)\ if (base.housenumber and last.start > base.housenumber.end)\
or (base.qualifier and base.qualifier < last)\ or (base.qualifier and base.qualifier < last)\
or (query.nodes[last.start].ptype != qmod.PhraseType.NONE): or (query.nodes[last.start].ptype != qmod.PHRASE_ANY):
return return
penalty = self.penalty penalty = self.penalty
@@ -393,7 +402,7 @@ def yield_token_assignments(query: qmod.QueryStruct) -> Iterator[TokenAssignment
another. It does not include penalties for transitions within a another. It does not include penalties for transitions within a
type. type.
""" """
todo = [_TokenSequence([], direction=0 if query.source[0].ptype == qmod.PhraseType.NONE else 1)] todo = [_TokenSequence([], direction=0 if query.source[0].ptype == qmod.PHRASE_ANY else 1)]
while todo: while todo:
state = todo.pop() state = todo.pop()

View File

@@ -173,7 +173,7 @@ class Geometry(types.UserDefinedType): # type: ignore[type-arg]
def __init__(self, subtype: str = 'Geometry'): def __init__(self, subtype: str = 'Geometry'):
self.subtype = subtype self.subtype = subtype
def get_col_spec(self) -> str: def get_col_spec(self, **_: Any) -> str:
return f'GEOMETRY({self.subtype}, 4326)' return f'GEOMETRY({self.subtype}, 4326)'
def bind_processor(self, dialect: 'sa.Dialect') -> Callable[[Any], str]: def bind_processor(self, dialect: 'sa.Dialect') -> Callable[[Any], str]:

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Common json type for different dialects. Common json type for different dialects.
@@ -24,6 +24,6 @@ class Json(sa.types.TypeDecorator[Any]):
def load_dialect_impl(self, dialect: SaDialect) -> sa.types.TypeEngine[Any]: def load_dialect_impl(self, dialect: SaDialect) -> sa.types.TypeEngine[Any]:
if dialect.name == 'postgresql': if dialect.name == 'postgresql':
return JSONB(none_as_null=True) # type: ignore[no-untyped-call] return JSONB(none_as_null=True)
return sqlite_json(none_as_null=True) return sqlite_json(none_as_null=True)

View File

@@ -144,7 +144,7 @@ class Point(NamedTuple):
except ValueError as exc: except ValueError as exc:
raise UsageError('Point parameter needs to be numbers.') from exc raise UsageError('Point parameter needs to be numbers.') from exc
if x < -180.0 or x > 180.0 or y < -90.0 or y > 90.0: if not -180 <= x <= 180 or not -90 <= y <= 90.0:
raise UsageError('Point coordinates invalid.') raise UsageError('Point coordinates invalid.')
return Point(x, y) return Point(x, y)

View File

@@ -25,8 +25,8 @@ def get_label_tag(category: Tuple[str, str], extratags: Optional[Mapping[str, st
elif rank < 26 and extratags and 'linked_place' in extratags: elif rank < 26 and extratags and 'linked_place' in extratags:
label = extratags['linked_place'] label = extratags['linked_place']
elif category == ('boundary', 'administrative'): elif category == ('boundary', 'administrative'):
label = ADMIN_LABELS.get((country or '', int(rank/2)))\ label = ADMIN_LABELS.get((country or '', rank // 2))\
or ADMIN_LABELS.get(('', int(rank/2)))\ or ADMIN_LABELS.get(('', rank // 2))\
or 'Administrative' or 'Administrative'
elif category[1] == 'postal_code': elif category[1] == 'postal_code':
label = 'postcode' label = 'postcode'

View File

@@ -249,6 +249,9 @@ def format_base_geocodejson(results: Union[ReverseResults, SearchResults],
out.keyval(f"level{line.admin_level}", line.local_name) out.keyval(f"level{line.admin_level}", line.local_name)
out.end_object().next() out.end_object().next()
if options.get('extratags', False):
out.keyval('extra', result.extratags)
out.end_object().next().end_object().next() out.end_object().next().end_object().next()
out.key('geometry').raw(result.geometry.get('geojson') out.key('geometry').raw(result.geometry.get('geojson')

View File

@@ -8,4 +8,4 @@
Version information for the Nominatim API. Version information for the Nominatim API.
""" """
NOMINATIM_API_VERSION = '5.0.0' NOMINATIM_API_VERSION = '5.1.0'

View File

@@ -2,16 +2,15 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Command-line interface to the Nominatim functions for import, update, Command-line interface to the Nominatim functions for import, update,
database administration and querying. database administration and querying.
""" """
from typing import Optional, Any from typing import Optional, List, Mapping
import importlib import importlib
import logging import logging
import os
import sys import sys
import argparse import argparse
import asyncio import asyncio
@@ -81,13 +80,14 @@ class CommandlineParser:
parser.set_defaults(command=cmd) parser.set_defaults(command=cmd)
cmd.add_args(parser) cmd.add_args(parser)
def run(self, **kwargs: Any) -> int: def run(self, cli_args: Optional[List[str]],
environ: Optional[Mapping[str, str]]) -> int:
""" Parse the command line arguments of the program and execute the """ Parse the command line arguments of the program and execute the
appropriate subcommand. appropriate subcommand.
""" """
args = NominatimArgs() args = NominatimArgs()
try: try:
self.parser.parse_args(args=kwargs.get('cli_args'), namespace=args) self.parser.parse_args(args=cli_args, namespace=args)
except SystemExit: except SystemExit:
return 1 return 1
@@ -101,23 +101,19 @@ class CommandlineParser:
args.project_dir = Path(args.project_dir).resolve() args.project_dir = Path(args.project_dir).resolve()
if 'cli_args' not in kwargs: if cli_args is None:
logging.basicConfig(stream=sys.stderr, logging.basicConfig(stream=sys.stderr,
format='%(asctime)s: %(message)s', format='%(asctime)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', datefmt='%Y-%m-%d %H:%M:%S',
level=max(4 - args.verbose, 1) * 10) level=max(4 - args.verbose, 1) * 10)
args.config = Configuration(args.project_dir, args.config = Configuration(args.project_dir, environ=environ)
environ=kwargs.get('environ', os.environ))
args.config.set_libdirs(osm2pgsql=kwargs['osm2pgsql_path'])
log = logging.getLogger() log = logging.getLogger()
log.warning('Using project directory: %s', str(args.project_dir)) log.warning('Using project directory: %s', str(args.project_dir))
try: try:
ret = args.command.run(args) return args.command.run(args)
return ret
except UsageError as exception: except UsageError as exception:
if log.isEnabledFor(logging.DEBUG): if log.isEnabledFor(logging.DEBUG):
raise # use Python's exception printing raise # use Python's exception printing
@@ -233,9 +229,16 @@ def get_set_parser() -> CommandlineParser:
return parser return parser
def nominatim(**kwargs: Any) -> int: def nominatim(cli_args: Optional[List[str]] = None,
environ: Optional[Mapping[str, str]] = None) -> int:
"""\ """\
Command-line tools for importing, updating, administrating and Command-line tools for importing, updating, administrating and
querying the Nominatim database. querying the Nominatim database.
'cli_args' is a list of parameters for the command to run. If not given,
sys.args will be used.
'environ' is the dictionary of environment variables containing the
Nominatim configuration. When None, the os.environ is inherited.
""" """
return get_set_parser().run(**kwargs) return get_set_parser().run(cli_args=cli_args, environ=environ)

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Provides custom functions over command-line arguments. Provides custom functions over command-line arguments.
@@ -186,7 +186,7 @@ class NominatimArgs:
from the command line arguments. The resulting dict can be from the command line arguments. The resulting dict can be
further customized and then used in `run_osm2pgsql()`. further customized and then used in `run_osm2pgsql()`.
""" """
return dict(osm2pgsql=self.config.OSM2PGSQL_BINARY or self.config.lib_dir.osm2pgsql, return dict(osm2pgsql=self.config.OSM2PGSQL_BINARY,
osm2pgsql_cache=self.osm2pgsql_cache or default_cache, osm2pgsql_cache=self.osm2pgsql_cache or default_cache,
osm2pgsql_style=self.config.get_import_style_file(), osm2pgsql_style=self.config.get_import_style_file(),
osm2pgsql_style_path=self.config.lib_dir.lua, osm2pgsql_style_path=self.config.lib_dir.lua,

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Nominatim configuration accessor. Nominatim configuration accessor.
@@ -73,7 +73,6 @@ class Configuration:
self.project_dir = None self.project_dir = None
class _LibDirs: class _LibDirs:
osm2pgsql: Path
sql = paths.SQLLIB_DIR sql = paths.SQLLIB_DIR
lua = paths.LUALIB_DIR lua = paths.LUALIB_DIR
data = paths.DATA_DIR data = paths.DATA_DIR

View File

@@ -102,10 +102,10 @@ def server_version_tuple(conn: Connection) -> Tuple[int, int]:
Converts correctly for pre-10 and post-10 PostgreSQL versions. Converts correctly for pre-10 and post-10 PostgreSQL versions.
""" """
version = conn.info.server_version version = conn.info.server_version
if version < 100000: major, minor = divmod(version, 10000)
return (int(version / 10000), int((version % 10000) / 100)) if major < 10:
minor //= 100
return (int(version / 10000), version % 10000) return major, minor
def postgis_version_tuple(conn: Connection) -> Tuple[int, int]: def postgis_version_tuple(conn: Connection) -> Tuple[int, int]:

View File

@@ -50,8 +50,8 @@ class ProgressLogger:
places_per_sec = self.done_places / done_time places_per_sec = self.done_places / done_time
eta = (self.total_places - self.done_places) / places_per_sec eta = (self.total_places - self.done_places) / places_per_sec
LOG.warning("Done %d in %d @ %.3f per second - %s ETA (seconds): %.2f", LOG.warning("Done %d in %.0f @ %.3f per second - %s ETA (seconds): %.2f",
self.done_places, int(done_time), self.done_places, done_time,
places_per_sec, self.name, eta) places_per_sec, self.name, eta)
self.next_info += int(places_per_sec) * self.log_interval self.next_info += int(places_per_sec) * self.log_interval
@@ -68,8 +68,8 @@ class ProgressLogger:
diff_seconds = (rank_end_time - self.rank_start_time).total_seconds() diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
places_per_sec = self.done_places / diff_seconds places_per_sec = self.done_places / diff_seconds
LOG.warning("Done %d/%d in %d @ %.3f per second - FINISHED %s\n", LOG.warning("Done %d/%d in %.0f @ %.3f per second - FINISHED %s\n",
self.done_places, self.total_places, int(diff_seconds), self.done_places, self.total_places, diff_seconds,
places_per_sec, self.name) places_per_sec, self.name)
return self.done_places return self.done_places

View File

@@ -121,10 +121,10 @@ class ICUTokenizer(AbstractTokenizer):
SELECT unnest(nameaddress_vector) as id, count(*) SELECT unnest(nameaddress_vector) as id, count(*)
FROM search_name GROUP BY id) FROM search_name GROUP BY id)
SELECT coalesce(a.id, w.id) as id, SELECT coalesce(a.id, w.id) as id,
(CASE WHEN w.count is null THEN '{}'::JSONB (CASE WHEN w.count is null or w.count <= 1 THEN '{}'::JSONB
ELSE jsonb_build_object('count', w.count) END ELSE jsonb_build_object('count', w.count) END
|| ||
CASE WHEN a.count is null THEN '{}'::JSONB CASE WHEN a.count is null or a.count <= 1 THEN '{}'::JSONB
ELSE jsonb_build_object('addr_count', a.count) END) as info ELSE jsonb_build_object('addr_count', a.count) END) as info
FROM word_freq w FULL JOIN addr_freq a ON a.id = w.id; FROM word_freq w FULL JOIN addr_freq a ON a.id = w.id;
""") """)
@@ -134,9 +134,10 @@ class ICUTokenizer(AbstractTokenizer):
drop_tables(conn, 'tmp_word') drop_tables(conn, 'tmp_word')
cur.execute("""CREATE TABLE tmp_word AS cur.execute("""CREATE TABLE tmp_word AS
SELECT word_id, word_token, type, word, SELECT word_id, word_token, type, word,
(CASE WHEN wf.info is null THEN word.info coalesce(word.info, '{}'::jsonb)
ELSE coalesce(word.info, '{}'::jsonb) || wf.info - 'count' - 'addr_count' ||
END) as info coalesce(wf.info, '{}'::jsonb)
as info
FROM word LEFT JOIN word_frequencies wf FROM word LEFT JOIN word_frequencies wf
ON word.word_id = wf.id ON word.word_id = wf.id
""") """)
@@ -381,76 +382,15 @@ class ICUNameAnalyzer(AbstractAnalyzer):
return postcode.strip().upper() return postcode.strip().upper()
def update_postcodes_from_db(self) -> None: def update_postcodes_from_db(self) -> None:
""" Update postcode tokens in the word table from the location_postcode """ Postcode update.
table.
Removes all postcodes from the word table because they are not
needed. Postcodes are recognised by pattern.
""" """
assert self.conn is not None assert self.conn is not None
analyzer = self.token_analysis.analysis.get('@postcode')
with self.conn.cursor() as cur: with self.conn.cursor() as cur:
# First get all postcode names currently in the word table. cur.execute("DELETE FROM word WHERE type = 'P'")
cur.execute("SELECT DISTINCT word FROM word WHERE type = 'P'")
word_entries = set((entry[0] for entry in cur))
# Then compute the required postcode names from the postcode table.
needed_entries = set()
cur.execute("SELECT country_code, postcode FROM location_postcode")
for cc, postcode in cur:
info = PlaceInfo({'country_code': cc,
'class': 'place', 'type': 'postcode',
'address': {'postcode': postcode}})
address = self.sanitizer.process_names(info)[1]
for place in address:
if place.kind == 'postcode':
if analyzer is None:
postcode_name = place.name.strip().upper()
variant_base = None
else:
postcode_name = analyzer.get_canonical_id(place)
variant_base = place.get_attr("variant")
if variant_base:
needed_entries.add(f'{postcode_name}@{variant_base}')
else:
needed_entries.add(postcode_name)
break
# Now update the word table.
self._delete_unused_postcode_words(word_entries - needed_entries)
self._add_missing_postcode_words(needed_entries - word_entries)
def _delete_unused_postcode_words(self, tokens: Iterable[str]) -> None:
assert self.conn is not None
if tokens:
with self.conn.cursor() as cur:
cur.execute("DELETE FROM word WHERE type = 'P' and word = any(%s)",
(list(tokens), ))
def _add_missing_postcode_words(self, tokens: Iterable[str]) -> None:
assert self.conn is not None
if not tokens:
return
analyzer = self.token_analysis.analysis.get('@postcode')
terms = []
for postcode_name in tokens:
if '@' in postcode_name:
term, variant = postcode_name.split('@', 2)
term = self._search_normalized(term)
if analyzer is None:
variants = [term]
else:
variants = analyzer.compute_variants(variant)
if term not in variants:
variants.append(term)
else:
variants = [self._search_normalized(postcode_name)]
terms.append((postcode_name, variants))
if terms:
with self.conn.cursor() as cur:
cur.executemany("""SELECT create_postcode_word(%s, %s)""", terms)
def update_special_phrases(self, phrases: Iterable[Tuple[str, str, str, str]], def update_special_phrases(self, phrases: Iterable[Tuple[str, str, str, str]],
should_replace: bool) -> None: should_replace: bool) -> None:
@@ -645,10 +585,14 @@ class ICUNameAnalyzer(AbstractAnalyzer):
if word_id: if word_id:
result = self._cache.housenumbers.get(word_id, result) result = self._cache.housenumbers.get(word_id, result)
if result[0] is None: if result[0] is None:
variants = analyzer.compute_variants(word_id) varout = analyzer.compute_variants(word_id)
if isinstance(varout, tuple):
variants = varout[0]
else:
variants = varout
if variants: if variants:
hid = execute_scalar(self.conn, "SELECT create_analyzed_hnr_id(%s, %s)", hid = execute_scalar(self.conn, "SELECT create_analyzed_hnr_id(%s, %s)",
(word_id, list(variants))) (word_id, variants))
result = hid, variants[0] result = hid, variants[0]
self._cache.housenumbers[word_id] = result self._cache.housenumbers[word_id] = result
@@ -693,13 +637,17 @@ class ICUNameAnalyzer(AbstractAnalyzer):
full, part = self._cache.names.get(token_id, (None, None)) full, part = self._cache.names.get(token_id, (None, None))
if full is None: if full is None:
variants = analyzer.compute_variants(word_id) varset = analyzer.compute_variants(word_id)
if isinstance(varset, tuple):
variants, lookups = varset
else:
variants, lookups = varset, None
if not variants: if not variants:
continue continue
with self.conn.cursor() as cur: with self.conn.cursor() as cur:
cur.execute("SELECT * FROM getorcreate_full_word(%s, %s)", cur.execute("SELECT * FROM getorcreate_full_word(%s, %s, %s)",
(token_id, variants)) (token_id, variants, lookups))
full, part = cast(Tuple[int, List[int]], cur.fetchone()) full, part = cast(Tuple[int, List[int]], cur.fetchone())
self._cache.names[token_id] = (full, part) self._cache.names[token_id] = (full, part)
@@ -718,32 +666,9 @@ class ICUNameAnalyzer(AbstractAnalyzer):
analyzer = self.token_analysis.analysis.get('@postcode') analyzer = self.token_analysis.analysis.get('@postcode')
if analyzer is None: if analyzer is None:
postcode_name = item.name.strip().upper() return item.name.strip().upper()
variant_base = None
else: else:
postcode_name = analyzer.get_canonical_id(item) return analyzer.get_canonical_id(item)
variant_base = item.get_attr("variant")
if variant_base:
postcode = f'{postcode_name}@{variant_base}'
else:
postcode = postcode_name
if postcode not in self._cache.postcodes:
term = self._search_normalized(postcode_name)
if not term:
return None
variants = {term}
if analyzer is not None and variant_base:
variants.update(analyzer.compute_variants(variant_base))
with self.conn.cursor() as cur:
cur.execute("SELECT create_postcode_word(%s, %s)",
(postcode, list(variants)))
self._cache.postcodes.add(postcode)
return postcode_name
class _TokenInfo: class _TokenInfo:
@@ -836,5 +761,4 @@ class _TokenCache:
self.names: Dict[str, Tuple[int, List[int]]] = {} self.names: Dict[str, Tuple[int, List[int]]] = {}
self.partials: Dict[str, int] = {} self.partials: Dict[str, int] = {}
self.fulls: Dict[str, List[int]] = {} self.fulls: Dict[str, List[int]] = {}
self.postcodes: Set[str] = set()
self.housenumbers: Dict[str, Tuple[Optional[int], Optional[str]]] = {} self.housenumbers: Dict[str, Tuple[Optional[int], Optional[str]]] = {}

View File

@@ -7,7 +7,7 @@
""" """
Common data types and protocols for analysers. Common data types and protocols for analysers.
""" """
from typing import Mapping, List, Any from typing import Mapping, List, Any, Union, Tuple
from ...typing import Protocol from ...typing import Protocol
from ...data.place_name import PlaceName from ...data.place_name import PlaceName
@@ -33,7 +33,7 @@ class Analyzer(Protocol):
for example because the character set in use does not match. for example because the character set in use does not match.
""" """
def compute_variants(self, canonical_id: str) -> List[str]: def compute_variants(self, canonical_id: str) -> Union[List[str], Tuple[List[str], List[str]]]:
""" Compute the transliterated spelling variants for the given """ Compute the transliterated spelling variants for the given
canonical ID. canonical ID.

View File

@@ -2,20 +2,19 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Generic processor for names that creates abbreviation variants. Generic processor for names that creates abbreviation variants.
""" """
from typing import Mapping, Dict, Any, Iterable, Iterator, Optional, List, cast from typing import Mapping, Dict, Any, Iterable, Optional, List, cast, Tuple
import itertools import itertools
import datrie
from ...errors import UsageError from ...errors import UsageError
from ...data.place_name import PlaceName from ...data.place_name import PlaceName
from .config_variants import get_variant_config from .config_variants import get_variant_config
from .generic_mutation import MutationVariantGenerator from .generic_mutation import MutationVariantGenerator
from .simple_trie import SimpleTrie
# Configuration section # Configuration section
@@ -25,8 +24,7 @@ def configure(rules: Mapping[str, Any], normalizer: Any, _: Any) -> Dict[str, An
""" """
config: Dict[str, Any] = {} config: Dict[str, Any] = {}
config['replacements'], config['chars'] = get_variant_config(rules.get('variants'), config['replacements'], _ = get_variant_config(rules.get('variants'), normalizer)
normalizer)
config['variant_only'] = rules.get('mode', '') == 'variant-only' config['variant_only'] = rules.get('mode', '') == 'variant-only'
# parse mutation rules # parse mutation rules
@@ -68,12 +66,8 @@ class GenericTokenAnalysis:
self.variant_only = config['variant_only'] self.variant_only = config['variant_only']
# Set up datrie # Set up datrie
if config['replacements']: self.replacements: Optional[SimpleTrie[List[str]]] = \
self.replacements = datrie.Trie(config['chars']) SimpleTrie(config['replacements']) if config['replacements'] else None
for src, repllist in config['replacements']:
self.replacements[src] = repllist
else:
self.replacements = None
# set up mutation rules # set up mutation rules
self.mutations = [MutationVariantGenerator(*cfg) for cfg in config['mutations']] self.mutations = [MutationVariantGenerator(*cfg) for cfg in config['mutations']]
@@ -84,7 +78,7 @@ class GenericTokenAnalysis:
""" """
return cast(str, self.norm.transliterate(name.name)).strip() return cast(str, self.norm.transliterate(name.name)).strip()
def compute_variants(self, norm_name: str) -> List[str]: def compute_variants(self, norm_name: str) -> Tuple[List[str], List[str]]:
""" Compute the spelling variants for the given normalized name """ Compute the spelling variants for the given normalized name
and transliterate the result. and transliterate the result.
""" """
@@ -93,18 +87,20 @@ class GenericTokenAnalysis:
for mutation in self.mutations: for mutation in self.mutations:
variants = mutation.generate(variants) variants = mutation.generate(variants)
return [name for name in self._transliterate_unique_list(norm_name, variants) if name] varset = set(map(str.strip, variants))
def _transliterate_unique_list(self, norm_name: str,
iterable: Iterable[str]) -> Iterator[Optional[str]]:
seen = set()
if self.variant_only: if self.variant_only:
seen.add(norm_name) varset.discard(norm_name)
for variant in map(str.strip, iterable): trans = []
if variant not in seen: norm = []
seen.add(variant)
yield self.to_ascii.transliterate(variant).strip() for var in varset:
t = self.to_ascii.transliterate(var).strip()
if t:
trans.append(t)
norm.append(var)
return trans, norm
def _generate_word_variants(self, norm_name: str) -> Iterable[str]: def _generate_word_variants(self, norm_name: str) -> Iterable[str]:
baseform = '^ ' + norm_name + ' ^' baseform = '^ ' + norm_name + ' ^'
@@ -116,10 +112,10 @@ class GenericTokenAnalysis:
pos = 0 pos = 0
force_space = False force_space = False
while pos < baselen: while pos < baselen:
full, repl = self.replacements.longest_prefix_item(baseform[pos:], frm = pos
(None, None)) repl, pos = self.replacements.longest_prefix(baseform, pos)
if full is not None: if repl is not None:
done = baseform[startpos:pos] done = baseform[startpos:frm]
partials = [v + done + r partials = [v + done + r
for v, r in itertools.product(partials, repl) for v, r in itertools.product(partials, repl)
if not force_space or r.startswith(' ')] if not force_space or r.startswith(' ')]
@@ -128,11 +124,10 @@ class GenericTokenAnalysis:
# to be helpful. Only use the original term. # to be helpful. Only use the original term.
startpos = 0 startpos = 0
break break
startpos = pos + len(full) if baseform[pos - 1] == ' ':
if full[-1] == ' ': pos -= 1
startpos -= 1
force_space = True force_space = True
pos = startpos startpos = pos
else: else:
pos += 1 pos += 1
force_space = False force_space = False

View File

@@ -0,0 +1,84 @@
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Simple dict-based implementation of a trie structure.
"""
from typing import TypeVar, Generic, Tuple, Optional, List, Dict
from collections import defaultdict
T = TypeVar('T')
class SimpleTrie(Generic[T]):
""" A simple read-only trie structure.
This structure supports examply one lookup operation,
which is longest-prefix lookup.
"""
def __init__(self, data: Optional[List[Tuple[str, T]]] = None) -> None:
self._tree: Dict[str, 'SimpleTrie[T]'] = defaultdict(SimpleTrie[T])
self._value: Optional[T] = None
self._prefix = ''
if data:
for key, value in data:
self._add(key, 0, value)
self._make_compact()
def _add(self, word: str, pos: int, value: T) -> None:
""" (Internal) Add a sub-word to the trie.
The word is added from index 'pos'. If the sub-word to add
is empty, then the trie saves the given value.
"""
if pos < len(word):
self._tree[word[pos]]._add(word, pos + 1, value)
else:
self._value = value
def _make_compact(self) -> None:
""" (Internal) Compress tree where there is exactly one subtree
and no value.
Compression works recursively starting at the leaf.
"""
for t in self._tree.values():
t._make_compact()
if len(self._tree) == 1 and self._value is None:
assert not self._prefix
for k, v in self._tree.items():
self._prefix = k + v._prefix
self._tree = v._tree
self._value = v._value
def longest_prefix(self, word: str, start: int = 0) -> Tuple[Optional[T], int]:
""" Return the longest prefix match for the given word starting at
the position 'start'.
The function returns a tuple with the value for the longest match and
the position of the word after the match. If no match was found at
all, the function returns (None, start).
"""
cur = self
pos = start
result: Tuple[Optional[T], int] = None, start
while True:
if cur._prefix:
if not word.startswith(cur._prefix, pos):
return result
pos += len(cur._prefix)
if cur._value:
result = cur._value, pos
if pos >= len(word) or word[pos] not in cur._tree:
return result
cur = cur._tree[word[pos]]
pos += 1

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Helper functions for executing external programs. Helper functions for executing external programs.
@@ -85,7 +85,7 @@ def _mk_tablespace_options(ttype: str, options: Mapping[str, Any]) -> List[str]:
def _find_osm2pgsql_cmd(cmdline: Optional[str]) -> str: def _find_osm2pgsql_cmd(cmdline: Optional[str]) -> str:
if cmdline is not None: if cmdline:
return cmdline return cmdline
in_path = shutil.which('osm2pgsql') in_path = shutil.which('osm2pgsql')

View File

@@ -108,8 +108,7 @@ async def add_tiger_data(data_dir: str, config: Configuration, threads: int,
async with QueryPool(dsn, place_threads, autocommit=True) as pool: async with QueryPool(dsn, place_threads, autocommit=True) as pool:
with tokenizer.name_analyzer() as analyzer: with tokenizer.name_analyzer() as analyzer:
lines = 0 for lineno, row in enumerate(tar, 1):
for row in tar:
try: try:
address = dict(street=row['street'], postcode=row['postcode']) address = dict(street=row['street'], postcode=row['postcode'])
args = ('SRID=4326;' + row['geometry'], args = ('SRID=4326;' + row['geometry'],
@@ -124,10 +123,8 @@ async def add_tiger_data(data_dir: str, config: Configuration, threads: int,
%s::INT, %s::TEXT, %s::JSONB, %s::TEXT)""", %s::INT, %s::TEXT, %s::JSONB, %s::TEXT)""",
args) args)
lines += 1 if not lineno % 1000:
if lines == 1000:
print('.', end='', flush=True) print('.', end='', flush=True)
lines = 0
print('', flush=True) print('', flush=True)

View File

@@ -30,8 +30,8 @@ class PointsCentroid:
if self.count == 0: if self.count == 0:
raise ValueError("No points available for centroid.") raise ValueError("No points available for centroid.")
return (float(self.sum_x/self.count)/10000000, return (self.sum_x / self.count / 10_000_000,
float(self.sum_y/self.count)/10000000) self.sum_y / self.count / 10_000_000)
def __len__(self) -> int: def __len__(self) -> int:
return self.count return self.count
@@ -40,8 +40,8 @@ class PointsCentroid:
if isinstance(other, Collection) and len(other) == 2: if isinstance(other, Collection) and len(other) == 2:
if all(isinstance(p, (float, int)) for p in other): if all(isinstance(p, (float, int)) for p in other):
x, y = other x, y = other
self.sum_x += int(x * 10000000) self.sum_x += int(x * 10_000_000)
self.sum_y += int(y * 10000000) self.sum_y += int(y * 10_000_000)
self.count += 1 self.count += 1
return self return self

View File

@@ -55,7 +55,7 @@ def parse_version(version: str) -> NominatimVersion:
return NominatimVersion(*[int(x) for x in parts[:2] + parts[2].split('-')]) return NominatimVersion(*[int(x) for x in parts[:2] + parts[2].split('-')])
NOMINATIM_VERSION = parse_version('5.0.0-0') NOMINATIM_VERSION = parse_version('5.1.0-0')
POSTGRESQL_REQUIRED_VERSION = (12, 0) POSTGRESQL_REQUIRED_VERSION = (12, 0)
POSTGIS_REQUIRED_VERSION = (3, 0) POSTGIS_REQUIRED_VERSION = (3, 0)

View File

@@ -3,9 +3,8 @@
Feature: Searches with postcodes Feature: Searches with postcodes
Various searches involving postcodes Various searches involving postcodes
@Fail
Scenario: US 5+4 ZIP codes are shortened to 5 ZIP codes if not found Scenario: US 5+4 ZIP codes are shortened to 5 ZIP codes if not found
When sending json search query "36067 1111, us" with address When sending json search query "36067-1111, us" with address
Then result addresses contain Then result addresses contain
| postcode | | postcode |
| 36067 | | 36067 |

View File

@@ -67,3 +67,13 @@ Feature: Structured search queries
Then result addresses contain Then result addresses contain
| town | | town |
| Vaduz | | Vaduz |
#3651
Scenario: Structured search with surrounding extra characters
When sending xml search query "" with address
| street | city | postalcode |
| "19 Am schrägen Weg" | "Vaduz" | "9491" |
Then result addresses contain
| house_number | road |
| 19 | Am Schrägen Weg |

View File

@@ -170,7 +170,7 @@ Feature: Import of postcodes
| object | postcode | | object | postcode |
| W93 | 11200 | | W93 | 11200 |
Scenario: Postcodes are added to the postcode and word table Scenario: Postcodes are added to the postcode
Given the places Given the places
| osm | class | type | addr+postcode | addr+housenumber | geometry | | osm | class | type | addr+postcode | addr+housenumber | geometry |
| N34 | place | house | 01982 | 111 |country:de | | N34 | place | house | 01982 | 111 |country:de |
@@ -178,7 +178,6 @@ Feature: Import of postcodes
Then location_postcode contains exactly Then location_postcode contains exactly
| country | postcode | geometry | | country | postcode | geometry |
| de | 01982 | country:de | | de | 01982 | country:de |
And there are word tokens for postcodes 01982
@Fail @Fail
@@ -195,7 +194,7 @@ Feature: Import of postcodes
| E45 2 | gb | 23 | 5 | | E45 2 | gb | 23 | 5 |
| Y45 | gb | 21 | 5 | | Y45 | gb | 21 | 5 |
Scenario: Postcodes outside all countries are not added to the postcode and word table Scenario: Postcodes outside all countries are not added to the postcode table
Given the places Given the places
| osm | class | type | addr+postcode | addr+housenumber | addr+place | geometry | | osm | class | type | addr+postcode | addr+housenumber | addr+place | geometry |
| N34 | place | house | 01982 | 111 | Null Island | 0 0.00001 | | N34 | place | house | 01982 | 111 | Null Island | 0 0.00001 |
@@ -205,7 +204,6 @@ Feature: Import of postcodes
When importing When importing
Then location_postcode contains exactly Then location_postcode contains exactly
| country | postcode | geometry | | country | postcode | geometry |
And there are no word tokens for postcodes 01982
When sending search query "111, 01982 Null Island" When sending search query "111, 01982 Null Island"
Then results contain Then results contain
| osm | display_name | | osm | display_name |

View File

@@ -2,7 +2,7 @@
Feature: Update of postcode Feature: Update of postcode
Tests for updating of data related to postcodes Tests for updating of data related to postcodes
Scenario: A new postcode appears in the postcode and word table Scenario: A new postcode appears in the postcode table
Given the places Given the places
| osm | class | type | addr+postcode | addr+housenumber | geometry | | osm | class | type | addr+postcode | addr+housenumber | geometry |
| N34 | place | house | 01982 | 111 |country:de | | N34 | place | house | 01982 | 111 |country:de |
@@ -18,9 +18,8 @@ Feature: Update of postcode
| country | postcode | geometry | | country | postcode | geometry |
| de | 01982 | country:de | | de | 01982 | country:de |
| ch | 4567 | country:ch | | ch | 4567 | country:ch |
And there are word tokens for postcodes 01982,4567
Scenario: When the last postcode is deleted, it is deleted from postcode and word Scenario: When the last postcode is deleted, it is deleted from postcode
Given the places Given the places
| osm | class | type | addr+postcode | addr+housenumber | geometry | | osm | class | type | addr+postcode | addr+housenumber | geometry |
| N34 | place | house | 01982 | 111 |country:de | | N34 | place | house | 01982 | 111 |country:de |
@@ -31,10 +30,8 @@ Feature: Update of postcode
Then location_postcode contains exactly Then location_postcode contains exactly
| country | postcode | geometry | | country | postcode | geometry |
| ch | 4567 | country:ch | | ch | 4567 | country:ch |
And there are word tokens for postcodes 4567
And there are no word tokens for postcodes 01982
Scenario: A postcode is not deleted from postcode and word when it exist in another country Scenario: A postcode is not deleted from postcode when it exist in another country
Given the places Given the places
| osm | class | type | addr+postcode | addr+housenumber | geometry | | osm | class | type | addr+postcode | addr+housenumber | geometry |
| N34 | place | house | 01982 | 111 |country:de | | N34 | place | house | 01982 | 111 |country:de |
@@ -45,7 +42,6 @@ Feature: Update of postcode
Then location_postcode contains exactly Then location_postcode contains exactly
| country | postcode | geometry | | country | postcode | geometry |
| fr | 01982 | country:fr | | fr | 01982 | country:fr |
And there are word tokens for postcodes 01982
Scenario: Updating a postcode is reflected in postcode table Scenario: Updating a postcode is reflected in postcode table
Given the places Given the places
@@ -59,7 +55,6 @@ Feature: Update of postcode
Then location_postcode contains exactly Then location_postcode contains exactly
| country | postcode | geometry | | country | postcode | geometry |
| de | 20453 | country:de | | de | 20453 | country:de |
And there are word tokens for postcodes 20453
Scenario: When changing from a postcode type, the entry appears in placex Scenario: When changing from a postcode type, the entry appears in placex
When importing When importing
@@ -80,7 +75,6 @@ Feature: Update of postcode
Then location_postcode contains exactly Then location_postcode contains exactly
| country | postcode | geometry | | country | postcode | geometry |
| de | 20453 | country:de | | de | 20453 | country:de |
And there are word tokens for postcodes 20453
Scenario: When changing to a postcode type, the entry disappears from placex Scenario: When changing to a postcode type, the entry disappears from placex
When importing When importing
@@ -101,7 +95,6 @@ Feature: Update of postcode
Then location_postcode contains exactly Then location_postcode contains exactly
| country | postcode | geometry | | country | postcode | geometry |
| de | 01982 | country:de | | de | 01982 | country:de |
And there are word tokens for postcodes 01982
Scenario: When a parent is deleted, the postcode gets a new parent Scenario: When a parent is deleted, the postcode gets a new parent
Given the grid with origin DE Given the grid with origin DE

View File

@@ -2,43 +2,45 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
from pathlib import Path from pathlib import Path
import sys import sys
from behave import * from behave import * # noqa
sys.path.insert(1, str(Path(__file__, '..', '..', '..', 'src').resolve())) sys.path.insert(1, str(Path(__file__, '..', '..', '..', 'src').resolve()))
from steps.geometry_factory import GeometryFactory from steps.geometry_factory import GeometryFactory # noqa: E402
from steps.nominatim_environment import NominatimEnvironment from steps.nominatim_environment import NominatimEnvironment # noqa: E402
TEST_BASE_DIR = Path(__file__, '..', '..').resolve() TEST_BASE_DIR = Path(__file__, '..', '..').resolve()
userconfig = { userconfig = {
'REMOVE_TEMPLATE' : False, 'REMOVE_TEMPLATE': False,
'KEEP_TEST_DB' : False, 'KEEP_TEST_DB': False,
'DB_HOST' : None, 'DB_HOST': None,
'DB_PORT' : None, 'DB_PORT': None,
'DB_USER' : None, 'DB_USER': None,
'DB_PASS' : None, 'DB_PASS': None,
'TEMPLATE_DB' : 'test_template_nominatim', 'TEMPLATE_DB': 'test_template_nominatim',
'TEST_DB' : 'test_nominatim', 'TEST_DB': 'test_nominatim',
'API_TEST_DB' : 'test_api_nominatim', 'API_TEST_DB': 'test_api_nominatim',
'API_TEST_FILE' : TEST_BASE_DIR / 'testdb' / 'apidb-test-data.pbf', 'API_TEST_FILE': TEST_BASE_DIR / 'testdb' / 'apidb-test-data.pbf',
'TOKENIZER' : None, # Test with a custom tokenizer 'TOKENIZER': None, # Test with a custom tokenizer
'STYLE' : 'extratags', 'STYLE': 'extratags',
'API_ENGINE': 'falcon' 'API_ENGINE': 'falcon'
} }
use_step_matcher("re")
use_step_matcher("re") # noqa: F405
def before_all(context): def before_all(context):
# logging setup # logging setup
context.config.setup_logging() context.config.setup_logging()
# set up -D options # set up -D options
for k,v in userconfig.items(): for k, v in userconfig.items():
context.config.userdata.setdefault(k, v) context.config.userdata.setdefault(k, v)
# Nominatim test setup # Nominatim test setup
context.nominatim = NominatimEnvironment(context.config.userdata) context.nominatim = NominatimEnvironment(context.config.userdata)
@@ -46,7 +48,7 @@ def before_all(context):
def before_scenario(context, scenario): def before_scenario(context, scenario):
if not 'SQLITE' in context.tags \ if 'SQLITE' not in context.tags \
and context.config.userdata['API_TEST_DB'].startswith('sqlite:'): and context.config.userdata['API_TEST_DB'].startswith('sqlite:'):
context.scenario.skip("Not usable with Sqlite database.") context.scenario.skip("Not usable with Sqlite database.")
elif 'DB' in context.tags: elif 'DB' in context.tags:
@@ -56,6 +58,7 @@ def before_scenario(context, scenario):
elif 'UNKNOWNDB' in context.tags: elif 'UNKNOWNDB' in context.tags:
context.nominatim.setup_unknown_db() context.nominatim.setup_unknown_db()
def after_scenario(context, scenario): def after_scenario(context, scenario):
if 'DB' in context.tags: if 'DB' in context.tags:
context.nominatim.teardown_db(context) context.nominatim.teardown_db(context)

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2023 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Collection of assertion functions used for the steps. Collection of assertion functions used for the steps.
@@ -11,20 +11,10 @@ import json
import math import math
import re import re
class Almost:
""" Compares a float value with a certain jitter.
"""
def __init__(self, value, offset=0.00001):
self.value = value
self.offset = offset
def __eq__(self, other): OSM_TYPE = {'N': 'node', 'W': 'way', 'R': 'relation',
return abs(other - self.value) < self.offset 'n': 'node', 'w': 'way', 'r': 'relation',
'node': 'n', 'way': 'w', 'relation': 'r'}
OSM_TYPE = {'N' : 'node', 'W' : 'way', 'R' : 'relation',
'n' : 'node', 'w' : 'way', 'r' : 'relation',
'node' : 'n', 'way' : 'w', 'relation' : 'r'}
class OsmType: class OsmType:
@@ -34,11 +24,9 @@ class OsmType:
def __init__(self, value): def __init__(self, value):
self.value = value self.value = value
def __eq__(self, other): def __eq__(self, other):
return other == self.value or other == OSM_TYPE[self.value] return other == self.value or other == OSM_TYPE[self.value]
def __str__(self): def __str__(self):
return f"{self.value} or {OSM_TYPE[self.value]}" return f"{self.value} or {OSM_TYPE[self.value]}"
@@ -92,7 +80,6 @@ class Bbox:
return str(self.coord) return str(self.coord)
def check_for_attributes(obj, attrs, presence='present'): def check_for_attributes(obj, attrs, presence='present'):
""" Check that the object has the given attributes. 'attrs' is a """ Check that the object has the given attributes. 'attrs' is a
string with a comma-separated list of attributes. If 'presence' string with a comma-separated list of attributes. If 'presence'
@@ -110,4 +97,3 @@ def check_for_attributes(obj, attrs, presence='present'):
else: else:
assert attr in obj, \ assert attr in obj, \
f"No attribute '{attr}'. Full response:\n{_dump_json()}" f"No attribute '{attr}'. Full response:\n{_dump_json()}"

View File

@@ -2,261 +2,261 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2022 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Collection of aliases for various world coordinates. Collection of aliases for various world coordinates.
""" """
ALIASES = { ALIASES = {
# Country aliases # Country aliases
'AD': (1.58972, 42.54241), 'AD': (1.58972, 42.54241),
'AE': (54.61589, 24.82431), 'AE': (54.61589, 24.82431),
'AF': (65.90264, 34.84708), 'AF': (65.90264, 34.84708),
'AG': (-61.72430, 17.069), 'AG': (-61.72430, 17.069),
'AI': (-63.10571, 18.25461), 'AI': (-63.10571, 18.25461),
'AL': (19.84941, 40.21232), 'AL': (19.84941, 40.21232),
'AM': (44.64229, 40.37821), 'AM': (44.64229, 40.37821),
'AO': (16.21924, -12.77014), 'AO': (16.21924, -12.77014),
'AQ': (44.99999, -75.65695), 'AQ': (44.99999, -75.65695),
'AR': (-61.10759, -34.37615), 'AR': (-61.10759, -34.37615),
'AS': (-170.68470, -14.29307), 'AS': (-170.68470, -14.29307),
'AT': (14.25747, 47.36542), 'AT': (14.25747, 47.36542),
'AU': (138.23155, -23.72068), 'AU': (138.23155, -23.72068),
'AW': (-69.98255, 12.555), 'AW': (-69.98255, 12.555),
'AX': (19.91839, 59.81682), 'AX': (19.91839, 59.81682),
'AZ': (48.38555, 40.61639), 'AZ': (48.38555, 40.61639),
'BA': (17.18514, 44.25582), 'BA': (17.18514, 44.25582),
'BB': (-59.53342, 13.19), 'BB': (-59.53342, 13.19),
'BD': (89.75989, 24.34205), 'BD': (89.75989, 24.34205),
'BE': (4.90078, 50.34682), 'BE': (4.90078, 50.34682),
'BF': (-0.56743, 11.90471), 'BF': (-0.56743, 11.90471),
'BG': (24.80616, 43.09859), 'BG': (24.80616, 43.09859),
'BH': (50.52032, 25.94685), 'BH': (50.52032, 25.94685),
'BI': (29.54561, -2.99057), 'BI': (29.54561, -2.99057),
'BJ': (2.70062, 10.02792), 'BJ': (2.70062, 10.02792),
'BL': (-62.79349, 17.907), 'BL': (-62.79349, 17.907),
'BM': (-64.77406, 32.30199), 'BM': (-64.77406, 32.30199),
'BN': (114.52196, 4.28638), 'BN': (114.52196, 4.28638),
'BO': (-62.02473, -17.77723), 'BO': (-62.02473, -17.77723),
'BQ': (-63.14322, 17.566), 'BQ': (-63.14322, 17.566),
'BR': (-45.77065, -9.58685), 'BR': (-45.77065, -9.58685),
'BS': (-77.60916, 23.8745), 'BS': (-77.60916, 23.8745),
'BT': (90.01350, 27.28137), 'BT': (90.01350, 27.28137),
'BV': (3.35744, -54.4215), 'BV': (3.35744, -54.4215),
'BW': (23.51505, -23.48391), 'BW': (23.51505, -23.48391),
'BY': (26.77259, 53.15885), 'BY': (26.77259, 53.15885),
'BZ': (-88.63489, 16.33951), 'BZ': (-88.63489, 16.33951),
'CA': (-107.74817, 67.12612), 'CA': (-107.74817, 67.12612),
'CC': (96.84420, -12.01734), 'CC': (96.84420, -12.01734),
'CD': (24.09544, -1.67713), 'CD': (24.09544, -1.67713),
'CF': (22.58701, 5.98438), 'CF': (22.58701, 5.98438),
'CG': (15.78875, 0.40388), 'CG': (15.78875, 0.40388),
'CH': (7.65705, 46.57446), 'CH': (7.65705, 46.57446),
'CI': (-6.31190, 6.62783), 'CI': (-6.31190, 6.62783),
'CK': (-159.77835, -21.23349), 'CK': (-159.77835, -21.23349),
'CL': (-70.41790, -53.77189), 'CL': (-70.41790, -53.77189),
'CM': (13.26022, 5.94519), 'CM': (13.26022, 5.94519),
'CN': (96.44285, 38.04260), 'CN': (96.44285, 38.04260),
'CO': (-72.52951, 2.45174), 'CO': (-72.52951, 2.45174),
'CR': (-83.83314, 9.93514), 'CR': (-83.83314, 9.93514),
'CU': (-80.81673, 21.88852), 'CU': (-80.81673, 21.88852),
'CV': (-24.50810, 14.929), 'CV': (-24.50810, 14.929),
'CW': (-68.96409, 12.1845), 'CW': (-68.96409, 12.1845),
'CX': (105.62411, -10.48417), 'CX': (105.62411, -10.48417),
'CY': (32.95922, 35.37010), 'CY': (32.95922, 35.37010),
'CZ': (16.32098, 49.50692), 'CZ': (16.32098, 49.50692),
'DE': (9.30716, 50.21289), 'DE': (9.30716, 50.21289),
'DJ': (42.96904, 11.41542), 'DJ': (42.96904, 11.41542),
'DK': (9.18490, 55.98916), 'DK': (9.18490, 55.98916),
'DM': (-61.00358, 15.65470), 'DM': (-61.00358, 15.65470),
'DO': (-69.62855, 18.58841), 'DO': (-69.62855, 18.58841),
'DZ': (4.24749, 25.79721), 'DZ': (4.24749, 25.79721),
'EC': (-77.45831, -0.98284), 'EC': (-77.45831, -0.98284),
'EE': (23.94288, 58.43952), 'EE': (23.94288, 58.43952),
'EG': (28.95293, 28.17718), 'EG': (28.95293, 28.17718),
'EH': (-13.69031, 25.01241), 'EH': (-13.69031, 25.01241),
'ER': (39.01223, 14.96033), 'ER': (39.01223, 14.96033),
'ES': (-2.59110, 38.79354), 'ES': (-2.59110, 38.79354),
'ET': (38.61697, 7.71399), 'ET': (38.61697, 7.71399),
'FI': (26.89798, 63.56194), 'FI': (26.89798, 63.56194),
'FJ': (177.91853, -17.74237), 'FJ': (177.91853, -17.74237),
'FK': (-58.99044, -51.34509), 'FK': (-58.99044, -51.34509),
'FM': (151.95358, 8.5045), 'FM': (151.95358, 8.5045),
'FO': (-6.60483, 62.10000), 'FO': (-6.60483, 62.10000),
'FR': (0.28410, 47.51045), 'FR': (0.28410, 47.51045),
'GA': (10.81070, -0.07429), 'GA': (10.81070, -0.07429),
'GB': (-0.92823, 52.01618), 'GB': (-0.92823, 52.01618),
'GD': (-61.64524, 12.191), 'GD': (-61.64524, 12.191),
'GE': (44.16664, 42.00385), 'GE': (44.16664, 42.00385),
'GF': (-53.46524, 3.56188), 'GF': (-53.46524, 3.56188),
'GG': (-2.50580, 49.58543), 'GG': (-2.50580, 49.58543),
'GH': (-0.46348, 7.16051), 'GH': (-0.46348, 7.16051),
'GI': (-5.32053, 36.11066), 'GI': (-5.32053, 36.11066),
'GL': (-33.85511, 74.66355), 'GL': (-33.85511, 74.66355),
'GM': (-16.40960, 13.25), 'GM': (-16.40960, 13.25),
'GN': (-13.83940, 10.96291), 'GN': (-13.83940, 10.96291),
'GP': (-61.68712, 16.23049), 'GP': (-61.68712, 16.23049),
'GQ': (10.23973, 1.43119), 'GQ': (10.23973, 1.43119),
'GR': (23.17850, 39.06206), 'GR': (23.17850, 39.06206),
'GS': (-36.49430, -54.43067), 'GS': (-36.49430, -54.43067),
'GT': (-90.74368, 15.20428), 'GT': (-90.74368, 15.20428),
'GU': (144.73362, 13.44413), 'GU': (144.73362, 13.44413),
'GW': (-14.83525, 11.92486), 'GW': (-14.83525, 11.92486),
'GY': (-58.45167, 5.73698), 'GY': (-58.45167, 5.73698),
'HK': (114.18577, 22.34923), 'HK': (114.18577, 22.34923),
'HM': (73.68230, -53.22105), 'HM': (73.68230, -53.22105),
'HN': (-86.95414, 15.23820), 'HN': (-86.95414, 15.23820),
'HR': (17.49966, 45.52689), 'HR': (17.49966, 45.52689),
'HT': (-73.51925, 18.32492), 'HT': (-73.51925, 18.32492),
'HU': (20.35362, 47.51721), 'HU': (20.35362, 47.51721),
'ID': (123.34505, -0.83791), 'ID': (123.34505, -0.83791),
'IE': (-9.00520, 52.87725), 'IE': (-9.00520, 52.87725),
'IL': (35.46314, 32.86165), 'IL': (35.46314, 32.86165),
'IM': (-4.86740, 54.023), 'IM': (-4.86740, 54.023),
'IN': (88.67620, 27.86155), 'IN': (88.67620, 27.86155),
'IO': (71.42743, -6.14349), 'IO': (71.42743, -6.14349),
'IQ': (42.58109, 34.26103), 'IQ': (42.58109, 34.26103),
'IR': (56.09355, 30.46751), 'IR': (56.09355, 30.46751),
'IS': (-17.51785, 64.71687), 'IS': (-17.51785, 64.71687),
'IT': (10.42639, 44.87904), 'IT': (10.42639, 44.87904),
'JE': (-2.19261, 49.12458), 'JE': (-2.19261, 49.12458),
'JM': (-76.84020, 18.3935), 'JM': (-76.84020, 18.3935),
'JO': (36.55552, 30.75741), 'JO': (36.55552, 30.75741),
'JP': (138.72531, 35.92099), 'JP': (138.72531, 35.92099),
'KE': (36.90602, 1.08512), 'KE': (36.90602, 1.08512),
'KG': (76.15571, 41.66497), 'KG': (76.15571, 41.66497),
'KH': (104.31901, 12.95555), 'KH': (104.31901, 12.95555),
'KI': (173.63353, 0.139), 'KI': (173.63353, 0.139),
'KM': (44.31474, -12.241), 'KM': (44.31474, -12.241),
'KN': (-62.69379, 17.2555), 'KN': (-62.69379, 17.2555),
'KP': (126.65575, 39.64575), 'KP': (126.65575, 39.64575),
'KR': (127.27740, 36.41388), 'KR': (127.27740, 36.41388),
'KW': (47.30684, 29.69180), 'KW': (47.30684, 29.69180),
'KY': (-81.07455, 19.29949), 'KY': (-81.07455, 19.29949),
'KZ': (72.00811, 49.88855), 'KZ': (72.00811, 49.88855),
'LA': (102.44391, 19.81609), 'LA': (102.44391, 19.81609),
'LB': (35.48464, 33.41766), 'LB': (35.48464, 33.41766),
'LC': (-60.97894, 13.891), 'LC': (-60.97894, 13.891),
'LI': (9.54693, 47.15934), 'LI': (9.54693, 47.15934),
'LK': (80.38520, 8.41649), 'LK': (80.38520, 8.41649),
'LR': (-11.16960, 4.04122), 'LR': (-11.16960, 4.04122),
'LS': (28.66984, -29.94538), 'LS': (28.66984, -29.94538),
'LT': (24.51735, 55.49293), 'LT': (24.51735, 55.49293),
'LU': (6.08649, 49.81533), 'LU': (6.08649, 49.81533),
'LV': (23.51033, 56.67144), 'LV': (23.51033, 56.67144),
'LY': (15.36841, 28.12177), 'LY': (15.36841, 28.12177),
'MA': (-4.03061, 33.21696), 'MA': (-4.03061, 33.21696),
'MC': (7.47743, 43.62917), 'MC': (7.47743, 43.62917),
'MD': (29.61725, 46.66517), 'MD': (29.61725, 46.66517),
'ME': (19.72291, 43.02441), 'ME': (19.72291, 43.02441),
'MF': (-63.06666, 18.08102), 'MF': (-63.06666, 18.08102),
'MG': (45.86378, -20.50245), 'MG': (45.86378, -20.50245),
'MH': (171.94982, 5.983), 'MH': (171.94982, 5.983),
'MK': (21.42108, 41.08980), 'MK': (21.42108, 41.08980),
'ML': (-1.93310, 16.46993), 'ML': (-1.93310, 16.46993),
'MM': (95.54624, 21.09620), 'MM': (95.54624, 21.09620),
'MN': (99.81138, 48.18615), 'MN': (99.81138, 48.18615),
'MO': (113.56441, 22.16209), 'MO': (113.56441, 22.16209),
'MP': (145.21345, 14.14902), 'MP': (145.21345, 14.14902),
'MQ': (-60.81128, 14.43706), 'MQ': (-60.81128, 14.43706),
'MR': (-9.42324, 22.59251), 'MR': (-9.42324, 22.59251),
'MS': (-62.19455, 16.745), 'MS': (-62.19455, 16.745),
'MT': (14.38363, 35.94467), 'MT': (14.38363, 35.94467),
'MU': (57.55121, -20.41), 'MU': (57.55121, -20.41),
'MV': (73.39292, 4.19375), 'MV': (73.39292, 4.19375),
'MW': (33.95722, -12.28218), 'MW': (33.95722, -12.28218),
'MX': (-105.89221, 25.86826), 'MX': (-105.89221, 25.86826),
'MY': (112.71154, 2.10098), 'MY': (112.71154, 2.10098),
'MZ': (37.58689, -13.72682), 'MZ': (37.58689, -13.72682),
'NA': (16.68569, -21.46572), 'NA': (16.68569, -21.46572),
'NC': (164.95322, -20.38889), 'NC': (164.95322, -20.38889),
'NE': (10.06041, 19.08273), 'NE': (10.06041, 19.08273),
'NF': (167.95718, -29.0645), 'NF': (167.95718, -29.0645),
'NG': (10.17781, 10.17804), 'NG': (10.17781, 10.17804),
'NI': (-85.87974, 13.21715), 'NI': (-85.87974, 13.21715),
'NL': (-68.57062, 12.041), 'NL': (-68.57062, 12.041),
'NO': (23.11556, 70.09934), 'NO': (23.11556, 70.09934),
'NP': (83.36259, 28.13107), 'NP': (83.36259, 28.13107),
'NR': (166.93479, -0.5275), 'NR': (166.93479, -0.5275),
'NU': (-169.84873, -19.05305), 'NU': (-169.84873, -19.05305),
'NZ': (167.97209, -45.13056), 'NZ': (167.97209, -45.13056),
'OM': (56.86055, 20.47413), 'OM': (56.86055, 20.47413),
'PA': (-79.40160, 8.80656), 'PA': (-79.40160, 8.80656),
'PE': (-78.66540, -7.54711), 'PE': (-78.66540, -7.54711),
'PF': (-145.05719, -16.70862), 'PF': (-145.05719, -16.70862),
'PG': (146.64600, -7.37427), 'PG': (146.64600, -7.37427),
'PH': (121.48359, 15.09965), 'PH': (121.48359, 15.09965),
'PK': (72.11347, 31.14629), 'PK': (72.11347, 31.14629),
'PL': (17.88136, 52.77182), 'PL': (17.88136, 52.77182),
'PM': (-56.19515, 46.78324), 'PM': (-56.19515, 46.78324),
'PN': (-130.10642, -25.06955), 'PN': (-130.10642, -25.06955),
'PR': (-65.88755, 18.37169), 'PR': (-65.88755, 18.37169),
'PS': (35.39801, 32.24773), 'PS': (35.39801, 32.24773),
'PT': (-8.45743, 40.11154), 'PT': (-8.45743, 40.11154),
'PW': (134.49645, 7.3245), 'PW': (134.49645, 7.3245),
'PY': (-59.51787, -22.41281), 'PY': (-59.51787, -22.41281),
'QA': (51.49903, 24.99816), 'QA': (51.49903, 24.99816),
'RE': (55.77345, -21.36388), 'RE': (55.77345, -21.36388),
'RO': (26.37632, 45.36120), 'RO': (26.37632, 45.36120),
'RS': (20.40371, 44.56413), 'RS': (20.40371, 44.56413),
'RU': (116.44060, 59.06780), 'RU': (116.44060, 59.06780),
'RW': (29.57882, -1.62404), 'RW': (29.57882, -1.62404),
'SA': (47.73169, 22.43790), 'SA': (47.73169, 22.43790),
'SB': (164.63894, -10.23606), 'SB': (164.63894, -10.23606),
'SC': (46.36566, -9.454), 'SC': (46.36566, -9.454),
'SD': (28.14720, 14.56423), 'SD': (28.14720, 14.56423),
'SE': (15.68667, 60.35568), 'SE': (15.68667, 60.35568),
'SG': (103.84187, 1.304), 'SG': (103.84187, 1.304),
'SH': (-12.28155, -37.11546), 'SH': (-12.28155, -37.11546),
'SI': (14.04738, 46.39085), 'SI': (14.04738, 46.39085),
'SJ': (15.27552, 79.23365), 'SJ': (15.27552, 79.23365),
'SK': (20.41603, 48.86970), 'SK': (20.41603, 48.86970),
'SL': (-11.47773, 8.78156), 'SL': (-11.47773, 8.78156),
'SM': (12.46062, 43.94279), 'SM': (12.46062, 43.94279),
'SN': (-15.37111, 14.99477), 'SN': (-15.37111, 14.99477),
'SO': (46.93383, 9.34094), 'SO': (46.93383, 9.34094),
'SR': (-55.42864, 4.56985), 'SR': (-55.42864, 4.56985),
'SS': (28.13573, 8.50933), 'SS': (28.13573, 8.50933),
'ST': (6.61025, 0.2215), 'ST': (6.61025, 0.2215),
'SV': (-89.36665, 13.43072), 'SV': (-89.36665, 13.43072),
'SX': (-63.15393, 17.9345), 'SX': (-63.15393, 17.9345),
'SY': (38.15513, 35.34221), 'SY': (38.15513, 35.34221),
'SZ': (31.78263, -26.14244), 'SZ': (31.78263, -26.14244),
'TC': (-71.32554, 21.35), 'TC': (-71.32554, 21.35),
'TD': (17.42092, 13.46223), 'TD': (17.42092, 13.46223),
'TF': (137.5, -67.5), 'TF': (137.5, -67.5),
'TG': (1.06983, 7.87677), 'TG': (1.06983, 7.87677),
'TH': (102.00877, 16.42310), 'TH': (102.00877, 16.42310),
'TJ': (71.91349, 39.01527), 'TJ': (71.91349, 39.01527),
'TK': (-171.82603, -9.20990), 'TK': (-171.82603, -9.20990),
'TL': (126.22520, -8.72636), 'TL': (126.22520, -8.72636),
'TM': (57.71603, 39.92534), 'TM': (57.71603, 39.92534),
'TN': (9.04958, 34.84199), 'TN': (9.04958, 34.84199),
'TO': (-176.99320, -23.11104), 'TO': (-176.99320, -23.11104),
'TR': (32.82002, 39.86350), 'TR': (32.82002, 39.86350),
'TT': (-60.70793, 11.1385), 'TT': (-60.70793, 11.1385),
'TV': (178.77499, -9.41685), 'TV': (178.77499, -9.41685),
'TW': (120.30074, 23.17002), 'TW': (120.30074, 23.17002),
'TZ': (33.53892, -5.01840), 'TZ': (33.53892, -5.01840),
'UA': (33.44335, 49.30619), 'UA': (33.44335, 49.30619),
'UG': (32.96523, 2.08584), 'UG': (32.96523, 2.08584),
'UM': (-169.50993, 16.74605), 'UM': (-169.50993, 16.74605),
'US': (-116.39535, 40.71379), 'US': (-116.39535, 40.71379),
'UY': (-56.46505, -33.62658), 'UY': (-56.46505, -33.62658),
'UZ': (61.35529, 42.96107), 'UZ': (61.35529, 42.96107),
'VA': (12.33197, 42.04931), 'VA': (12.33197, 42.04931),
'VC': (-61.09905, 13.316), 'VC': (-61.09905, 13.316),
'VE': (-64.88323, 7.69849), 'VE': (-64.88323, 7.69849),
'VG': (-64.62479, 18.419), 'VG': (-64.62479, 18.419),
'VI': (-64.88950, 18.32263), 'VI': (-64.88950, 18.32263),
'VN': (104.20179, 10.27644), 'VN': (104.20179, 10.27644),
'VU': (167.31919, -15.88687), 'VU': (167.31919, -15.88687),
'WF': (-176.20781, -13.28535), 'WF': (-176.20781, -13.28535),
'WS': (-172.10966, -13.85093), 'WS': (-172.10966, -13.85093),
'YE': (45.94562, 16.16338), 'YE': (45.94562, 16.16338),
'YT': (44.93774, -12.60882), 'YT': (44.93774, -12.60882),
'ZA': (23.19488, -30.43276), 'ZA': (23.19488, -30.43276),
'ZM': (26.38618, -14.39966), 'ZM': (26.38618, -14.39966),
'ZW': (30.12419, -19.86907) 'ZW': (30.12419, -19.86907)
} }

View File

@@ -2,13 +2,11 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2022 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
from pathlib import Path
import os
from steps.geometry_alias import ALIASES from steps.geometry_alias import ALIASES
class GeometryFactory: class GeometryFactory:
""" Provides functions to create geometries from coordinates and data grids. """ Provides functions to create geometries from coordinates and data grids.
""" """
@@ -47,7 +45,6 @@ class GeometryFactory:
return "ST_SetSRID('{}'::geometry, 4326)".format(out) return "ST_SetSRID('{}'::geometry, 4326)".format(out)
def mk_wkt_point(self, point): def mk_wkt_point(self, point):
""" Parse a point description. """ Parse a point description.
The point may either consist of 'x y' coordinates or a number The point may either consist of 'x y' coordinates or a number
@@ -65,7 +62,6 @@ class GeometryFactory:
assert pt is not None, "Scenario error: Point '{}' not found in grid".format(geom) assert pt is not None, "Scenario error: Point '{}' not found in grid".format(geom)
return "{} {}".format(*pt) return "{} {}".format(*pt)
def mk_wkt_points(self, geom): def mk_wkt_points(self, geom):
""" Parse a list of points. """ Parse a list of points.
The list must be a comma-separated list of points. Points The list must be a comma-separated list of points. Points
@@ -73,7 +69,6 @@ class GeometryFactory:
""" """
return ','.join([self.mk_wkt_point(x) for x in geom.split(',')]) return ','.join([self.mk_wkt_point(x) for x in geom.split(',')])
def set_grid(self, lines, grid_step, origin=(0.0, 0.0)): def set_grid(self, lines, grid_step, origin=(0.0, 0.0)):
""" Replace the grid with one from the given lines. """ Replace the grid with one from the given lines.
""" """
@@ -87,7 +82,6 @@ class GeometryFactory:
x += grid_step x += grid_step
y += grid_step y += grid_step
def grid_node(self, nodeid): def grid_node(self, nodeid):
""" Get the coordinates for the given grid node. """ Get the coordinates for the given grid node.
""" """

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2023 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Classes wrapping HTTP responses from the Nominatim API. Classes wrapping HTTP responses from the Nominatim API.
@@ -11,7 +11,7 @@ import re
import json import json
import xml.etree.ElementTree as ET import xml.etree.ElementTree as ET
from check_functions import Almost, OsmType, Field, check_for_attributes from check_functions import OsmType, Field, check_for_attributes
class GenericResponse: class GenericResponse:
@@ -45,7 +45,6 @@ class GenericResponse:
else: else:
self.result = [self.result] self.result = [self.result]
def _parse_geojson(self): def _parse_geojson(self):
self._parse_json() self._parse_json()
if self.result: if self.result:
@@ -76,7 +75,6 @@ class GenericResponse:
new['__' + k] = v new['__' + k] = v
self.result.append(new) self.result.append(new)
def _parse_geocodejson(self): def _parse_geocodejson(self):
self._parse_geojson() self._parse_geojson()
if self.result: if self.result:
@@ -87,7 +85,6 @@ class GenericResponse:
inner = r.pop('geocoding') inner = r.pop('geocoding')
r.update(inner) r.update(inner)
def assert_address_field(self, idx, field, value): def assert_address_field(self, idx, field, value):
""" Check that result rows`idx` has a field `field` with value `value` """ Check that result rows`idx` has a field `field` with value `value`
in its address. If idx is None, then all results are checked. in its address. If idx is None, then all results are checked.
@@ -103,7 +100,6 @@ class GenericResponse:
address = self.result[idx]['address'] address = self.result[idx]['address']
self.check_row_field(idx, field, value, base=address) self.check_row_field(idx, field, value, base=address)
def match_row(self, row, context=None, field=None): def match_row(self, row, context=None, field=None):
""" Match the result fields against the given behave table row. """ Match the result fields against the given behave table row.
""" """
@@ -139,7 +135,6 @@ class GenericResponse:
else: else:
self.check_row_field(i, name, Field(value), base=subdict) self.check_row_field(i, name, Field(value), base=subdict)
def check_row(self, idx, check, msg): def check_row(self, idx, check, msg):
""" Assert for the condition 'check' and print 'msg' on fail together """ Assert for the condition 'check' and print 'msg' on fail together
with the contents of the failing result. with the contents of the failing result.
@@ -154,7 +149,6 @@ class GenericResponse:
assert check, _RowError(self.result[idx]) assert check, _RowError(self.result[idx])
def check_row_field(self, idx, field, expected, base=None): def check_row_field(self, idx, field, expected, base=None):
""" Check field 'field' of result 'idx' for the expected value """ Check field 'field' of result 'idx' for the expected value
and print a meaningful error if the condition fails. and print a meaningful error if the condition fails.
@@ -172,7 +166,6 @@ class GenericResponse:
f"\nBad value for field '{field}'. Expected: {expected}, got: {value}") f"\nBad value for field '{field}'. Expected: {expected}, got: {value}")
class SearchResponse(GenericResponse): class SearchResponse(GenericResponse):
""" Specialised class for search and lookup responses. """ Specialised class for search and lookup responses.
Transforms the xml response in a format similar to json. Transforms the xml response in a format similar to json.
@@ -240,7 +233,8 @@ class ReverseResponse(GenericResponse):
assert 'namedetails' not in self.result[0], "More than one namedetails in result" assert 'namedetails' not in self.result[0], "More than one namedetails in result"
self.result[0]['namedetails'] = {} self.result[0]['namedetails'] = {}
for tag in child: for tag in child:
assert len(tag) == 0, f"Namedetails element '{tag.attrib['desc']}' has subelements" assert len(tag) == 0, \
f"Namedetails element '{tag.attrib['desc']}' has subelements"
self.result[0]['namedetails'][tag.attrib['desc']] = tag.text self.result[0]['namedetails'][tag.attrib['desc']] = tag.text
elif child.tag == 'geokml': elif child.tag == 'geokml':
assert 'geokml' not in self.result[0], "More than one geokml in result" assert 'geokml' not in self.result[0], "More than one geokml in result"

View File

@@ -2,10 +2,9 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
from pathlib import Path from pathlib import Path
import importlib
import tempfile import tempfile
import psycopg import psycopg
@@ -13,10 +12,9 @@ from psycopg import sql as pysql
from nominatim_db import cli from nominatim_db import cli
from nominatim_db.config import Configuration from nominatim_db.config import Configuration
from nominatim_db.db.connection import Connection, register_hstore, execute_scalar from nominatim_db.db.connection import register_hstore, execute_scalar
from nominatim_db.tools import refresh
from nominatim_db.tokenizer import factory as tokenizer_factory from nominatim_db.tokenizer import factory as tokenizer_factory
from steps.utils import run_script
class NominatimEnvironment: class NominatimEnvironment:
""" Collects all functions for the execution of Nominatim functions. """ Collects all functions for the execution of Nominatim functions.
@@ -62,7 +60,6 @@ class NominatimEnvironment:
dbargs['password'] = self.db_pass dbargs['password'] = self.db_pass
return psycopg.connect(**dbargs) return psycopg.connect(**dbargs)
def write_nominatim_config(self, dbname): def write_nominatim_config(self, dbname):
""" Set up a custom test configuration that connects to the given """ Set up a custom test configuration that connects to the given
database. This sets up the environment variables so that they can database. This sets up the environment variables so that they can
@@ -101,7 +98,6 @@ class NominatimEnvironment:
self.website_dir = tempfile.TemporaryDirectory() self.website_dir = tempfile.TemporaryDirectory()
def get_test_config(self): def get_test_config(self):
cfg = Configuration(Path(self.website_dir.name), environ=self.test_env) cfg = Configuration(Path(self.website_dir.name), environ=self.test_env)
return cfg return cfg
@@ -122,14 +118,13 @@ class NominatimEnvironment:
return dsn return dsn
def db_drop_database(self, name): def db_drop_database(self, name):
""" Drop the database with the given name. """ Drop the database with the given name.
""" """
with self.connect_database('postgres') as conn: with self.connect_database('postgres') as conn:
conn.autocommit = True conn.autocommit = True
conn.execute(pysql.SQL('DROP DATABASE IF EXISTS') conn.execute(pysql.SQL('DROP DATABASE IF EXISTS')
+ pysql.Identifier(name)) + pysql.Identifier(name))
def setup_template_db(self): def setup_template_db(self):
""" Setup a template database that already contains common test data. """ Setup a template database that already contains common test data.
@@ -153,13 +148,12 @@ class NominatimEnvironment:
'--osm2pgsql-cache', '1', '--osm2pgsql-cache', '1',
'--ignore-errors', '--ignore-errors',
'--offline', '--index-noanalyse') '--offline', '--index-noanalyse')
except: except: # noqa: E722
self.db_drop_database(self.template_db) self.db_drop_database(self.template_db)
raise raise
self.run_nominatim('refresh', '--functions') self.run_nominatim('refresh', '--functions')
def setup_api_db(self): def setup_api_db(self):
""" Setup a test against the API test database. """ Setup a test against the API test database.
""" """
@@ -184,13 +178,12 @@ class NominatimEnvironment:
csv_path = str(testdata / 'full_en_phrases_test.csv') csv_path = str(testdata / 'full_en_phrases_test.csv')
self.run_nominatim('special-phrases', '--import-from-csv', csv_path) self.run_nominatim('special-phrases', '--import-from-csv', csv_path)
except: except: # noqa: E722
self.db_drop_database(self.api_test_db) self.db_drop_database(self.api_test_db)
raise raise
tokenizer_factory.get_tokenizer_for_db(self.get_test_config()) tokenizer_factory.get_tokenizer_for_db(self.get_test_config())
def setup_unknown_db(self): def setup_unknown_db(self):
""" Setup a test against a non-existing database. """ Setup a test against a non-existing database.
""" """
@@ -213,7 +206,7 @@ class NominatimEnvironment:
with self.connect_database(self.template_db) as conn: with self.connect_database(self.template_db) as conn:
conn.autocommit = True conn.autocommit = True
conn.execute(pysql.SQL('DROP DATABASE IF EXISTS') conn.execute(pysql.SQL('DROP DATABASE IF EXISTS')
+ pysql.Identifier(self.test_db)) + pysql.Identifier(self.test_db))
conn.execute(pysql.SQL('CREATE DATABASE {} TEMPLATE = {}').format( conn.execute(pysql.SQL('CREATE DATABASE {} TEMPLATE = {}').format(
pysql.Identifier(self.test_db), pysql.Identifier(self.test_db),
pysql.Identifier(self.template_db))) pysql.Identifier(self.template_db)))
@@ -250,7 +243,6 @@ class NominatimEnvironment:
return False return False
def reindex_placex(self, db): def reindex_placex(self, db):
""" Run the indexing step until all data in the placex has """ Run the indexing step until all data in the placex has
been processed. Indexing during updates can produce more data been processed. Indexing during updates can produce more data
@@ -259,18 +251,15 @@ class NominatimEnvironment:
""" """
self.run_nominatim('index') self.run_nominatim('index')
def run_nominatim(self, *cmdline): def run_nominatim(self, *cmdline):
""" Run the nominatim command-line tool via the library. """ Run the nominatim command-line tool via the library.
""" """
if self.website_dir is not None: if self.website_dir is not None:
cmdline = list(cmdline) + ['--project-dir', self.website_dir.name] cmdline = list(cmdline) + ['--project-dir', self.website_dir.name]
cli.nominatim(osm2pgsql_path=None, cli.nominatim(cli_args=cmdline,
cli_args=cmdline,
environ=self.test_env) environ=self.test_env)
def copy_from_place(self, db): def copy_from_place(self, db):
""" Copy data from place to the placex and location_property_osmline """ Copy data from place to the placex and location_property_osmline
tables invoking the appropriate triggers. tables invoking the appropriate triggers.
@@ -293,7 +282,6 @@ class NominatimEnvironment:
and osm_type='W' and osm_type='W'
and ST_GeometryType(geometry) = 'ST_LineString'""") and ST_GeometryType(geometry) = 'ST_LineString'""")
def create_api_request_func_starlette(self): def create_api_request_func_starlette(self):
import nominatim_api.server.starlette.server import nominatim_api.server.starlette.server
from asgi_lifespan import LifespanManager from asgi_lifespan import LifespanManager
@@ -311,7 +299,6 @@ class NominatimEnvironment:
return _request return _request
def create_api_request_func_falcon(self): def create_api_request_func_falcon(self):
import nominatim_api.server.falcon.server import nominatim_api.server.falcon.server
import falcon.testing import falcon.testing
@@ -326,6 +313,3 @@ class NominatimEnvironment:
return response.text, response.status_code return response.text, response.status_code
return _request return _request

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2022 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Helper classes for filling the place table. Helper classes for filling the place table.
@@ -10,12 +10,13 @@ Helper classes for filling the place table.
import random import random
import string import string
class PlaceColumn: class PlaceColumn:
""" Helper class to collect contents from a behave table row and """ Helper class to collect contents from a behave table row and
insert it into the place table. insert it into the place table.
""" """
def __init__(self, context): def __init__(self, context):
self.columns = {'admin_level' : 15} self.columns = {'admin_level': 15}
self.context = context self.context = context
self.geometry = None self.geometry = None
@@ -28,9 +29,11 @@ class PlaceColumn:
assert 'osm_type' in self.columns, "osm column missing" assert 'osm_type' in self.columns, "osm column missing"
if force_name and 'name' not in self.columns: if force_name and 'name' not in self.columns:
self._add_hstore('name', 'name', self._add_hstore(
''.join(random.choice(string.printable) 'name',
for _ in range(int(random.random()*30)))) 'name',
''.join(random.choices(string.printable, k=random.randrange(30))),
)
return self return self
@@ -96,7 +99,7 @@ class PlaceColumn:
""" Issue a delete for the given OSM object. """ Issue a delete for the given OSM object.
""" """
cursor.execute('DELETE FROM place WHERE osm_type = %s and osm_id = %s', cursor.execute('DELETE FROM place WHERE osm_type = %s and osm_id = %s',
(self.columns['osm_type'] , self.columns['osm_id'])) (self.columns['osm_type'], self.columns['osm_id']))
def db_insert(self, cursor): def db_insert(self, cursor):
""" Insert the collected data into the database. """ Insert the collected data into the database.
@@ -104,7 +107,7 @@ class PlaceColumn:
if self.columns['osm_type'] == 'N' and self.geometry is None: if self.columns['osm_type'] == 'N' and self.geometry is None:
pt = self.context.osm.grid_node(self.columns['osm_id']) pt = self.context.osm.grid_node(self.columns['osm_id'])
if pt is None: if pt is None:
pt = (random.random()*360 - 180, random.random()*180 - 90) pt = (random.uniform(-180, 180), random.uniform(-90, 90))
self.geometry = "ST_SetSRID(ST_Point(%f, %f), 4326)" % pt self.geometry = "ST_SetSRID(ST_Point(%f, %f), 4326)" % pt
else: else:

View File

@@ -2,20 +2,16 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" Steps that run queries against the API. """ Steps that run queries against the API.
""" """
from pathlib import Path from pathlib import Path
import json
import os
import re import re
import logging import logging
import asyncio import asyncio
import xml.etree.ElementTree as ET import xml.etree.ElementTree as ET
from urllib.parse import urlencode
from utils import run_script
from http_responses import GenericResponse, SearchResponse, ReverseResponse, StatusResponse from http_responses import GenericResponse, SearchResponse, ReverseResponse, StatusResponse
from check_functions import Bbox, check_for_attributes from check_functions import Bbox, check_for_attributes
from table_compare import NominatimID from table_compare import NominatimID
@@ -68,7 +64,7 @@ def send_api_query(endpoint, params, fmt, context):
getattr(context, 'http_headers', {}))) getattr(context, 'http_headers', {})))
@given(u'the HTTP header') @given('the HTTP header')
def add_http_header(context): def add_http_header(context):
if not hasattr(context, 'http_headers'): if not hasattr(context, 'http_headers'):
context.http_headers = {} context.http_headers = {}
@@ -77,7 +73,7 @@ def add_http_header(context):
context.http_headers[h] = context.table[0][h] context.http_headers[h] = context.table[0][h]
@when(u'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"(?P<addr> with address)?') @when(r'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"(?P<addr> with address)?')
def website_search_request(context, fmt, query, addr): def website_search_request(context, fmt, query, addr):
params = {} params = {}
if query: if query:
@@ -90,7 +86,7 @@ def website_search_request(context, fmt, query, addr):
context.response = SearchResponse(outp, fmt or 'json', status) context.response = SearchResponse(outp, fmt or 'json', status)
@when('sending v1/reverse at (?P<lat>[\d.-]*),(?P<lon>[\d.-]*)(?: with format (?P<fmt>.+))?') @when(r'sending v1/reverse at (?P<lat>[\d.-]*),(?P<lon>[\d.-]*)(?: with format (?P<fmt>.+))?')
def api_endpoint_v1_reverse(context, lat, lon, fmt): def api_endpoint_v1_reverse(context, lat, lon, fmt):
params = {} params = {}
if lat is not None: if lat is not None:
@@ -106,7 +102,7 @@ def api_endpoint_v1_reverse(context, lat, lon, fmt):
context.response = ReverseResponse(outp, fmt or 'xml', status) context.response = ReverseResponse(outp, fmt or 'xml', status)
@when('sending v1/reverse N(?P<nodeid>\d+)(?: with format (?P<fmt>.+))?') @when(r'sending v1/reverse N(?P<nodeid>\d+)(?: with format (?P<fmt>.+))?')
def api_endpoint_v1_reverse_from_node(context, nodeid, fmt): def api_endpoint_v1_reverse_from_node(context, nodeid, fmt):
params = {} params = {}
params['lon'], params['lat'] = (f'{c:f}' for c in context.osm.grid_node(int(nodeid))) params['lon'], params['lat'] = (f'{c:f}' for c in context.osm.grid_node(int(nodeid)))
@@ -115,7 +111,7 @@ def api_endpoint_v1_reverse_from_node(context, nodeid, fmt):
context.response = ReverseResponse(outp, fmt or 'xml', status) context.response = ReverseResponse(outp, fmt or 'xml', status)
@when(u'sending (?P<fmt>\S+ )?details query for (?P<query>.*)') @when(r'sending (?P<fmt>\S+ )?details query for (?P<query>.*)')
def website_details_request(context, fmt, query): def website_details_request(context, fmt, query):
params = {} params = {}
if query[0] in 'NWR': if query[0] in 'NWR':
@@ -130,38 +126,45 @@ def website_details_request(context, fmt, query):
context.response = GenericResponse(outp, fmt or 'json', status) context.response = GenericResponse(outp, fmt or 'json', status)
@when(u'sending (?P<fmt>\S+ )?lookup query for (?P<query>.*)')
@when(r'sending (?P<fmt>\S+ )?lookup query for (?P<query>.*)')
def website_lookup_request(context, fmt, query): def website_lookup_request(context, fmt, query):
params = { 'osm_ids' : query } params = {'osm_ids': query}
outp, status = send_api_query('lookup', params, fmt, context) outp, status = send_api_query('lookup', params, fmt, context)
context.response = SearchResponse(outp, fmt or 'xml', status) context.response = SearchResponse(outp, fmt or 'xml', status)
@when(u'sending (?P<fmt>\S+ )?status query')
@when(r'sending (?P<fmt>\S+ )?status query')
def website_status_request(context, fmt): def website_status_request(context, fmt):
params = {} params = {}
outp, status = send_api_query('status', params, fmt, context) outp, status = send_api_query('status', params, fmt, context)
context.response = StatusResponse(outp, fmt or 'text', status) context.response = StatusResponse(outp, fmt or 'text', status)
@step(u'(?P<operator>less than|more than|exactly|at least|at most) (?P<number>\d+) results? (?:is|are) returned')
@step(r'(?P<operator>less than|more than|exactly|at least|at most) '
r'(?P<number>\d+) results? (?:is|are) returned')
def validate_result_number(context, operator, number): def validate_result_number(context, operator, number):
context.execute_steps("Then a HTTP 200 is returned") context.execute_steps("Then a HTTP 200 is returned")
numres = len(context.response.result) numres = len(context.response.result)
assert compare(operator, numres, int(number)), \ assert compare(operator, numres, int(number)), \
f"Bad number of results: expected {operator} {number}, got {numres}." f"Bad number of results: expected {operator} {number}, got {numres}."
@then(u'a HTTP (?P<status>\d+) is returned')
@then(r'a HTTP (?P<status>\d+) is returned')
def check_http_return_status(context, status): def check_http_return_status(context, status):
assert context.response.errorcode == int(status), \ assert context.response.errorcode == int(status), \
f"Return HTTP status is {context.response.errorcode}."\ f"Return HTTP status is {context.response.errorcode}."\
f" Full response:\n{context.response.page}" f" Full response:\n{context.response.page}"
@then(u'the page contents equals "(?P<text>.+)"')
@then(r'the page contents equals "(?P<text>.+)"')
def check_page_content_equals(context, text): def check_page_content_equals(context, text):
assert context.response.page == text assert context.response.page == text
@then(u'the result is valid (?P<fmt>\w+)')
@then(r'the result is valid (?P<fmt>\w+)')
def step_impl(context, fmt): def step_impl(context, fmt):
context.execute_steps("Then a HTTP 200 is returned") context.execute_steps("Then a HTTP 200 is returned")
if fmt.strip() == 'html': if fmt.strip() == 'html':
@@ -178,7 +181,7 @@ def step_impl(context, fmt):
assert context.response.format == fmt assert context.response.format == fmt
@then(u'a (?P<fmt>\w+) user error is returned') @then(r'a (?P<fmt>\w+) user error is returned')
def check_page_error(context, fmt): def check_page_error(context, fmt):
context.execute_steps("Then a HTTP 400 is returned") context.execute_steps("Then a HTTP 400 is returned")
assert context.response.format == fmt assert context.response.format == fmt
@@ -188,32 +191,34 @@ def check_page_error(context, fmt):
else: else:
assert re.search(r'({"error":)', context.response.page, re.DOTALL) is not None assert re.search(r'({"error":)', context.response.page, re.DOTALL) is not None
@then(u'result header contains')
@then('result header contains')
def check_header_attr(context): def check_header_attr(context):
context.execute_steps("Then a HTTP 200 is returned") context.execute_steps("Then a HTTP 200 is returned")
for line in context.table: for line in context.table:
assert line['attr'] in context.response.header, \ assert line['attr'] in context.response.header, \
f"Field '{line['attr']}' missing in header. Full header:\n{context.response.header}" f"Field '{line['attr']}' missing in header. " \
f"Full header:\n{context.response.header}"
value = context.response.header[line['attr']] value = context.response.header[line['attr']]
assert re.fullmatch(line['value'], value) is not None, \ assert re.fullmatch(line['value'], value) is not None, \
f"Attribute '{line['attr']}': expected: '{line['value']}', got '{value}'" f"Attribute '{line['attr']}': expected: '{line['value']}', got '{value}'"
@then(u'result header has (?P<neg>not )?attributes (?P<attrs>.*)') @then('result header has (?P<neg>not )?attributes (?P<attrs>.*)')
def check_header_no_attr(context, neg, attrs): def check_header_no_attr(context, neg, attrs):
check_for_attributes(context.response.header, attrs, check_for_attributes(context.response.header, attrs,
'absent' if neg else 'present') 'absent' if neg else 'present')
@then(u'results contain(?: in field (?P<field>.*))?') @then(r'results contain(?: in field (?P<field>.*))?')
def step_impl(context, field): def results_contain_in_field(context, field):
context.execute_steps("then at least 1 result is returned") context.execute_steps("then at least 1 result is returned")
for line in context.table: for line in context.table:
context.response.match_row(line, context=context, field=field) context.response.match_row(line, context=context, field=field)
@then(u'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)') @then(r'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)')
def validate_attributes(context, lid, neg, attrs): def validate_attributes(context, lid, neg, attrs):
for i in make_todo_list(context, lid): for i in make_todo_list(context, lid):
check_for_attributes(context.response.result[i], attrs, check_for_attributes(context.response.result[i], attrs,
@@ -221,7 +226,7 @@ def validate_attributes(context, lid, neg, attrs):
@then(u'result addresses contain') @then(u'result addresses contain')
def step_impl(context): def result_addresses_contain(context):
context.execute_steps("then at least 1 result is returned") context.execute_steps("then at least 1 result is returned")
for line in context.table: for line in context.table:
@@ -231,8 +236,9 @@ def step_impl(context):
if name != 'ID': if name != 'ID':
context.response.assert_address_field(idx, name, value) context.response.assert_address_field(idx, name, value)
@then(u'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
def check_address(context, lid, neg, attrs): @then(r'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
def check_address_has_types(context, lid, neg, attrs):
context.execute_steps(f"then more than {lid} results are returned") context.execute_steps(f"then more than {lid} results are returned")
addr_parts = context.response.result[int(lid)]['address'] addr_parts = context.response.result[int(lid)]['address']
@@ -243,7 +249,8 @@ def check_address(context, lid, neg, attrs):
else: else:
assert attr in addr_parts assert attr in addr_parts
@then(u'address of result (?P<lid>\d+) (?P<complete>is|contains)')
@then(r'address of result (?P<lid>\d+) (?P<complete>is|contains)')
def check_address(context, lid, complete): def check_address(context, lid, complete):
context.execute_steps(f"then more than {lid} results are returned") context.execute_steps(f"then more than {lid} results are returned")
@@ -258,7 +265,7 @@ def check_address(context, lid, complete):
assert len(addr_parts) == 0, f"Additional address parts found: {addr_parts!s}" assert len(addr_parts) == 0, f"Additional address parts found: {addr_parts!s}"
@then(u'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)') @then(r'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)')
def check_bounding_box_in_area(context, lid, coords): def check_bounding_box_in_area(context, lid, coords):
expected = Bbox(coords) expected = Bbox(coords)
@@ -269,7 +276,7 @@ def check_bounding_box_in_area(context, lid, coords):
f"Bbox is not contained in {expected}") f"Bbox is not contained in {expected}")
@then(u'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)') @then(r'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)')
def check_centroid_in_area(context, lid, coords): def check_centroid_in_area(context, lid, coords):
expected = Bbox(coords) expected = Bbox(coords)
@@ -280,7 +287,7 @@ def check_centroid_in_area(context, lid, coords):
f"Centroid is not inside {expected}") f"Centroid is not inside {expected}")
@then(u'there are(?P<neg> no)? duplicates') @then('there are(?P<neg> no)? duplicates')
def check_for_duplicates(context, neg): def check_for_duplicates(context, neg):
context.execute_steps("then at least 1 result is returned") context.execute_steps("then at least 1 result is returned")
@@ -298,4 +305,3 @@ def check_for_duplicates(context, neg):
assert not has_dupe, f"Found duplicate for {dup}" assert not has_dupe, f"Found duplicate for {dup}"
else: else:
assert has_dupe, "No duplicates found" assert has_dupe, "No duplicates found"

View File

@@ -2,9 +2,8 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
import logging
from itertools import chain from itertools import chain
import psycopg import psycopg
@@ -13,9 +12,9 @@ from psycopg import sql as pysql
from place_inserter import PlaceColumn from place_inserter import PlaceColumn
from table_compare import NominatimID, DBRow from table_compare import NominatimID, DBRow
from nominatim_db.indexer import indexer
from nominatim_db.tokenizer import factory as tokenizer_factory from nominatim_db.tokenizer import factory as tokenizer_factory
def check_database_integrity(context): def check_database_integrity(context):
""" Check some generic constraints on the tables. """ Check some generic constraints on the tables.
""" """
@@ -31,10 +30,9 @@ def check_database_integrity(context):
cur.execute("SELECT count(*) FROM word WHERE word_token = ''") cur.execute("SELECT count(*) FROM word WHERE word_token = ''")
assert cur.fetchone()[0] == 0, "Empty word tokens found in word table" assert cur.fetchone()[0] == 0, "Empty word tokens found in word table"
# GIVEN ##################################
################################ GIVEN ##################################
@given("the (?P<named>named )?places") @given("the (?P<named>named )?places")
def add_data_to_place_table(context, named): def add_data_to_place_table(context, named):
""" Add entries into the place table. 'named places' makes sure that """ Add entries into the place table. 'named places' makes sure that
@@ -46,6 +44,7 @@ def add_data_to_place_table(context, named):
PlaceColumn(context).add_row(row, named is not None).db_insert(cur) PlaceColumn(context).add_row(row, named is not None).db_insert(cur)
cur.execute('ALTER TABLE place ENABLE TRIGGER place_before_insert') cur.execute('ALTER TABLE place ENABLE TRIGGER place_before_insert')
@given("the relations") @given("the relations")
def add_data_to_planet_relations(context): def add_data_to_planet_relations(context):
""" Add entries into the osm2pgsql relation middle table. This is needed """ Add entries into the osm2pgsql relation middle table. This is needed
@@ -77,9 +76,11 @@ def add_data_to_planet_relations(context):
else: else:
members = None members = None
tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings if h.startswith("tags+")]) tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings
if h.startswith("tags+")])
cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off, parts, members, tags) cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off,
parts, members, tags)
VALUES (%s, %s, %s, %s, %s, %s)""", VALUES (%s, %s, %s, %s, %s, %s)""",
(r['id'], last_node, last_way, parts, members, list(tags))) (r['id'], last_node, last_way, parts, members, list(tags)))
else: else:
@@ -99,6 +100,7 @@ def add_data_to_planet_relations(context):
(r['id'], psycopg.types.json.Json(tags), (r['id'], psycopg.types.json.Json(tags),
psycopg.types.json.Json(members))) psycopg.types.json.Json(members)))
@given("the ways") @given("the ways")
def add_data_to_planet_ways(context): def add_data_to_planet_ways(context):
""" Add entries into the osm2pgsql way middle table. This is necessary for """ Add entries into the osm2pgsql way middle table. This is necessary for
@@ -110,16 +112,18 @@ def add_data_to_planet_ways(context):
json_tags = row is not None and row['value'] != '1' json_tags = row is not None and row['value'] != '1'
for r in context.table: for r in context.table:
if json_tags: if json_tags:
tags = psycopg.types.json.Json({h[5:]: r[h] for h in r.headings if h.startswith("tags+")}) tags = psycopg.types.json.Json({h[5:]: r[h] for h in r.headings
if h.startswith("tags+")})
else: else:
tags = list(chain.from_iterable([(h[5:], r[h]) tags = list(chain.from_iterable([(h[5:], r[h])
for h in r.headings if h.startswith("tags+")])) for h in r.headings if h.startswith("tags+")]))
nodes = [ int(x.strip()) for x in r['nodes'].split(',') ] nodes = [int(x.strip()) for x in r['nodes'].split(',')]
cur.execute("INSERT INTO planet_osm_ways (id, nodes, tags) VALUES (%s, %s, %s)", cur.execute("INSERT INTO planet_osm_ways (id, nodes, tags) VALUES (%s, %s, %s)",
(r['id'], nodes, tags)) (r['id'], nodes, tags))
################################ WHEN ################################## # WHEN ##################################
@when("importing") @when("importing")
def import_and_index_data_from_place_table(context): def import_and_index_data_from_place_table(context):
@@ -136,6 +140,7 @@ def import_and_index_data_from_place_table(context):
# itself. # itself.
context.log_capture.buffer.clear() context.log_capture.buffer.clear()
@when("updating places") @when("updating places")
def update_place_table(context): def update_place_table(context):
""" Update the place table with the given data. Also runs all triggers """ Update the place table with the given data. Also runs all triggers
@@ -164,6 +169,7 @@ def update_postcodes(context):
""" """
context.nominatim.run_nominatim('refresh', '--postcodes') context.nominatim.run_nominatim('refresh', '--postcodes')
@when("marking for delete (?P<oids>.*)") @when("marking for delete (?P<oids>.*)")
def delete_places(context, oids): def delete_places(context, oids):
""" Remove entries from the place table. Multiple ids may be given """ Remove entries from the place table. Multiple ids may be given
@@ -184,7 +190,8 @@ def delete_places(context, oids):
# itself. # itself.
context.log_capture.buffer.clear() context.log_capture.buffer.clear()
################################ THEN ################################## # THEN ##################################
@then("(?P<table>placex|place) contains(?P<exact> exactly)?") @then("(?P<table>placex|place) contains(?P<exact> exactly)?")
def check_place_contents(context, table, exact): def check_place_contents(context, table, exact):
@@ -201,7 +208,8 @@ def check_place_contents(context, table, exact):
expected_content = set() expected_content = set()
for row in context.table: for row in context.table:
nid = NominatimID(row['object']) nid = NominatimID(row['object'])
query = 'SELECT *, ST_AsText(geometry) as geomtxt, ST_GeometryType(geometry) as geometrytype' query = """SELECT *, ST_AsText(geometry) as geomtxt,
ST_GeometryType(geometry) as geometrytype """
if table == 'placex': if table == 'placex':
query += ' ,ST_X(centroid) as cx, ST_Y(centroid) as cy' query += ' ,ST_X(centroid) as cx, ST_Y(centroid) as cy'
query += " FROM %s WHERE {}" % (table, ) query += " FROM %s WHERE {}" % (table, )
@@ -261,17 +269,18 @@ def check_search_name_contents(context, exclude):
if not exclude: if not exclude:
assert len(tokens) >= len(items), \ assert len(tokens) >= len(items), \
"No word entry found for {}. Entries found: {!s}".format(value, len(tokens)) f"No word entry found for {value}. Entries found: {len(tokens)}"
for word, token, wid in tokens: for word, token, wid in tokens:
if exclude: if exclude:
assert wid not in res[name], \ assert wid not in res[name], \
"Found term for {}/{}: {}".format(nid, name, wid) "Found term for {}/{}: {}".format(nid, name, wid)
else: else:
assert wid in res[name], \ assert wid in res[name], \
"Missing term for {}/{}: {}".format(nid, name, wid) "Missing term for {}/{}: {}".format(nid, name, wid)
elif name != 'object': elif name != 'object':
assert db_row.contains(name, value), db_row.assert_msg(name, value) assert db_row.contains(name, value), db_row.assert_msg(name, value)
@then("search_name has no entry for (?P<oid>.*)") @then("search_name has no entry for (?P<oid>.*)")
def check_search_name_has_entry(context, oid): def check_search_name_has_entry(context, oid):
""" Check that there is noentry in the search_name table for the given """ Check that there is noentry in the search_name table for the given
@@ -283,6 +292,7 @@ def check_search_name_has_entry(context, oid):
assert cur.rowcount == 0, \ assert cur.rowcount == 0, \
"Found {} entries for ID {}".format(cur.rowcount, oid) "Found {} entries for ID {}".format(cur.rowcount, oid)
@then("location_postcode contains exactly") @then("location_postcode contains exactly")
def check_location_postcode(context): def check_location_postcode(context):
""" Check full contents for location_postcode table. Each row represents a table row """ Check full contents for location_postcode table. Each row represents a table row
@@ -294,21 +304,22 @@ def check_location_postcode(context):
with context.db.cursor() as cur: with context.db.cursor() as cur:
cur.execute("SELECT *, ST_AsText(geometry) as geomtxt FROM location_postcode") cur.execute("SELECT *, ST_AsText(geometry) as geomtxt FROM location_postcode")
assert cur.rowcount == len(list(context.table)), \ assert cur.rowcount == len(list(context.table)), \
"Postcode table has {} rows, expected {}.".format(cur.rowcount, len(list(context.table))) "Postcode table has {cur.rowcount} rows, expected {len(list(context.table))}."
results = {} results = {}
for row in cur: for row in cur:
key = (row['country_code'], row['postcode']) key = (row['country_code'], row['postcode'])
assert key not in results, "Postcode table has duplicate entry: {}".format(row) assert key not in results, "Postcode table has duplicate entry: {}".format(row)
results[key] = DBRow((row['country_code'],row['postcode']), row, context) results[key] = DBRow((row['country_code'], row['postcode']), row, context)
for row in context.table: for row in context.table:
db_row = results.get((row['country'],row['postcode'])) db_row = results.get((row['country'], row['postcode']))
assert db_row is not None, \ assert db_row is not None, \
f"Missing row for country '{row['country']}' postcode '{row['postcode']}'." f"Missing row for country '{row['country']}' postcode '{row['postcode']}'."
db_row.assert_row(row, ('country', 'postcode')) db_row.assert_row(row, ('country', 'postcode'))
@then("there are(?P<exclude> no)? word tokens for postcodes (?P<postcodes>.*)") @then("there are(?P<exclude> no)? word tokens for postcodes (?P<postcodes>.*)")
def check_word_table_for_postcodes(context, exclude, postcodes): def check_word_table_for_postcodes(context, exclude, postcodes):
""" Check that the tokenizer produces postcode tokens for the given """ Check that the tokenizer produces postcode tokens for the given
@@ -333,7 +344,8 @@ def check_word_table_for_postcodes(context, exclude, postcodes):
assert len(found) == 0, f"Unexpected postcodes: {found}" assert len(found) == 0, f"Unexpected postcodes: {found}"
else: else:
assert set(found) == set(plist), \ assert set(found) == set(plist), \
f"Missing postcodes {set(plist) - set(found)}. Found: {found}" f"Missing postcodes {set(plist) - set(found)}. Found: {found}"
@then("place_addressline contains") @then("place_addressline contains")
def check_place_addressline(context): def check_place_addressline(context):
@@ -352,11 +364,12 @@ def check_place_addressline(context):
WHERE place_id = %s AND address_place_id = %s""", WHERE place_id = %s AND address_place_id = %s""",
(pid, apid)) (pid, apid))
assert cur.rowcount > 0, \ assert cur.rowcount > 0, \
"No rows found for place %s and address %s" % (row['object'], row['address']) f"No rows found for place {row['object']} and address {row['address']}."
for res in cur: for res in cur:
DBRow(nid, res, context).assert_row(row, ('address', 'object')) DBRow(nid, res, context).assert_row(row, ('address', 'object'))
@then("place_addressline doesn't contain") @then("place_addressline doesn't contain")
def check_place_addressline_exclude(context): def check_place_addressline_exclude(context):
""" Check that the place_addressline doesn't contain any entries for the """ Check that the place_addressline doesn't contain any entries for the
@@ -371,9 +384,10 @@ def check_place_addressline_exclude(context):
WHERE place_id = %s AND address_place_id = %s""", WHERE place_id = %s AND address_place_id = %s""",
(pid, apid)) (pid, apid))
assert cur.rowcount == 0, \ assert cur.rowcount == 0, \
"Row found for place %s and address %s" % (row['object'], row['address']) f"Row found for place {row['object']} and address {row['address']}."
@then("W(?P<oid>\d+) expands to(?P<neg> no)? interpolation")
@then(r"W(?P<oid>\d+) expands to(?P<neg> no)? interpolation")
def check_location_property_osmline(context, oid, neg): def check_location_property_osmline(context, oid, neg):
""" Check that the given way is present in the interpolation table. """ Check that the given way is present in the interpolation table.
""" """
@@ -392,7 +406,7 @@ def check_location_property_osmline(context, oid, neg):
for i in todo: for i in todo:
row = context.table[i] row = context.table[i]
if (int(row['start']) == res['startnumber'] if (int(row['start']) == res['startnumber']
and int(row['end']) == res['endnumber']): and int(row['end']) == res['endnumber']):
todo.remove(i) todo.remove(i)
break break
else: else:
@@ -402,8 +416,9 @@ def check_location_property_osmline(context, oid, neg):
assert not todo, f"Unmatched lines in table: {list(context.table[i] for i in todo)}" assert not todo, f"Unmatched lines in table: {list(context.table[i] for i in todo)}"
@then("location_property_osmline contains(?P<exact> exactly)?") @then("location_property_osmline contains(?P<exact> exactly)?")
def check_place_contents(context, exact): def check_osmline_contents(context, exact):
""" Check contents of the interpolation table. Each row represents a table row """ Check contents of the interpolation table. Each row represents a table row
and all data must match. Data not present in the expected table, may and all data must match. Data not present in the expected table, may
be arbitrary. The rows are identified via the 'object' column which must be arbitrary. The rows are identified via the 'object' column which must
@@ -447,4 +462,3 @@ def check_place_contents(context, exact):
assert expected_content == actual, \ assert expected_content == actual, \
f"Missing entries: {expected_content - actual}\n" \ f"Missing entries: {expected_content - actual}\n" \
f"Not expected in table: {actual - expected_content}" f"Not expected in table: {actual - expected_content}"

View File

@@ -14,6 +14,7 @@ from nominatim_db.tools.replication import run_osm2pgsql_updates
from geometry_alias import ALIASES from geometry_alias import ALIASES
def get_osm2pgsql_options(nominatim_env, fname, append): def get_osm2pgsql_options(nominatim_env, fname, append):
return dict(import_file=fname, return dict(import_file=fname,
osm2pgsql='osm2pgsql', osm2pgsql='osm2pgsql',
@@ -25,8 +26,7 @@ def get_osm2pgsql_options(nominatim_env, fname, append):
flatnode_file='', flatnode_file='',
tablespaces=dict(slim_data='', slim_index='', tablespaces=dict(slim_data='', slim_index='',
main_data='', main_index=''), main_data='', main_index=''),
append=append append=append)
)
def write_opl_file(opl, grid): def write_opl_file(opl, grid):
@@ -41,14 +41,14 @@ def write_opl_file(opl, grid):
if line.startswith('n') and line.find(' x') < 0: if line.startswith('n') and line.find(' x') < 0:
coord = grid.grid_node(int(line[1:].split(' ')[0])) coord = grid.grid_node(int(line[1:].split(' ')[0]))
if coord is None: if coord is None:
coord = (random.random() * 360 - 180, coord = (random.uniform(-180, 180), random.uniform(-90, 90))
random.random() * 180 - 90)
line += " x%f y%f" % coord line += " x%f y%f" % coord
fd.write(line.encode('utf-8')) fd.write(line.encode('utf-8'))
fd.write(b'\n') fd.write(b'\n')
return fd.name return fd.name
@given('the lua style file') @given('the lua style file')
def lua_style_file(context): def lua_style_file(context):
""" Define a custom style file to use for the import. """ Define a custom style file to use for the import.
@@ -91,7 +91,7 @@ def define_node_grid(context, grid_step, origin):
@when(u'loading osm data') @when(u'loading osm data')
def load_osm_file(context): def load_osm_file(context):
""" """
Load the given data into a freshly created test data using osm2pgsql. Load the given data into a freshly created test database using osm2pgsql.
No further indexing is done. No further indexing is done.
The data is expected as attached text in OPL format. The data is expected as attached text in OPL format.
@@ -103,13 +103,14 @@ def load_osm_file(context):
finally: finally:
os.remove(fname) os.remove(fname)
### reintroduce the triggers/indexes we've lost by having osm2pgsql set up place again # reintroduce the triggers/indexes we've lost by having osm2pgsql set up place again
cur = context.db.cursor() cur = context.db.cursor()
cur.execute("""CREATE TRIGGER place_before_delete BEFORE DELETE ON place cur.execute("""CREATE TRIGGER place_before_delete BEFORE DELETE ON place
FOR EACH ROW EXECUTE PROCEDURE place_delete()""") FOR EACH ROW EXECUTE PROCEDURE place_delete()""")
cur.execute("""CREATE TRIGGER place_before_insert BEFORE INSERT ON place cur.execute("""CREATE TRIGGER place_before_insert BEFORE INSERT ON place
FOR EACH ROW EXECUTE PROCEDURE place_insert()""") FOR EACH ROW EXECUTE PROCEDURE place_insert()""")
cur.execute("""CREATE UNIQUE INDEX idx_place_osm_unique on place using btree(osm_id,osm_type,class,type)""") cur.execute("""CREATE UNIQUE INDEX idx_place_osm_unique ON place
USING btree(osm_id,osm_type,class,type)""")
context.db.commit() context.db.commit()
@@ -133,6 +134,7 @@ def update_from_osm_file(context):
finally: finally:
os.remove(fname) os.remove(fname)
@when('indexing') @when('indexing')
def index_database(context): def index_database(context):
""" """

View File

@@ -2,21 +2,21 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2022 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Functions to facilitate accessing and comparing the content of DB tables. Functions to facilitate accessing and comparing the content of DB tables.
""" """
import math
import re import re
import json import json
import psycopg import psycopg
from psycopg import sql as pysql from psycopg import sql as pysql
from steps.check_functions import Almost
ID_REGEX = re.compile(r"(?P<typ>[NRW])(?P<oid>\d+)(:(?P<cls>\w+))?") ID_REGEX = re.compile(r"(?P<typ>[NRW])(?P<oid>\d+)(:(?P<cls>\w+))?")
class NominatimID: class NominatimID:
""" Splits a unique identifier for places into its components. """ Splits a unique identifier for places into its components.
As place_ids cannot be used for testing, we use a unique As place_ids cannot be used for testing, we use a unique
@@ -147,10 +147,10 @@ class DBRow:
return str(actual) == expected return str(actual) == expected
def _compare_place_id(self, actual, expected): def _compare_place_id(self, actual, expected):
if expected == '0': if expected == '0':
return actual == 0 return actual == 0
with self.context.db.cursor() as cur: with self.context.db.cursor() as cur:
return NominatimID(expected).get_place_id(cur) == actual return NominatimID(expected).get_place_id(cur) == actual
def _has_centroid(self, expected): def _has_centroid(self, expected):
@@ -166,13 +166,15 @@ class DBRow:
else: else:
x, y = self.context.osm.grid_node(int(expected)) x, y = self.context.osm.grid_node(int(expected))
return Almost(float(x)) == self.db_row['cx'] and Almost(float(y)) == self.db_row['cy'] return math.isclose(float(x), self.db_row['cx']) \
and math.isclose(float(y), self.db_row['cy'])
def _has_geometry(self, expected): def _has_geometry(self, expected):
geom = self.context.osm.parse_geometry(expected) geom = self.context.osm.parse_geometry(expected)
with self.context.db.cursor(row_factory=psycopg.rows.tuple_row) as cur: with self.context.db.cursor(row_factory=psycopg.rows.tuple_row) as cur:
cur.execute(pysql.SQL("""SELECT ST_Equals(ST_SnapToGrid({}, 0.00001, 0.00001), cur.execute(pysql.SQL("""
ST_SnapToGrid(ST_SetSRID({}::geometry, 4326), 0.00001, 0.00001))""") SELECT ST_Equals(ST_SnapToGrid({}, 0.00001, 0.00001),
ST_SnapToGrid(ST_SetSRID({}::geometry, 4326), 0.00001, 0.00001))""")
.format(pysql.SQL(geom), .format(pysql.SQL(geom),
pysql.Literal(self.db_row['geomtxt']))) pysql.Literal(self.db_row['geomtxt'])))
return cur.fetchone()[0] return cur.fetchone()[0]
@@ -187,7 +189,8 @@ class DBRow:
else: else:
msg += " No such column." msg += " No such column."
return msg + "\nFull DB row: {}".format(json.dumps(dict(self.db_row), indent=4, default=str)) return msg + "\nFull DB row: {}".format(json.dumps(dict(self.db_row),
indent=4, default=str))
def _get_actual(self, name): def _get_actual(self, name):
if '+' in name: if '+' in name:

View File

@@ -1,28 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Various smaller helps for step execution.
"""
import logging
import subprocess
LOG = logging.getLogger(__name__)
def run_script(cmd, **kwargs):
""" Run the given command, check that it is successful and output
when necessary.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
(outp, outerr) = proc.communicate()
outp = outp.decode('utf-8')
outerr = outerr.decode('utf-8').replace('\\n', '\n')
LOG.debug("Run command: %s\n%s\n%s", cmd, outp, outerr)
assert proc.returncode == 0, "Script '{}' failed:\n{}\n{}\n".format(cmd[0], outp, outerr)
return outp, outerr

View File

@@ -2,14 +2,13 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Helper fixtures for API call tests. Helper fixtures for API call tests.
""" """
import pytest import pytest
import pytest_asyncio import pytest_asyncio
import time
import datetime as dt import datetime as dt
import sqlalchemy as sa import sqlalchemy as sa
@@ -20,27 +19,25 @@ from nominatim_api.search.query_analyzer_factory import make_query_analyzer
from nominatim_db.tools import convert_sqlite from nominatim_db.tools import convert_sqlite
import nominatim_api.logging as loglib import nominatim_api.logging as loglib
class APITester: class APITester:
def __init__(self): def __init__(self):
self.api = napi.NominatimAPI() self.api = napi.NominatimAPI()
self.async_to_sync(self.api._async_api.setup_database()) self.async_to_sync(self.api._async_api.setup_database())
def async_to_sync(self, func): def async_to_sync(self, func):
""" Run an asynchronous function until completion using the """ Run an asynchronous function until completion using the
internal loop of the API. internal loop of the API.
""" """
return self.api._loop.run_until_complete(func) return self.api._loop.run_until_complete(func)
def add_data(self, table, data): def add_data(self, table, data):
""" Insert data into the given table. """ Insert data into the given table.
""" """
sql = getattr(self.api._async_api._tables, table).insert() sql = getattr(self.api._async_api._tables, table).insert()
self.async_to_sync(self.exec_async(sql, data)) self.async_to_sync(self.exec_async(sql, data))
def add_placex(self, **kw): def add_placex(self, **kw):
name = kw.get('name') name = kw.get('name')
if isinstance(name, str): if isinstance(name, str):
@@ -50,30 +47,29 @@ class APITester:
geometry = kw.get('geometry', 'POINT(%f %f)' % centroid) geometry = kw.get('geometry', 'POINT(%f %f)' % centroid)
self.add_data('placex', self.add_data('placex',
{'place_id': kw.get('place_id', 1000), {'place_id': kw.get('place_id', 1000),
'osm_type': kw.get('osm_type', 'W'), 'osm_type': kw.get('osm_type', 'W'),
'osm_id': kw.get('osm_id', 4), 'osm_id': kw.get('osm_id', 4),
'class_': kw.get('class_', 'highway'), 'class_': kw.get('class_', 'highway'),
'type': kw.get('type', 'residential'), 'type': kw.get('type', 'residential'),
'name': name, 'name': name,
'address': kw.get('address'), 'address': kw.get('address'),
'extratags': kw.get('extratags'), 'extratags': kw.get('extratags'),
'parent_place_id': kw.get('parent_place_id'), 'parent_place_id': kw.get('parent_place_id'),
'linked_place_id': kw.get('linked_place_id'), 'linked_place_id': kw.get('linked_place_id'),
'admin_level': kw.get('admin_level', 15), 'admin_level': kw.get('admin_level', 15),
'country_code': kw.get('country_code'), 'country_code': kw.get('country_code'),
'housenumber': kw.get('housenumber'), 'housenumber': kw.get('housenumber'),
'postcode': kw.get('postcode'), 'postcode': kw.get('postcode'),
'wikipedia': kw.get('wikipedia'), 'wikipedia': kw.get('wikipedia'),
'rank_search': kw.get('rank_search', 30), 'rank_search': kw.get('rank_search', 30),
'rank_address': kw.get('rank_address', 30), 'rank_address': kw.get('rank_address', 30),
'importance': kw.get('importance'), 'importance': kw.get('importance'),
'centroid': 'POINT(%f %f)' % centroid, 'centroid': 'POINT(%f %f)' % centroid,
'indexed_status': kw.get('indexed_status', 0), 'indexed_status': kw.get('indexed_status', 0),
'indexed_date': kw.get('indexed_date', 'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)), dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'geometry': geometry}) 'geometry': geometry})
def add_address_placex(self, object_id, **kw): def add_address_placex(self, object_id, **kw):
self.add_placex(**kw) self.add_placex(**kw)
@@ -85,46 +81,42 @@ class APITester:
'fromarea': kw.get('fromarea', False), 'fromarea': kw.get('fromarea', False),
'isaddress': kw.get('isaddress', True)}) 'isaddress': kw.get('isaddress', True)})
def add_osmline(self, **kw): def add_osmline(self, **kw):
self.add_data('osmline', self.add_data('osmline',
{'place_id': kw.get('place_id', 10000), {'place_id': kw.get('place_id', 10000),
'osm_id': kw.get('osm_id', 4004), 'osm_id': kw.get('osm_id', 4004),
'parent_place_id': kw.get('parent_place_id'), 'parent_place_id': kw.get('parent_place_id'),
'indexed_date': kw.get('indexed_date', 'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)), dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'startnumber': kw.get('startnumber', 2), 'startnumber': kw.get('startnumber', 2),
'endnumber': kw.get('endnumber', 6), 'endnumber': kw.get('endnumber', 6),
'step': kw.get('step', 2), 'step': kw.get('step', 2),
'address': kw.get('address'), 'address': kw.get('address'),
'postcode': kw.get('postcode'), 'postcode': kw.get('postcode'),
'country_code': kw.get('country_code'), 'country_code': kw.get('country_code'),
'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')}) 'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})
def add_tiger(self, **kw): def add_tiger(self, **kw):
self.add_data('tiger', self.add_data('tiger',
{'place_id': kw.get('place_id', 30000), {'place_id': kw.get('place_id', 30000),
'parent_place_id': kw.get('parent_place_id'), 'parent_place_id': kw.get('parent_place_id'),
'startnumber': kw.get('startnumber', 2), 'startnumber': kw.get('startnumber', 2),
'endnumber': kw.get('endnumber', 6), 'endnumber': kw.get('endnumber', 6),
'step': kw.get('step', 2), 'step': kw.get('step', 2),
'postcode': kw.get('postcode'), 'postcode': kw.get('postcode'),
'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')}) 'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})
def add_postcode(self, **kw): def add_postcode(self, **kw):
self.add_data('postcode', self.add_data('postcode',
{'place_id': kw.get('place_id', 1000), {'place_id': kw.get('place_id', 1000),
'parent_place_id': kw.get('parent_place_id'), 'parent_place_id': kw.get('parent_place_id'),
'country_code': kw.get('country_code'), 'country_code': kw.get('country_code'),
'postcode': kw.get('postcode'), 'postcode': kw.get('postcode'),
'rank_search': kw.get('rank_search', 20), 'rank_search': kw.get('rank_search', 20),
'rank_address': kw.get('rank_address', 22), 'rank_address': kw.get('rank_address', 22),
'indexed_date': kw.get('indexed_date', 'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)), dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'geometry': kw.get('geometry', 'POINT(23 34)')}) 'geometry': kw.get('geometry', 'POINT(23 34)')})
def add_country(self, country_code, geometry): def add_country(self, country_code, geometry):
self.add_data('country_grid', self.add_data('country_grid',
@@ -132,14 +124,12 @@ class APITester:
'area': 0.1, 'area': 0.1,
'geometry': geometry}) 'geometry': geometry})
def add_country_name(self, country_code, names, partition=0): def add_country_name(self, country_code, names, partition=0):
self.add_data('country_name', self.add_data('country_name',
{'country_code': country_code, {'country_code': country_code,
'name': names, 'name': names,
'partition': partition}) 'partition': partition})
def add_search_name(self, place_id, **kw): def add_search_name(self, place_id, **kw):
centroid = kw.get('centroid', (23.0, 34.0)) centroid = kw.get('centroid', (23.0, 34.0))
self.add_data('search_name', self.add_data('search_name',
@@ -152,7 +142,6 @@ class APITester:
'country_code': kw.get('country_code', 'xx'), 'country_code': kw.get('country_code', 'xx'),
'centroid': 'POINT(%f %f)' % centroid}) 'centroid': 'POINT(%f %f)' % centroid})
def add_class_type_table(self, cls, typ): def add_class_type_table(self, cls, typ):
self.async_to_sync( self.async_to_sync(
self.exec_async(sa.text(f"""CREATE TABLE place_classtype_{cls}_{typ} self.exec_async(sa.text(f"""CREATE TABLE place_classtype_{cls}_{typ}
@@ -160,7 +149,6 @@ class APITester:
WHERE class = '{cls}' AND type = '{typ}') WHERE class = '{cls}' AND type = '{typ}')
"""))) """)))
def add_word_table(self, content): def add_word_table(self, content):
data = [dict(zip(['word_id', 'word_token', 'type', 'word', 'info'], c)) data = [dict(zip(['word_id', 'word_token', 'type', 'word', 'info'], c))
for c in content] for c in content]
@@ -176,12 +164,10 @@ class APITester:
self.async_to_sync(_do_sql()) self.async_to_sync(_do_sql())
async def exec_async(self, sql, *args, **kwargs): async def exec_async(self, sql, *args, **kwargs):
async with self.api._async_api.begin() as conn: async with self.api._async_api.begin() as conn:
return await conn.execute(sql, *args, **kwargs) return await conn.execute(sql, *args, **kwargs)
async def create_tables(self): async def create_tables(self):
async with self.api._async_api._engine.begin() as conn: async with self.api._async_api._engine.begin() as conn:
await conn.run_sync(self.api._async_api._tables.meta.create_all) await conn.run_sync(self.api._async_api._tables.meta.create_all)
@@ -212,11 +198,12 @@ def frontend(request, event_loop, tmp_path):
db = str(tmp_path / 'test_nominatim_python_unittest.sqlite') db = str(tmp_path / 'test_nominatim_python_unittest.sqlite')
def mkapi(apiobj, options={'reverse'}): def mkapi(apiobj, options={'reverse'}):
apiobj.add_data('properties', apiobj.add_data(
[{'property': 'tokenizer', 'value': 'icu'}, 'properties',
{'property': 'tokenizer_import_normalisation', 'value': ':: lower();'}, [{'property': 'tokenizer', 'value': 'icu'},
{'property': 'tokenizer_import_transliteration', 'value': "'1' > '/1/'; 'ä' > 'ä '"}, {'property': 'tokenizer_import_normalisation', 'value': ':: lower();'},
]) {'property': 'tokenizer_import_transliteration',
'value': "'1' > '/1/'; 'ä' > 'ä '"}])
async def _do_sql(): async def _do_sql():
async with apiobj.api._async_api.begin() as conn: async with apiobj.api._async_api.begin() as conn:

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Provides dummy implementations of ASGIAdaptor for testing. Provides dummy implementations of ASGIAdaptor for testing.
@@ -13,6 +13,7 @@ import nominatim_api.v1.server_glue as glue
from nominatim_api.v1.format import dispatch as formatting from nominatim_api.v1.format import dispatch as formatting
from nominatim_api.config import Configuration from nominatim_api.config import Configuration
class FakeError(BaseException): class FakeError(BaseException):
def __init__(self, msg, status): def __init__(self, msg, status):
@@ -22,8 +23,10 @@ class FakeError(BaseException):
def __str__(self): def __str__(self):
return f'{self.status} -- {self.msg}' return f'{self.status} -- {self.msg}'
FakeResponse = namedtuple('FakeResponse', ['status', 'output', 'content_type']) FakeResponse = namedtuple('FakeResponse', ['status', 'output', 'content_type'])
class FakeAdaptor(glue.ASGIAdaptor): class FakeAdaptor(glue.ASGIAdaptor):
def __init__(self, params=None, headers=None, config=None): def __init__(self, params=None, headers=None, config=None):
@@ -31,23 +34,18 @@ class FakeAdaptor(glue.ASGIAdaptor):
self.headers = headers or {} self.headers = headers or {}
self._config = config or Configuration(None) self._config = config or Configuration(None)
def get(self, name, default=None): def get(self, name, default=None):
return self.params.get(name, default) return self.params.get(name, default)
def get_header(self, name, default=None): def get_header(self, name, default=None):
return self.headers.get(name, default) return self.headers.get(name, default)
def error(self, msg, status=400): def error(self, msg, status=400):
return FakeError(msg, status) return FakeError(msg, status)
def create_response(self, status, output, num_results): def create_response(self, status, output, num_results):
return FakeResponse(status, output, self.content_type) return FakeResponse(status, output, self.content_type)
def base_uri(self): def base_uri(self):
return 'http://test' return 'http://test'
@@ -56,5 +54,3 @@ class FakeAdaptor(glue.ASGIAdaptor):
def formatting(self): def formatting(self):
return formatting return formatting

View File

@@ -2,21 +2,18 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for normalizing search queries. Tests for normalizing search queries.
""" """
from pathlib import Path
import pytest
from icu import Transliterator from icu import Transliterator
import nominatim_api.search.query as qmod import nominatim_api.search.query as qmod
from nominatim_api.query_preprocessing.config import QueryConfig from nominatim_api.query_preprocessing.config import QueryConfig
from nominatim_api.query_preprocessing import normalize from nominatim_api.query_preprocessing import normalize
def run_preprocessor_on(query, norm): def run_preprocessor_on(query, norm):
normalizer = Transliterator.createFromRules("normalization", norm) normalizer = Transliterator.createFromRules("normalization", norm)
proc = normalize.create(QueryConfig().set_normalizer(normalizer)) proc = normalize.create(QueryConfig().set_normalizer(normalizer))
@@ -26,9 +23,9 @@ def run_preprocessor_on(query, norm):
def test_normalize_simple(): def test_normalize_simple():
norm = ':: lower();' norm = ':: lower();'
query = [qmod.Phrase(qmod.PhraseType.NONE, 'Hallo')] query = [qmod.Phrase(qmod.PHRASE_ANY, 'Hallo')]
out = run_preprocessor_on(query, norm) out = run_preprocessor_on(query, norm)
assert len(out) == 1 assert len(out) == 1
assert out == [qmod.Phrase(qmod.PhraseType.NONE, 'hallo')] assert out == [qmod.Phrase(qmod.PHRASE_ANY, 'hallo')]

View File

@@ -7,16 +7,13 @@
""" """
Tests for japanese phrase splitting. Tests for japanese phrase splitting.
""" """
from pathlib import Path
import pytest import pytest
from icu import Transliterator
import nominatim_api.search.query as qmod import nominatim_api.search.query as qmod
from nominatim_api.query_preprocessing.config import QueryConfig from nominatim_api.query_preprocessing.config import QueryConfig
from nominatim_api.query_preprocessing import split_japanese_phrases from nominatim_api.query_preprocessing import split_japanese_phrases
def run_preprocessor_on(query): def run_preprocessor_on(query):
proc = split_japanese_phrases.create(QueryConfig().set_normalizer(None)) proc = split_japanese_phrases.create(QueryConfig().set_normalizer(None))
@@ -27,8 +24,8 @@ def run_preprocessor_on(query):
('大阪府大阪', '大阪府:大阪'), ('大阪府大阪', '大阪府:大阪'),
('大阪市大阪', '大阪市:大阪')]) ('大阪市大阪', '大阪市:大阪')])
def test_split_phrases(inp, outp): def test_split_phrases(inp, outp):
query = [qmod.Phrase(qmod.PhraseType.NONE, inp)] query = [qmod.Phrase(qmod.PHRASE_ANY, inp)]
out = run_preprocessor_on(query) out = run_preprocessor_on(query)
assert out == [qmod.Phrase(qmod.PhraseType.NONE, outp)] assert out == [qmod.Phrase(qmod.PHRASE_ANY, outp)]

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for tokenized query data structures. Tests for tokenized query data structures.
@@ -11,6 +11,7 @@ import pytest
from nominatim_api.search import query from nominatim_api.search import query
class MyToken(query.Token): class MyToken(query.Token):
def get_category(self): def get_category(self):
@@ -22,42 +23,44 @@ def mktoken(tid: int):
lookup_word='foo') lookup_word='foo')
@pytest.mark.parametrize('ptype,ttype', [('NONE', 'WORD'), @pytest.fixture
('AMENITY', 'QUALIFIER'), def qnode():
('STREET', 'PARTIAL'), return query.QueryNode(query.BREAK_PHRASE, query.PHRASE_ANY, 0.0, '', '')
('CITY', 'WORD'),
('COUNTRY', 'COUNTRY'),
('POSTCODE', 'POSTCODE')]) @pytest.mark.parametrize('ptype,ttype', [(query.PHRASE_ANY, 'W'),
(query.PHRASE_AMENITY, 'Q'),
(query.PHRASE_STREET, 'w'),
(query.PHRASE_CITY, 'W'),
(query.PHRASE_COUNTRY, 'C'),
(query.PHRASE_POSTCODE, 'P')])
def test_phrase_compatible(ptype, ttype): def test_phrase_compatible(ptype, ttype):
assert query.PhraseType[ptype].compatible_with(query.TokenType[ttype], False) assert query._phrase_compatible_with(ptype, ttype, False)
@pytest.mark.parametrize('ptype', ['COUNTRY', 'POSTCODE']) @pytest.mark.parametrize('ptype', [query.PHRASE_COUNTRY, query.PHRASE_POSTCODE])
def test_phrase_incompatible(ptype): def test_phrase_incompatible(ptype):
assert not query.PhraseType[ptype].compatible_with(query.TokenType.PARTIAL, True) assert not query._phrase_compatible_with(ptype, query.TOKEN_PARTIAL, True)
def test_query_node_empty(): def test_query_node_empty(qnode):
qn = query.QueryNode(query.BreakType.PHRASE, query.PhraseType.NONE) assert not qnode.has_tokens(3, query.TOKEN_PARTIAL)
assert qnode.get_tokens(3, query.TOKEN_WORD) is None
assert not qn.has_tokens(3, query.TokenType.PARTIAL)
assert qn.get_tokens(3, query.TokenType.WORD) is None
def test_query_node_with_content(): def test_query_node_with_content(qnode):
qn = query.QueryNode(query.BreakType.PHRASE, query.PhraseType.NONE) qnode.starting.append(query.TokenList(2, query.TOKEN_PARTIAL, [mktoken(100), mktoken(101)]))
qn.starting.append(query.TokenList(2, query.TokenType.PARTIAL, [mktoken(100), mktoken(101)])) qnode.starting.append(query.TokenList(2, query.TOKEN_WORD, [mktoken(1000)]))
qn.starting.append(query.TokenList(2, query.TokenType.WORD, [mktoken(1000)]))
assert not qn.has_tokens(3, query.TokenType.PARTIAL) assert not qnode.has_tokens(3, query.TOKEN_PARTIAL)
assert not qn.has_tokens(2, query.TokenType.COUNTRY) assert not qnode.has_tokens(2, query.TOKEN_COUNTRY)
assert qn.has_tokens(2, query.TokenType.PARTIAL) assert qnode.has_tokens(2, query.TOKEN_PARTIAL)
assert qn.has_tokens(2, query.TokenType.WORD) assert qnode.has_tokens(2, query.TOKEN_WORD)
assert qn.get_tokens(3, query.TokenType.PARTIAL) is None assert qnode.get_tokens(3, query.TOKEN_PARTIAL) is None
assert qn.get_tokens(2, query.TokenType.COUNTRY) is None assert qnode.get_tokens(2, query.TOKEN_COUNTRY) is None
assert len(qn.get_tokens(2, query.TokenType.PARTIAL)) == 2 assert len(qnode.get_tokens(2, query.TOKEN_PARTIAL)) == 2
assert len(qn.get_tokens(2, query.TokenType.WORD)) == 1 assert len(qnode.get_tokens(2, query.TOKEN_WORD)) == 1
def test_query_struct_empty(): def test_query_struct_empty():
@@ -67,19 +70,19 @@ def test_query_struct_empty():
def test_query_struct_with_tokens(): def test_query_struct_with_tokens():
q = query.QueryStruct([query.Phrase(query.PhraseType.NONE, 'foo bar')]) q = query.QueryStruct([query.Phrase(query.PHRASE_ANY, 'foo bar')])
q.add_node(query.BreakType.WORD, query.PhraseType.NONE) q.add_node(query.BREAK_WORD, query.PHRASE_ANY)
q.add_node(query.BreakType.END, query.PhraseType.NONE) q.add_node(query.BREAK_END, query.PHRASE_ANY)
assert q.num_token_slots() == 2 assert q.num_token_slots() == 2
q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1)) q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
q.add_token(query.TokenRange(1, 2), query.TokenType.PARTIAL, mktoken(2)) q.add_token(query.TokenRange(1, 2), query.TOKEN_PARTIAL, mktoken(2))
q.add_token(query.TokenRange(1, 2), query.TokenType.WORD, mktoken(99)) q.add_token(query.TokenRange(1, 2), query.TOKEN_WORD, mktoken(99))
q.add_token(query.TokenRange(1, 2), query.TokenType.WORD, mktoken(98)) q.add_token(query.TokenRange(1, 2), query.TOKEN_WORD, mktoken(98))
assert q.get_tokens(query.TokenRange(0, 2), query.TokenType.WORD) == [] assert q.get_tokens(query.TokenRange(0, 2), query.TOKEN_WORD) == []
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.WORD)) == 2 assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_WORD)) == 2
partials = q.get_partials_list(query.TokenRange(0, 2)) partials = q.get_partials_list(query.TokenRange(0, 2))
@@ -91,45 +94,44 @@ def test_query_struct_with_tokens():
def test_query_struct_incompatible_token(): def test_query_struct_incompatible_token():
q = query.QueryStruct([query.Phrase(query.PhraseType.COUNTRY, 'foo bar')]) q = query.QueryStruct([query.Phrase(query.PHRASE_COUNTRY, 'foo bar')])
q.add_node(query.BreakType.WORD, query.PhraseType.COUNTRY) q.add_node(query.BREAK_WORD, query.PHRASE_COUNTRY)
q.add_node(query.BreakType.END, query.PhraseType.NONE) q.add_node(query.BREAK_END, query.PHRASE_ANY)
q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1)) q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
q.add_token(query.TokenRange(1, 2), query.TokenType.COUNTRY, mktoken(100)) q.add_token(query.TokenRange(1, 2), query.TOKEN_COUNTRY, mktoken(100))
assert q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL) == [] assert q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL) == []
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.COUNTRY)) == 1 assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_COUNTRY)) == 1
def test_query_struct_amenity_single_word(): def test_query_struct_amenity_single_word():
q = query.QueryStruct([query.Phrase(query.PhraseType.AMENITY, 'bar')]) q = query.QueryStruct([query.Phrase(query.PHRASE_AMENITY, 'bar')])
q.add_node(query.BreakType.END, query.PhraseType.NONE) q.add_node(query.BREAK_END, query.PHRASE_ANY)
q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1)) q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
q.add_token(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM, mktoken(2)) q.add_token(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM, mktoken(2))
q.add_token(query.TokenRange(0, 1), query.TokenType.QUALIFIER, mktoken(3)) q.add_token(query.TokenRange(0, 1), query.TOKEN_QUALIFIER, mktoken(3))
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1 assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 1 assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 0 assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_QUALIFIER)) == 0
def test_query_struct_amenity_two_words(): def test_query_struct_amenity_two_words():
q = query.QueryStruct([query.Phrase(query.PhraseType.AMENITY, 'foo bar')]) q = query.QueryStruct([query.Phrase(query.PHRASE_AMENITY, 'foo bar')])
q.add_node(query.BreakType.WORD, query.PhraseType.AMENITY) q.add_node(query.BREAK_WORD, query.PHRASE_AMENITY)
q.add_node(query.BreakType.END, query.PhraseType.NONE) q.add_node(query.BREAK_END, query.PHRASE_ANY)
for trange in [(0, 1), (1, 2)]: for trange in [(0, 1), (1, 2)]:
q.add_token(query.TokenRange(*trange), query.TokenType.PARTIAL, mktoken(1)) q.add_token(query.TokenRange(*trange), query.TOKEN_PARTIAL, mktoken(1))
q.add_token(query.TokenRange(*trange), query.TokenType.NEAR_ITEM, mktoken(2)) q.add_token(query.TokenRange(*trange), query.TOKEN_NEAR_ITEM, mktoken(2))
q.add_token(query.TokenRange(*trange), query.TokenType.QUALIFIER, mktoken(3)) q.add_token(query.TokenRange(*trange), query.TOKEN_QUALIFIER, mktoken(3))
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1 assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 0 assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM)) == 0
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 1 assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_QUALIFIER)) == 1
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.NEAR_ITEM)) == 0
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.QUALIFIER)) == 1
assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_NEAR_ITEM)) == 0
assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_QUALIFIER)) == 1

View File

@@ -9,38 +9,39 @@ Tests for creating abstract searches from token assignments.
""" """
import pytest import pytest
from nominatim_api.search.query import Token, TokenRange, BreakType, PhraseType, TokenType, QueryStruct, Phrase from nominatim_api.search.query import Token, TokenRange, QueryStruct, Phrase
import nominatim_api.search.query as qmod
from nominatim_api.search.db_search_builder import SearchBuilder from nominatim_api.search.db_search_builder import SearchBuilder
from nominatim_api.search.token_assignment import TokenAssignment from nominatim_api.search.token_assignment import TokenAssignment
from nominatim_api.types import SearchDetails from nominatim_api.types import SearchDetails
import nominatim_api.search.db_searches as dbs import nominatim_api.search.db_searches as dbs
class MyToken(Token): class MyToken(Token):
def get_category(self): def get_category(self):
return 'this', 'that' return 'this', 'that'
def make_query(*args): def make_query(*args):
q = QueryStruct([Phrase(PhraseType.NONE, '')]) q = QueryStruct([Phrase(qmod.PHRASE_ANY, '')])
for _ in range(max(inner[0] for tlist in args for inner in tlist)): for _ in range(max(inner[0] for tlist in args for inner in tlist)):
q.add_node(BreakType.WORD, PhraseType.NONE) q.add_node(qmod.BREAK_WORD, qmod.PHRASE_ANY)
q.add_node(BreakType.END, PhraseType.NONE) q.add_node(qmod.BREAK_END, qmod.PHRASE_ANY)
for start, tlist in enumerate(args): for start, tlist in enumerate(args):
for end, ttype, tinfo in tlist: for end, ttype, tinfo in tlist:
for tid, word in tinfo: for tid, word in tinfo:
q.add_token(TokenRange(start, end), ttype, q.add_token(TokenRange(start, end), ttype,
MyToken(penalty=0.5 if ttype == TokenType.PARTIAL else 0.0, MyToken(penalty=0.5 if ttype == qmod.TOKEN_PARTIAL else 0.0,
token=tid, count=1, addr_count=1, token=tid, count=1, addr_count=1,
lookup_word=word)) lookup_word=word))
return q return q
def test_country_search(): def test_country_search():
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])]) q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
@@ -54,7 +55,7 @@ def test_country_search():
def test_country_search_with_country_restriction(): def test_country_search_with_country_restriction():
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])]) q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'en,fr'})) builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'en,fr'}))
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
@@ -68,7 +69,7 @@ def test_country_search_with_country_restriction():
def test_country_search_with_conflicting_country_restriction(): def test_country_search_with_conflicting_country_restriction():
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])]) q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'fr'})) builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'fr'}))
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
@@ -77,7 +78,7 @@ def test_country_search_with_conflicting_country_restriction():
def test_postcode_search_simple(): def test_postcode_search_simple():
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])]) q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1))))
@@ -93,8 +94,8 @@ def test_postcode_search_simple():
def test_postcode_with_country(): def test_postcode_with_country():
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])], q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
[(2, TokenType.COUNTRY, [(1, 'xx')])]) [(2, qmod.TOKEN_COUNTRY, [(1, 'xx')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1), searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
@@ -111,8 +112,8 @@ def test_postcode_with_country():
def test_postcode_with_address(): def test_postcode_with_address():
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])], q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
[(2, TokenType.PARTIAL, [(100, 'word')])]) [(2, qmod.TOKEN_PARTIAL, [(100, 'word')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1), searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
@@ -129,9 +130,9 @@ def test_postcode_with_address():
def test_postcode_with_address_with_full_word(): def test_postcode_with_address_with_full_word():
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])], q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
[(2, TokenType.PARTIAL, [(100, 'word')]), [(2, qmod.TOKEN_PARTIAL, [(100, 'word')]),
(2, TokenType.WORD, [(1, 'full')])]) (2, qmod.TOKEN_WORD, [(1, 'full')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1), searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
@@ -150,7 +151,7 @@ def test_postcode_with_address_with_full_word():
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1', 'bounded_viewbox': True}, @pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1', 'bounded_viewbox': True},
{'near': '10,10'}]) {'near': '10,10'}])
def test_near_item_only(kwargs): def test_near_item_only(kwargs):
q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])]) q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(2, 'foo')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs)) builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
@@ -166,7 +167,7 @@ def test_near_item_only(kwargs):
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1'}, @pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1'},
{}]) {}])
def test_near_item_skipped(kwargs): def test_near_item_skipped(kwargs):
q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])]) q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(2, 'foo')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs)) builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
@@ -175,8 +176,8 @@ def test_near_item_skipped(kwargs):
def test_name_only_search(): def test_name_only_search():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]), q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])]) (1, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
@@ -194,9 +195,9 @@ def test_name_only_search():
def test_name_with_qualifier(): def test_name_with_qualifier():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]), q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])], (1, qmod.TOKEN_WORD, [(100, 'a')])],
[(2, TokenType.QUALIFIER, [(55, 'hotel')])]) [(2, qmod.TOKEN_QUALIFIER, [(55, 'hotel')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1), searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
@@ -215,9 +216,9 @@ def test_name_with_qualifier():
def test_name_with_housenumber_search(): def test_name_with_housenumber_search():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]), q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])], (1, qmod.TOKEN_WORD, [(100, 'a')])],
[(2, TokenType.HOUSENUMBER, [(66, '66')])]) [(2, qmod.TOKEN_HOUSENUMBER, [(66, '66')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1), searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
@@ -235,13 +236,12 @@ def test_name_with_housenumber_search():
def test_name_and_address(): def test_name_and_address():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]), q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])], (1, qmod.TOKEN_WORD, [(100, 'a')])],
[(2, TokenType.PARTIAL, [(2, 'b')]), [(2, qmod.TOKEN_PARTIAL, [(2, 'b')]),
(2, TokenType.WORD, [(101, 'b')])], (2, qmod.TOKEN_WORD, [(101, 'b')])],
[(3, TokenType.PARTIAL, [(3, 'c')]), [(3, qmod.TOKEN_PARTIAL, [(3, 'c')]),
(3, TokenType.WORD, [(102, 'c')])] (3, qmod.TOKEN_WORD, [(102, 'c')])])
)
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1), searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
@@ -260,14 +260,13 @@ def test_name_and_address():
def test_name_and_complex_address(): def test_name_and_complex_address():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]), q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])], (1, qmod.TOKEN_WORD, [(100, 'a')])],
[(2, TokenType.PARTIAL, [(2, 'b')]), [(2, qmod.TOKEN_PARTIAL, [(2, 'b')]),
(3, TokenType.WORD, [(101, 'bc')])], (3, qmod.TOKEN_WORD, [(101, 'bc')])],
[(3, TokenType.PARTIAL, [(3, 'c')])], [(3, qmod.TOKEN_PARTIAL, [(3, 'c')])],
[(4, TokenType.PARTIAL, [(4, 'd')]), [(4, qmod.TOKEN_PARTIAL, [(4, 'd')]),
(4, TokenType.WORD, [(103, 'd')])] (4, qmod.TOKEN_WORD, [(103, 'd')])])
)
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1), searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
@@ -286,9 +285,9 @@ def test_name_and_complex_address():
def test_name_only_near_search(): def test_name_only_near_search():
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])], q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]), [(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])]) (2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2), searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
@@ -302,8 +301,8 @@ def test_name_only_near_search():
def test_name_only_search_with_category(): def test_name_only_search_with_category():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]), q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])]) (1, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]})) builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
@@ -316,9 +315,9 @@ def test_name_only_search_with_category():
def test_name_with_near_item_search_with_category_mismatch(): def test_name_with_near_item_search_with_category_mismatch():
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])], q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]), [(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])]) (2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]})) builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2), searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
@@ -328,9 +327,9 @@ def test_name_with_near_item_search_with_category_mismatch():
def test_name_with_near_item_search_with_category_match(): def test_name_with_near_item_search_with_category_match():
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])], q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]), [(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])]) (2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'), builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'),
('this', 'that')]})) ('this', 'that')]}))
@@ -345,9 +344,9 @@ def test_name_with_near_item_search_with_category_match():
def test_name_with_qualifier_search_with_category_mismatch(): def test_name_with_qualifier_search_with_category_mismatch():
q = make_query([(1, TokenType.QUALIFIER, [(88, 'g')])], q = make_query([(1, qmod.TOKEN_QUALIFIER, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]), [(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])]) (2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]})) builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2), searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
@@ -357,9 +356,9 @@ def test_name_with_qualifier_search_with_category_mismatch():
def test_name_with_qualifier_search_with_category_match(): def test_name_with_qualifier_search_with_category_match():
q = make_query([(1, TokenType.QUALIFIER, [(88, 'g')])], q = make_query([(1, qmod.TOKEN_QUALIFIER, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]), [(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])]) (2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'), builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'),
('this', 'that')]})) ('this', 'that')]}))
@@ -374,8 +373,8 @@ def test_name_with_qualifier_search_with_category_match():
def test_name_only_search_with_countries(): def test_name_only_search_with_countries():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]), q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])]) (1, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'de,en'})) builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'de,en'}))
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1)))) searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
@@ -391,19 +390,19 @@ def test_name_only_search_with_countries():
def make_counted_searches(name_part, name_full, address_part, address_full, def make_counted_searches(name_part, name_full, address_part, address_full,
num_address_parts=1): num_address_parts=1):
q = QueryStruct([Phrase(PhraseType.NONE, '')]) q = QueryStruct([Phrase(qmod.PHRASE_ANY, '')])
for i in range(1 + num_address_parts): for i in range(1 + num_address_parts):
q.add_node(BreakType.WORD, PhraseType.NONE) q.add_node(qmod.BREAK_WORD, qmod.PHRASE_ANY)
q.add_node(BreakType.END, PhraseType.NONE) q.add_node(qmod.BREAK_END, qmod.PHRASE_ANY)
q.add_token(TokenRange(0, 1), TokenType.PARTIAL, q.add_token(TokenRange(0, 1), qmod.TOKEN_PARTIAL,
MyToken(0.5, 1, name_part, 1, 'name_part')) MyToken(0.5, 1, name_part, 1, 'name_part'))
q.add_token(TokenRange(0, 1), TokenType.WORD, q.add_token(TokenRange(0, 1), qmod.TOKEN_WORD,
MyToken(0, 101, name_full, 1, 'name_full')) MyToken(0, 101, name_full, 1, 'name_full'))
for i in range(num_address_parts): for i in range(num_address_parts):
q.add_token(TokenRange(i + 1, i + 2), TokenType.PARTIAL, q.add_token(TokenRange(i + 1, i + 2), qmod.TOKEN_PARTIAL,
MyToken(0.5, 2, address_part, 1, 'address_part')) MyToken(0.5, 2, address_part, 1, 'address_part'))
q.add_token(TokenRange(i + 1, i + 2), TokenType.WORD, q.add_token(TokenRange(i + 1, i + 2), qmod.TOKEN_WORD,
MyToken(0, 102, address_full, 1, 'address_full')) MyToken(0, 102, address_full, 1, 'address_full'))
builder = SearchBuilder(q, SearchDetails()) builder = SearchBuilder(q, SearchDetails())
@@ -422,8 +421,8 @@ def test_infrequent_partials_in_name():
assert len(search.lookups) == 2 assert len(search.lookups) == 2
assert len(search.rankings) == 2 assert len(search.rankings) == 2
assert set((l.column, l.lookup_type.__name__) for l in search.lookups) == \ assert set((s.column, s.lookup_type.__name__) for s in search.lookups) == \
{('name_vector', 'LookupAll'), ('nameaddress_vector', 'Restrict')} {('name_vector', 'LookupAll'), ('nameaddress_vector', 'Restrict')}
def test_frequent_partials_in_name_and_address(): def test_frequent_partials_in_name_and_address():
@@ -434,10 +433,10 @@ def test_frequent_partials_in_name_and_address():
assert all(isinstance(s, dbs.PlaceSearch) for s in searches) assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
searches.sort(key=lambda s: s.penalty) searches.sort(key=lambda s: s.penalty)
assert set((l.column, l.lookup_type.__name__) for l in searches[0].lookups) == \ assert set((s.column, s.lookup_type.__name__) for s in searches[0].lookups) == \
{('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')} {('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')}
assert set((l.column, l.lookup_type.__name__) for l in searches[1].lookups) == \ assert set((s.column, s.lookup_type.__name__) for s in searches[1].lookups) == \
{('nameaddress_vector', 'LookupAll'), ('name_vector', 'LookupAll')} {('nameaddress_vector', 'LookupAll'), ('name_vector', 'LookupAll')}
def test_too_frequent_partials_in_name_and_address(): def test_too_frequent_partials_in_name_and_address():
@@ -448,5 +447,5 @@ def test_too_frequent_partials_in_name_and_address():
assert all(isinstance(s, dbs.PlaceSearch) for s in searches) assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
searches.sort(key=lambda s: s.penalty) searches.sort(key=lambda s: s.penalty)
assert set((l.column, l.lookup_type.__name__) for l in searches[0].lookups) == \ assert set((s.column, s.lookup_type.__name__) for s in searches[0].lookups) == \
{('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')} {('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')}

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for query analyzer for ICU tokenizer. Tests for query analyzer for ICU tokenizer.
@@ -11,11 +11,13 @@ import pytest
import pytest_asyncio import pytest_asyncio
from nominatim_api import NominatimAPIAsync from nominatim_api import NominatimAPIAsync
from nominatim_api.search.query import Phrase, PhraseType, TokenType, BreakType from nominatim_api.search.query import Phrase
import nominatim_api.search.query as qmod
import nominatim_api.search.icu_tokenizer as tok import nominatim_api.search.icu_tokenizer as tok
from nominatim_api.logging import set_log_output, get_and_disable from nominatim_api.logging import set_log_output, get_and_disable
async def add_word(conn, word_id, word_token, wtype, word, info = None):
async def add_word(conn, word_id, word_token, wtype, word, info=None):
t = conn.t.meta.tables['word'] t = conn.t.meta.tables['word']
await conn.execute(t.insert(), {'word_id': word_id, await conn.execute(t.insert(), {'word_id': word_id,
'word_token': word_token, 'word_token': word_token,
@@ -25,7 +27,8 @@ async def add_word(conn, word_id, word_token, wtype, word, info = None):
def make_phrase(query): def make_phrase(query):
return [Phrase(PhraseType.NONE, s) for s in query.split(',')] return [Phrase(qmod.PHRASE_ANY, s) for s in query.split(',')]
@pytest_asyncio.fixture @pytest_asyncio.fixture
async def conn(table_factory): async def conn(table_factory):
@@ -62,7 +65,7 @@ async def test_single_phrase_with_unknown_terms(conn):
query = await ana.analyze_query(make_phrase('foo BAR')) query = await ana.analyze_query(make_phrase('foo BAR'))
assert len(query.source) == 1 assert len(query.source) == 1
assert query.source[0].ptype == PhraseType.NONE assert query.source[0].ptype == qmod.PHRASE_ANY
assert query.source[0].text == 'foo bar' assert query.source[0].text == 'foo bar'
assert query.num_token_slots() == 2 assert query.num_token_slots() == 2
@@ -96,17 +99,15 @@ async def test_splitting_in_transliteration(conn):
assert query.num_token_slots() == 2 assert query.num_token_slots() == 2
assert query.nodes[0].starting assert query.nodes[0].starting
assert query.nodes[1].starting assert query.nodes[1].starting
assert query.nodes[1].btype == BreakType.TOKEN assert query.nodes[1].btype == qmod.BREAK_TOKEN
@pytest.mark.asyncio @pytest.mark.asyncio
@pytest.mark.parametrize('term,order', [('23456', ['POSTCODE', 'HOUSENUMBER', 'WORD', 'PARTIAL']), @pytest.mark.parametrize('term,order', [('23456', ['P', 'H', 'W', 'w']),
('3', ['HOUSENUMBER', 'POSTCODE', 'WORD', 'PARTIAL']) ('3', ['H', 'W', 'w'])])
])
async def test_penalty_postcodes_and_housenumbers(conn, term, order): async def test_penalty_postcodes_and_housenumbers(conn, term, order):
ana = await tok.create_query_analyzer(conn) ana = await tok.create_query_analyzer(conn)
await add_word(conn, 1, term, 'P', None)
await add_word(conn, 2, term, 'H', term) await add_word(conn, 2, term, 'H', term)
await add_word(conn, 3, term, 'w', term) await add_word(conn, 3, term, 'w', term)
await add_word(conn, 4, term, 'W', term) await add_word(conn, 4, term, 'W', term)
@@ -115,11 +116,12 @@ async def test_penalty_postcodes_and_housenumbers(conn, term, order):
assert query.num_token_slots() == 1 assert query.num_token_slots() == 1
torder = [(tl.tokens[0].penalty, tl.ttype.name) for tl in query.nodes[0].starting] torder = [(tl.tokens[0].penalty, tl.ttype) for tl in query.nodes[0].starting]
torder.sort() torder.sort()
assert [t[1] for t in torder] == order assert [t[1] for t in torder] == order
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_category_words_only_at_beginning(conn): async def test_category_words_only_at_beginning(conn):
ana = await tok.create_query_analyzer(conn) ana = await tok.create_query_analyzer(conn)
@@ -131,7 +133,7 @@ async def test_category_words_only_at_beginning(conn):
assert query.num_token_slots() == 3 assert query.num_token_slots() == 3
assert len(query.nodes[0].starting) == 1 assert len(query.nodes[0].starting) == 1
assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM assert query.nodes[0].starting[0].ttype == qmod.TOKEN_NEAR_ITEM
assert not query.nodes[2].starting assert not query.nodes[2].starting
@@ -145,7 +147,7 @@ async def test_freestanding_qualifier_words_become_category(conn):
assert query.num_token_slots() == 1 assert query.num_token_slots() == 1
assert len(query.nodes[0].starting) == 1 assert len(query.nodes[0].starting) == 1
assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM assert query.nodes[0].starting[0].ttype == qmod.TOKEN_NEAR_ITEM
@pytest.mark.asyncio @pytest.mark.asyncio
@@ -158,9 +160,9 @@ async def test_qualifier_words(conn):
query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo')) query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo'))
assert query.num_token_slots() == 5 assert query.num_token_slots() == 5
assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.QUALIFIER} assert set(t.ttype for t in query.nodes[0].starting) == {qmod.TOKEN_QUALIFIER}
assert set(t.ttype for t in query.nodes[2].starting) == {TokenType.QUALIFIER} assert set(t.ttype for t in query.nodes[2].starting) == {qmod.TOKEN_QUALIFIER}
assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.QUALIFIER} assert set(t.ttype for t in query.nodes[4].starting) == {qmod.TOKEN_QUALIFIER}
@pytest.mark.asyncio @pytest.mark.asyncio
@@ -172,14 +174,16 @@ async def test_add_unknown_housenumbers(conn):
query = await ana.analyze_query(make_phrase('466 23 99834 34a')) query = await ana.analyze_query(make_phrase('466 23 99834 34a'))
assert query.num_token_slots() == 4 assert query.num_token_slots() == 4
assert query.nodes[0].starting[0].ttype == TokenType.HOUSENUMBER assert query.nodes[0].starting[0].ttype == qmod.TOKEN_HOUSENUMBER
assert len(query.nodes[0].starting[0].tokens) == 1 assert len(query.nodes[0].starting[0].tokens) == 1
assert query.nodes[0].starting[0].tokens[0].token == 0 assert query.nodes[0].starting[0].tokens[0].token == 0
assert query.nodes[1].starting[0].ttype == TokenType.HOUSENUMBER assert query.nodes[1].starting[0].ttype == qmod.TOKEN_HOUSENUMBER
assert len(query.nodes[1].starting[0].tokens) == 1 assert len(query.nodes[1].starting[0].tokens) == 1
assert query.nodes[1].starting[0].tokens[0].token == 1 assert query.nodes[1].starting[0].tokens[0].token == 1
assert not query.nodes[2].starting assert query.nodes[2].has_tokens(3, qmod.TOKEN_POSTCODE)
assert not query.nodes[3].starting assert not query.nodes[2].has_tokens(3, qmod.TOKEN_HOUSENUMBER)
assert not query.nodes[2].has_tokens(4, qmod.TOKEN_HOUSENUMBER)
assert not query.nodes[3].has_tokens(4, qmod.TOKEN_HOUSENUMBER)
@pytest.mark.asyncio @pytest.mark.asyncio

View File

@@ -0,0 +1,171 @@
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test for parsing of postcodes in queries.
"""
import re
from itertools import zip_longest
import pytest
from nominatim_api.search.postcode_parser import PostcodeParser
from nominatim_api.search.query import QueryStruct, PHRASE_ANY, PHRASE_POSTCODE, PHRASE_STREET
@pytest.fixture
def pc_config(project_env):
country_file = project_env.project_dir / 'country_settings.yaml'
country_file.write_text(r"""
ab:
postcode:
pattern: "ddddd ll"
ba:
postcode:
pattern: "ddddd"
de:
postcode:
pattern: "ddddd"
gr:
postcode:
pattern: "(ddd) ?(dd)"
output: \1 \2
in:
postcode:
pattern: "(ddd) ?(ddd)"
output: \1\2
mc:
postcode:
pattern: "980dd"
mz:
postcode:
pattern: "(dddd)(?:-dd)?"
bn:
postcode:
pattern: "(ll) ?(dddd)"
output: \1\2
ky:
postcode:
pattern: "(d)-(dddd)"
output: KY\1-\2
gb:
postcode:
pattern: "(l?ld[A-Z0-9]?) ?(dll)"
output: \1 \2
""")
return project_env
def mk_query(inp):
query = QueryStruct([])
phrase_split = re.split(r"([ ,:'-])", inp)
for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue='>'):
query.add_node(breakchar, PHRASE_ANY, 0.1, word, word)
return query
@pytest.mark.parametrize('query,pos', [('45325 Berlin', 0),
('45325:Berlin', 0),
('45325,Berlin', 0),
('Berlin 45325', 1),
('Berlin,45325', 1),
('Berlin:45325', 1),
('Hansastr,45325 Berlin', 1),
('Hansastr 45325 Berlin', 1)])
def test_simple_postcode(pc_config, query, pos):
parser = PostcodeParser(pc_config)
result = parser.parse(mk_query(query))
assert result == {(pos, pos + 1, '45325'), (pos, pos + 1, '453 25')}
@pytest.mark.parametrize('query', ['EC1R 3HF', 'ec1r 3hf'])
def test_postcode_matching_case_insensitive(pc_config, query):
parser = PostcodeParser(pc_config)
assert parser.parse(mk_query(query)) == {(0, 2, 'EC1R 3HF')}
def test_contained_postcode(pc_config):
parser = PostcodeParser(pc_config)
assert parser.parse(mk_query('12345 dx')) == {(0, 1, '12345'), (0, 1, '123 45'),
(0, 2, '12345 DX')}
@pytest.mark.parametrize('query,frm,to', [('345987', 0, 1), ('345 987', 0, 2),
('Aina 345 987', 1, 3),
('Aina 23 345 987 ff', 2, 4)])
def test_postcode_with_space(pc_config, query, frm, to):
parser = PostcodeParser(pc_config)
result = parser.parse(mk_query(query))
assert result == {(frm, to, '345987')}
def test_overlapping_postcode(pc_config):
parser = PostcodeParser(pc_config)
assert parser.parse(mk_query('123 456 78')) == {(0, 2, '123456'), (1, 3, '456 78')}
@pytest.mark.parametrize('query', ['45325-Berlin', "45325'Berlin",
'Berlin-45325', "Berlin'45325", '45325Berlin'
'345-987', "345'987", '345,987', '345:987'])
def test_not_a_postcode(pc_config, query):
parser = PostcodeParser(pc_config)
assert not parser.parse(mk_query(query))
@pytest.mark.parametrize('query', ['ba 12233', 'ba-12233'])
def test_postcode_with_country_prefix(pc_config, query):
parser = PostcodeParser(pc_config)
assert (0, 2, '12233') in parser.parse(mk_query(query))
def test_postcode_with_joined_country_prefix(pc_config):
parser = PostcodeParser(pc_config)
assert parser.parse(mk_query('ba12233')) == {(0, 1, '12233')}
def test_postcode_with_non_matching_country_prefix(pc_config):
parser = PostcodeParser(pc_config)
assert not parser.parse(mk_query('ky12233'))
def test_postcode_inside_postcode_phrase(pc_config):
parser = PostcodeParser(pc_config)
query = QueryStruct([])
query.nodes[-1].ptype = PHRASE_STREET
query.add_node(',', PHRASE_STREET, 0.1, '12345', '12345')
query.add_node(',', PHRASE_POSTCODE, 0.1, 'xz', 'xz')
query.add_node('>', PHRASE_POSTCODE, 0.1, '4444', '4444')
assert parser.parse(query) == {(2, 3, '4444')}
def test_partial_postcode_in_postcode_phrase(pc_config):
parser = PostcodeParser(pc_config)
query = QueryStruct([])
query.nodes[-1].ptype = PHRASE_POSTCODE
query.add_node(' ', PHRASE_POSTCODE, 0.1, '2224', '2224')
query.add_node('>', PHRASE_POSTCODE, 0.1, '12345', '12345')
assert not parser.parse(query)

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Test data types for search queries. Test data types for search queries.
@@ -11,14 +11,15 @@ import pytest
import nominatim_api.search.query as nq import nominatim_api.search.query as nq
def test_token_range_equal(): def test_token_range_equal():
assert nq.TokenRange(2, 3) == nq.TokenRange(2, 3) assert nq.TokenRange(2, 3) == nq.TokenRange(2, 3)
assert not (nq.TokenRange(2, 3) != nq.TokenRange(2, 3)) assert not (nq.TokenRange(2, 3) != nq.TokenRange(2, 3))
@pytest.mark.parametrize('lop,rop', [((1, 2), (3, 4)), @pytest.mark.parametrize('lop,rop', [((1, 2), (3, 4)),
((3, 4), (3, 5)), ((3, 4), (3, 5)),
((10, 12), (11, 12))]) ((10, 12), (11, 12))])
def test_token_range_unequal(lop, rop): def test_token_range_unequal(lop, rop):
assert not (nq.TokenRange(*lop) == nq.TokenRange(*rop)) assert not (nq.TokenRange(*lop) == nq.TokenRange(*rop))
assert nq.TokenRange(*lop) != nq.TokenRange(*rop) assert nq.TokenRange(*lop) != nq.TokenRange(*rop)
@@ -28,17 +29,17 @@ def test_token_range_lt():
assert nq.TokenRange(1, 3) < nq.TokenRange(10, 12) assert nq.TokenRange(1, 3) < nq.TokenRange(10, 12)
assert nq.TokenRange(5, 6) < nq.TokenRange(7, 8) assert nq.TokenRange(5, 6) < nq.TokenRange(7, 8)
assert nq.TokenRange(1, 4) < nq.TokenRange(4, 5) assert nq.TokenRange(1, 4) < nq.TokenRange(4, 5)
assert not(nq.TokenRange(5, 6) < nq.TokenRange(5, 6)) assert not (nq.TokenRange(5, 6) < nq.TokenRange(5, 6))
assert not(nq.TokenRange(10, 11) < nq.TokenRange(4, 5)) assert not (nq.TokenRange(10, 11) < nq.TokenRange(4, 5))
def test_token_rankge_gt(): def test_token_rankge_gt():
assert nq.TokenRange(3, 4) > nq.TokenRange(1, 2) assert nq.TokenRange(3, 4) > nq.TokenRange(1, 2)
assert nq.TokenRange(100, 200) > nq.TokenRange(10, 11) assert nq.TokenRange(100, 200) > nq.TokenRange(10, 11)
assert nq.TokenRange(10, 11) > nq.TokenRange(4, 10) assert nq.TokenRange(10, 11) > nq.TokenRange(4, 10)
assert not(nq.TokenRange(5, 6) > nq.TokenRange(5, 6)) assert not (nq.TokenRange(5, 6) > nq.TokenRange(5, 6))
assert not(nq.TokenRange(1, 2) > nq.TokenRange(3, 4)) assert not (nq.TokenRange(1, 2) > nq.TokenRange(3, 4))
assert not(nq.TokenRange(4, 10) > nq.TokenRange(3, 5)) assert not (nq.TokenRange(4, 10) > nq.TokenRange(3, 5))
def test_token_range_unimplemented_ops(): def test_token_range_unimplemented_ops():
@@ -46,3 +47,19 @@ def test_token_range_unimplemented_ops():
nq.TokenRange(1, 3) <= nq.TokenRange(10, 12) nq.TokenRange(1, 3) <= nq.TokenRange(10, 12)
with pytest.raises(TypeError): with pytest.raises(TypeError):
nq.TokenRange(1, 3) >= nq.TokenRange(10, 12) nq.TokenRange(1, 3) >= nq.TokenRange(10, 12)
def test_query_extract_words():
q = nq.QueryStruct([])
q.add_node(nq.BREAK_WORD, nq.PHRASE_ANY, 0.1, '12', '')
q.add_node(nq.BREAK_TOKEN, nq.PHRASE_ANY, 0.0, 'ab', '')
q.add_node(nq.BREAK_PHRASE, nq.PHRASE_ANY, 0.0, '12', '')
q.add_node(nq.BREAK_END, nq.PHRASE_ANY, 0.5, 'hallo', '')
words = q.extract_words(base_penalty=1.0)
assert set(words.keys()) \
== {'12', 'ab', 'hallo', '12 ab', 'ab 12', '12 ab 12'}
assert sorted(words['12']) == [nq.TokenRange(0, 1, 1.0), nq.TokenRange(2, 3, 1.0)]
assert words['12 ab'] == [nq.TokenRange(0, 2, 1.1)]
assert words['hallo'] == [nq.TokenRange(3, 4, 1.0)]

View File

@@ -2,18 +2,17 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for query analyzer creation. Tests for query analyzer creation.
""" """
from pathlib import Path
import pytest import pytest
from nominatim_api.search.query_analyzer_factory import make_query_analyzer from nominatim_api.search.query_analyzer_factory import make_query_analyzer
from nominatim_api.search.icu_tokenizer import ICUQueryAnalyzer from nominatim_api.search.icu_tokenizer import ICUQueryAnalyzer
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_import_icu_tokenizer(table_factory, api): async def test_import_icu_tokenizer(table_factory, api):
table_factory('nominatim_properties', table_factory('nominatim_properties',

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for running the country searcher. Tests for running the country searcher.
@@ -48,6 +48,7 @@ def test_find_from_placex(apiobj, frontend):
assert results[0].place_id == 55 assert results[0].place_id == 55
assert results[0].accuracy == 0.8 assert results[0].accuracy == 0.8
def test_find_from_fallback_countries(apiobj, frontend): def test_find_from_fallback_countries(apiobj, frontend):
apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))') apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
apiobj.add_country_name('ro', {'name': 'România'}) apiobj.add_country_name('ro', {'name': 'România'})
@@ -87,7 +88,6 @@ class TestCountryParameters:
apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))') apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
apiobj.add_country_name('ro', {'name': 'România'}) apiobj.add_country_name('ro', {'name': 'România'})
@pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON, @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
napi.GeometryFormat.KML, napi.GeometryFormat.KML,
napi.GeometryFormat.SVG, napi.GeometryFormat.SVG,
@@ -100,7 +100,6 @@ class TestCountryParameters:
assert len(results) == 1 assert len(results) == 1
assert geom.name.lower() in results[0].geometry assert geom.name.lower() in results[0].geometry
@pytest.mark.parametrize('pid,rids', [(76, [55]), (55, [])]) @pytest.mark.parametrize('pid,rids', [(76, [55]), (55, [])])
def test_exclude_place_id(self, apiobj, frontend, pid, rids): def test_exclude_place_id(self, apiobj, frontend, pid, rids):
results = run_search(apiobj, frontend, 0.5, ['yw', 'ro'], results = run_search(apiobj, frontend, 0.5, ['yw', 'ro'],
@@ -108,7 +107,6 @@ class TestCountryParameters:
assert [r.place_id for r in results] == rids assert [r.place_id for r in results] == rids
@pytest.mark.parametrize('viewbox,rids', [((9, 9, 11, 11), [55]), @pytest.mark.parametrize('viewbox,rids', [((9, 9, 11, 11), [55]),
((-10, -10, -3, -3), [])]) ((-10, -10, -3, -3), [])])
def test_bounded_viewbox_in_placex(self, apiobj, frontend, viewbox, rids): def test_bounded_viewbox_in_placex(self, apiobj, frontend, viewbox, rids):
@@ -118,9 +116,8 @@ class TestCountryParameters:
assert [r.place_id for r in results] == rids assert [r.place_id for r in results] == rids
@pytest.mark.parametrize('viewbox,numres', [((0, 0, 1, 1), 1), @pytest.mark.parametrize('viewbox,numres', [((0, 0, 1, 1), 1),
((-10, -10, -3, -3), 0)]) ((-10, -10, -3, -3), 0)])
def test_bounded_viewbox_in_fallback(self, apiobj, frontend, viewbox, numres): def test_bounded_viewbox_in_fallback(self, apiobj, frontend, viewbox, numres):
results = run_search(apiobj, frontend, 0.5, ['ro'], results = run_search(apiobj, frontend, 0.5, ['ro'],
details=SearchDetails.from_kwargs({'viewbox': viewbox, details=SearchDetails.from_kwargs({'viewbox': viewbox,

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for running the near searcher. Tests for running the near searcher.
@@ -12,8 +12,8 @@ import pytest
import nominatim_api as napi import nominatim_api as napi
from nominatim_api.types import SearchDetails from nominatim_api.types import SearchDetails
from nominatim_api.search.db_searches import NearSearch, PlaceSearch from nominatim_api.search.db_searches import NearSearch, PlaceSearch
from nominatim_api.search.db_search_fields import WeightedStrings, WeightedCategories,\ from nominatim_api.search.db_search_fields import WeightedStrings, WeightedCategories, \
FieldLookup, FieldRanking, RankedTokens FieldLookup
from nominatim_api.search.db_search_lookups import LookupAll from nominatim_api.search.db_search_lookups import LookupAll
@@ -80,7 +80,6 @@ class TestNearSearch:
apiobj.add_search_name(101, names=[56], country_code='mx', apiobj.add_search_name(101, names=[56], country_code='mx',
centroid=(-10.3, 56.9)) centroid=(-10.3, 56.9))
def test_near_in_placex(self, apiobj, frontend): def test_near_in_placex(self, apiobj, frontend):
apiobj.add_placex(place_id=22, class_='amenity', type='bank', apiobj.add_placex(place_id=22, class_='amenity', type='bank',
centroid=(5.6001, 4.2994)) centroid=(5.6001, 4.2994))
@@ -91,7 +90,6 @@ class TestNearSearch:
assert [r.place_id for r in results] == [22] assert [r.place_id for r in results] == [22]
def test_multiple_types_near_in_placex(self, apiobj, frontend): def test_multiple_types_near_in_placex(self, apiobj, frontend):
apiobj.add_placex(place_id=22, class_='amenity', type='bank', apiobj.add_placex(place_id=22, class_='amenity', type='bank',
importance=0.002, importance=0.002,
@@ -105,7 +103,6 @@ class TestNearSearch:
assert [r.place_id for r in results] == [22, 23] assert [r.place_id for r in results] == [22, 23]
def test_near_in_classtype(self, apiobj, frontend): def test_near_in_classtype(self, apiobj, frontend):
apiobj.add_placex(place_id=22, class_='amenity', type='bank', apiobj.add_placex(place_id=22, class_='amenity', type='bank',
centroid=(5.6, 4.34)) centroid=(5.6, 4.34))
@@ -118,7 +115,6 @@ class TestNearSearch:
assert [r.place_id for r in results] == [22] assert [r.place_id for r in results] == [22]
@pytest.mark.parametrize('cc,rid', [('us', 22), ('mx', 23)]) @pytest.mark.parametrize('cc,rid', [('us', 22), ('mx', 23)])
def test_restrict_by_country(self, apiobj, frontend, cc, rid): def test_restrict_by_country(self, apiobj, frontend, cc, rid):
apiobj.add_placex(place_id=22, class_='amenity', type='bank', apiobj.add_placex(place_id=22, class_='amenity', type='bank',
@@ -138,7 +134,6 @@ class TestNearSearch:
assert [r.place_id for r in results] == [rid] assert [r.place_id for r in results] == [rid]
@pytest.mark.parametrize('excluded,rid', [(22, 122), (122, 22)]) @pytest.mark.parametrize('excluded,rid', [(22, 122), (122, 22)])
def test_exclude_place_by_id(self, apiobj, frontend, excluded, rid): def test_exclude_place_by_id(self, apiobj, frontend, excluded, rid):
apiobj.add_placex(place_id=22, class_='amenity', type='bank', apiobj.add_placex(place_id=22, class_='amenity', type='bank',
@@ -148,13 +143,11 @@ class TestNearSearch:
centroid=(5.6001, 4.2994), centroid=(5.6001, 4.2994),
country_code='us') country_code='us')
results = run_search(apiobj, frontend, 0.1, [('amenity', 'bank')], results = run_search(apiobj, frontend, 0.1, [('amenity', 'bank')],
details=SearchDetails(excluded=[excluded])) details=SearchDetails(excluded=[excluded]))
assert [r.place_id for r in results] == [rid] assert [r.place_id for r in results] == [rid]
@pytest.mark.parametrize('layer,rids', [(napi.DataLayer.POI, [22]), @pytest.mark.parametrize('layer,rids', [(napi.DataLayer.POI, [22]),
(napi.DataLayer.MANMADE, [])]) (napi.DataLayer.MANMADE, [])])
def test_with_layer(self, apiobj, frontend, layer, rids): def test_with_layer(self, apiobj, frontend, layer, rids):

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for running the generic place searcher. Tests for running the generic place searcher.
@@ -14,12 +14,13 @@ import pytest
import nominatim_api as napi import nominatim_api as napi
from nominatim_api.types import SearchDetails from nominatim_api.types import SearchDetails
from nominatim_api.search.db_searches import PlaceSearch from nominatim_api.search.db_searches import PlaceSearch
from nominatim_api.search.db_search_fields import WeightedStrings, WeightedCategories,\ from nominatim_api.search.db_search_fields import WeightedStrings, WeightedCategories, \
FieldLookup, FieldRanking, RankedTokens FieldLookup, FieldRanking, RankedTokens
from nominatim_api.search.db_search_lookups import LookupAll, LookupAny, Restrict from nominatim_api.search.db_search_lookups import LookupAll, LookupAny, Restrict
APIOPTIONS = ['search'] APIOPTIONS = ['search']
def run_search(apiobj, frontend, global_penalty, lookup, ranking, count=2, def run_search(apiobj, frontend, global_penalty, lookup, ranking, count=2,
hnrs=[], pcs=[], ccodes=[], quals=[], hnrs=[], pcs=[], ccodes=[], quals=[],
details=SearchDetails()): details=SearchDetails()):
@@ -55,29 +56,27 @@ class TestNameOnlySearches:
def fill_database(self, apiobj): def fill_database(self, apiobj):
apiobj.add_placex(place_id=100, country_code='us', apiobj.add_placex(place_id=100, country_code='us',
centroid=(5.6, 4.3)) centroid=(5.6, 4.3))
apiobj.add_search_name(100, names=[1,2,10,11], country_code='us', apiobj.add_search_name(100, names=[1, 2, 10, 11], country_code='us',
centroid=(5.6, 4.3)) centroid=(5.6, 4.3))
apiobj.add_placex(place_id=101, country_code='mx', apiobj.add_placex(place_id=101, country_code='mx',
centroid=(-10.3, 56.9)) centroid=(-10.3, 56.9))
apiobj.add_search_name(101, names=[1,2,20,21], country_code='mx', apiobj.add_search_name(101, names=[1, 2, 20, 21], country_code='mx',
centroid=(-10.3, 56.9)) centroid=(-10.3, 56.9))
@pytest.mark.parametrize('lookup_type', [LookupAll, Restrict]) @pytest.mark.parametrize('lookup_type', [LookupAll, Restrict])
@pytest.mark.parametrize('rank,res', [([10], [100, 101]), @pytest.mark.parametrize('rank,res', [([10], [100, 101]),
([20], [101, 100])]) ([20], [101, 100])])
def test_lookup_all_match(self, apiobj, frontend, lookup_type, rank, res): def test_lookup_all_match(self, apiobj, frontend, lookup_type, rank, res):
lookup = FieldLookup('name_vector', [1,2], lookup_type) lookup = FieldLookup('name_vector', [1, 2], lookup_type)
ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, rank)]) ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, rank)])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking]) results = run_search(apiobj, frontend, 0.1, [lookup], [ranking])
assert [r.place_id for r in results] == res assert [r.place_id for r in results] == res
@pytest.mark.parametrize('lookup_type', [LookupAll, Restrict]) @pytest.mark.parametrize('lookup_type', [LookupAll, Restrict])
def test_lookup_all_partial_match(self, apiobj, frontend, lookup_type): def test_lookup_all_partial_match(self, apiobj, frontend, lookup_type):
lookup = FieldLookup('name_vector', [1,20], lookup_type) lookup = FieldLookup('name_vector', [1, 20], lookup_type)
ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [21])]) ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [21])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking]) results = run_search(apiobj, frontend, 0.1, [lookup], [ranking])
@@ -88,14 +87,13 @@ class TestNameOnlySearches:
@pytest.mark.parametrize('rank,res', [([10], [100, 101]), @pytest.mark.parametrize('rank,res', [([10], [100, 101]),
([20], [101, 100])]) ([20], [101, 100])])
def test_lookup_any_match(self, apiobj, frontend, rank, res): def test_lookup_any_match(self, apiobj, frontend, rank, res):
lookup = FieldLookup('name_vector', [11,21], LookupAny) lookup = FieldLookup('name_vector', [11, 21], LookupAny)
ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, rank)]) ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, rank)])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking]) results = run_search(apiobj, frontend, 0.1, [lookup], [ranking])
assert [r.place_id for r in results] == res assert [r.place_id for r in results] == res
def test_lookup_any_partial_match(self, apiobj, frontend): def test_lookup_any_partial_match(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [20], LookupAll) lookup = FieldLookup('name_vector', [20], LookupAll)
ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [21])]) ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [21])])
@@ -105,19 +103,17 @@ class TestNameOnlySearches:
assert len(results) == 1 assert len(results) == 1
assert results[0].place_id == 101 assert results[0].place_id == 101
@pytest.mark.parametrize('cc,res', [('us', 100), ('mx', 101)]) @pytest.mark.parametrize('cc,res', [('us', 100), ('mx', 101)])
def test_lookup_restrict_country(self, apiobj, frontend, cc, res): def test_lookup_restrict_country(self, apiobj, frontend, cc, res):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], ccodes=[cc]) results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], ccodes=[cc])
assert [r.place_id for r in results] == [res] assert [r.place_id for r in results] == [res]
def test_lookup_restrict_placeid(self, apiobj, frontend): def test_lookup_restrict_placeid(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], results = run_search(apiobj, frontend, 0.1, [lookup], [ranking],
@@ -125,7 +121,6 @@ class TestNameOnlySearches:
assert [r.place_id for r in results] == [100] assert [r.place_id for r in results] == [100]
@pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON, @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
napi.GeometryFormat.KML, napi.GeometryFormat.KML,
napi.GeometryFormat.SVG, napi.GeometryFormat.SVG,
@@ -139,7 +134,6 @@ class TestNameOnlySearches:
assert geom.name.lower() in results[0].geometry assert geom.name.lower() in results[0].geometry
@pytest.mark.parametrize('factor,npoints', [(0.0, 3), (1.0, 2)]) @pytest.mark.parametrize('factor,npoints', [(0.0, 3), (1.0, 2)])
def test_return_simplified_geometry(self, apiobj, frontend, factor, npoints): def test_return_simplified_geometry(self, apiobj, frontend, factor, npoints):
apiobj.add_placex(place_id=333, country_code='us', apiobj.add_placex(place_id=333, country_code='us',
@@ -162,7 +156,6 @@ class TestNameOnlySearches:
assert result.place_id == 333 assert result.place_id == 333
assert len(geom['coordinates']) == npoints assert len(geom['coordinates']) == npoints
@pytest.mark.parametrize('viewbox', ['5.0,4.0,6.0,5.0', '5.7,4.0,6.0,5.0']) @pytest.mark.parametrize('viewbox', ['5.0,4.0,6.0,5.0', '5.7,4.0,6.0,5.0'])
@pytest.mark.parametrize('wcount,rids', [(2, [100, 101]), (20000, [100])]) @pytest.mark.parametrize('wcount,rids', [(2, [100, 101]), (20000, [100])])
def test_prefer_viewbox(self, apiobj, frontend, viewbox, wcount, rids): def test_prefer_viewbox(self, apiobj, frontend, viewbox, wcount, rids):
@@ -177,18 +170,16 @@ class TestNameOnlySearches:
details=SearchDetails.from_kwargs({'viewbox': viewbox})) details=SearchDetails.from_kwargs({'viewbox': viewbox}))
assert [r.place_id for r in results] == rids assert [r.place_id for r in results] == rids
@pytest.mark.parametrize('viewbox', ['5.0,4.0,6.0,5.0', '5.55,4.27,5.62,4.31']) @pytest.mark.parametrize('viewbox', ['5.0,4.0,6.0,5.0', '5.55,4.27,5.62,4.31'])
def test_force_viewbox(self, apiobj, frontend, viewbox): def test_force_viewbox(self, apiobj, frontend, viewbox):
lookup = FieldLookup('name_vector', [1, 2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
details=SearchDetails.from_kwargs({'viewbox': viewbox, details = SearchDetails.from_kwargs({'viewbox': viewbox,
'bounded_viewbox': True}) 'bounded_viewbox': True})
results = run_search(apiobj, frontend, 0.1, [lookup], [], details=details) results = run_search(apiobj, frontend, 0.1, [lookup], [], details=details)
assert [r.place_id for r in results] == [100] assert [r.place_id for r in results] == [100]
def test_prefer_near(self, apiobj, frontend): def test_prefer_near(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [1, 2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [21])]) ranking = FieldRanking('name_vector', 0.4, [RankedTokens(0.0, [21])])
@@ -202,13 +193,12 @@ class TestNameOnlySearches:
results.sort(key=lambda r: -r.importance) results.sort(key=lambda r: -r.importance)
assert [r.place_id for r in results] == [100, 101] assert [r.place_id for r in results] == [100, 101]
@pytest.mark.parametrize('radius', [0.09, 0.11]) @pytest.mark.parametrize('radius', [0.09, 0.11])
def test_force_near(self, apiobj, frontend, radius): def test_force_near(self, apiobj, frontend, radius):
lookup = FieldLookup('name_vector', [1, 2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
details=SearchDetails.from_kwargs({'near': '5.6,4.3', details = SearchDetails.from_kwargs({'near': '5.6,4.3',
'near_radius': radius}) 'near_radius': radius})
results = run_search(apiobj, frontend, 0.1, [lookup], [], details=details) results = run_search(apiobj, frontend, 0.1, [lookup], [], details=details)
@@ -228,7 +218,7 @@ class TestStreetWithHousenumber:
apiobj.add_placex(place_id=1000, class_='highway', type='residential', apiobj.add_placex(place_id=1000, class_='highway', type='residential',
rank_search=26, rank_address=26, rank_search=26, rank_address=26,
country_code='es') country_code='es')
apiobj.add_search_name(1000, names=[1,2,10,11], apiobj.add_search_name(1000, names=[1, 2, 10, 11],
search_rank=26, address_rank=26, search_rank=26, address_rank=26,
country_code='es') country_code='es')
apiobj.add_placex(place_id=91, class_='place', type='house', apiobj.add_placex(place_id=91, class_='place', type='house',
@@ -243,26 +233,24 @@ class TestStreetWithHousenumber:
apiobj.add_placex(place_id=2000, class_='highway', type='residential', apiobj.add_placex(place_id=2000, class_='highway', type='residential',
rank_search=26, rank_address=26, rank_search=26, rank_address=26,
country_code='pt') country_code='pt')
apiobj.add_search_name(2000, names=[1,2,20,21], apiobj.add_search_name(2000, names=[1, 2, 20, 21],
search_rank=26, address_rank=26, search_rank=26, address_rank=26,
country_code='pt') country_code='pt')
@pytest.mark.parametrize('hnr,res', [('20', [91, 1]), ('20 a', [1]), @pytest.mark.parametrize('hnr,res', [('20', [91, 1]), ('20 a', [1]),
('21', [2]), ('22', [2, 92]), ('21', [2]), ('22', [2, 92]),
('24', [93]), ('25', [])]) ('24', [93]), ('25', [])])
def test_lookup_by_single_housenumber(self, apiobj, frontend, hnr, res): def test_lookup_by_single_housenumber(self, apiobj, frontend, hnr, res):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=[hnr]) results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=[hnr])
assert [r.place_id for r in results] == res + [1000, 2000] assert [r.place_id for r in results] == res + [1000, 2000]
@pytest.mark.parametrize('cc,res', [('es', [2, 1000]), ('pt', [92, 2000])]) @pytest.mark.parametrize('cc,res', [('es', [2, 1000]), ('pt', [92, 2000])])
def test_lookup_with_country_restriction(self, apiobj, frontend, cc, res): def test_lookup_with_country_restriction(self, apiobj, frontend, cc, res):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'], results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'],
@@ -270,9 +258,8 @@ class TestStreetWithHousenumber:
assert [r.place_id for r in results] == res assert [r.place_id for r in results] == res
def test_lookup_exclude_housenumber_placeid(self, apiobj, frontend): def test_lookup_exclude_housenumber_placeid(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'], results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'],
@@ -280,9 +267,8 @@ class TestStreetWithHousenumber:
assert [r.place_id for r in results] == [2, 1000, 2000] assert [r.place_id for r in results] == [2, 1000, 2000]
def test_lookup_exclude_street_placeid(self, apiobj, frontend): def test_lookup_exclude_street_placeid(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'], results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'],
@@ -290,9 +276,8 @@ class TestStreetWithHousenumber:
assert [r.place_id for r in results] == [2, 92, 2000] assert [r.place_id for r in results] == [2, 92, 2000]
def test_lookup_only_house_qualifier(self, apiobj, frontend): def test_lookup_only_house_qualifier(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'], results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'],
@@ -300,9 +285,8 @@ class TestStreetWithHousenumber:
assert [r.place_id for r in results] == [2, 92] assert [r.place_id for r in results] == [2, 92]
def test_lookup_only_street_qualifier(self, apiobj, frontend): def test_lookup_only_street_qualifier(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'], results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'],
@@ -310,10 +294,9 @@ class TestStreetWithHousenumber:
assert [r.place_id for r in results] == [1000, 2000] assert [r.place_id for r in results] == [1000, 2000]
@pytest.mark.parametrize('rank,found', [(26, True), (27, False), (30, False)]) @pytest.mark.parametrize('rank,found', [(26, True), (27, False), (30, False)])
def test_lookup_min_rank(self, apiobj, frontend, rank, found): def test_lookup_min_rank(self, apiobj, frontend, rank, found):
lookup = FieldLookup('name_vector', [1,2], LookupAll) lookup = FieldLookup('name_vector', [1, 2], LookupAll)
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'], results = run_search(apiobj, frontend, 0.1, [lookup], [ranking], hnrs=['22'],
@@ -321,7 +304,6 @@ class TestStreetWithHousenumber:
assert [r.place_id for r in results] == ([2, 92, 1000, 2000] if found else [2, 92]) assert [r.place_id for r in results] == ([2, 92, 1000, 2000] if found else [2, 92])
@pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON, @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
napi.GeometryFormat.KML, napi.GeometryFormat.KML,
napi.GeometryFormat.SVG, napi.GeometryFormat.SVG,
@@ -343,7 +325,7 @@ def test_very_large_housenumber(apiobj, frontend):
apiobj.add_placex(place_id=2000, class_='highway', type='residential', apiobj.add_placex(place_id=2000, class_='highway', type='residential',
rank_search=26, rank_address=26, rank_search=26, rank_address=26,
country_code='pt') country_code='pt')
apiobj.add_search_name(2000, names=[1,2], apiobj.add_search_name(2000, names=[1, 2],
search_rank=26, address_rank=26, search_rank=26, address_rank=26,
country_code='pt') country_code='pt')
@@ -405,7 +387,6 @@ class TestInterpolations:
centroid=(10.0, 10.00001), centroid=(10.0, 10.00001),
geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)') geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
@pytest.mark.parametrize('hnr,res', [('21', [992]), ('22', []), ('23', [991])]) @pytest.mark.parametrize('hnr,res', [('21', [992]), ('22', []), ('23', [991])])
def test_lookup_housenumber(self, apiobj, frontend, hnr, res): def test_lookup_housenumber(self, apiobj, frontend, hnr, res):
lookup = FieldLookup('name_vector', [111], LookupAll) lookup = FieldLookup('name_vector', [111], LookupAll)
@@ -414,7 +395,6 @@ class TestInterpolations:
assert [r.place_id for r in results] == res + [990] assert [r.place_id for r in results] == res + [990]
@pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON, @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
napi.GeometryFormat.KML, napi.GeometryFormat.KML,
napi.GeometryFormat.SVG, napi.GeometryFormat.SVG,
@@ -429,7 +409,6 @@ class TestInterpolations:
assert geom.name.lower() in results[0].geometry assert geom.name.lower() in results[0].geometry
class TestTiger: class TestTiger:
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
@@ -453,7 +432,6 @@ class TestTiger:
centroid=(10.0, 10.00001), centroid=(10.0, 10.00001),
geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)') geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
@pytest.mark.parametrize('hnr,res', [('21', [992]), ('22', []), ('23', [991])]) @pytest.mark.parametrize('hnr,res', [('21', [992]), ('22', []), ('23', [991])])
def test_lookup_housenumber(self, apiobj, frontend, hnr, res): def test_lookup_housenumber(self, apiobj, frontend, hnr, res):
lookup = FieldLookup('name_vector', [111], LookupAll) lookup = FieldLookup('name_vector', [111], LookupAll)
@@ -462,7 +440,6 @@ class TestTiger:
assert [r.place_id for r in results] == res + [990] assert [r.place_id for r in results] == res + [990]
@pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON, @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
napi.GeometryFormat.KML, napi.GeometryFormat.KML,
napi.GeometryFormat.SVG, napi.GeometryFormat.SVG,
@@ -513,15 +490,15 @@ class TestLayersRank30:
importance=0.0005, importance=0.0005,
address_rank=0, search_rank=30) address_rank=0, search_rank=30)
@pytest.mark.parametrize('layer,res',
@pytest.mark.parametrize('layer,res', [(napi.DataLayer.ADDRESS, [223]), [(napi.DataLayer.ADDRESS, [223]),
(napi.DataLayer.POI, [224]), (napi.DataLayer.POI, [224]),
(napi.DataLayer.ADDRESS | napi.DataLayer.POI, [223, 224]), (napi.DataLayer.ADDRESS | napi.DataLayer.POI, [223, 224]),
(napi.DataLayer.MANMADE, [225]), (napi.DataLayer.MANMADE, [225]),
(napi.DataLayer.RAILWAY, [226]), (napi.DataLayer.RAILWAY, [226]),
(napi.DataLayer.NATURAL, [227]), (napi.DataLayer.NATURAL, [227]),
(napi.DataLayer.MANMADE | napi.DataLayer.NATURAL, [225, 227]), (napi.DataLayer.MANMADE | napi.DataLayer.NATURAL, [225, 227]),
(napi.DataLayer.MANMADE | napi.DataLayer.RAILWAY, [225, 226])]) (napi.DataLayer.MANMADE | napi.DataLayer.RAILWAY, [225, 226])])
def test_layers_rank30(self, apiobj, frontend, layer, res): def test_layers_rank30(self, apiobj, frontend, layer, res):
lookup = FieldLookup('name_vector', [34], LookupAny) lookup = FieldLookup('name_vector', [34], LookupAny)

View File

@@ -2,14 +2,13 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for running the POI searcher. Tests for running the POI searcher.
""" """
import pytest import pytest
import nominatim_api as napi
from nominatim_api.types import SearchDetails from nominatim_api.types import SearchDetails
from nominatim_api.search.db_searches import PoiSearch from nominatim_api.search.db_searches import PoiSearch
from nominatim_api.search.db_search_fields import WeightedStrings, WeightedCategories from nominatim_api.search.db_search_fields import WeightedStrings, WeightedCategories
@@ -84,14 +83,12 @@ class TestPoiSearchWithRestrictions:
else: else:
self.args = {'near': '34.3, 56.100021', 'near_radius': 0.001} self.args = {'near': '34.3, 56.100021', 'near_radius': 0.001}
def test_unrestricted(self, apiobj, frontend): def test_unrestricted(self, apiobj, frontend):
results = run_search(apiobj, frontend, 0.1, [('highway', 'bus_stop')], [0.5], results = run_search(apiobj, frontend, 0.1, [('highway', 'bus_stop')], [0.5],
details=SearchDetails.from_kwargs(self.args)) details=SearchDetails.from_kwargs(self.args))
assert [r.place_id for r in results] == [1, 2] assert [r.place_id for r in results] == [1, 2]
def test_restict_country(self, apiobj, frontend): def test_restict_country(self, apiobj, frontend):
results = run_search(apiobj, frontend, 0.1, [('highway', 'bus_stop')], [0.5], results = run_search(apiobj, frontend, 0.1, [('highway', 'bus_stop')], [0.5],
ccodes=['de', 'nz'], ccodes=['de', 'nz'],
@@ -99,7 +96,6 @@ class TestPoiSearchWithRestrictions:
assert [r.place_id for r in results] == [2] assert [r.place_id for r in results] == [2]
def test_restrict_by_viewbox(self, apiobj, frontend): def test_restrict_by_viewbox(self, apiobj, frontend):
args = {'bounded_viewbox': True, 'viewbox': '34.299,56.0,34.3001,56.10001'} args = {'bounded_viewbox': True, 'viewbox': '34.299,56.0,34.3001,56.10001'}
args.update(self.args) args.update(self.args)

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for running the postcode searcher. Tests for running the postcode searcher.
@@ -15,6 +15,7 @@ from nominatim_api.search.db_searches import PostcodeSearch
from nominatim_api.search.db_search_fields import WeightedStrings, FieldLookup, \ from nominatim_api.search.db_search_fields import WeightedStrings, FieldLookup, \
FieldRanking, RankedTokens FieldRanking, RankedTokens
def run_search(apiobj, frontend, global_penalty, pcs, pc_penalties=None, def run_search(apiobj, frontend, global_penalty, pcs, pc_penalties=None,
ccodes=[], lookup=[], ranking=[], details=SearchDetails()): ccodes=[], lookup=[], ranking=[], details=SearchDetails()):
if pc_penalties is None: if pc_penalties is None:
@@ -85,26 +86,24 @@ class TestPostcodeSearchWithAddress:
apiobj.add_placex(place_id=1000, class_='place', type='village', apiobj.add_placex(place_id=1000, class_='place', type='village',
rank_search=22, rank_address=22, rank_search=22, rank_address=22,
country_code='ch') country_code='ch')
apiobj.add_search_name(1000, names=[1,2,10,11], apiobj.add_search_name(1000, names=[1, 2, 10, 11],
search_rank=22, address_rank=22, search_rank=22, address_rank=22,
country_code='ch') country_code='ch')
apiobj.add_placex(place_id=2000, class_='place', type='village', apiobj.add_placex(place_id=2000, class_='place', type='village',
rank_search=22, rank_address=22, rank_search=22, rank_address=22,
country_code='pl') country_code='pl')
apiobj.add_search_name(2000, names=[1,2,20,21], apiobj.add_search_name(2000, names=[1, 2, 20, 21],
search_rank=22, address_rank=22, search_rank=22, address_rank=22,
country_code='pl') country_code='pl')
def test_lookup_both(self, apiobj, frontend): def test_lookup_both(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [1,2], 'restrict') lookup = FieldLookup('name_vector', [1, 2], 'restrict')
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, ['12345'], lookup=[lookup], ranking=[ranking]) results = run_search(apiobj, frontend, 0.1, ['12345'], lookup=[lookup], ranking=[ranking])
assert [r.place_id for r in results] == [100, 101] assert [r.place_id for r in results] == [100, 101]
def test_restrict_by_name(self, apiobj, frontend): def test_restrict_by_name(self, apiobj, frontend):
lookup = FieldLookup('name_vector', [10], 'restrict') lookup = FieldLookup('name_vector', [10], 'restrict')
@@ -112,11 +111,10 @@ class TestPostcodeSearchWithAddress:
assert [r.place_id for r in results] == [100] assert [r.place_id for r in results] == [100]
@pytest.mark.parametrize('coord,place_id', [((16.5, 5), 100), @pytest.mark.parametrize('coord,place_id', [((16.5, 5), 100),
((-45.1, 7.004), 101)]) ((-45.1, 7.004), 101)])
def test_lookup_near(self, apiobj, frontend, coord, place_id): def test_lookup_near(self, apiobj, frontend, coord, place_id):
lookup = FieldLookup('name_vector', [1,2], 'restrict') lookup = FieldLookup('name_vector', [1, 2], 'restrict')
ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
results = run_search(apiobj, frontend, 0.1, ['12345'], results = run_search(apiobj, frontend, 0.1, ['12345'],
@@ -126,7 +124,6 @@ class TestPostcodeSearchWithAddress:
assert [r.place_id for r in results] == [place_id] assert [r.place_id for r in results] == [place_id]
@pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON, @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
napi.GeometryFormat.KML, napi.GeometryFormat.KML,
napi.GeometryFormat.SVG, napi.GeometryFormat.SVG,
@@ -138,18 +135,16 @@ class TestPostcodeSearchWithAddress:
assert results assert results
assert all(geom.name.lower() in r.geometry for r in results) assert all(geom.name.lower() in r.geometry for r in results)
@pytest.mark.parametrize('viewbox, rids', [('-46,6,-44,8', [101, 100]),
@pytest.mark.parametrize('viewbox, rids', [('-46,6,-44,8', [101,100]), ('16,4,18,6', [100, 101])])
('16,4,18,6', [100,101])])
def test_prefer_viewbox(self, apiobj, frontend, viewbox, rids): def test_prefer_viewbox(self, apiobj, frontend, viewbox, rids):
results = run_search(apiobj, frontend, 0.1, ['12345'], results = run_search(apiobj, frontend, 0.1, ['12345'],
details=SearchDetails.from_kwargs({'viewbox': viewbox})) details=SearchDetails.from_kwargs({'viewbox': viewbox}))
assert [r.place_id for r in results] == rids assert [r.place_id for r in results] == rids
@pytest.mark.parametrize('viewbox, rid', [('-46,6,-44,8', 101), @pytest.mark.parametrize('viewbox, rid', [('-46,6,-44,8', 101),
('16,4,18,6', 100)]) ('16,4,18,6', 100)])
def test_restrict_to_viewbox(self, apiobj, frontend, viewbox, rid): def test_restrict_to_viewbox(self, apiobj, frontend, viewbox, rid):
results = run_search(apiobj, frontend, 0.1, ['12345'], results = run_search(apiobj, frontend, 0.1, ['12345'],
details=SearchDetails.from_kwargs({'viewbox': viewbox, details=SearchDetails.from_kwargs({'viewbox': viewbox,
@@ -157,7 +152,6 @@ class TestPostcodeSearchWithAddress:
assert [r.place_id for r in results] == [rid] assert [r.place_id for r in results] == [rid]
@pytest.mark.parametrize('coord,rids', [((17.05, 5), [100, 101]), @pytest.mark.parametrize('coord,rids', [((17.05, 5), [100, 101]),
((-45, 7.1), [101, 100])]) ((-45, 7.1), [101, 100])])
def test_prefer_near(self, apiobj, frontend, coord, rids): def test_prefer_near(self, apiobj, frontend, coord, rids):
@@ -166,7 +160,6 @@ class TestPostcodeSearchWithAddress:
assert [r.place_id for r in results] == rids assert [r.place_id for r in results] == rids
@pytest.mark.parametrize('pid,rid', [(100, 101), (101, 100)]) @pytest.mark.parametrize('pid,rid', [(100, 101), (101, 100)])
def test_exclude(self, apiobj, frontend, pid, rid): def test_exclude(self, apiobj, frontend, pid, rid):
results = run_search(apiobj, frontend, 0.1, ['12345'], results = run_search(apiobj, frontend, 0.1, ['12345'],

View File

@@ -2,15 +2,19 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Test for creation of token assignments from tokenized queries. Test for creation of token assignments from tokenized queries.
""" """
import pytest import pytest
from nominatim_api.search.query import QueryStruct, Phrase, PhraseType, BreakType, TokenType, TokenRange, Token from nominatim_api.search.query import QueryStruct, Phrase, TokenRange, Token
from nominatim_api.search.token_assignment import yield_token_assignments, TokenAssignment, PENALTY_TOKENCHANGE import nominatim_api.search.query as qmod
from nominatim_api.search.token_assignment import (yield_token_assignments,
TokenAssignment,
PENALTY_TOKENCHANGE)
class MyToken(Token): class MyToken(Token):
def get_category(self): def get_category(self):
@@ -24,7 +28,7 @@ def make_query(*args):
for btype, ptype, _ in args[1:]: for btype, ptype, _ in args[1:]:
q.add_node(btype, ptype) q.add_node(btype, ptype)
q.add_node(BreakType.END, PhraseType.NONE) q.add_node(qmod.BREAK_END, qmod.PHRASE_ANY)
for start, t in enumerate(args): for start, t in enumerate(args):
for end, ttype in t[2]: for end, ttype in t[2]:
@@ -43,52 +47,52 @@ def check_assignments(actual, *expected):
def test_query_with_missing_tokens(): def test_query_with_missing_tokens():
q = QueryStruct([Phrase(PhraseType.NONE, '')]) q = QueryStruct([Phrase(qmod.PHRASE_ANY, '')])
q.add_node(BreakType.END, PhraseType.NONE) q.add_node(qmod.BREAK_END, qmod.PHRASE_ANY)
assert list(yield_token_assignments(q)) == [] assert list(yield_token_assignments(q)) == []
def test_one_word_query(): def test_one_word_query():
q = make_query((BreakType.START, PhraseType.NONE, q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY,
[(1, TokenType.PARTIAL), [(1, qmod.TOKEN_PARTIAL),
(1, TokenType.WORD), (1, qmod.TOKEN_WORD),
(1, TokenType.HOUSENUMBER)])) (1, qmod.TOKEN_HOUSENUMBER)]))
res = list(yield_token_assignments(q)) res = list(yield_token_assignments(q))
assert res == [TokenAssignment(name=TokenRange(0, 1))] assert res == [TokenAssignment(name=TokenRange(0, 1))]
def test_single_postcode(): def test_single_postcode():
q = make_query((BreakType.START, PhraseType.NONE, q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY,
[(1, TokenType.POSTCODE)])) [(1, qmod.TOKEN_POSTCODE)]))
res = list(yield_token_assignments(q)) res = list(yield_token_assignments(q))
assert res == [TokenAssignment(postcode=TokenRange(0, 1))] assert res == [TokenAssignment(postcode=TokenRange(0, 1))]
def test_single_country_name(): def test_single_country_name():
q = make_query((BreakType.START, PhraseType.NONE, q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY,
[(1, TokenType.COUNTRY)])) [(1, qmod.TOKEN_COUNTRY)]))
res = list(yield_token_assignments(q)) res = list(yield_token_assignments(q))
assert res == [TokenAssignment(country=TokenRange(0, 1))] assert res == [TokenAssignment(country=TokenRange(0, 1))]
def test_single_word_poi_search(): def test_single_word_poi_search():
q = make_query((BreakType.START, PhraseType.NONE, q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY,
[(1, TokenType.NEAR_ITEM), [(1, qmod.TOKEN_NEAR_ITEM),
(1, TokenType.QUALIFIER)])) (1, qmod.TOKEN_QUALIFIER)]))
res = list(yield_token_assignments(q)) res = list(yield_token_assignments(q))
assert res == [TokenAssignment(near_item=TokenRange(0, 1))] assert res == [TokenAssignment(near_item=TokenRange(0, 1))]
@pytest.mark.parametrize('btype', [BreakType.WORD, BreakType.PART, BreakType.TOKEN]) @pytest.mark.parametrize('btype', [qmod.BREAK_WORD, qmod.BREAK_PART, qmod.BREAK_TOKEN])
def test_multiple_simple_words(btype): def test_multiple_simple_words(btype):
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(btype, PhraseType.NONE, [(2, TokenType.PARTIAL)]), (btype, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]),
(btype, PhraseType.NONE, [(3, TokenType.PARTIAL)])) (btype, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]))
penalty = PENALTY_TOKENCHANGE[btype] penalty = PENALTY_TOKENCHANGE[btype]
@@ -101,13 +105,12 @@ def test_multiple_simple_words(btype):
TokenAssignment(penalty=penalty, name=TokenRange(1, 3), TokenAssignment(penalty=penalty, name=TokenRange(1, 3),
address=[TokenRange(0, 1)]), address=[TokenRange(0, 1)]),
TokenAssignment(penalty=penalty, name=TokenRange(2, 3), TokenAssignment(penalty=penalty, name=TokenRange(2, 3),
address=[TokenRange(0, 2)]) address=[TokenRange(0, 2)]))
)
def test_multiple_words_respect_phrase_break(): def test_multiple_words_respect_phrase_break():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)])) (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(0, 1), TokenAssignment(name=TokenRange(0, 1),
@@ -117,8 +120,8 @@ def test_multiple_words_respect_phrase_break():
def test_housenumber_and_street(): def test_housenumber_and_street():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_HOUSENUMBER)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)])) (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(1, 2), TokenAssignment(name=TokenRange(1, 2),
@@ -128,8 +131,8 @@ def test_housenumber_and_street():
def test_housenumber_and_street_backwards(): def test_housenumber_and_street_backwards():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.HOUSENUMBER)])) (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(2, qmod.TOKEN_HOUSENUMBER)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(0, 1), TokenAssignment(name=TokenRange(0, 1),
@@ -139,10 +142,10 @@ def test_housenumber_and_street_backwards():
def test_housenumber_and_postcode(): def test_housenumber_and_postcode():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.HOUSENUMBER)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.POSTCODE)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(4, qmod.TOKEN_POSTCODE)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=pytest.approx(0.3), TokenAssignment(penalty=pytest.approx(0.3),
@@ -155,11 +158,12 @@ def test_housenumber_and_postcode():
address=[TokenRange(0, 1), TokenRange(2, 3)], address=[TokenRange(0, 1), TokenRange(2, 3)],
postcode=TokenRange(3, 4))) postcode=TokenRange(3, 4)))
def test_postcode_and_housenumber(): def test_postcode_and_housenumber():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.POSTCODE)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_POSTCODE)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.HOUSENUMBER)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(4, qmod.TOKEN_HOUSENUMBER)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=pytest.approx(0.3), TokenAssignment(penalty=pytest.approx(0.3),
@@ -174,54 +178,54 @@ def test_postcode_and_housenumber():
def test_country_housenumber_postcode(): def test_country_housenumber_postcode():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.COUNTRY)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_COUNTRY)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.HOUSENUMBER)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.POSTCODE)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(4, qmod.TOKEN_POSTCODE)]))
check_assignments(yield_token_assignments(q)) check_assignments(yield_token_assignments(q))
@pytest.mark.parametrize('ttype', [TokenType.POSTCODE, TokenType.COUNTRY, @pytest.mark.parametrize('ttype', [qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY,
TokenType.NEAR_ITEM, TokenType.QUALIFIER]) qmod.TOKEN_NEAR_ITEM, qmod.TOKEN_QUALIFIER])
def test_housenumber_with_only_special_terms(ttype): def test_housenumber_with_only_special_terms(ttype):
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(2, ttype)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, ttype)]))
check_assignments(yield_token_assignments(q)) check_assignments(yield_token_assignments(q))
@pytest.mark.parametrize('ttype', [TokenType.POSTCODE, TokenType.HOUSENUMBER, TokenType.COUNTRY]) @pytest.mark.parametrize('ttype', [qmod.TOKEN_POSTCODE, qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_COUNTRY])
def test_multiple_special_tokens(ttype): def test_multiple_special_tokens(ttype):
q = make_query((BreakType.START, PhraseType.NONE, [(1, ttype)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, ttype)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]), (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(3, ttype)])) (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(3, ttype)]))
check_assignments(yield_token_assignments(q)) check_assignments(yield_token_assignments(q))
def test_housenumber_many_phrases(): def test_housenumber_many_phrases():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]), (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(3, TokenType.PARTIAL)]), (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(4, TokenType.HOUSENUMBER)]), (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(4, qmod.TOKEN_HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(5, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(5, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, TokenAssignment(penalty=0.1,
name=TokenRange(4, 5), name=TokenRange(4, 5),
housenumber=TokenRange(3, 4),\ housenumber=TokenRange(3, 4),
address=[TokenRange(0, 1), TokenRange(1, 2), address=[TokenRange(0, 1), TokenRange(1, 2),
TokenRange(2, 3)]), TokenRange(2, 3)]),
TokenAssignment(penalty=0.1, TokenAssignment(penalty=0.1,
housenumber=TokenRange(3, 4),\ housenumber=TokenRange(3, 4),
address=[TokenRange(0, 1), TokenRange(1, 2), address=[TokenRange(0, 1), TokenRange(1, 2),
TokenRange(2, 3), TokenRange(4, 5)])) TokenRange(2, 3), TokenRange(4, 5)]))
def test_country_at_beginning(): def test_country_at_beginning():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.COUNTRY)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_COUNTRY)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(1, 2), TokenAssignment(penalty=0.1, name=TokenRange(1, 2),
@@ -229,8 +233,8 @@ def test_country_at_beginning():
def test_country_at_end(): def test_country_at_end():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.COUNTRY)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_COUNTRY)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(0, 1), TokenAssignment(penalty=0.1, name=TokenRange(0, 1),
@@ -238,16 +242,16 @@ def test_country_at_end():
def test_country_in_middle(): def test_country_in_middle():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.COUNTRY)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_COUNTRY)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q)) check_assignments(yield_token_assignments(q))
def test_postcode_with_designation(): def test_postcode_with_designation():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.POSTCODE)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_POSTCODE)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)])) (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(1, 2), TokenAssignment(penalty=0.1, name=TokenRange(1, 2),
@@ -257,8 +261,8 @@ def test_postcode_with_designation():
def test_postcode_with_designation_backwards(): def test_postcode_with_designation_backwards():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.POSTCODE)])) (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(2, qmod.TOKEN_POSTCODE)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(0, 1), TokenAssignment(name=TokenRange(0, 1),
@@ -268,8 +272,8 @@ def test_postcode_with_designation_backwards():
def test_near_item_at_beginning(): def test_near_item_at_beginning():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.NEAR_ITEM)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_NEAR_ITEM)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(1, 2), TokenAssignment(penalty=0.1, name=TokenRange(1, 2),
@@ -277,8 +281,8 @@ def test_near_item_at_beginning():
def test_near_item_at_end(): def test_near_item_at_end():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.NEAR_ITEM)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_NEAR_ITEM)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(0, 1), TokenAssignment(penalty=0.1, name=TokenRange(0, 1),
@@ -286,18 +290,17 @@ def test_near_item_at_end():
def test_near_item_in_middle(): def test_near_item_in_middle():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.NEAR_ITEM)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_NEAR_ITEM)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q)) check_assignments(yield_token_assignments(q))
def test_qualifier_at_beginning(): def test_qualifier_at_beginning():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.QUALIFIER)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(1, 3), TokenAssignment(penalty=0.1, name=TokenRange(1, 3),
@@ -308,12 +311,11 @@ def test_qualifier_at_beginning():
def test_qualifier_after_name(): def test_qualifier_after_name():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.QUALIFIER)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.PARTIAL)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(4, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(5, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(5, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q), check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.2, name=TokenRange(0, 2), TokenAssignment(penalty=0.2, name=TokenRange(0, 2),
@@ -325,27 +327,26 @@ def test_qualifier_after_name():
def test_qualifier_before_housenumber(): def test_qualifier_before_housenumber():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.QUALIFIER)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.HOUSENUMBER)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q)) check_assignments(yield_token_assignments(q))
def test_qualifier_after_housenumber(): def test_qualifier_after_housenumber():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.QUALIFIER)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(2, qmod.TOKEN_QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)])) (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q)) check_assignments(yield_token_assignments(q))
def test_qualifier_in_middle_of_phrase(): def test_qualifier_in_middle_of_phrase():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), q = make_query((qmod.BREAK_START, qmod.PHRASE_ANY, [(1, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]), (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(2, qmod.TOKEN_PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.QUALIFIER)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(3, qmod.TOKEN_QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.PARTIAL)]), (qmod.BREAK_WORD, qmod.PHRASE_ANY, [(4, qmod.TOKEN_PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(5, TokenType.PARTIAL)])) (qmod.BREAK_PHRASE, qmod.PHRASE_ANY, [(5, qmod.TOKEN_PARTIAL)]))
check_assignments(yield_token_assignments(q)) check_assignments(yield_token_assignments(q))

View File

@@ -2,12 +2,11 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for enhanced connection class for API functions. Tests for enhanced connection class for API functions.
""" """
from pathlib import Path
import pytest import pytest
import sqlalchemy as sa import sqlalchemy as sa
@@ -76,7 +75,7 @@ async def test_get_db_property_existing(api):
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_get_db_property_existing(api): async def test_get_db_property_bad_name(api):
async with api.begin() as conn: async with api.begin() as conn:
with pytest.raises(ValueError): with pytest.raises(ValueError):
await conn.get_db_property('dfkgjd.rijg') await conn.get_db_property('dfkgjd.rijg')

View File

@@ -2,20 +2,20 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for the deletable v1 API call. Tests for the deletable v1 API call.
""" """
import json import json
from pathlib import Path
import pytest import pytest
from fake_adaptor import FakeAdaptor, FakeError, FakeResponse from fake_adaptor import FakeAdaptor
import nominatim_api.v1.server_glue as glue import nominatim_api.v1.server_glue as glue
class TestDeletableEndPoint: class TestDeletableEndPoint:
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
@@ -25,14 +25,13 @@ class TestDeletableEndPoint:
content=[(345, 'N', 'boundary', 'administrative'), content=[(345, 'N', 'boundary', 'administrative'),
(781, 'R', 'landuse', 'wood'), (781, 'R', 'landuse', 'wood'),
(781, 'R', 'landcover', 'grass')]) (781, 'R', 'landcover', 'grass')])
table_factory('placex', table_factory(
definition="""place_id bigint, osm_id bigint, osm_type char(1), 'placex',
class text, type text, name HSTORE, country_code char(2)""", definition="""place_id bigint, osm_id bigint, osm_type char(1),
content=[(1, 345, 'N', 'boundary', 'administrative', {'old_name': 'Former'}, 'ab'), class text, type text, name HSTORE, country_code char(2)""",
(2, 781, 'R', 'landuse', 'wood', {'name': 'Wood'}, 'cd'), content=[(1, 345, 'N', 'boundary', 'administrative', {'old_name': 'Former'}, 'ab'),
(3, 781, 'R', 'landcover', 'grass', None, 'cd')]) (2, 781, 'R', 'landuse', 'wood', {'name': 'Wood'}, 'cd'),
(3, 781, 'R', 'landcover', 'grass', None, 'cd')])
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_deletable(self, api): async def test_deletable(self, api):

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for details API call. Tests for details API call.
@@ -13,23 +13,24 @@ import pytest
import nominatim_api as napi import nominatim_api as napi
@pytest.mark.parametrize('idobj', (napi.PlaceID(332), napi.OsmID('W', 4), @pytest.mark.parametrize('idobj', (napi.PlaceID(332), napi.OsmID('W', 4),
napi.OsmID('W', 4, 'highway'))) napi.OsmID('W', 4, 'highway')))
def test_lookup_in_placex(apiobj, frontend, idobj): def test_lookup_in_placex(apiobj, frontend, idobj):
import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0) import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0)
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', class_='highway', type='residential',
name={'name': 'Road'}, address={'city': 'Barrow'}, name={'name': 'Road'}, address={'city': 'Barrow'},
extratags={'surface': 'paved'}, extratags={'surface': 'paved'},
parent_place_id=34, linked_place_id=55, parent_place_id=34, linked_place_id=55,
admin_level=15, country_code='gb', admin_level=15, country_code='gb',
housenumber='4', housenumber='4',
postcode='34425', wikipedia='en:Faa', postcode='34425', wikipedia='en:Faa',
rank_search=27, rank_address=26, rank_search=27, rank_address=26,
importance=0.01, importance=0.01,
centroid=(23, 34), centroid=(23, 34),
indexed_date=import_date, indexed_date=import_date,
geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)') geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)')
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.details(idobj) result = api.details(idobj)
@@ -73,12 +74,12 @@ def test_lookup_in_placex(apiobj, frontend, idobj):
def test_lookup_in_placex_minimal_info(apiobj, frontend): def test_lookup_in_placex_minimal_info(apiobj, frontend):
import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0) import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0)
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', class_='highway', type='residential',
admin_level=15, admin_level=15,
rank_search=27, rank_address=26, rank_search=27, rank_address=26,
centroid=(23, 34), centroid=(23, 34),
indexed_date=import_date, indexed_date=import_date,
geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)') geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)')
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.details(napi.PlaceID(332)) result = api.details(napi.PlaceID(332))
@@ -131,9 +132,9 @@ def test_lookup_in_placex_with_geometry(apiobj, frontend):
def test_lookup_placex_with_address_details(apiobj, frontend): def test_lookup_placex_with_address_details(apiobj, frontend):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', country_code='pl',
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
apiobj.add_address_placex(332, fromarea=False, isaddress=False, apiobj.add_address_placex(332, fromarea=False, isaddress=False,
distance=0.0034, distance=0.0034,
place_id=1000, osm_type='N', osm_id=3333, place_id=1000, osm_type='N', osm_id=3333,
@@ -178,9 +179,9 @@ def test_lookup_placex_with_address_details(apiobj, frontend):
def test_lookup_place_with_linked_places_none_existing(apiobj, frontend): def test_lookup_place_with_linked_places_none_existing(apiobj, frontend):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', linked_place_id=45, country_code='pl', linked_place_id=45,
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.details(napi.PlaceID(332), linked_places=True) result = api.details(napi.PlaceID(332), linked_places=True)
@@ -190,17 +191,17 @@ def test_lookup_place_with_linked_places_none_existing(apiobj, frontend):
def test_lookup_place_with_linked_places_existing(apiobj, frontend): def test_lookup_place_with_linked_places_existing(apiobj, frontend):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', linked_place_id=45, country_code='pl', linked_place_id=45,
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
apiobj.add_placex(place_id=1001, osm_type='W', osm_id=5, apiobj.add_placex(place_id=1001, osm_type='W', osm_id=5,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', linked_place_id=332, country_code='pl', linked_place_id=332,
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
apiobj.add_placex(place_id=1002, osm_type='W', osm_id=6, apiobj.add_placex(place_id=1002, osm_type='W', osm_id=6,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', linked_place_id=332, country_code='pl', linked_place_id=332,
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.details(napi.PlaceID(332), linked_places=True) result = api.details(napi.PlaceID(332), linked_places=True)
@@ -221,9 +222,9 @@ def test_lookup_place_with_linked_places_existing(apiobj, frontend):
def test_lookup_place_with_parented_places_not_existing(apiobj, frontend): def test_lookup_place_with_parented_places_not_existing(apiobj, frontend):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', parent_place_id=45, country_code='pl', parent_place_id=45,
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.details(napi.PlaceID(332), parented_places=True) result = api.details(napi.PlaceID(332), parented_places=True)
@@ -233,17 +234,17 @@ def test_lookup_place_with_parented_places_not_existing(apiobj, frontend):
def test_lookup_place_with_parented_places_existing(apiobj, frontend): def test_lookup_place_with_parented_places_existing(apiobj, frontend):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', parent_place_id=45, country_code='pl', parent_place_id=45,
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
apiobj.add_placex(place_id=1001, osm_type='N', osm_id=5, apiobj.add_placex(place_id=1001, osm_type='N', osm_id=5,
class_='place', type='house', housenumber='23', class_='place', type='house', housenumber='23',
country_code='pl', parent_place_id=332, country_code='pl', parent_place_id=332,
rank_search=30, rank_address=30) rank_search=30, rank_address=30)
apiobj.add_placex(place_id=1002, osm_type='W', osm_id=6, apiobj.add_placex(place_id=1002, osm_type='W', osm_id=6,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', parent_place_id=332, country_code='pl', parent_place_id=332,
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.details(napi.PlaceID(332), parented_places=True) result = api.details(napi.PlaceID(332), parented_places=True)
@@ -332,9 +333,9 @@ def test_lookup_osmline_with_address_details(apiobj, frontend):
startnumber=2, endnumber=4, step=1, startnumber=2, endnumber=4, step=1,
parent_place_id=332) parent_place_id=332)
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='pl', country_code='pl',
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
apiobj.add_address_placex(332, fromarea=False, isaddress=False, apiobj.add_address_placex(332, fromarea=False, isaddress=False,
distance=0.0034, distance=0.0034,
place_id=1000, osm_type='N', osm_id=3333, place_id=1000, osm_type='N', osm_id=3333,
@@ -432,9 +433,9 @@ def test_lookup_tiger_with_address_details(apiobj, frontend):
startnumber=2, endnumber=4, step=1, startnumber=2, endnumber=4, step=1,
parent_place_id=332) parent_place_id=332)
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', name='Street', class_='highway', type='residential', name='Street',
country_code='us', country_code='us',
rank_search=27, rank_address=26) rank_search=27, rank_address=26)
apiobj.add_address_placex(332, fromarea=False, isaddress=False, apiobj.add_address_placex(332, fromarea=False, isaddress=False,
distance=0.0034, distance=0.0034,
place_id=1000, osm_type='N', osm_id=3333, place_id=1000, osm_type='N', osm_id=3333,
@@ -571,6 +572,7 @@ def test_lookup_postcode_with_address_details(apiobj, frontend):
rank_address=4, distance=0.0) rank_address=4, distance=0.0)
] ]
@pytest.mark.parametrize('objid', [napi.PlaceID(1736), @pytest.mark.parametrize('objid', [napi.PlaceID(1736),
napi.OsmID('W', 55), napi.OsmID('W', 55),
napi.OsmID('N', 55, 'amenity')]) napi.OsmID('N', 55, 'amenity')])
@@ -583,8 +585,8 @@ def test_lookup_missing_object(apiobj, frontend, objid):
@pytest.mark.parametrize('gtype', (napi.GeometryFormat.KML, @pytest.mark.parametrize('gtype', (napi.GeometryFormat.KML,
napi.GeometryFormat.SVG, napi.GeometryFormat.SVG,
napi.GeometryFormat.TEXT)) napi.GeometryFormat.TEXT))
def test_lookup_unsupported_geometry(apiobj, frontend, gtype): def test_lookup_unsupported_geometry(apiobj, frontend, gtype):
apiobj.add_placex(place_id=332) apiobj.add_placex(place_id=332)

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for lookup API call. Tests for lookup API call.
@@ -13,6 +13,7 @@ import pytest
import nominatim_api as napi import nominatim_api as napi
def test_lookup_empty_list(apiobj, frontend): def test_lookup_empty_list(apiobj, frontend):
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
assert api.lookup([]) == [] assert api.lookup([]) == []
@@ -28,17 +29,17 @@ def test_lookup_non_existing(apiobj, frontend):
napi.OsmID('W', 4, 'highway'))) napi.OsmID('W', 4, 'highway')))
def test_lookup_single_placex(apiobj, frontend, idobj): def test_lookup_single_placex(apiobj, frontend, idobj):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', class_='highway', type='residential',
name={'name': 'Road'}, address={'city': 'Barrow'}, name={'name': 'Road'}, address={'city': 'Barrow'},
extratags={'surface': 'paved'}, extratags={'surface': 'paved'},
parent_place_id=34, linked_place_id=55, parent_place_id=34, linked_place_id=55,
admin_level=15, country_code='gb', admin_level=15, country_code='gb',
housenumber='4', housenumber='4',
postcode='34425', wikipedia='en:Faa', postcode='34425', wikipedia='en:Faa',
rank_search=27, rank_address=26, rank_search=27, rank_address=26,
importance=0.01, importance=0.01,
centroid=(23, 34), centroid=(23, 34),
geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)') geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)')
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.lookup([idobj]) result = api.lookup([idobj])
@@ -79,17 +80,17 @@ def test_lookup_single_placex(apiobj, frontend, idobj):
def test_lookup_multiple_places(apiobj, frontend): def test_lookup_multiple_places(apiobj, frontend):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', class_='highway', type='residential',
name={'name': 'Road'}, address={'city': 'Barrow'}, name={'name': 'Road'}, address={'city': 'Barrow'},
extratags={'surface': 'paved'}, extratags={'surface': 'paved'},
parent_place_id=34, linked_place_id=55, parent_place_id=34, linked_place_id=55,
admin_level=15, country_code='gb', admin_level=15, country_code='gb',
housenumber='4', housenumber='4',
postcode='34425', wikipedia='en:Faa', postcode='34425', wikipedia='en:Faa',
rank_search=27, rank_address=26, rank_search=27, rank_address=26,
importance=0.01, importance=0.01,
centroid=(23, 34), centroid=(23, 34),
geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)') geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)')
apiobj.add_osmline(place_id=4924, osm_id=9928, apiobj.add_osmline(place_id=4924, osm_id=9928,
parent_place_id=12, parent_place_id=12,
startnumber=1, endnumber=4, step=1, startnumber=1, endnumber=4, step=1,
@@ -97,7 +98,6 @@ def test_lookup_multiple_places(apiobj, frontend):
address={'city': 'Big'}, address={'city': 'Big'},
geometry='LINESTRING(23 34, 23 35)') geometry='LINESTRING(23 34, 23 35)')
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.lookup((napi.OsmID('W', 1), result = api.lookup((napi.OsmID('W', 1),
napi.OsmID('W', 4), napi.OsmID('W', 4),
@@ -111,17 +111,17 @@ def test_lookup_multiple_places(apiobj, frontend):
@pytest.mark.parametrize('gtype', list(napi.GeometryFormat)) @pytest.mark.parametrize('gtype', list(napi.GeometryFormat))
def test_simple_place_with_geometry(apiobj, frontend, gtype): def test_simple_place_with_geometry(apiobj, frontend, gtype):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', class_='highway', type='residential',
name={'name': 'Road'}, address={'city': 'Barrow'}, name={'name': 'Road'}, address={'city': 'Barrow'},
extratags={'surface': 'paved'}, extratags={'surface': 'paved'},
parent_place_id=34, linked_place_id=55, parent_place_id=34, linked_place_id=55,
admin_level=15, country_code='gb', admin_level=15, country_code='gb',
housenumber='4', housenumber='4',
postcode='34425', wikipedia='en:Faa', postcode='34425', wikipedia='en:Faa',
rank_search=27, rank_address=26, rank_search=27, rank_address=26,
importance=0.01, importance=0.01,
centroid=(23, 34), centroid=(23, 34),
geometry='POLYGON((23 34, 23.1 34, 23.1 34.1, 23 34))') geometry='POLYGON((23 34, 23.1 34, 23.1 34.1, 23 34))')
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.lookup([napi.OsmID('W', 4)], geometry_output=gtype) result = api.lookup([napi.OsmID('W', 4)], geometry_output=gtype)
@@ -137,17 +137,17 @@ def test_simple_place_with_geometry(apiobj, frontend, gtype):
def test_simple_place_with_geometry_simplified(apiobj, frontend): def test_simple_place_with_geometry_simplified(apiobj, frontend):
apiobj.add_placex(place_id=332, osm_type='W', osm_id=4, apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
class_='highway', type='residential', class_='highway', type='residential',
name={'name': 'Road'}, address={'city': 'Barrow'}, name={'name': 'Road'}, address={'city': 'Barrow'},
extratags={'surface': 'paved'}, extratags={'surface': 'paved'},
parent_place_id=34, linked_place_id=55, parent_place_id=34, linked_place_id=55,
admin_level=15, country_code='gb', admin_level=15, country_code='gb',
housenumber='4', housenumber='4',
postcode='34425', wikipedia='en:Faa', postcode='34425', wikipedia='en:Faa',
rank_search=27, rank_address=26, rank_search=27, rank_address=26,
importance=0.01, importance=0.01,
centroid=(23, 34), centroid=(23, 34),
geometry='POLYGON((23 34, 22.999 34, 23.1 34, 23.1 34.1, 23 34))') geometry='POLYGON((23 34, 22.999 34, 23.1 34, 23.1 34.1, 23 34))')
api = frontend(apiobj, options={'details'}) api = frontend(apiobj, options={'details'})
result = api.lookup([napi.OsmID('W', 4)], result = api.lookup([napi.OsmID('W', 4)],
@@ -159,5 +159,5 @@ def test_simple_place_with_geometry_simplified(apiobj, frontend):
geom = json.loads(result[0].geometry['geojson']) geom = json.loads(result[0].geometry['geojson'])
assert geom['type'] == 'Polygon' assert geom['type'] == 'Polygon'
assert geom['coordinates'] == [[[23, 34], [23.1, 34], [23.1, 34.1], [23, 34]]] assert geom['coordinates'] == [[[23, 34], [23.1, 34], [23.1, 34.1], [23, 34]]]

View File

@@ -2,21 +2,21 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for the deletable v1 API call. Tests for the deletable v1 API call.
""" """
import json import json
import datetime as dt import datetime as dt
from pathlib import Path
import pytest import pytest
from fake_adaptor import FakeAdaptor, FakeError, FakeResponse from fake_adaptor import FakeAdaptor
import nominatim_api.v1.server_glue as glue import nominatim_api.v1.server_glue as glue
class TestPolygonsEndPoint: class TestPolygonsEndPoint:
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
@@ -35,13 +35,12 @@ class TestPolygonsEndPoint:
errormessage text, errormessage text,
prevgeometry geometry(Geometry,4326), prevgeometry geometry(Geometry,4326),
newgeometry geometry(Geometry,4326)""", newgeometry geometry(Geometry,4326)""",
content=[(345, 'N', 'boundary', 'administrative', content=[(345, 'N', 'boundary', 'administrative',
{'name': 'Foo'}, 'xx', self.recent, {'name': 'Foo'}, 'xx', self.recent,
'some text', None, None), 'some text', None, None),
(781, 'R', 'landuse', 'wood', (781, 'R', 'landuse', 'wood',
None, 'ds', self.now, None, 'ds', self.now,
'Area reduced by lots', None, None)]) 'Area reduced by lots', None, None)])
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_polygons_simple(self, api): async def test_polygons_simple(self, api):
@@ -63,7 +62,6 @@ class TestPolygonsEndPoint:
'errormessage': 'Area reduced by lots', 'errormessage': 'Area reduced by lots',
'updated': self.now.isoformat(sep=' ', timespec='seconds')}] 'updated': self.now.isoformat(sep=' ', timespec='seconds')}]
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_polygons_days(self, api): async def test_polygons_days(self, api):
a = FakeAdaptor() a = FakeAdaptor()
@@ -74,7 +72,6 @@ class TestPolygonsEndPoint:
assert [r['osm_id'] for r in results] == [781] assert [r['osm_id'] for r in results] == [781]
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_polygons_class(self, api): async def test_polygons_class(self, api):
a = FakeAdaptor() a = FakeAdaptor()
@@ -85,8 +82,6 @@ class TestPolygonsEndPoint:
assert [r['osm_id'] for r in results] == [781] assert [r['osm_id'] for r in results] == [781]
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_polygons_reduced(self, api): async def test_polygons_reduced(self, api):
a = FakeAdaptor() a = FakeAdaptor()

View File

@@ -2,7 +2,7 @@
# #
# This file is part of Nominatim. (https://nominatim.org) # This file is part of Nominatim. (https://nominatim.org)
# #
# Copyright (C) 2024 by the Nominatim developer community. # Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log. # For a full list of authors see the git log.
""" """
Tests for reverse API call. Tests for reverse API call.
@@ -18,6 +18,7 @@ import nominatim_api as napi
API_OPTIONS = {'reverse'} API_OPTIONS = {'reverse'}
def test_reverse_rank_30(apiobj, frontend): def test_reverse_rank_30(apiobj, frontend):
apiobj.add_placex(place_id=223, class_='place', type='house', apiobj.add_placex(place_id=223, class_='place', type='house',
housenumber='1', housenumber='1',
@@ -35,7 +36,7 @@ def test_reverse_rank_30(apiobj, frontend):
def test_reverse_street(apiobj, frontend, country): def test_reverse_street(apiobj, frontend, country):
apiobj.add_placex(place_id=990, class_='highway', type='service', apiobj.add_placex(place_id=990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'My Street'}, name={'name': 'My Street'},
centroid=(10.0, 10.0), centroid=(10.0, 10.0),
country_code=country, country_code=country,
geometry='LINESTRING(9.995 10, 10.005 10)') geometry='LINESTRING(9.995 10, 10.005 10)')
@@ -57,16 +58,17 @@ def test_reverse_ignore_unindexed(apiobj, frontend):
assert result is None assert result is None
@pytest.mark.parametrize('y,layer,place_id', [(0.7, napi.DataLayer.ADDRESS, 223), @pytest.mark.parametrize('y,layer,place_id',
(0.70001, napi.DataLayer.POI, 224), [(0.7, napi.DataLayer.ADDRESS, 223),
(0.7, napi.DataLayer.ADDRESS | napi.DataLayer.POI, 224), (0.70001, napi.DataLayer.POI, 224),
(0.70001, napi.DataLayer.ADDRESS | napi.DataLayer.POI, 223), (0.7, napi.DataLayer.ADDRESS | napi.DataLayer.POI, 224),
(0.7, napi.DataLayer.MANMADE, 225), (0.70001, napi.DataLayer.ADDRESS | napi.DataLayer.POI, 223),
(0.7, napi.DataLayer.RAILWAY, 226), (0.7, napi.DataLayer.MANMADE, 225),
(0.7, napi.DataLayer.NATURAL, 227), (0.7, napi.DataLayer.RAILWAY, 226),
(0.70003, napi.DataLayer.MANMADE | napi.DataLayer.RAILWAY, 225), (0.7, napi.DataLayer.NATURAL, 227),
(0.70003, napi.DataLayer.MANMADE | napi.DataLayer.NATURAL, 225), (0.70003, napi.DataLayer.MANMADE | napi.DataLayer.RAILWAY, 225),
(5, napi.DataLayer.ADDRESS, 229)]) (0.70003, napi.DataLayer.MANMADE | napi.DataLayer.NATURAL, 225),
(5, napi.DataLayer.ADDRESS, 229)])
def test_reverse_rank_30_layers(apiobj, frontend, y, layer, place_id): def test_reverse_rank_30_layers(apiobj, frontend, y, layer, place_id):
apiobj.add_placex(place_id=223, osm_type='N', class_='place', type='house', apiobj.add_placex(place_id=223, osm_type='N', class_='place', type='house',
housenumber='1', housenumber='1',
@@ -108,14 +110,14 @@ def test_reverse_poi_layer_with_no_pois(apiobj, frontend):
api = frontend(apiobj, options=API_OPTIONS) api = frontend(apiobj, options=API_OPTIONS)
assert api.reverse((1.3, 0.70001), max_rank=29, assert api.reverse((1.3, 0.70001), max_rank=29,
layers=napi.DataLayer.POI) is None layers=napi.DataLayer.POI) is None
@pytest.mark.parametrize('with_geom', [True, False]) @pytest.mark.parametrize('with_geom', [True, False])
def test_reverse_housenumber_on_street(apiobj, frontend, with_geom): def test_reverse_housenumber_on_street(apiobj, frontend, with_geom):
apiobj.add_placex(place_id=990, class_='highway', type='service', apiobj.add_placex(place_id=990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'My Street'}, name={'name': 'My Street'},
centroid=(10.0, 10.0), centroid=(10.0, 10.0),
geometry='LINESTRING(9.995 10, 10.005 10)') geometry='LINESTRING(9.995 10, 10.005 10)')
apiobj.add_placex(place_id=991, class_='place', type='house', apiobj.add_placex(place_id=991, class_='place', type='house',
@@ -125,7 +127,7 @@ def test_reverse_housenumber_on_street(apiobj, frontend, with_geom):
centroid=(10.0, 10.00001)) centroid=(10.0, 10.00001))
apiobj.add_placex(place_id=1990, class_='highway', type='service', apiobj.add_placex(place_id=1990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'Other Street'}, name={'name': 'Other Street'},
centroid=(10.0, 1.0), centroid=(10.0, 1.0),
geometry='LINESTRING(9.995 1, 10.005 1)') geometry='LINESTRING(9.995 1, 10.005 1)')
apiobj.add_placex(place_id=1991, class_='place', type='house', apiobj.add_placex(place_id=1991, class_='place', type='house',
@@ -147,7 +149,7 @@ def test_reverse_housenumber_on_street(apiobj, frontend, with_geom):
def test_reverse_housenumber_interpolation(apiobj, frontend, with_geom): def test_reverse_housenumber_interpolation(apiobj, frontend, with_geom):
apiobj.add_placex(place_id=990, class_='highway', type='service', apiobj.add_placex(place_id=990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'My Street'}, name={'name': 'My Street'},
centroid=(10.0, 10.0), centroid=(10.0, 10.0),
geometry='LINESTRING(9.995 10, 10.005 10)') geometry='LINESTRING(9.995 10, 10.005 10)')
apiobj.add_placex(place_id=991, class_='place', type='house', apiobj.add_placex(place_id=991, class_='place', type='house',
@@ -162,7 +164,7 @@ def test_reverse_housenumber_interpolation(apiobj, frontend, with_geom):
geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)') geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
apiobj.add_placex(place_id=1990, class_='highway', type='service', apiobj.add_placex(place_id=1990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'Other Street'}, name={'name': 'Other Street'},
centroid=(10.0, 20.0), centroid=(10.0, 20.0),
geometry='LINESTRING(9.995 20, 10.005 20)') geometry='LINESTRING(9.995 20, 10.005 20)')
apiobj.add_osmline(place_id=1992, apiobj.add_osmline(place_id=1992,
@@ -181,7 +183,7 @@ def test_reverse_housenumber_interpolation(apiobj, frontend, with_geom):
def test_reverse_housenumber_point_interpolation(apiobj, frontend): def test_reverse_housenumber_point_interpolation(apiobj, frontend):
apiobj.add_placex(place_id=990, class_='highway', type='service', apiobj.add_placex(place_id=990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'My Street'}, name={'name': 'My Street'},
centroid=(10.0, 10.0), centroid=(10.0, 10.0),
geometry='LINESTRING(9.995 10, 10.005 10)') geometry='LINESTRING(9.995 10, 10.005 10)')
apiobj.add_osmline(place_id=992, apiobj.add_osmline(place_id=992,
@@ -199,7 +201,7 @@ def test_reverse_housenumber_point_interpolation(apiobj, frontend):
def test_reverse_tiger_number(apiobj, frontend): def test_reverse_tiger_number(apiobj, frontend):
apiobj.add_placex(place_id=990, class_='highway', type='service', apiobj.add_placex(place_id=990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'My Street'}, name={'name': 'My Street'},
centroid=(10.0, 10.0), centroid=(10.0, 10.0),
country_code='us', country_code='us',
geometry='LINESTRING(9.995 10, 10.005 10)') geometry='LINESTRING(9.995 10, 10.005 10)')
@@ -217,7 +219,7 @@ def test_reverse_tiger_number(apiobj, frontend):
def test_reverse_point_tiger(apiobj, frontend): def test_reverse_point_tiger(apiobj, frontend):
apiobj.add_placex(place_id=990, class_='highway', type='service', apiobj.add_placex(place_id=990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'My Street'}, name={'name': 'My Street'},
centroid=(10.0, 10.0), centroid=(10.0, 10.0),
country_code='us', country_code='us',
geometry='LINESTRING(9.995 10, 10.005 10)') geometry='LINESTRING(9.995 10, 10.005 10)')
@@ -393,14 +395,15 @@ def test_reverse_interpolation_geometry(apiobj, frontend):
geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)') geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
api = frontend(apiobj, options=API_OPTIONS) api = frontend(apiobj, options=API_OPTIONS)
assert api.reverse((10.0, 10.0), geometry_output=napi.GeometryFormat.TEXT)\ result = api.reverse((10.0, 10.0), geometry_output=napi.GeometryFormat.TEXT)
.geometry['text'] == 'POINT(10 10.00001)'
assert result.geometry['text'] == 'POINT(10 10.00001)'
def test_reverse_tiger_geometry(apiobj, frontend): def test_reverse_tiger_geometry(apiobj, frontend):
apiobj.add_placex(place_id=990, class_='highway', type='service', apiobj.add_placex(place_id=990, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'My Street'}, name={'name': 'My Street'},
centroid=(10.0, 10.0), centroid=(10.0, 10.0),
country_code='us', country_code='us',
geometry='LINESTRING(9.995 10, 10.005 10)') geometry='LINESTRING(9.995 10, 10.005 10)')
@@ -411,7 +414,7 @@ def test_reverse_tiger_geometry(apiobj, frontend):
geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)') geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
apiobj.add_placex(place_id=1000, class_='highway', type='service', apiobj.add_placex(place_id=1000, class_='highway', type='service',
rank_search=27, rank_address=27, rank_search=27, rank_address=27,
name = {'name': 'My Street'}, name={'name': 'My Street'},
centroid=(11.0, 11.0), centroid=(11.0, 11.0),
country_code='us', country_code='us',
geometry='LINESTRING(10.995 11, 11.005 11)') geometry='LINESTRING(10.995 11, 11.005 11)')
@@ -426,8 +429,9 @@ def test_reverse_tiger_geometry(apiobj, frontend):
params = {'geometry_output': napi.GeometryFormat.GEOJSON} params = {'geometry_output': napi.GeometryFormat.GEOJSON}
output = api.reverse((10.0, 10.0), **params) output = api.reverse((10.0, 10.0), **params)
assert json.loads(output.geometry['geojson']) == {'coordinates': [10, 10.00001], 'type': 'Point'} assert json.loads(output.geometry['geojson']) \
== {'coordinates': [10, 10.00001], 'type': 'Point'}
output = api.reverse((11.0, 11.0), **params) output = api.reverse((11.0, 11.0), **params)
assert json.loads(output.geometry['geojson']) == {'coordinates': [11, 11.00001], 'type': 'Point'} assert json.loads(output.geometry['geojson']) \
== {'coordinates': [11, 11.00001], 'type': 'Point'}

Some files were not shown because too many files have changed in this diff Show More