Compare commits

...

114 Commits

Author SHA1 Message Date
Sarah Hoffmann
76b8b07f16 adapt docs for release 2025-04-01 11:24:32 +02:00
Sarah Hoffmann
fce279226f prepare release 5.1.0 2025-04-01 10:16:35 +02:00
Sarah Hoffmann
54d895c4ce Merge pull request #3695 from TuringVerified/doc-dependencies
[Small fix] Add documentation to install extras for mkdocstrings
2025-04-01 09:34:08 +02:00
TuringVerified
896a1c9d12 Add mkdocstrings extra 2025-04-01 11:06:46 +05:30
Sarah Hoffmann
32728d6c89 Merge pull request #3693 from lonvia/remove-unused-sql
Remove SQL function for address lookup
2025-03-31 17:11:39 +02:00
Sarah Hoffmann
bfd1c83cb0 Merge pull request #3692 from lonvia/word-lookup-variants
Avoid matching penalty for abbreviated search terms
2025-03-31 16:38:31 +02:00
Sarah Hoffmann
bbadc62371 remove SQL function for address lookup
This is now done in Python.
2025-03-31 15:09:40 +02:00
Sarah Hoffmann
5c9d3ca8d2 Merge pull request #3691 from lonvia/more-search-tweaks
More tweaks to search weights
2025-03-31 15:06:09 +02:00
Sarah Hoffmann
be4ba370ef adapt tests to extended results 2025-03-31 14:52:50 +02:00
Sarah Hoffmann
3cb183ffb0 add lookup word to variants in word table 2025-03-31 14:52:50 +02:00
Sarah Hoffmann
58ef032a2b do not write any word counts on initial word insert 2025-03-31 14:52:50 +02:00
Sarah Hoffmann
1705bb5f57 do not save word counts of 1
This is the default setting, which will be assumed when the count is
missing.
2025-03-31 14:52:50 +02:00
Sarah Hoffmann
f2aa15778f always use lookup when requested
Doesn't seem to cause any issues in production.
2025-03-31 11:38:21 +02:00
Sarah Hoffmann
efe65c3e49 increase allowable address counts 2025-03-31 11:38:21 +02:00
Sarah Hoffmann
51847ebfeb more aggressively reduce expected count for multi-word terms
Improves searching of non-Latin scripts with forced token spaces.
2025-03-31 11:18:22 +02:00
Sarah Hoffmann
46579f08e4 Merge pull request #3690 from lonvia/fix-signature
Fix function signature for newer SQLAlchemy
2025-03-31 11:17:03 +02:00
Sarah Hoffmann
d4994a152b fix function signature for newer SQLAlchemy 2025-03-31 09:42:29 +02:00
Sarah Hoffmann
00b3ace3cf Merge pull request #3684 from lonvia/compact-en-variants
Clean up English variants
2025-03-24 15:15:13 +01:00
Sarah Hoffmann
522bc942cf restrict some English variants to end of word 2025-03-21 21:22:38 +01:00
Sarah Hoffmann
d6e749d621 make English variant list more compact 2025-03-21 21:13:34 +01:00
Sarah Hoffmann
13cfb7efe2 Merge pull request #3682 from lonvia/fix-postcode-case
Fix case issues when parsing postcodes
2025-03-21 11:41:24 +01:00
Sarah Hoffmann
35baf77b18 make query upper-case when parsing postcodes
The postcode patterns expect upper-case letters.
2025-03-21 09:44:15 +01:00
Sarah Hoffmann
7e68613cc7 Merge pull request #3679 from lonvia/output-fixes
Minor fixes for v1 frontend code
2025-03-19 21:56:28 +01:00
Sarah Hoffmann
b1fc721f4b fix layer setting for structured search 2025-03-19 17:31:43 +01:00
Sarah Hoffmann
d400fd5f76 fix debug output for lookup type 2025-03-19 17:31:18 +01:00
Sarah Hoffmann
e4295dba10 Merge pull request #3678 from lonvia/search-tweaks
Some minor tweaks to postcode parsing in query
2025-03-19 16:00:52 +01:00
Sarah Hoffmann
9419c5adb2 penalize postcode searches with multiple name qualifiers 2025-03-19 10:05:36 +01:00
Sarah Hoffmann
2c61fe08a0 use word_token length when penalizing against postcodes 2025-03-19 09:52:40 +01:00
Sarah Hoffmann
7b3c725f2a postcode token should have transliterated term in word_token 2025-03-19 09:52:40 +01:00
Sarah Hoffmann
edc5ada625 improve handling of leading postcodes
Setting the direction of the query while yielding assignments is
a bad idea because it may override a direction already set.
2025-03-19 09:52:40 +01:00
Sarah Hoffmann
72d3360fa2 Merge pull request #3673 from otbutz/parallel_safe
Mark functions as PARALLEL SAFE
2025-03-18 21:46:53 +01:00
Sarah Hoffmann
0ffe384c57 Merge pull request #3676 from lonvia/adjust-place-levels-sa
Adjust place ranks for Saudi Arabia
2025-03-18 18:31:48 +01:00
Sarah Hoffmann
9dad5edeb6 adjust for special use of province and municipality in Saudi Arabia 2025-03-18 16:38:10 +01:00
Thomas Butz
d86d491f2e Mark functions as PARALLEL SAFE 2025-03-13 10:53:11 +01:00
Sarah Hoffmann
3026c333ca adapt typing for latest SQLAlchemy version 2025-03-13 10:49:08 +01:00
Sarah Hoffmann
ad84bbdec7 Merge pull request #3671 from lonvia/remove-osm2pgsql-libdir
Remove code for setting osm2pgsql location via config.lib_dir
2025-03-11 11:22:46 +01:00
Sarah Hoffmann
f5755a7a82 remove code for setting osm2pgsql via config.lib_dir
With the internal osm2pgsql gone, configuration of the binary location
via settings is the only option left that makes sense.
2025-03-11 09:04:05 +01:00
Sarah Hoffmann
cd08956c61 Merge pull request #3670 from lonvia/flake-for-tests
Extend linting with flake to tests
2025-03-10 09:35:24 +01:00
Sarah Hoffmann
12f5719184 remove unused bdd util functions 2025-03-09 17:34:40 +01:00
Sarah Hoffmann
78f839fbd3 enable flake for bdd test code 2025-03-09 17:34:04 +01:00
Sarah Hoffmann
c70dfccaca also enable flake for tests in github actions 2025-03-09 16:03:02 +01:00
Sarah Hoffmann
4cc788f69e enable flake for Python tests 2025-03-09 15:33:24 +01:00
Sarah Hoffmann
5a245e33e0 Merge pull request #3667 from eumiro/simplify-int-float
Simplify int/float manipulation
2025-03-09 09:44:15 +01:00
Miroslav Šedivý
6ff51712fe Simplify int/float manipulation 2025-03-06 19:26:56 +01:00
Sarah Hoffmann
c431e0e45d Merge pull request #3666 from eumiro/math-isclose
Replace custom Almost with stdlib math.isclose
2025-03-06 17:53:01 +01:00
Sarah Hoffmann
c2d62a59cb Merge pull request #3664 from eumiro/consolidate-random
Consolidate usage of random module
2025-03-06 17:52:19 +01:00
Miroslav Šedivý
cd64788a58 Replace custom Almost with stdlib math.isclose 2025-03-05 20:35:01 +01:00
Miroslav Šedivý
800a41721a Consolidate usage of random module 2025-03-05 19:38:28 +01:00
Sarah Hoffmann
1b44fe2555 Merge pull request #3665 from lonvia/pattern-matching-postcodes
Add full parsing of postcodes in query
2025-03-05 16:02:03 +01:00
Sarah Hoffmann
6b0d58d9fd restrict postcode parsing in typed phrases
Postcodes can only appear in postcode-type phrases and must then
cover the full phrase
2025-03-05 10:09:33 +01:00
Sarah Hoffmann
afb89f9c7a add unit tests for postcode parser 2025-03-04 16:25:00 +01:00
Sarah Hoffmann
6712627d5e adapt BDD tests to new postcode handling 2025-03-04 15:18:46 +01:00
Sarah Hoffmann
434fbbfd18 add support for country prefixes in postcodes 2025-03-04 15:18:27 +01:00
Sarah Hoffmann
921db8bb2f cache all info of ICUQueryAnalyser in a single object 2025-03-04 08:58:57 +01:00
Sarah Hoffmann
a574b98e4a remove postcode computation for word table during import 2025-03-04 08:57:59 +01:00
Sarah Hoffmann
b2af358f66 reenable ZIP+ test 2025-03-04 08:57:59 +01:00
Sarah Hoffmann
e67ae701ac show token begin and end in debug output 2025-03-04 08:57:59 +01:00
Sarah Hoffmann
fc1c6261ed add postcode parser 2025-03-04 08:57:37 +01:00
Sarah Hoffmann
6759edfb5d make word generation from query a class method 2025-03-04 08:57:37 +01:00
Sarah Hoffmann
e362a965e1 search: merge QueryPart array with QueryNodes
The basic information on terms is almost always used together
with the node information. Merging them saves some allocations
while making lookups easier at the same time.
2025-03-04 08:57:37 +01:00
Sarah Hoffmann
eff60ba6be enable parsing of US ZIP+ codes
The four-digit part of these postcodes will simply be ignored.
2025-02-25 20:29:06 +01:00
Sarah Hoffmann
157414a053 Merge pull request #3659 from lonvia/custom-datrie-structure
Replace datrie library with a simple custom Python implementation
2025-02-24 16:49:42 +01:00
Sarah Hoffmann
18d4996bec remove datrie dependency 2025-02-24 10:24:21 +01:00
Sarah Hoffmann
13db4c9731 replace datrie library with a simpler pure-Python class 2025-02-24 10:24:21 +01:00
Sarah Hoffmann
f567ea89cc Merge pull request #3658 from lonvia/minor-query-parsing-optimisations
Minor query parsing optimisations
2025-02-24 10:16:47 +01:00
Sarah Hoffmann
3e718e40d9 adapt documentation for PhraseType type 2025-02-21 17:16:42 +01:00
Sarah Hoffmann
49bd18b048 replace PhraseType enum with simple int constants 2025-02-21 16:44:12 +01:00
Sarah Hoffmann
31412e0674 replace TokenType enum with simple char constants 2025-02-21 10:23:41 +01:00
Sarah Hoffmann
4577669213 replace BreakType enum with simple char constants 2025-02-21 09:57:48 +01:00
Sarah Hoffmann
9bf1428d81 consistently use query module as qmod 2025-02-21 09:31:21 +01:00
Sarah Hoffmann
b56edf3d0a avoid yielding when extracting words from query 2025-02-20 23:32:39 +01:00
Sarah Hoffmann
abc911079e remove word_number counting for phrases
We can just examine the break types to know if we are dealing
with a partial token.
2025-02-20 17:36:50 +01:00
Sarah Hoffmann
adabfee3be Merge pull request #3655 from lonvia/remove-name-ranking-in-postcode-search
Tweak penalties for postcode searches
2025-02-20 14:32:43 +01:00
Sarah Hoffmann
46c4446dc2 remove address penalty for postcode search
Searches of the form <postcode> <city> are in fact quite common.
2025-02-20 11:11:45 +01:00
Sarah Hoffmann
add9244a2f do not rerank address by full match in postcode search
The reranking result will not be completely correct because
the address of a postcode refers to the address _and_ name
of the parent, while reranking was only done against the
address. We assume here that the postcode is precise enough
not to require a penalty for partial matches.
2025-02-20 10:29:03 +01:00
Sarah Hoffmann
96d7a8e8f6 Merge pull request #3653 from lonvia/trailing-spaces-in-normalization
Strip leading and trailing space markers during normalization
2025-02-19 17:25:59 +01:00
Sarah Hoffmann
55c3176957 strip normalisation results of normal and special spaces 2025-02-19 14:40:35 +01:00
Sarah Hoffmann
e29823e28f add test for structured query with leading spaces 2025-02-19 10:31:36 +01:00
Sarah Hoffmann
97ed168996 Merge pull request #3652 from lonvia/update-variants
Cleanup and updates of tokenizer variant configuration
2025-02-18 19:47:45 +01:00
Sarah Hoffmann
9b8ef97d4b Merge pull request #3649 from lonvia/actions-move-to-ubuntu22
Move GitHub actions to Ubuntu-22 image
2025-02-18 13:21:09 +01:00
Sarah Hoffmann
4f3c88f0c1 remove e-ë mutation, this is taken care of by transliteration 2025-02-18 10:31:44 +01:00
mhsr21
7781186f3c Add USPS Standard Suffix Abbreviation 2025-02-18 09:28:13 +01:00
Sarah Hoffmann
f78686edb8 fix Norwegian variants
More cases of 'no' being interpreted as false by YAML.
2025-02-18 09:28:13 +01:00
Sarah Hoffmann
e330cd3162 remove ineffective and duplicate variants 2025-02-18 09:28:13 +01:00
Sarah Hoffmann
671af4cff2 Merge pull request #3555 from IvanShift/patch-1
Fixed Russian abbreviation list
2025-02-17 18:44:11 +01:00
Sarah Hoffmann
e612b7d550 actions: use Debian's script for adding the Postgres apt repo 2025-02-17 17:56:23 +01:00
Sarah Hoffmann
0b49d01703 actions: move tests to Ubuntu-22 2025-02-17 17:54:49 +01:00
Sarah Hoffmann
f6bc8e153f Merge pull request #3648 from lonvia/extratags-for-geocodejson
Enable output of extratags for geocodejson format
2025-02-17 11:14:52 +01:00
Sarah Hoffmann
f143ecaf1c add documentation for new extra field 2025-02-17 10:04:23 +01:00
Sarah Hoffmann
6730c8bac8 add optional output of extratags to geocodejson 2025-02-16 10:16:40 +01:00
Sarah Hoffmann
ee8915f2b6 prepare 5.0.0 release 2025-02-05 10:54:38 +01:00
Sarah Hoffmann
5475bf7b9c Merge pull request #3635 from lonvia/replace-wikimedia-importance-test-data
Update wikimedia importance file for test database
2025-01-14 16:49:52 +01:00
Sarah Hoffmann
95e2d8c846 adapt tests to changed wikimedia importance test table 2025-01-14 14:19:17 +01:00
Sarah Hoffmann
7552818866 replace wikimedia importance file for test data with CSV version 2025-01-14 09:16:25 +01:00
Sarah Hoffmann
db3991af74 Merge pull request #3626 from lonvia/import-performance
Import performance
2025-01-10 16:44:33 +01:00
Sarah Hoffmann
4523b9aaed Merge pull request #3631 from lonvia/avoid-transactions
Creating tables and indexes in autocommit mode
2025-01-10 16:44:18 +01:00
Sarah Hoffmann
8b1cabebd6 Merge pull request #3633 from lonvia/restrict-long-ways
Ignore overly long ways during import
2025-01-10 16:06:37 +01:00
Sarah Hoffmann
0cf636a80c ignore overly long ways during import 2025-01-10 13:55:43 +01:00
Sarah Hoffmann
c2cb6722fe use autocommit when creating tables and indexes
Might avoid some deadlock situations with autovacuum.
2025-01-09 17:14:37 +01:00
Sarah Hoffmann
f8337bedb2 Merge pull request #3629 from lonvia/additional-breaks
Introduce new break types and phrase splitting for Japanese addresses
2025-01-09 13:55:29 +01:00
Sarah Hoffmann
efc09a5cfc add japanese phrase preprocessing
Code adapted from GSoC code by @miku.
2025-01-09 09:24:10 +01:00
Sarah Hoffmann
86ad9efa8a keep break indicators [:-] during normalisation
All punctuation will be converted to '-'. Soft breaks ':' may be
added by preprocessors. The break signs are only used during
query analysis and are ignored during import token analysis.
2025-01-09 09:21:55 +01:00
Sarah Hoffmann
d984100e23 add inner word break penalty 2025-01-07 21:42:25 +01:00
Sarah Hoffmann
499110f549 add SOFT_PHRASE break and enable parsing
Also enables parsing of PART breaks.
2025-01-06 17:10:24 +01:00
Sarah Hoffmann
267e5dac0d split up MultiPolygons before adding them to large_areas table 2024-12-22 09:15:16 +01:00
Sarah Hoffmann
32d3eb46d5 move geometry split into insertLocationAreaLarge()
so that the insert only needs to be called once.
2024-12-22 09:15:16 +01:00
Sarah Hoffmann
c8a0dc8af1 more efficient belongs-to-address determination 2024-12-22 09:15:16 +01:00
Sarah Hoffmann
14ecfc7834 Merge pull request #3619 from lonvia/demote-farms
Remove farms and isolated dwellings from computed addresses
2024-12-22 09:13:42 +01:00
Sarah Hoffmann
cad44eb00c remove farms and isolated dwellings from computed addresses
Farms and isolated dwellings are usually confined to a very small
area. It does not make sense to use them automatically in the
addresses of surrounding features. Using them for parenting via
addr:place still works.
2024-12-20 22:59:02 +01:00
Sarah Hoffmann
f76dbb0a16 docs: update the Update docs for virtualenv use 2024-12-20 11:27:45 +01:00
Sarah Hoffmann
8dd218a1d0 Merge pull request #3618 from osm-search/settings-md-table-space-osm-index
Settings.md - one setting was repeated
2024-12-19 08:40:31 +01:00
IvanShift
bea9249e38 Added "дом" and fixed order "школа" 2024-10-06 17:59:59 +03:00
Alexander Sapozhnikov
1e4677b668 Expand Russian abbreviation list 2022-11-01 04:01:27 +05:00
Alexander Sapozhnikov
7f909dbbd8 Add replacement for Russian 2022-11-01 02:54:07 +05:00
183 changed files with 3338 additions and 3360 deletions

View File

@@ -6,3 +6,6 @@ extend-ignore =
E711
per-file-ignores =
__init__.py: F401
test/python/utils/test_json_writer.py: E131
test/python/conftest.py: E402
test/bdd/*: F821

View File

@@ -11,10 +11,8 @@ runs:
steps:
- name: Remove existing PostgreSQL
run: |
sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y
sudo apt-get purge -yq postgresql*
sudo apt install curl ca-certificates gnupg
curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg >/dev/null
sudo sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
sudo apt-get update -qq
shell: bash

View File

@@ -37,10 +37,10 @@ jobs:
needs: create-archive
strategy:
matrix:
flavour: ["ubuntu-20", "ubuntu-24"]
flavour: ["ubuntu-22", "ubuntu-24"]
include:
- flavour: ubuntu-20
ubuntu: 20
- flavour: ubuntu-22
ubuntu: 22
postgresql: 12
lua: '5.1'
dependencies: pip
@@ -81,7 +81,7 @@ jobs:
sudo make install
cd ../..
rm -rf osm2pgsql-build
if: matrix.ubuntu == '20'
if: matrix.ubuntu == '22'
env:
LUA_VERSION: ${{ matrix.lua }}
@@ -100,7 +100,7 @@ jobs:
run: ./venv/bin/pip install -U flake8
- name: Python linting
run: ../venv/bin/python -m flake8 src
run: ../venv/bin/python -m flake8 src test/python test/bdd
working-directory: Nominatim
- name: Install mypy and typechecking info

View File

@@ -87,7 +87,6 @@ Checklist for releases:
* [ ] increase versions in
* `src/nominatim_api/version.py`
* `src/nominatim_db/version.py`
* CMakeLists.txt
* [ ] update `ChangeLog` (copy information from patch releases from release branch)
* [ ] complete `docs/admin/Migration.md`
* [ ] update EOL dates in `SECURITY.md`

View File

@@ -1,3 +1,47 @@
5.1.0
* replace datrie with simple internal trie implementation
* add pattern-based postcode parser for queries,
postcodes no longer need to be present in OSM to be found
* take variants into account when computing token similarity
* add extratags output to geocodejson format
* fix default layer setting used for structured queries
* update abbreviation lists for Russian and English
(thanks @shoorick, @IvanShift, @mhsrn21)
* fix variant generation for Norwegian
* fix normalization around space-like characters
* improve postcode search and handling of postcodes in queries
* reorganise internal query structure and get rid of slow enums
* enable code linting for tests
* various code modernisations in test code (thanks @eumiro)
* remove setting osm2pgsql location via config.lib_dir
* make SQL functions parallel safe as far as possible (thanks @otbutz)
* various fixes and improvements to documentation (thanks @TuringVerified)
5.0.0
* increase required versions for PostgreSQL (12+), PostGIS (3.0+)
* remove installation via cmake and debundle osm2pgsql
* remove deprecated PHP frontend
* remove deprecated legacy tokenizer
* add configurable pre-processing of queries
* add query pre-processor to split up Japanese addresses
* rewrite of osm2pgsql style implementation
(also adds support for osm2pgsql-themepark)
* reduce the number of SQL queries needed to complete a 'lookup' call
* improve computation of centroid for lines with only two points
* improve bbox output for postcode areas
* improve result order by returning the largest object when other things are
equal
* add fallback for reverse geocoding to default country tables
* exclude postcode areas from reverse geocoding
* disable search endpoint when database is reverse-only (regression)
* minor performance improvements to area split algorithm
* switch table and index creation to use autocommit mode to avoid deadlocks
* drop overly long ways during import
* restrict automatic migrations to versions 4.3+
* switch linting from pylint to flake8
* switch tests to use a wikimedia test file in the new CSV style
* various fixes and improvements to documentation
4.5.0
* allow building Nominatim as a pip package
* make osm2pgsql building optional

View File

@@ -24,7 +24,7 @@ pytest:
pytest test/python
lint:
flake8 src
flake8 src test/python test/bdd
bdd:
cd test/bdd; behave -DREMOVE_TEMPLATE=1

View File

@@ -9,10 +9,11 @@ versions.
| Version | End of support for security updates |
| ------- | ----------------------------------- |
| 5.1.x | 2027-04-01 |
| 5.0.x | 2027-02-06 |
| 4.5.x | 2026-09-12 |
| 4.4.x | 2026-03-07 |
| 4.3.x | 2025-09-07 |
| 4.2.x | 2024-11-24 |
## Reporting a Vulnerability

View File

@@ -37,7 +37,6 @@ Furthermore the following Python libraries are required:
* [Jinja2](https://palletsprojects.com/p/jinja/)
* [PyICU](https://pypi.org/project/PyICU/)
* [PyYaml](https://pyyaml.org/) (5.1+)
* [datrie](https://github.com/pytries/datrie)
These will be installed automatically when using pip installation.
@@ -111,14 +110,17 @@ Then you can install Nominatim with:
pip install nominatim-db nominatim-api
## Downloading and building Nominatim
## Downloading and building Nominatim from source
### Downloading the latest release
The following instructions are only relevant if you want to build and
install Nominatim **from source**.
### Downloading the source for the latest release
You can download the [latest release from nominatim.org](https://nominatim.org/downloads/).
The release contains all necessary files. Just unpack it.
### Downloading the latest development version
### Downloading the source for the latest development version
If you want to install the latest development version from GitHub:
@@ -132,7 +134,7 @@ The development version does not include the country grid. Download it separatel
wget -O Nominatim/data/country_osm_grid.sql.gz https://nominatim.org/data/country_grid.sql.gz
```
### Building Nominatim
### Building Nominatim from source
Nominatim is easiest to run from its own virtual environment. To create one, run:

View File

@@ -9,19 +9,15 @@ the following steps:
* Update the frontend: `pip install -U nominatim-api`
* (optionally) Restart updates
If you are still using CMake for the installation of Nominatim, then you
need to update the software in one step before migrating the database.
It is not recommended to do this while the machine is serving requests.
Below you find additional migrations and hints about other structural and
breaking changes. **Please read them before running the migration.**
!!! note
If you are migrating from a version <4.3, you need to install 4.3
first and migrate to 4.3 first. Then you can migrate to the current
and migrate to 4.3 first. Then you can migrate to the current
version. It is strongly recommended to do a reimport instead.
## 4.5.0 -> master
## 4.5.0 -> 5.0.0
### PHP frontend removed
@@ -33,6 +29,42 @@ needed. It currently emits a warning and otherwise does nothing. It will be
removed in later versions of Nominatim. So make sure you remove it from your
scripts.
### CMake building removed
Nominatim can now only be installed via pip. Please follow the installation
instructions for the current version to change to pip.
### osm2pgsql no longer vendored in
Nominatim no longer ships its own version of osm2pgsql. Please install a
stock version of osm2pgsql from your distribution. See the
[installation instruction for osm2pgsql](https://osm2pgsql.org/doc/install.html)
for details. A minimum version of 1.8 is required. The current stable versions
of Ubuntu and Debian already ship with an appropriate version. For older
installations, you may have to compile a newer osm2pgsql yourself.
### Legacy tokenizer removed
The `legacy` tokenizer is no longer enabled. This tokenizer has been superseded
by the `ICU` tokenizer a long time ago. In the unlikely case that your database
still uses the `legacy` tokenizer, you must reimport your database.
### osm2pgsql style overhauled
There are some fundamental changes to how customized osm2pgsql styles should
be written. The changes are mostly backwards compatible, i.e. custom styles
should still work with the new implementation. The only exception is a
customization of the `process_tags()` function. This function is no longer
considered public and neither are the helper functions used in it.
They currently still work but will be removed at some point. If you have
been making changes to `process_tags`, please review your style and try
to switch to the new convenience functions.
For more information on the changes, see the
[pull request](https://github.com/osm-search/Nominatim/pull/3615)
and read the new
[customization documentation](https://nominatim.org/release-docs/latest/customize/Import-Styles/).
## 4.4.0 -> 4.5.0
### New structure for Python packages

View File

@@ -68,10 +68,10 @@ the update interval no new data has been published yet, it will go to sleep
until the next expected update and only then attempt to download the next batch.
The one-time mode is particularly useful if you want to run updates continuously
but need to schedule other work in between updates. For example, the main
service at osm.org uses it, to regularly recompute postcodes -- a process that
must not be run while updates are in progress. Its update script
looks like this:
but need to schedule other work in between updates. For example, you might
want to regularly recompute postcodes -- a process that
must not be run while updates are in progress. An update script refreshing
postcodes regularly might look like this:
```sh
#!/bin/bash
@@ -109,17 +109,19 @@ Unit=nominatim-updates.service
WantedBy=multi-user.target
```
And then a similar service definition: `/etc/systemd/system/nominatim-updates.service`:
`OnUnitActiveSec` defines how often the individual update command is run.
Then add a service definition for the timer in `/etc/systemd/system/nominatim-updates.service`:
```
[Unit]
Description=Single updates of Nominatim
[Service]
WorkingDirectory=/srv/nominatim
ExecStart=nominatim replication --once
StandardOutput=append:/var/log/nominatim-updates.log
StandardError=append:/var/log/nominatim-updates.error.log
WorkingDirectory=/srv/nominatim-project
ExecStart=/srv/nominatim-venv/bin/nominatim replication --once
StandardOutput=journald
StandardError=inherit
User=nominatim
Group=nominatim
Type=simple
@@ -128,9 +130,9 @@ Type=simple
WantedBy=multi-user.target
```
Replace the `WorkingDirectory` with your project directory. Also adapt user and
group names as required. `OnUnitActiveSec` defines how often the individual
update command is run.
Replace the `WorkingDirectory` with your project directory. `ExecStart` points
to the nominatim binary that was installed in your virtualenv earlier.
Finally, you might need to adapt user and group names as required.
Now activate the service and start the updates:
@@ -140,12 +142,13 @@ sudo systemctl enable nominatim-updates.timer
sudo systemctl start nominatim-updates.timer
```
You can stop future data updates, while allowing any current, in-progress
You can stop future data updates while allowing any current, in-progress
update steps to finish, by running `sudo systemctl stop
nominatim-updates.timer` and waiting until `nominatim-updates.service` isn't
running (`sudo systemctl is-active nominatim-updates.service`). Current output
from the update can be seen like above (`systemctl status
nominatim-updates.service`).
running (`sudo systemctl is-active nominatim-updates.service`).
To check the output from the update process, use journalctl: `journalctl -u
nominatim-updates.service`
#### Catch-up mode
@@ -155,13 +158,13 @@ all changes from the server until the database is up-to-date. The catch-up mode
still respects the parameter `NOMINATIM_REPLICATION_MAX_DIFF`. It downloads and
applies the changes in appropriate batches until all is done.
The catch-up mode is foremost useful to bring the database up to speed after the
The catch-up mode is primarily useful for bringing the database up to date after the
initial import. Given that the service is usually not in production at this
point, you can temporarily be a bit more generous with the batch size and
number of threads you use for the updates by running catch-up like this:
```
cd /srv/nominatim
cd /srv/nominatim-project
NOMINATIM_REPLICATION_MAX_DIFF=5000 nominatim replication --catch-up --threads 15
```
@@ -173,13 +176,13 @@ replication catch-up at whatever interval you desire.
When running scheduled updates with catch-up, it is a good idea to choose
a replication source with an update frequency that is an order of magnitude
lower. For example, if you want to update once a day, use an hourly updated
source. This makes sure that you don't miss an entire day of updates when
source. This ensures that you don't miss an entire day of updates when
the source is unexpectedly late to publish its update.
If you want to use the source with the same update frequency (e.g. a daily
updated source with daily updates), use the
continuous update mode. It ensures to re-request the newest update until it
is published.
once mode together with a frequently run systemd script as described above.
It keeps re-requesting the newest update until it has been published.
#### Continuous updates
@@ -197,36 +200,3 @@ parameters:
The update application keeps running forever and retrieves and applies
new updates from the server as they are published.
You can run this command as a simple systemd service. Create a service
description like that in `/etc/systemd/system/nominatim-updates.service`:
```
[Unit]
Description=Continuous updates of Nominatim
[Service]
WorkingDirectory=/srv/nominatim
ExecStart=nominatim replication
StandardOutput=append:/var/log/nominatim-updates.log
StandardError=append:/var/log/nominatim-updates.error.log
User=nominatim
Group=nominatim
Type=simple
[Install]
WantedBy=multi-user.target
```
Replace the `WorkingDirectory` with your project directory. Also adapt user
and group names as required.
Now activate the service and start the updates:
```
sudo systemctl daemon-reload
sudo systemctl enable nominatim-updates
sudo systemctl start nominatim-updates
```
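As an aside to this file's changes: the once-mode update script whose body is elided in the diff above pairs `nominatim replication --once` with a postcode refresh. A minimal Python sketch of the same flow (illustrative only, not part of this changeset; it assumes the `nominatim` binary from the virtualenv is on `PATH` and the project directory is the working directory):

```python
import subprocess
import sys

# Apply one batch of updates. Abort on failure so that the postcode
# recomputation below never runs concurrently with an update.
update = subprocess.run(['nominatim', 'replication', '--once'])
if update.returncode != 0:
    sys.exit(update.returncode)

# No update is in progress now, so it is safe to recompute postcodes.
subprocess.run(['nominatim', 'refresh', '--postcodes'], check=True)
```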

View File

@@ -106,8 +106,11 @@ The following feature attributes are implemented:
* `name` - localised name of the place
* `housenumber`, `street`, `locality`, `district`, `postcode`, `city`,
`county`, `state`, `country` -
provided when it can be determined from the address
provided when it can be determined from the address (only with `addressdetails=1`)
* `admin` - list of localised names of administrative boundaries (only with `addressdetails=1`)
* `extra` - dictionary with additional useful tags like `website` or `maxspeed`
(only with `extratags=1`)
Use `polygon_geojson` to output the full geometry of the object instead
of the centroid.
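To illustrate the new `extra` field, a sketch of a request against a self-hosted instance (the host and port are placeholders; `format`, `addressdetails` and `extratags` are the documented request parameters):

```python
import requests

# Ask a self-hosted Nominatim instance for geocodejson output with
# address details and the extratags-based 'extra' dictionary.
resp = requests.get('http://localhost:8080/search', params={
    'q': 'Eiffel Tower',
    'format': 'geocodejson',
    'addressdetails': 1,
    'extratags': 1,
})
resp.raise_for_status()

for feature in resp.json()['features']:
    props = feature['properties']['geocoding']
    # 'extra' is only present when extratags=1 was requested.
    print(props['label'], props.get('extra', {}).get('website'))
```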

View File

@@ -326,7 +326,7 @@ defined primary names are forgotten.)
| Name | Description |
| :----- | :---------- |
| core | Basic set of recogniced names for all places. |
| core | Basic set of recognized names for all places. |
| address | Additional names useful when indexing full addresses. |
| poi | Extended set of recognized names for pois. Use on top of the core set. |

View File

@@ -50,7 +50,7 @@ queries. This happens in two stages:
as during the import process but may involve other processing like,
for example, word break detection.
2. The **token analysis** step breaks down the query parts into tokens,
looks them up in the database and assignes them possible functions and
looks them up in the database and assigns them possible functions and
probabilities.
Query processing can be further customized while the rest of the analysis

View File

@@ -69,9 +69,9 @@ To set up the virtual environment with all necessary packages run:
```sh
virtualenv ~/nominatim-dev-venv
~/nominatim-dev-venv/bin/pip install\
psutil psycopg[binary] PyICU SQLAlchemy \
python-dotenv jinja2 pyYAML datrie behave \
mkdocs mkdocstrings mkdocs-gen-files pytest pytest-asyncio flake8 \
psutil 'psycopg[binary]' PyICU SQLAlchemy \
python-dotenv jinja2 pyYAML behave \
mkdocs 'mkdocstrings[python]' mkdocs-gen-files pytest pytest-asyncio flake8 \
types-jinja2 types-markupsafe types-psutil types-psycopg2 \
types-pygments types-pyyaml types-requests types-ujson \
types-urllib3 typing-extensions unicorn falcon starlette \

View File

@@ -60,13 +60,19 @@ The order of phrases matters to Nominatim when doing further processing.
Thus, while you may split or join phrases, you should not reorder them
unless you really know what you are doing.
Phrase types (`nominatim_api.search.PhraseType`) can further help narrowing
down how the tokens in the phrase are interpreted. The following phrase types
are known:
Phrase types can further help narrow down how the tokens in the phrase
are interpreted. The following phrase types are known:
::: nominatim_api.search.PhraseType
options:
heading_level: 6
| Name | Description |
|----------------|-------------|
| PHRASE_ANY | No specific designation (i.e. source is free-form query) |
| PHRASE_AMENITY | Contains name or type of a POI |
| PHRASE_STREET | Contains a street name optionally with a housenumber |
| PHRASE_CITY | Contains the postal city |
| PHRASE_COUNTY | Contains the equivalent of a county |
| PHRASE_STATE | Contains a state or province |
| PHRASE_POSTCODE| Contains a postal code |
| PHRASE_COUNTRY | Contains the country name or code |
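To make the table concrete, a minimal sketch of a custom query preprocessor working on typed phrases (illustrative only; the `create` entry point mirrors the preprocessor interface described in this documentation, but treat the exact import paths as assumptions):

```python
from typing import List

from nominatim_api.search import query as qmod
from nominatim_api.query_preprocessing.config import QueryConfig


def create(config: QueryConfig):
    """ Return a function that maps the incoming list of phrases
        to a new list of phrases.
    """
    def _drop_empty(phrases: List[qmod.Phrase]) -> List[qmod.Phrase]:
        # Free-form queries arrive as a single phrase of type PHRASE_ANY,
        # structured queries as one typed phrase per field. Keep the type
        # untouched and only drop phrases without usable text.
        return [p for p in phrases if p.text.strip()]

    return _drop_empty
```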
## Custom sanitizer modules

View File

@@ -425,7 +425,7 @@ function Place:write_row(k, v)
if self.geometry == nil then
self.geometry = self.geom_func(self.object)
end
if self.geometry:is_null() then
if self.geometry == nil or self.geometry:is_null() then
return 0
end
@@ -608,6 +608,9 @@ function module.process_way(object)
if geom:is_null() then
geom = o:as_linestring()
if geom:is_null() or geom:length() > 30 then
return nil
end
end
return geom

View File

@@ -8,7 +8,6 @@
{% include('functions/utils.sql') %}
{% include('functions/ranking.sql') %}
{% include('functions/importance.sql') %}
{% include('functions/address_lookup.sql') %}
{% include('functions/interpolation.sql') %}
{% if 'place' in db.tables %}

View File

@@ -1,334 +0,0 @@
-- SPDX-License-Identifier: GPL-2.0-only
--
-- This file is part of Nominatim. (https://nominatim.org)
--
-- Copyright (C) 2022 by the Nominatim developer community.
-- For a full list of authors see the git log.
-- Functions for returning address information for a place.
DROP TYPE IF EXISTS addressline CASCADE;
CREATE TYPE addressline as (
place_id BIGINT,
osm_type CHAR(1),
osm_id BIGINT,
name HSTORE,
class TEXT,
type TEXT,
place_type TEXT,
admin_level INTEGER,
fromarea BOOLEAN,
isaddress BOOLEAN,
rank_address INTEGER,
distance FLOAT
);
CREATE OR REPLACE FUNCTION get_name_by_language(name hstore, languagepref TEXT[])
RETURNS TEXT
AS $$
DECLARE
result TEXT;
BEGIN
IF name is null THEN
RETURN null;
END IF;
FOR j IN 1..array_upper(languagepref,1) LOOP
IF name ? languagepref[j] THEN
result := trim(name->languagepref[j]);
IF result != '' THEN
return result;
END IF;
END IF;
END LOOP;
-- as a fallback - take the last element since it is the default name
RETURN trim((avals(name))[array_length(avals(name), 1)]);
END;
$$
LANGUAGE plpgsql IMMUTABLE;
--housenumber only needed for tiger data
CREATE OR REPLACE FUNCTION get_address_by_language(for_place_id BIGINT,
housenumber INTEGER,
languagepref TEXT[])
RETURNS TEXT
AS $$
DECLARE
result TEXT[];
currresult TEXT;
prevresult TEXT;
location RECORD;
BEGIN
result := '{}';
prevresult := '';
FOR location IN
SELECT name,
CASE WHEN place_id = for_place_id THEN 99 ELSE rank_address END as rank_address
FROM get_addressdata(for_place_id, housenumber)
WHERE isaddress order by rank_address desc
LOOP
currresult := trim(get_name_by_language(location.name, languagepref));
IF currresult != prevresult AND currresult IS NOT NULL
AND result[(100 - location.rank_address)] IS NULL
THEN
result[(100 - location.rank_address)] := currresult;
prevresult := currresult;
END IF;
END LOOP;
RETURN array_to_string(result,', ');
END;
$$
LANGUAGE plpgsql STABLE;
DROP TYPE IF EXISTS addressdata_place;
CREATE TYPE addressdata_place AS (
place_id BIGINT,
country_code VARCHAR(2),
housenumber TEXT,
postcode TEXT,
class TEXT,
type TEXT,
name HSTORE,
address HSTORE,
centroid GEOMETRY
);
-- Compute the list of address parts for the given place.
--
-- If in_housenumber is greator or equal 0, look for an interpolation.
CREATE OR REPLACE FUNCTION get_addressdata(in_place_id BIGINT, in_housenumber INTEGER)
RETURNS setof addressline
AS $$
DECLARE
place addressdata_place;
location RECORD;
country RECORD;
current_rank_address INTEGER;
location_isaddress BOOLEAN;
BEGIN
-- The place in question might not have a direct entry in place_addressline.
-- Look for the parent of such places then and save it in place.
-- first query osmline (interpolation lines)
IF in_housenumber >= 0 THEN
SELECT parent_place_id as place_id, country_code,
in_housenumber as housenumber, postcode,
'place' as class, 'house' as type,
null as name, null as address,
ST_Centroid(linegeo) as centroid
INTO place
FROM location_property_osmline
WHERE place_id = in_place_id
AND in_housenumber between startnumber and endnumber;
END IF;
--then query tiger data
{% if config.get_bool('USE_US_TIGER_DATA') %}
IF place IS NULL AND in_housenumber >= 0 THEN
SELECT parent_place_id as place_id, 'us' as country_code,
in_housenumber as housenumber, postcode,
'place' as class, 'house' as type,
null as name, null as address,
ST_Centroid(linegeo) as centroid
INTO place
FROM location_property_tiger
WHERE place_id = in_place_id
AND in_housenumber between startnumber and endnumber;
END IF;
{% endif %}
-- postcode table
IF place IS NULL THEN
SELECT parent_place_id as place_id, country_code,
null::text as housenumber, postcode,
'place' as class, 'postcode' as type,
null as name, null as address,
null as centroid
INTO place
FROM location_postcode
WHERE place_id = in_place_id;
END IF;
-- POI objects in the placex table
IF place IS NULL THEN
SELECT parent_place_id as place_id, country_code,
coalesce(address->'housenumber',
address->'streetnumber',
address->'conscriptionnumber')::text as housenumber,
postcode,
class, type,
name, address,
centroid
INTO place
FROM placex
WHERE place_id = in_place_id and rank_search > 27;
END IF;
-- If place is still NULL at this point then the object has its own
-- entry in place_address line. However, still check if there is not linked
-- place we should be using instead.
IF place IS NULL THEN
select coalesce(linked_place_id, place_id) as place_id, country_code,
null::text as housenumber, postcode,
class, type,
null as name, address,
null as centroid
INTO place
FROM placex where place_id = in_place_id;
END IF;
--RAISE WARNING '% % % %',searchcountrycode, searchhousenumber, searchpostcode;
-- --- Return the record for the base entry.
current_rank_address := 1000;
FOR location IN
SELECT placex.place_id, osm_type, osm_id, name,
coalesce(extratags->'linked_place', extratags->'place') as place_type,
class, type, admin_level,
CASE WHEN rank_address = 0 THEN 100
WHEN rank_address = 11 THEN 5
ELSE rank_address END as rank_address,
country_code
FROM placex
WHERE place_id = place.place_id
LOOP
--RAISE WARNING '%',location;
-- mix in default names for countries
IF location.rank_address = 4 and place.country_code is not NULL THEN
FOR country IN
SELECT coalesce(name, ''::hstore) as name FROM country_name
WHERE country_code = place.country_code LIMIT 1
LOOP
place.name := country.name || place.name;
END LOOP;
END IF;
IF location.rank_address < 4 THEN
-- no country locations for ranks higher than country
place.country_code := NULL::varchar(2);
ELSEIF place.country_code IS NULL AND location.country_code IS NOT NULL THEN
place.country_code := location.country_code;
END IF;
RETURN NEXT ROW(location.place_id, location.osm_type, location.osm_id,
location.name, location.class, location.type,
location.place_type,
location.admin_level, true,
location.type not in ('postcode', 'postal_code'),
location.rank_address, 0)::addressline;
current_rank_address := location.rank_address;
END LOOP;
-- --- Return records for address parts.
FOR location IN
SELECT placex.place_id, osm_type, osm_id, name, class, type,
coalesce(extratags->'linked_place', extratags->'place') as place_type,
admin_level, fromarea, isaddress,
CASE WHEN rank_address = 11 THEN 5 ELSE rank_address END as rank_address,
distance, country_code, postcode
FROM place_addressline join placex on (address_place_id = placex.place_id)
WHERE place_addressline.place_id IN (place.place_id, in_place_id)
AND linked_place_id is null
AND (placex.country_code IS NULL OR place.country_code IS NULL
OR placex.country_code = place.country_code)
ORDER BY rank_address desc,
(place_addressline.place_id = in_place_id) desc,
(CASE WHEN coalesce((avals(name) && avals(place.address)), False) THEN 2
WHEN isaddress THEN 0
WHEN fromarea
and place.centroid is not null
and ST_Contains(geometry, place.centroid) THEN 1
ELSE -1 END) desc,
fromarea desc, distance asc, rank_search desc
LOOP
-- RAISE WARNING '%',location;
location_isaddress := location.rank_address != current_rank_address;
IF place.country_code IS NULL AND location.country_code IS NOT NULL THEN
place.country_code := location.country_code;
END IF;
IF location.type in ('postcode', 'postal_code')
AND place.postcode is not null
THEN
-- If the place had a postcode assigned, take this one only
-- into consideration when it is an area and the place does not have
-- a postcode itself.
IF location.fromarea AND location_isaddress
AND (place.address is null or not place.address ? 'postcode')
THEN
place.postcode := null; -- remove the less exact postcode
ELSE
location_isaddress := false;
END IF;
END IF;
RETURN NEXT ROW(location.place_id, location.osm_type, location.osm_id,
location.name, location.class, location.type,
location.place_type,
location.admin_level, location.fromarea,
location_isaddress,
location.rank_address,
location.distance)::addressline;
current_rank_address := location.rank_address;
END LOOP;
-- If no country was included yet, add the name information from country_name.
IF current_rank_address > 4 THEN
FOR location IN
SELECT name || coalesce(derived_name, ''::hstore) as name FROM country_name
WHERE country_code = place.country_code LIMIT 1
LOOP
--RAISE WARNING '% % %',current_rank_address,searchcountrycode,countryname;
RETURN NEXT ROW(null, null, null, location.name, 'place', 'country', NULL,
null, true, true, 4, 0)::addressline;
END LOOP;
END IF;
-- Finally add some artificial rows.
IF place.country_code IS NOT NULL THEN
location := ROW(null, null, null, hstore('ref', place.country_code),
'place', 'country_code', null, null, true, false, 4, 0)::addressline;
RETURN NEXT location;
END IF;
IF place.name IS NOT NULL THEN
location := ROW(in_place_id, null, null, place.name, place.class,
place.type, null, null, true, true, 29, 0)::addressline;
RETURN NEXT location;
END IF;
IF place.housenumber IS NOT NULL THEN
location := ROW(null, null, null, hstore('ref', place.housenumber),
'place', 'house_number', null, null, true, true, 28, 0)::addressline;
RETURN NEXT location;
END IF;
IF place.address is not null and place.address ? '_unlisted_place' THEN
RETURN NEXT ROW(null, null, null, hstore('name', place.address->'_unlisted_place'),
'place', 'locality', null, null, true, true, 25, 0)::addressline;
END IF;
IF place.postcode is not null THEN
location := ROW(null, null, null, hstore('ref', place.postcode), 'place',
'postcode', null, null, false, true, 5, 0)::addressline;
RETURN NEXT location;
ELSEIF place.address is not null and place.address ? 'postcode'
and not place.address->'postcode' SIMILAR TO '%(,|;)%' THEN
location := ROW(null, null, null, hstore('ref', place.address->'postcode'), 'place',
'postcode', null, null, false, true, 5, 0)::addressline;
RETURN NEXT location;
END IF;
RETURN;
END;
$$
LANGUAGE plpgsql STABLE;
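This file was removed because address lookup is now done in the Python frontend (see commit bbadc62371 above). Purely as an illustration (the project path and OSM id are placeholders), retrieving the address parts of a place through the Python library might look like this:

```python
from pathlib import Path

import nominatim_api as napi

# Open the library frontend against an existing project directory.
api = napi.NominatimAPI(Path('/srv/nominatim-project'))

# Look up a place by OSM id and request the full address breakdown,
# which replaces the removed get_addressdata() SQL function.
place = api.details(napi.OsmID('W', 38210407), address_details=True)
if place is not None and place.address_rows is not None:
    for line in place.address_rows:
        print(line.rank_address, line.names)
```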

View File

@@ -65,7 +65,7 @@ BEGIN
RETURN NULL;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
{% else %}
@@ -78,7 +78,7 @@ SELECT convert_from(CAST(E'\\x' || array_to_string(ARRAY(
FROM regexp_matches($1, '%[0-9a-f][0-9a-f]|.', 'gi') AS r(m)
), '') AS bytea), 'UTF8');
$$
LANGUAGE SQL IMMUTABLE STRICT;
LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION catch_decode_url_part(p varchar)
@@ -91,7 +91,7 @@ EXCEPTION
WHEN others THEN return null;
END;
$$
LANGUAGE plpgsql IMMUTABLE STRICT;
LANGUAGE plpgsql IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_wikipedia_match(extratags HSTORE, country_code varchar(2))
@@ -139,7 +139,7 @@ BEGIN
RETURN NULL;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
{% endif %}
@@ -203,5 +203,5 @@ BEGIN
RETURN result;
END;
$$
LANGUAGE plpgsql;
LANGUAGE plpgsql PARALLEL SAFE;

View File

@@ -34,7 +34,7 @@ BEGIN
RETURN in_address;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
@@ -70,7 +70,7 @@ BEGIN
RETURN parent_place_id;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION reinsert_interpolation(way_id BIGINT, addr HSTORE,

View File

@@ -17,28 +17,6 @@ CREATE TYPE nearfeaturecentr AS (
centroid GEOMETRY
);
-- feature intersects geometry
-- for areas and linestrings they must touch at least along a line
CREATE OR REPLACE FUNCTION is_relevant_geometry(de9im TEXT, geom_type TEXT)
RETURNS BOOLEAN
AS $$
BEGIN
IF substring(de9im from 1 for 2) != 'FF' THEN
RETURN TRUE;
END IF;
IF geom_type = 'ST_Point' THEN
RETURN substring(de9im from 4 for 1) = '0';
END IF;
IF geom_type in ('ST_LineString', 'ST_MultiLineString') THEN
RETURN substring(de9im from 4 for 1) = '1';
END IF;
RETURN substring(de9im from 4 for 1) = '2';
END
$$ LANGUAGE plpgsql IMMUTABLE;
CREATE OR REPLACE function getNearFeatures(in_partition INTEGER, feature GEOMETRY,
feature_centroid GEOMETRY,
maxrank INTEGER)
@@ -59,7 +37,12 @@ BEGIN
isguess, postcode, centroid
FROM location_area_large_{{ partition }}
WHERE geometry && feature
AND is_relevant_geometry(ST_Relate(geometry, feature), ST_GeometryType(feature))
AND CASE WHEN ST_Dimension(feature) = 0
THEN _ST_Covers(geometry, feature)
WHEN ST_Dimension(feature) = 2
THEN ST_Relate(geometry, feature, 'T********')
ELSE ST_NPoints(ST_Intersection(geometry, feature)) > 1
END
AND rank_address < maxrank
-- Postcodes currently still use rank_search to define for which
-- features they are relevant.
@@ -75,7 +58,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition;
END
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_address_place(in_partition SMALLINT, feature GEOMETRY,
@@ -104,7 +87,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
create or replace function deleteLocationArea(in_partition INTEGER, in_place_id BIGINT, in_rank_search INTEGER) RETURNS BOOLEAN AS $$
@@ -142,14 +125,16 @@ BEGIN
IF in_rank_search <= 4 and not in_estimate THEN
INSERT INTO location_area_country (place_id, country_code, geometry)
values (in_place_id, in_country_code, in_geometry);
(SELECT in_place_id, in_country_code, geom
FROM split_geometry(in_geometry) as geom);
RETURN TRUE;
END IF;
{% for partition in db.partitions %}
IF in_partition = {{ partition }} THEN
INSERT INTO location_area_large_{{ partition }} (partition, place_id, country_code, keywords, rank_search, rank_address, isguess, postcode, centroid, geometry)
values (in_partition, in_place_id, in_country_code, in_keywords, in_rank_search, in_rank_address, in_estimate, postcode, in_centroid, in_geometry);
(SELECT in_partition, in_place_id, in_country_code, in_keywords, in_rank_search, in_rank_address, in_estimate, postcode, in_centroid, geom
FROM split_geometry(in_geometry) as geom);
RETURN TRUE;
END IF;
{% endfor %}
@@ -187,7 +172,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition;
END
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION getNearestNamedPlacePlaceId(in_partition INTEGER,
point GEOMETRY,
@@ -217,7 +202,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition;
END
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
create or replace function insertSearchName(
in_partition INTEGER, in_place_id BIGINT, in_name_vector INTEGER[],
@@ -325,7 +310,7 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition;
END
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION getNearestParallelRoadFeature(in_partition INTEGER,
line GEOMETRY)
@@ -369,4 +354,4 @@ BEGIN
RAISE EXCEPTION 'Unknown partition %', in_partition;
END
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;

View File

@@ -109,7 +109,7 @@ BEGIN
RETURN result;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION find_associated_street(poi_osm_type CHAR(1),
@@ -200,7 +200,7 @@ BEGIN
RETURN result;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
-- Find the parent road of a POI.
@@ -286,7 +286,7 @@ BEGIN
RETURN parent_place_id;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
-- Try to find a linked place for the given object.
CREATE OR REPLACE FUNCTION find_linked_place(bnd placex)
@@ -404,7 +404,7 @@ BEGIN
RETURN NULL;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION create_poi_search_terms(obj_place_id BIGINT,

View File

@@ -29,7 +29,7 @@ BEGIN
RETURN 0.02;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Return an approximate update radius according to the search rank.
@@ -60,7 +60,7 @@ BEGIN
RETURN 0;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Compute a base address rank from the extent of the given geometry.
--
@@ -107,7 +107,7 @@ BEGIN
RETURN 23;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Guess a ranking for postcodes from country and postcode format.
@@ -167,7 +167,7 @@ BEGIN
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Get standard search and address rank for an object.
@@ -236,7 +236,7 @@ BEGIN
END IF;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_addr_tag_rank(key TEXT, country TEXT,
OUT from_rank SMALLINT,
@@ -283,7 +283,7 @@ BEGIN
END LOOP;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION weigh_search(search_vector INT[],
@@ -304,4 +304,4 @@ BEGIN
RETURN def_weight;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;

View File

@@ -24,7 +24,7 @@ BEGIN
RETURN ST_PointOnSurface(place);
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION geometry_sector(partition INTEGER, place GEOMETRY)
@@ -34,7 +34,7 @@ BEGIN
RETURN (partition*1000000) + (500-ST_X(place)::INTEGER)*1000 + (500-ST_Y(place)::INTEGER);
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
@@ -60,7 +60,7 @@ BEGIN
RETURN r;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Return the node members with a given label from a relation member list
-- as a set.
@@ -88,7 +88,7 @@ BEGIN
RETURN;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_rel_node_members(members JSONB, memberLabels TEXT[])
@@ -107,7 +107,7 @@ BEGIN
RETURN;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Copy 'name' to or from the default language.
@@ -136,7 +136,7 @@ BEGIN
END IF;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
-- Find the nearest artificial postcode for the given geometry.
@@ -172,7 +172,7 @@ BEGIN
RETURN outcode;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_country_code(place geometry)
@@ -233,7 +233,7 @@ BEGIN
RETURN NULL;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_country_language_code(search_country_code VARCHAR(2))
@@ -251,7 +251,7 @@ BEGIN
RETURN NULL;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION get_partition(in_country_code VARCHAR(10))
@@ -268,7 +268,7 @@ BEGIN
RETURN 0;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
-- Find the parent of an address with addr:street/addr:place tag.
@@ -299,7 +299,7 @@ BEGIN
RETURN parent_place_id;
END;
$$
LANGUAGE plpgsql STABLE;
LANGUAGE plpgsql STABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION delete_location(OLD_place_id BIGINT)
@@ -337,7 +337,7 @@ BEGIN
ST_Project(geom::geography, radius, 3.9269908)::geometry));
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION add_location(place_id BIGINT, country_code varchar(2),
@@ -348,8 +348,6 @@ CREATE OR REPLACE FUNCTION add_location(place_id BIGINT, country_code varchar(2)
RETURNS BOOLEAN
AS $$
DECLARE
locationid INTEGER;
secgeo GEOMETRY;
postcode TEXT;
BEGIN
PERFORM deleteLocationArea(partition, place_id, rank_search);
@@ -360,18 +358,19 @@ BEGIN
postcode := upper(trim (in_postcode));
END IF;
IF ST_GeometryType(geometry) in ('ST_Polygon','ST_MultiPolygon') THEN
FOR secgeo IN select split_geometry(geometry) AS geom LOOP
PERFORM insertLocationAreaLarge(partition, place_id, country_code, keywords, rank_search, rank_address, false, postcode, centroid, secgeo);
END LOOP;
ELSEIF ST_GeometryType(geometry) = 'ST_Point' THEN
secgeo := place_node_fuzzy_area(geometry, rank_search);
PERFORM insertLocationAreaLarge(partition, place_id, country_code, keywords, rank_search, rank_address, true, postcode, centroid, secgeo);
IF ST_Dimension(geometry) = 2 THEN
RETURN insertLocationAreaLarge(partition, place_id, country_code, keywords,
rank_search, rank_address, false, postcode,
centroid, geometry);
END IF;
RETURN true;
IF ST_Dimension(geometry) = 0 THEN
RETURN insertLocationAreaLarge(partition, place_id, country_code, keywords,
rank_search, rank_address, true, postcode,
centroid, place_node_fuzzy_area(geometry, rank_search));
END IF;
RETURN false;
END;
$$
LANGUAGE plpgsql;
@@ -394,19 +393,21 @@ DECLARE
geo RECORD;
area FLOAT;
remainingdepth INTEGER;
added INTEGER;
BEGIN
-- RAISE WARNING 'quad_split_geometry: maxarea=%, depth=%',maxarea,maxdepth;
IF (ST_GeometryType(geometry) not in ('ST_Polygon','ST_MultiPolygon') OR NOT ST_IsValid(geometry)) THEN
IF not ST_IsValid(geometry) THEN
RETURN;
END IF;
IF ST_Dimension(geometry) != 2 OR maxdepth <= 1 THEN
RETURN NEXT geometry;
RETURN;
END IF;
remainingdepth := maxdepth - 1;
area := ST_AREA(geometry);
IF remainingdepth < 1 OR area < maxarea THEN
IF area < maxarea THEN
RETURN NEXT geometry;
RETURN;
END IF;
@@ -426,7 +427,6 @@ BEGIN
xmid := (xmin+xmax)/2;
ymid := (ymin+ymax)/2;
added := 0;
FOR seg IN 1..4 LOOP
IF seg = 1 THEN
@@ -442,23 +442,20 @@ BEGIN
secbox := ST_SetSRID(ST_MakeBox2D(ST_Point(xmid,ymid),ST_Point(xmax,ymax)),4326);
END IF;
IF st_intersects(geometry, secbox) THEN
secgeo := st_intersection(geometry, secbox);
IF NOT ST_IsEmpty(secgeo) AND ST_GeometryType(secgeo) in ('ST_Polygon','ST_MultiPolygon') THEN
FOR geo IN select quad_split_geometry(secgeo, maxarea, remainingdepth) as geom LOOP
IF NOT ST_IsEmpty(geo.geom) AND ST_GeometryType(geo.geom) in ('ST_Polygon','ST_MultiPolygon') THEN
added := added + 1;
RETURN NEXT geo.geom;
END IF;
END LOOP;
END IF;
secgeo := st_intersection(geometry, secbox);
IF NOT ST_IsEmpty(secgeo) AND ST_Dimension(secgeo) = 2 THEN
FOR geo IN SELECT quad_split_geometry(secgeo, maxarea, remainingdepth) as geom LOOP
IF NOT ST_IsEmpty(geo.geom) AND ST_Dimension(geo.geom) = 2 THEN
RETURN NEXT geo.geom;
END IF;
END LOOP;
END IF;
END LOOP;
RETURN;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION split_geometry(geometry GEOMETRY)
@@ -467,14 +464,26 @@ CREATE OR REPLACE FUNCTION split_geometry(geometry GEOMETRY)
DECLARE
geo RECORD;
BEGIN
-- 10000000000 is ~~ 1x1 degree
FOR geo IN select quad_split_geometry(geometry, 0.25, 20) as geom LOOP
RETURN NEXT geo.geom;
END LOOP;
IF ST_GeometryType(geometry) = 'ST_MultiPolygon'
and ST_Area(geometry) * 10 > ST_Area(Box2D(geometry))
THEN
FOR geo IN
SELECT quad_split_geometry(g, 0.25, 20) as geom
FROM (SELECT (ST_Dump(geometry)).geom::geometry(Polygon, 4326) AS g) xx
LOOP
RETURN NEXT geo.geom;
END LOOP;
ELSE
FOR geo IN
SELECT quad_split_geometry(geometry, 0.25, 20) as geom
LOOP
RETURN NEXT geo.geom;
END LOOP;
END IF;
RETURN;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION simplify_large_polygons(geometry GEOMETRY)
RETURNS GEOMETRY
@@ -488,7 +497,7 @@ BEGIN
RETURN geometry;
END;
$$
LANGUAGE plpgsql IMMUTABLE;
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION place_force_delete(placeid BIGINT)

View File

@@ -12,7 +12,7 @@ CREATE OR REPLACE FUNCTION token_get_name_search_tokens(info JSONB)
RETURNS INTEGER[]
AS $$
SELECT (info->>'names')::INTEGER[]
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
-- Get tokens for matching the place name against others.
@@ -22,7 +22,7 @@ CREATE OR REPLACE FUNCTION token_get_name_match_tokens(info JSONB)
RETURNS INTEGER[]
AS $$
SELECT (info->>'names')::INTEGER[]
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
-- Return the housenumber tokens applicable for the place.
@@ -30,7 +30,7 @@ CREATE OR REPLACE FUNCTION token_get_housenumber_search_tokens(info JSONB)
RETURNS INTEGER[]
AS $$
SELECT (info->>'hnr_tokens')::INTEGER[]
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
-- Return the housenumber in the form that it can be matched during search.
@@ -38,77 +38,77 @@ CREATE OR REPLACE FUNCTION token_normalized_housenumber(info JSONB)
RETURNS TEXT
AS $$
SELECT info->>'hnr';
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_is_street_address(info JSONB)
RETURNS BOOLEAN
AS $$
SELECT info->>'street' is not null or info->>'place' is null;
$$ LANGUAGE SQL IMMUTABLE;
$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_has_addr_street(info JSONB)
RETURNS BOOLEAN
AS $$
SELECT info->>'street' is not null and info->>'street' != '{}';
$$ LANGUAGE SQL IMMUTABLE;
$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_has_addr_place(info JSONB)
RETURNS BOOLEAN
AS $$
SELECT info->>'place' is not null;
$$ LANGUAGE SQL IMMUTABLE;
$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_matches_street(info JSONB, street_tokens INTEGER[])
RETURNS BOOLEAN
AS $$
SELECT (info->>'street')::INTEGER[] && street_tokens
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_matches_place(info JSONB, place_tokens INTEGER[])
RETURNS BOOLEAN
AS $$
SELECT (info->>'place')::INTEGER[] <@ place_tokens
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_addr_place_search_tokens(info JSONB)
RETURNS INTEGER[]
AS $$
SELECT (info->>'place')::INTEGER[]
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_get_address_keys(info JSONB)
RETURNS SETOF TEXT
AS $$
SELECT * FROM jsonb_object_keys(info->'addr');
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_get_address_search_tokens(info JSONB, key TEXT)
RETURNS INTEGER[]
AS $$
SELECT (info->'addr'->>key)::INTEGER[];
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_matches_address(info JSONB, key TEXT, tokens INTEGER[])
RETURNS BOOLEAN
AS $$
SELECT (info->'addr'->>key)::INTEGER[] <@ tokens;
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION token_get_postcode(info JSONB)
RETURNS TEXT
AS $$
SELECT info->>'postcode';
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
-- Return token info that should be saved permanently in the database.
@@ -116,7 +116,7 @@ CREATE OR REPLACE FUNCTION token_strip_info(info JSONB)
RETURNS JSONB
AS $$
SELECT NULL::JSONB;
$$ LANGUAGE SQL IMMUTABLE STRICT;
$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
--------------- private functions ----------------------------------------------
@@ -128,16 +128,14 @@ DECLARE
partial_terms TEXT[] = '{}'::TEXT[];
term TEXT;
term_id INTEGER;
term_count INTEGER;
BEGIN
SELECT min(word_id) INTO full_token
FROM word WHERE word = norm_term and type = 'W';
IF full_token IS NULL THEN
full_token := nextval('seq_word');
INSERT INTO word (word_id, word_token, type, word, info)
SELECT full_token, lookup_term, 'W', norm_term,
json_build_object('count', 0)
INSERT INTO word (word_id, word_token, type, word)
SELECT full_token, lookup_term, 'W', norm_term
FROM unnest(lookup_terms) as lookup_term;
END IF;
@@ -150,14 +148,67 @@ BEGIN
partial_tokens := '{}'::INT[];
FOR term IN SELECT unnest(partial_terms) LOOP
SELECT min(word_id), max(info->>'count') INTO term_id, term_count
SELECT min(word_id) INTO term_id
FROM word WHERE word_token = term and type = 'w';
IF term_id IS NULL THEN
term_id := nextval('seq_word');
term_count := 0;
INSERT INTO word (word_id, word_token, type, info)
VALUES (term_id, term, 'w', json_build_object('count', term_count));
INSERT INTO word (word_id, word_token, type)
VALUES (term_id, term, 'w');
END IF;
partial_tokens := array_merge(partial_tokens, ARRAY[term_id]);
END LOOP;
END;
$$
LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION getorcreate_full_word(norm_term TEXT,
lookup_terms TEXT[],
lookup_norm_terms TEXT[],
OUT full_token INT,
OUT partial_tokens INT[])
AS $$
DECLARE
partial_terms TEXT[] = '{}'::TEXT[];
term TEXT;
term_id INTEGER;
BEGIN
SELECT min(word_id) INTO full_token
FROM word WHERE word = norm_term and type = 'W';
IF full_token IS NULL THEN
full_token := nextval('seq_word');
IF lookup_norm_terms IS NULL THEN
INSERT INTO word (word_id, word_token, type, word)
SELECT full_token, lookup_term, 'W', norm_term
FROM unnest(lookup_terms) as lookup_term;
ELSE
INSERT INTO word (word_id, word_token, type, word, info)
SELECT full_token, t.lookup, 'W', norm_term,
CASE WHEN norm_term = t.norm THEN null
ELSE json_build_object('lookup', t.norm) END
FROM unnest(lookup_terms, lookup_norm_terms) as t(lookup, norm);
END IF;
END IF;
FOR term IN SELECT unnest(string_to_array(unnest(lookup_terms), ' ')) LOOP
term := trim(term);
IF NOT (ARRAY[term] <@ partial_terms) THEN
partial_terms := partial_terms || term;
END IF;
END LOOP;
partial_tokens := '{}'::INT[];
FOR term IN SELECT unnest(partial_terms) LOOP
SELECT min(word_id) INTO term_id
FROM word WHERE word_token = term and type = 'w';
IF term_id IS NULL THEN
term_id := nextval('seq_word');
INSERT INTO word (word_id, word_token, type)
VALUES (term_id, term, 'w');
END IF;
partial_tokens := array_merge(partial_tokens, ARRAY[term_id]);

View File

@@ -1,4 +1,4 @@
site_name: Nominatim Manual
site_name: Nominatim 5.1.0 Manual
theme:
font: false
name: material

View File

@@ -3,7 +3,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper script for development to run nominatim from the source directory.
@@ -15,4 +15,4 @@ sys.path.insert(1, str((Path(__file__) / '..' / 'src').resolve()))
from nominatim_db import cli
exit(cli.nominatim(module_dir=None, osm2pgsql_path=None))
exit(cli.nominatim())

View File

@@ -19,7 +19,6 @@ dependencies = [
"python-dotenv",
"jinja2",
"pyYAML>=5.1",
"datrie",
"psutil",
"PyICU"
]

View File

@@ -2,4 +2,4 @@
from nominatim_db import cli
exit(cli.nominatim(osm2pgsql_path=None))
exit(cli.nominatim())

View File

@@ -23,8 +23,8 @@
"allotments" : 22,
"neighbourhood" : [20, 22],
"quarter" : [20, 22],
"isolated_dwelling" : [22, 20],
"farm" : [22, 20],
"isolated_dwelling" : [22, 25],
"farm" : [22, 25],
"city_block" : 25,
"mountain_pass" : 25,
"square" : 25,
@@ -216,6 +216,14 @@
}
}
},
{ "countries" : ["sa"],
"tags" : {
"place" : {
"province" : 12,
"municipality" : 18
}
}
},
{ "countries" : ["sk"],
"tags" : {
"boundary" : {

View File

@@ -1809,7 +1809,8 @@ us:
languages: en
names: !include country-names/us.yaml
postcode:
pattern: "ddddd"
pattern: "(ddddd)(?:-dddd)?"
output: \1
# Uruguay (Uruguay)
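The extended US pattern accepts ZIP+4 codes, and the added output rule keeps only the base ZIP. A rough Python equivalent — assuming, per the convention of this file, that d stands for a single digit:

import re

# 'ddddd' plus an optional '-dddd' tail; group 1 is what `output: \1` keeps.
zip_pattern = re.compile(r'(\d{5})(?:-\d{4})?')

for candidate in ('12345', '12345-6789'):
    match = zip_pattern.fullmatch(candidate)
    if match:
        print(candidate, '->', match.group(1))
# prints: 12345 -> 12345  and  12345-6789 -> 12345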

View File

@@ -4,7 +4,7 @@
- aparcament -> aparc
- apartament -> apmt
- apartat -> apt
- àtic -> àt
- àtic -> àt
- autopista -> auto
- autopista -> autop
- autovia -> autov
@@ -19,7 +19,6 @@
- biblioteca -> bibl
- bloc -> bl
- carrer -> c
- carrer -> c/
- carreró -> cró
- carretera -> ctra
- cantonada -> cant
@@ -58,7 +57,6 @@
- número -> n
- sense número -> s/n
- parada -> par
- parcel·la -> parc
- passadís -> pdís
- passatge -> ptge
- passeig -> pg

View File

@@ -1,438 +1,393 @@
# Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#English
# Source: https://pe.usps.com/text/pub28/28apc_002.htm
- lang: en
words:
- Access -> Accs
- Air Force Base -> AFB
- Air National Guard Base -> ANGB
- Airport -> Aprt
- Alley -> Al
- Alley -> All
- Alley -> Ally
- Alley -> Aly
- Alley -> Al,All,Ally,Aly
- Alleyway -> Alwy
- Amble -> Ambl
- Anex -> Anx
- Apartments -> Apts
- Approach -> Apch
- Approach -> App
- Approach -> Apch,App
- Arcade -> Arc
- Arterial -> Artl
- Artery -> Arty
- Avenue -> Av
- Avenue -> Ave
- Avenue -> Av,Ave
- Back -> Bk
- Banan -> Ba
- Basin -> Basn
- Basin -> Bsn
- Basin -> Basn,Bsn
- Bayou -> Byu
- Beach -> Bch
- Bend -> Bend
- Bend -> Bnd
- Block -> Blk
- Bluff -> Blf
- Bluffs -> Blfs
- Boardwalk -> Bwlk
- Boulevard -> Blvd
- Boulevard -> Bvd
- Bottom -> Btm
- Boulevard -> Blvd,Bvd
- Boundary -> Bdy
- Bowl -> Bl
- Brace -> Br
- Brae -> Br
- Brae -> Brae
- Branch -> Br
- Break -> Brk
- Bridge -> Bdge
- Bridge -> Br
- Bridge -> Brdg
- Bridge -> Bri
- Broadway -> Bdwy
- Broadway -> Bway
- Broadway -> Bwy
- Bridge$ -> Bdge,Br,Brdg,Brg,Bri
- Broadway -> Bdwy,Bway,Bwy
- Brook -> Brk
- Brooks -> Brks
- Brow -> Brw
- Brow -> Brow
- Buildings -> Bldgs
- Buildings -> Bldngs
- Buildings -> Bldgs,Bldngs
- Business -> Bus
- Bypass -> Bps
- Bypass -> Byp
- Bypass -> Bypa
- Burg -> Bg
- Burgs -> Bgs
- Bypass -> Bps,Byp,Bypa
- Byway -> Bywy
- Camp -> Cp
- Canyon -> Cyn
- Cape -> Cpe
- Caravan -> Cvn
- Causeway -> Caus
- Causeway -> Cswy
- Causeway -> Cway
- Center -> Cen
- Center -> Ctr
- Causeway -> Caus,Cswy,Cway
- Center,Centre -> Cen,Ctr
- Centers -> Ctrs
- Central -> Ctrl
- Centre -> Cen
- Centre -> Ctr
- Centreway -> Cnwy
- Chase -> Ch
- Church -> Ch
- Circle -> Cir
- Circuit -> Cct
- Circuit -> Ci
- Circus -> Crc
- Circus -> Crcs
- Circles -> Cirs
- Circuit -> Cct,Ci
- Circus -> Crc,Crcs
- City -> Cty
- Cliff -> Clf
- Cliffs -> Clfs
- Close -> Cl
- Common -> Cmn
- Common -> Comm
- Club -> Clb
- Common -> Cmn,Comm
- Commons -> Cmns
- Community -> Comm
- Concourse -> Cnc
- Concourse -> Con
- Copse -> Cps
- Corner -> Cnr
- Corner -> Crn
- Corner -> Cor,Cnr,Crn
- Corners -> Cors
- Corso -> Cso
- Cottages -> Cotts
- County -> Co
- County Road -> CR
- County Route -> CR
- Court -> Crt
- Court -> Ct
- Course -> Crse
- Court -> Crt,Ct
- Courts -> Cts
- Courtyard -> Cyd
- Courtyard -> Ctyd
- Cove -> Ce
- Cove -> Cov
- Cove -> Cove
- Cove -> Cv
- Creek -> Ck
- Creek -> Cr
- Creek -> Crk
- Cove$ -> Ce,Cov,Cv
- Coves -> Cvs
- Creek$ -> Ck,Cr,Crk
- Crescent -> Cr
- Crescent -> Cres
- Crest -> Crst
- Crest -> Cst
- Crest -> Crst,Cst
- Croft -> Cft
- Cross -> Cs
- Cross -> Crss
- Crossing -> Crsg
- Crossing -> Csg
- Crossing -> Xing
- Crossroad -> Crd
- Cross -> Cs,Crss
- Crossing -> Crsg,Csg,Xing
- Crossroad -> Crd,Xrd
- Crossroads -> Xrds
- Crossway -> Cowy
- Cul-de-sac -> Cds
- Cul-de-sac -> Csac
- Curve -> Cve
- Cul-de-sac -> Cds,Csac
- Curve -> Cve,Curv
- Cutting -> Cutt
- Dale -> Dle
- Dale -> Dale
- Dam -> Dm
- Deviation -> Devn
- Dip -> Dip
- Distributor -> Dstr
- Divide -> Dv
- Down -> Dn
- Downs -> Dn
- Drive -> Dr
- Drive -> Drv
- Drive -> Dv
- Drive -> Dr,Drv,Dv
- Drives -> Drs
- Drive-In => Drive-In # prevent abbreviation here
- Driveway -> Drwy
- Driveway -> Dvwy
- Driveway -> Dwy
- Driveway -> Drwy,Dvwy,Dwy
- East -> E
- Edge -> Edg
- Edge -> Edge
- Elbow -> Elb
- End -> End
- Entrance -> Ent
- Esplanade -> Esp
- Estate -> Est
- Expressway -> Exp
- Expressway -> Expy
- Expressway -> Expwy
- Expressway -> Xway
- Estates -> Ests
- Expressway -> Exp,Expy,Expwy,Xway
- Extension -> Ex
- Fairway -> Fawy
- Fairway -> Fy
- Extensions -> Exts
- Fairway -> Fawy,Fy
- Falls -> Fls
- Father -> Fr
- Ferry -> Fy
- Field -> Fd
- Ferry -> Fy,Fry
- Field -> Fd,Fld
- Fields -> Flds
- Fire Track -> Ftrk
- Firetrail -> Fit
- Flat -> Fl
- Flat -> Flat
- Flat -> Fl,Flt
- Flats -> Flts
- Follow -> Folw
- Footway -> Ftwy
- Ford -> Frd
- Fords -> Frds
- Foreshore -> Fshr
- Forest -> Frst
- Forest Service Road -> FSR
- Forge -> Frg
- Forges -> Frgs
- Formation -> Form
- Fork -> Frk
- Forks -> Frks
- Fort -> Ft
- Freeway -> Frwy
- Freeway -> Fwy
- Freeway -> Frwy,Fwy
- Front -> Frnt
- Frontage -> Fr
- Frontage -> Frtg
- Gap -> Gap
- Frontage -> Fr,Frtg
- Garden -> Gdn
- Gardens -> Gdn
- Gardens -> Gdns
- Gate -> Ga
- Gate -> Gte
- Gates -> Ga
- Gates -> Gte
- Gateway -> Gwy
- Gardens -> Gdn,Gdns
- Gate,Gates -> Ga,Gte
- Gateway -> Gwy,Gtwy
- George -> Geo
- Glade -> Gl
- Glade -> Gld
- Glade -> Glde
- Glade$ -> Gl,Gld,Glde
- Glen -> Gln
- Glen -> Glen
- Glens -> Glns
- Grange -> Gra
- Green -> Gn
- Green -> Grn
- Green -> Gn,Grn
- Greens -> Grns
- Ground -> Grnd
- Grove -> Gr
- Grove -> Gro
- Grove$ -> Gr,Gro,Grv
- Groves -> Grvs
- Grovet -> Gr
- Gully -> Gly
- Harbor -> Hbr
- Harbour -> Hbr
- Harbor -> Hbr,Harbour
- Harbors -> Hbrs
- Harbour -> Hbr,Harbor
- Haven -> Hvn
- Head -> Hd
- Heads -> Hd
- Heights -> Hgts
- Heights -> Ht
- Heights -> Hts
- Heights -> Hgts,Ht,Hts
- High School -> HS
- Highroad -> Hird
- Highroad -> Hrd
- Highroad -> Hird,Hrd
- Highway -> Hwy
- Hill -> Hill
- Hill -> Hl
- Hills -> Hl
- Hills -> Hls
- Hills -> Hl,Hls
- Hollow -> Holw
- Hospital -> Hosp
- House -> Ho
- House -> Hse
- House -> Ho,Hse
- Industrial -> Ind
- Inlet -> Inlt
- Interchange -> Intg
- International -> Intl
- Island -> I
- Island -> Is
- Junction -> Jctn
- Junction -> Jnc
- Island -> I,Is
- Islands -> Iss
- Junction -> Jct,Jctn,Jnc
- Junctions -> Jcts
- Junior -> Jr
- Key -> Key
- Key -> Ky
- Keys -> Kys
- Knoll -> Knl
- Knolls -> Knls
- Lagoon -> Lgn
- Lakes -> L
- Landing -> Ldg
- Lane -> La
- Lane -> Lane
- Lane -> Ln
- Lake -> Lk
- Lakes -> L,Lks
- Landing -> Ldg,Lndg
- Lane -> La,Ln
- Laneway -> Lnwy
- Line -> Line
- Light -> Lgt
- Lights -> Lgts
- Line -> Ln
- Link -> Link
- Link -> Lk
- Little -> Lit
- Little -> Lt
- Little -> Lit,Lt
- Loaf -> Lf
- Lock -> Lck
- Locks -> Lcks
- Lodge -> Ldg
- Lookout -> Lkt
- Loop -> Loop
- Loop -> Lp
- Lower -> Low
- Lower -> Lr
- Lower -> Lwr
- Mall -> Mall
- Lower -> Low,Lr,Lwr
- Mall -> Ml
- Manor -> Mnr
- Manors -> Mnrs
- Mansions -> Mans
- Market -> Mkt
- Meadow -> Mdw
- Meadows -> Mdw
- Meadows -> Mdws
- Meadows -> Mdw,Mdws
- Mead -> Md
- Meander -> Mdr
- Meander -> Mndr
- Meander -> Mr
- Meander -> Mdr,Mndr,Mr
- Medical -> Med
- Memorial -> Mem
- Mews -> Mews
- Mews -> Mw
- Middle -> Mid
- Middle School -> MS
- Mile -> Mi
- Military -> Mil
- Motorway -> Mtwy
- Motorway -> Mwy
- Mill -> Ml
- Mills -> Mls
- Mission -> Msn
- Motorway -> Mtwy,Mwy
- Mount -> Mt
- Mountain -> Mtn
- Mountains -> Mtn
- Mountains$ -> Mtn,Mtns
- Municipal -> Mun
- Museum -> Mus
- National Park -> NP
- National Recreation Area -> NRA
- National Wildlife Refuge Area -> NWRA
- Neck -> Nck
- Nook -> Nk
- Nook -> Nook
- North -> N
- Northeast -> NE
- Northwest -> NW
- Outlook -> Out
- Outlook -> Otlk
- Orchard -> Orch
- Outlook -> Out,Otlk
- Overpass -> Opas
- Parade -> Pde
- Paradise -> Pdse
- Park -> Park
- Park -> Pk
- Parklands -> Pkld
- Parkway -> Pkwy
- Parkway -> Pky
- Parkway -> Pwy
- Pass -> Pass
- Parkway -> Pkwy,Pky,Pwy
- Parkways -> Pkwy
- Pass -> Ps
- Passage -> Psge
- Path -> Path
- Pathway -> Phwy
- Pathway -> Pway
- Pathway -> Pwy
- Pathway -> Phwy,Pway,Pwy
- Piazza -> Piaz
- Pike -> Pk
- Pine -> Pne
- Pines -> Pnes
- Place -> Pl
- Plain -> Pl
- Plains -> Pl
- Plain -> Pl,Pln
- Plains -> Pl,Plns
- Plateau -> Plat
- Plaza -> Pl
- Plaza -> Plz
- Plaza -> Plza
- Plaza -> Pl,Plz,Plza
- Pocket -> Pkt
- Point -> Pnt
- Point -> Pt
- Port -> Port
- Port -> Pt
- Point -> Pnt,Pt
- Points -> Pts
- Port -> Prt,Pt
- Ports -> Prts
- Post Office -> PO
- Prairie -> Pr
- Precinct -> Pct
- Promenade -> Prm
- Promenade -> Prom
- Quad -> Quad
- Promenade -> Prm,Prom
- Quadrangle -> Qdgl
- Quadrant -> Qdrt
- Quadrant -> Qd
- Quadrant -> Qdrt,Qd
- Quay -> Qy
- Quays -> Qy
- Quays -> Qys
- Radial -> Radl
- Ramble -> Ra
- Ramble -> Rmbl
- Range -> Rge
- Range -> Rnge
- Ranch -> Rnch
- Range -> Rge,Rnge
- Rapid -> Rpd
- Rapids -> Rpds
- Reach -> Rch
- Reservation -> Res
- Reserve -> Res
- Reservoir -> Res
- Rest -> Rest
- Rest -> Rst
- Retreat -> Rt
- Retreat -> Rtt
- Retreat -> Rt,Rtt
- Return -> Rtn
- Ridge -> Rdg
- Ridge -> Rdge
- Ridge -> Rdg,Rdge
- Ridges -> Rdgs
- Ridgeway -> Rgwy
- Right of Way -> Rowy
- Rise -> Ri
- Rise -> Rise
- River -> R
- River -> Riv
- River -> Rvr
- ^River -> R,Riv,Rvr
- River$ -> R,Riv,Rvr
- Riverway -> Rvwy
- Riviera -> Rvra
- Road -> Rd
- Roads -> Rds
- Roadside -> Rdsd
- Roadway -> Rdwy
- Roadway -> Rdy
- Robert -> Robt
- Roadway -> Rdwy,Rdy
- Rocks -> Rks
- Ronde -> Rnde
- Rosebowl -> Rsbl
- Rotary -> Rty
- Round -> Rnd
- Route -> Rt
- Route -> Rte
- Row -> Row
- Rue -> Rue
- Run -> Run
- Route -> Rt,Rte
- Saint -> St
- Saints -> SS
- Senior -> Sr
- Serviceway -> Swy
- Serviceway -> Svwy
- Serviceway -> Swy,Svwy
- Shoal -> Shl
- Shore -> Shr
- Shores -> Shrs
- Shunt -> Shun
- Siding -> Sdng
- Sister -> Sr
- Skyway -> Skwy
- Slope -> Slpe
- Sound -> Snd
- South -> S
- South -> Sth
- South -> S,Sth
- Southeast -> SE
- Southwest -> SW
- Spur -> Spur
- Spring -> Spg
- Springs -> Spgs
- Spurs -> Spur
- Square -> Sq
- Squares -> Sqs
- Stairway -> Strwy
- State Highway -> SH
- State Highway -> SHwy
- State Highway -> SH,SHwy
- State Route -> SR
- Station -> Sta
- Station -> Stn
- Strand -> Sd
- Strand -> Stra
- Station -> Sta,Stn
- Strand -> Sd,Stra
- Stravenue -> Stra
- Stream -> Strm
- Street -> St
- Streets -> Sts
- Strip -> Strp
- Subway -> Sbwy
- Summit -> Smt
- Tarn -> Tn
- Tarn -> Tarn
- Terminal -> Term
- Terrace -> Tce
- Terrace -> Ter
- Terrace -> Terr
- Thoroughfare -> Thfr
- Thoroughfare -> Thor
- Tollway -> Tlwy
- Tollway -> Twy
- Top -> Top
- Tor -> Tor
- Terrace -> Tce,Ter,Terr
- Thoroughfare -> Thfr,Thor
- Throughway -> Trwy
- Tollway -> Tlwy,Twy
- Towers -> Twrs
- Township -> Twp
- Trace -> Trce
- Track -> Tr
- Track -> Trk
- Track -> Tr,Trak,Trk
- Trafficway -> Trfy
- Trail -> Trl
- Trailer -> Trlr
- Triangle -> Tri
- Trunkway -> Tkwy
- Tunnel -> Tun
- Turn -> Tn
- Turn -> Trn
- Turn -> Turn
- Turnpike -> Tpk
- Turnpike -> Tpke
- Underpass -> Upas
- Underpass -> Ups
- University -> Uni
- University -> Univ
- Tunnel -> Tun,Tunl
- Turn -> Tn,Trn
- Turnpike -> Tpk,Tpke
- Underpass -> Upas,Ups
- Union -> Un
- Unions -> Uns
- University -> Uni,Univ
- Upper -> Up
- Upper -> Upr
- Vale -> Va
- Vale -> Vale
- Valley -> Vly
- Valley -> Vy
- Viaduct -> Vdct
- Viaduct -> Via
- Viaduct -> Viad
- Valleys -> Vlys
- Viaduct$ -> Vdct,Via,Viad
- View -> Vw
- View -> View
- Village -> Vill
- Views -> Vws
- Village -> Vill,Vlg
- Villages -> Vlgs
- Villas -> Vlls
- Vista -> Vst
- Vista -> Vsta
- Walk -> Walk
- Walk -> Wk
- Walk -> Wlk
- Walkway -> Wkwy
- Walkway -> Wky
- Ville -> Vl
- Vista -> Vis,Vst,Vsta
- Walk -> Wk,Wlk
- Walks -> Walk
- Walkway -> Wkwy,Wky
- Waters -> Wtr
- Way -> Way
- Way -> Wy
- Well -> Wl
- Wells -> Wls
- West -> W
- Wharf -> Whrf
- William -> Wm
- Wynd -> Wyn
- Wynd -> Wynd
- Yard -> Yard
- Yard -> Yd
- lang: en
country: ca

View File

@@ -30,7 +30,6 @@
- Bloque -> Blq
- Bulevar -> Blvr
- Boulevard -> Blvd
- Calle -> C/
- Calle -> C
- Calle -> Cl
- Calleja -> Cllja

View File

@@ -3,20 +3,16 @@
words:
- Abbaye -> ABE
- Agglomération -> AGL
- Aire -> AIRE
- Aires -> AIRE
- Allée -> ALL
- Allée -> All
- Allées -> ALL
- Ancien chemin -> ACH
- Ancienne route -> ART
- Anciennes routes -> ART
- Anse -> ANSE
- Arcade -> ARC
- Arcades -> ARC
- Autoroute -> AUT
- Avenue -> AV
- Avenue -> Av
- Barrière -> BRE
- Barrières -> BRE
- Bas chemin -> BCH
@@ -28,16 +24,11 @@
- Berges -> BER
- Bois -> BOIS
- Boucle -> BCLE
- Boulevard -> Bd
- Boulevard -> BD
- Bourg -> BRG
- Butte -> BUT
- Cité -> CITE
- Cités -> CITE
- Côte -> COTE
- Côteau -> COTE
- Cale -> CALE
- Camp -> CAMP
- Campagne -> CGNE
- Camping -> CPG
- Carreau -> CAU
@@ -56,17 +47,13 @@
- Chaussées -> CHS
- Chemin -> Ch
- Chemin -> CHE
- Chemin -> Che
- Chemin vicinal -> CHV
- Cheminement -> CHEM
- Cheminements -> CHEM
- Chemins -> CHE
- Chemins vicinaux -> CHV
- Chez -> CHEZ
- Château -> CHT
- Cloître -> CLOI
- Clos -> CLOS
- Col -> COL
- Colline -> COLI
- Collines -> COLI
- Contour -> CTR
@@ -74,9 +61,7 @@
- Corniches -> COR
- Cottage -> COTT
- Cottages -> COTT
- Cour -> COUR
- Cours -> CRS
- Cours -> Crs
- Darse -> DARS
- Degré -> DEG
- Degrés -> DEG
@@ -87,11 +72,8 @@
- Domaine -> DOM
- Domaines -> DOM
- Écluse -> ECL
- Écluse -> ÉCL
- Écluses -> ECL
- Écluses -> ÉCL
- Église -> EGL
- Église -> ÉGL
- Enceinte -> EN
- Enclave -> ENV
- Enclos -> ENC
@@ -100,21 +82,16 @@
- Espace -> ESPA
- Esplanade -> ESP
- Esplanades -> ESP
- Étang -> ETANG
- Étang -> ÉTANG
- Faubourg -> FG
- Faubourg -> Fg
- Ferme -> FRM
- Fermes -> FRM
- Fontaine -> FON
- Fort -> FORT
- Forum -> FORM
- Fosse -> FOS
- Fosses -> FOS
- Foyer -> FOYR
- Galerie -> GAL
- Galeries -> GAL
- Gare -> GARE
- Garenne -> GARN
- Grand boulevard -> GBD
- Grand ensemble -> GDEN
@@ -134,13 +111,9 @@
- Haut chemin -> HCH
- Hauts chemins -> HCH
- Hippodrome -> HIP
- HLM -> HLM
- Île -> ILE
- Île -> ÎLE
- Immeuble -> IMM
- Immeubles -> IMM
- Impasse -> IMP
- Impasse -> Imp
- Impasses -> IMP
- Jardin -> JARD
- Jardins -> JARD
@@ -150,13 +123,11 @@
- Lieu-dit -> LD
- Lotissement -> LOT
- Lotissements -> LOT
- Mail -> MAIL
- Maison forestière -> MF
- Manoir -> MAN
- Marche -> MAR
- Marches -> MAR
- Maréchal -> MAL
- Mas -> MAS
- Monseigneur -> Mgr
- Mont -> Mt
- Montée -> MTE
@@ -168,13 +139,9 @@
- Métro -> MÉT
- Nouvelle route -> NTE
- Palais -> PAL
- Parc -> PARC
- Parcs -> PARC
- Parking -> PKG
- Parvis -> PRV
- Passage -> PAS
- Passage -> Pas
- Passage -> Pass
- Passage à niveau -> PN
- Passe -> PASS
- Passerelle -> PLE
@@ -191,19 +158,14 @@
- Petite rue -> PTR
- Petites allées -> PTA
- Place -> PL
- Place -> Pl
- Placis -> PLCI
- Plage -> PLAG
- Plages -> PLAG
- Plaine -> PLN
- Plan -> PLAN
- Plateau -> PLT
- Plateaux -> PLT
- Pointe -> PNT
- Pont -> PONT
- Ponts -> PONT
- Porche -> PCH
- Port -> PORT
- Porte -> PTE
- Portique -> PORQ
- Portiques -> PORQ
@@ -211,25 +173,19 @@
- Pourtour -> POUR
- Presquîle -> PRQ
- Promenade -> PROM
- Promenade -> Prom
- Pré -> PRE
- Pré -> PRÉ
- Périphérique -> PERI
- Péristyle -> PSTY
- Quai -> QU
- Quai -> Qu
- Quartier -> QUA
- Raccourci -> RAC
- Raidillon -> RAID
- Rampe -> RPE
- Rempart -> REM
- Roc -> ROC
- Rocade -> ROC
- Rond point -> RPT
- Roquet -> ROQT
- Rotonde -> RTD
- Route -> RTE
- Route -> Rte
- Routes -> RTE
- Rue -> R
- Rue -> R
@@ -245,7 +201,6 @@
- Sentier -> SEN
- Sentiers -> SEN
- Square -> SQ
- Square -> Sq
- Stade -> STDE
- Station -> STA
- Terrain -> TRN
@@ -254,13 +209,11 @@
- Terre plein -> TPL
- Tertre -> TRT
- Tertres -> TRT
- Tour -> TOUR
- Traverse -> TRA
- Vallon -> VAL
- Vallée -> VAL
- Venelle -> VEN
- Venelles -> VEN
- Via -> VIA
- Vieille route -> VTE
- Vieux chemin -> VCHE
- Villa -> VLA
@@ -269,7 +222,6 @@
- Villas -> VLA
- Voie -> VOI
- Voies -> VOI
- Zone -> ZONE
- Zone artisanale -> ZA
- Zone d'aménagement concerté -> ZAC
- Zone d'aménagement différé -> ZAD
@@ -289,7 +241,6 @@
- Esplanade -> ESPL
- Passage -> PASS
- Plateau -> PLAT
- Rang -> RANG
- Rond-point -> RDPT
- Sentier -> SENT
- Subdivision -> SUBDIV

View File

@@ -29,7 +29,6 @@
- Prima -> I
- Primo -> I
- Primo -> 1
- Primo -> 1°
- Quarta -> IV
- Quarto -> IV
- Quattro -> IV

View File

@@ -1,11 +1,10 @@
# Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#Norsk_-_Norwegian
- lang: no
- lang: "no"
words:
# convert between Nynorsk and Bokmål here
- vei, veg => v,vn,vei,veg
- veien, vegen -> v,vn,veien,vegen
- gate -> g,gt
- ~vei, ~veg -> v,vei,veg
- ~veien, ~vegen -> vn,veien,vegen
# convert between the two feminine forms
- gaten, gata => g,gt,gaten,gata
- gate, gaten, gata -> g,gt
- plass, plassen -> pl
- sving, svingen -> sv

View File

@@ -1,14 +1,128 @@
# Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#.D0.A0.D1.83.D1.81.D1.81.D0.BA.D0.B8.D0.B9_-_Russian
# Source: https://www.plantarium.ru/page/help/topic/abbreviations.html
# Source: https://dic.academic.ru/dic.nsf/ruwiki/1871310
- lang: ru
words:
- Академик, Академика -> Ак
- акционерное общество -> АО
- аллея -> ал
- архипелаг -> арх
- атомная электростанция -> АЭС
- аэродром -> аэрд
- аэропорт -> аэрп
- Башкирский, Башкирская, Башкирское, Башкирские -> Баш, Башк, Башкир
- Белый, Белая, Белое, Белые -> Бел
- болото -> бол
- больница -> больн
- Большой, Большая, Большое, Большие -> Б, Бол
- брод -> бр
- бульвар -> бул
- бухта -> бух
- бывший, бывшая, бывшее, бывшие -> бывш
- Великий, Великая, Великое, Великие -> Вел
- Верхний, Верхняя, Верхнее, Верхние -> В, Верх
- водокачка -> вдкч
- водопад -> вдп
- водохранилище -> вдхр
- вокзал -> вкз, вокз
- Восточный, Восточная, Восточное, Восточные -> В, Вост
- вулкан -> влк
- гидроэлектростанция -> ГЭС
- гора -> г
- город -> г
- дворец культуры, дом культуры -> ДК
- дворец спорта -> ДС
- деревня -> д, дер
- детский оздоровительный лагерь -> ДОЛ
- дом -> д
- дом отдыха -> Д О
- железная дорога -> ж д
- железнодорожный, железнодорожная, железнодорожное -> ж-д
- железобетонных изделий -> ЖБИ
- жилой комплекс -> ЖК
- завод -> з-д
- закрытое административно-территориальное образование -> ЗАТО
- залив -> зал
- Западный, Западная, Западное, Западные -> З, Зап, Запад
- заповедник -> запов
- имени -> им
- институт -> инст
- исправительная колония -> ИК
- километр -> км
- Красный, Красная, Красное, Красные -> Кр, Крас
- лагерь -> лаг
- Левый, Левая, Левое, Левые -> Л, Лев
- ледник -> ледн
- лесничество -> леснич
- лесной, лесная, лесное -> лес
- линия электропередачи -> ЛЭП
- Малый, Малая, Малое, Малые -> М, Мал
- Мордовский, Мордовская, Мордовское, Мордовские -> Мордов
- морской, морская, морское -> мор
- Московский, Московская, Московское, Московские -> Мос, Моск
- мыс -> м
- набережная -> наб
- Нижний, Нижняя, Нижнее, Нижние -> Ниж, Н
- Новый, Новая, Новое, Новые -> Нов, Н
- обгонный пункт -> обг п
- область -> обл
- озеро -> оз
- особо охраняемая природная территория -> ООПТ
- остановочный пункт -> о п
- остров -> о
- острова -> о-ва
- парк культуры и отдыха -> ПКиО
- перевал -> пер
- переулок -> пер
- пещера -> пещ
- пионерский лагерь -> пионерлаг
- платформа -> пл, платф
- площадь -> пл
- подсобное хозяйство -> подсоб хоз
- полуостров -> п-ов
- посёлок -> пос, п
- посёлок городского типа -> п г т, пгт
- Правый, Правая, Правое, Правые -> П, Пр, Прав
- проезд -> пр
- проспект -> просп
- пруд -> пр
- пустыня -> пуст
- разъезд -> рзд
- район -> р
- резинотехнических изделий -> РТИ
- река -> р
- речной, речная, речное -> реч, речн
- Российский, Российская, Российское, Российские -> Рос
- Русский, Русская, Русское, Русские -> Рус, Русск
- ручей -> руч
- садовое некоммерческое товарищество -> СНТ
- садовые участки -> сад уч
- санаторий -> сан
- сарай -> сар
- Северный, Северная, Северное, Северные -> С, Сев
- село -> с
- Сибирский, Сибирская, Сибирское, Сибирские -> Сиб
- Советский, Советская, Советское, Советские -> Сов
- совхоз -> свх
- Сортировочный, Сортировочная, Сортировочное, Сортировочные -> Сорт
- станция -> ст
- Средний, Средняя, Среднее, Средние -> Ср
- Татарский, Татарская, Татарское, Татарские -> Тат, Татар
- теплоэлектростанция -> ТЭС
- теплоэлектроцентраль -> ТЭЦ
- техникум -> техн
- тоннель, туннель -> тун
- тупик -> туп
- улица -> ул
- Уральский, Уральская, Уральское, Уральские -> Ур, Урал
- урочище -> ур
- хозяйство -> хоз, хоз-во
- хребет -> хр
- хутор -> хут
- Чёрный, Чёрная, Чёрное, Чёрные -> Черн
- Чувашский, Чувашская, Чувашское, Чувашские -> Чуваш
- шахта -> шах
- школа -> шк
- шоссе -> ш
- элеватор -> элев
- Южный, Южная, Южное, Южные -> Ю, Юж, Южн

View File

@@ -1,4 +1,5 @@
query-preprocessing:
- step: split_japanese_phrases
- step: normalize
normalization:
- ":: lower ()"
@@ -9,16 +10,17 @@ normalization:
- "'nº' > 'no'"
- "ª > a"
- "º > o"
- "[[:Punctuation:][:Symbol:]\u02bc] > ' '"
- "[[:Punctuation:][:Symbol:][\u02bc] - [-:]]+ > '-'"
- "ß > 'ss'" # German szet is unambiguously equal to double ss
- "[^[:alnum:] [:Canonical_Combining_Class=Virama:] [:Space:]] >"
- "[^[:alnum:] [:Canonical_Combining_Class=Virama:] [:Space:] [-:]] >"
- "[:Lm:] >"
- ":: [[:Number:]] Latin ()"
- ":: [[:Number:]] Ascii ();"
- ":: [[:Number:]] NFD ();"
- "[[:Nonspacing Mark:] [:Cf:]] >;"
- "[:Space:]+ > ' '"
- "[-:]?[:Space:]+[-:]? > ' '"
transliteration:
- "[-:] > ' '"
- ":: Latin ()"
- !include icu-rules/extended-unicode-to-asccii.yaml
- ":: Ascii ()"
@@ -44,7 +46,7 @@ sanitizers:
- step: strip-brace-terms
- step: tag-analyzer-by-language
filter-kind: [".*name.*"]
whitelist: [bg,ca,cs,da,de,el,en,es,et,eu,fi,fr,gl,hu,it,ja,mg,ms,nl,no,pl,pt,ro,ru,sk,sl,sv,tr,uk,vi]
whitelist: [bg,ca,cs,da,de,el,en,es,et,eu,fi,fr,gl,hu,it,ja,mg,ms,nl,"no",pl,pt,ro,ru,sk,sl,sv,tr,uk,vi]
use-defaults: all
mode: append
- step: tag-japanese
@@ -156,7 +158,7 @@ token-analysis:
mode: variant-only
variants:
- !include icu-rules/variants-nl.yaml
- id: no
- id: "no"
analyzer: generic
mode: variant-only
variants:

View File

@@ -26,7 +26,7 @@ from .connection import SearchConnection
from .status import get_status, StatusResult
from .lookup import get_places, get_detailed_place
from .reverse import ReverseGeocoder
from .search import ForwardGeocoder, Phrase, PhraseType, make_query_analyzer
from . import search as nsearch
from . import types as ntyp
from .results import DetailedResult, ReverseResult, SearchResults
@@ -207,7 +207,7 @@ class NominatimAPIAsync:
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
if details.keywords:
await make_query_analyzer(conn)
await nsearch.make_query_analyzer(conn)
return await get_detailed_place(conn, place, details)
async def lookup(self, places: Sequence[ntyp.PlaceRef], **params: Any) -> SearchResults:
@@ -219,7 +219,7 @@ class NominatimAPIAsync:
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
if details.keywords:
await make_query_analyzer(conn)
await nsearch.make_query_analyzer(conn)
return await get_places(conn, places, details)
async def reverse(self, coord: ntyp.AnyPoint, **params: Any) -> Optional[ReverseResult]:
@@ -237,7 +237,7 @@ class NominatimAPIAsync:
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
if details.keywords:
await make_query_analyzer(conn)
await nsearch.make_query_analyzer(conn)
geocoder = ReverseGeocoder(conn, details,
self.reverse_restrict_to_country_area)
return await geocoder.lookup(coord)
@@ -251,10 +251,10 @@ class NominatimAPIAsync:
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
geocoder = ForwardGeocoder(conn, ntyp.SearchDetails.from_kwargs(params),
self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None)
phrases = [Phrase(PhraseType.NONE, p.strip()) for p in query.split(',')]
geocoder = nsearch.ForwardGeocoder(conn, ntyp.SearchDetails.from_kwargs(params),
self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None)
phrases = [nsearch.Phrase(nsearch.PHRASE_ANY, p.strip()) for p in query.split(',')]
return await geocoder.lookup(phrases)
async def search_address(self, amenity: Optional[str] = None,
@@ -271,22 +271,22 @@ class NominatimAPIAsync:
conn.set_query_timeout(self.query_timeout)
details = ntyp.SearchDetails.from_kwargs(params)
phrases: List[Phrase] = []
phrases: List[nsearch.Phrase] = []
if amenity:
phrases.append(Phrase(PhraseType.AMENITY, amenity))
phrases.append(nsearch.Phrase(nsearch.PHRASE_AMENITY, amenity))
if street:
phrases.append(Phrase(PhraseType.STREET, street))
phrases.append(nsearch.Phrase(nsearch.PHRASE_STREET, street))
if city:
phrases.append(Phrase(PhraseType.CITY, city))
phrases.append(nsearch.Phrase(nsearch.PHRASE_CITY, city))
if county:
phrases.append(Phrase(PhraseType.COUNTY, county))
phrases.append(nsearch.Phrase(nsearch.PHRASE_COUNTY, county))
if state:
phrases.append(Phrase(PhraseType.STATE, state))
phrases.append(nsearch.Phrase(nsearch.PHRASE_STATE, state))
if postalcode:
phrases.append(Phrase(PhraseType.POSTCODE, postalcode))
phrases.append(nsearch.Phrase(nsearch.PHRASE_POSTCODE, postalcode))
if country:
phrases.append(Phrase(PhraseType.COUNTRY, country))
phrases.append(nsearch.Phrase(nsearch.PHRASE_COUNTRY, country))
if not phrases:
raise UsageError('Nothing to search for.')
@@ -304,14 +304,14 @@ class NominatimAPIAsync:
else:
details.restrict_min_max_rank(4, 4)
if 'layers' not in params:
if details.layers is None:
details.layers = ntyp.DataLayer.ADDRESS
if amenity:
details.layers |= ntyp.DataLayer.POI
geocoder = ForwardGeocoder(conn, details,
self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None)
geocoder = nsearch.ForwardGeocoder(conn, details,
self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None)
return await geocoder.lookup(phrases)
async def search_category(self, categories: List[Tuple[str, str]],
@@ -328,15 +328,15 @@ class NominatimAPIAsync:
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
if near_query:
phrases = [Phrase(PhraseType.NONE, p) for p in near_query.split(',')]
phrases = [nsearch.Phrase(nsearch.PHRASE_ANY, p) for p in near_query.split(',')]
else:
phrases = []
if details.keywords:
await make_query_analyzer(conn)
await nsearch.make_query_analyzer(conn)
geocoder = ForwardGeocoder(conn, details,
self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None)
geocoder = nsearch.ForwardGeocoder(conn, details,
self.config.get_int('REQUEST_TIMEOUT')
if self.config.REQUEST_TIMEOUT else None)
return await geocoder.lookup_pois(categories, phrases)
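With the PhraseType enum replaced by module-level PHRASE_* constants, callers now build structured phrases from plain values. A hedged usage sketch — the project directory and query data are placeholders, and a configured Nominatim database is required for the lookup to succeed:

import asyncio
from pathlib import Path

from nominatim_api import NominatimAPIAsync
import nominatim_api.search as nsearch

async def main() -> None:
    # What used to be Phrase(PhraseType.CITY, ...) is now:
    print(nsearch.Phrase(nsearch.PHRASE_CITY, 'Berlin'))

    api = NominatimAPIAsync(Path('.'))  # assumes a Nominatim project directory
    try:
        results = await api.search_address(city='Berlin',
                                           street='Unter den Linden')
        print(results)
    finally:
        await api.close()

asyncio.run(main())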

View File

@@ -27,5 +27,5 @@ def create(config: QueryConfig) -> QueryProcessingFunc:
return lambda phrases: list(
filter(lambda p: p.text,
(Phrase(p.ptype, cast(str, normalizer.transliterate(p.text)))
(Phrase(p.ptype, cast(str, normalizer.transliterate(p.text)).strip('-: '))
for p in phrases)))

View File

@@ -0,0 +1,61 @@
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
This file divides Japanese addresses into three categories:
prefecture, municipality, and other.
The division is not strict; it simply matches on the keywords below.
"""
from typing import List
import re
from .config import QueryConfig
from .base import QueryProcessingFunc
from ..search.query import Phrase
MATCH_PATTERNS = [
r'''
(...??[都都道府県縣]) # [group1] prefecture
(.+?[市区區町村]) # [group2] municipalities (city/wards/towns/villages)
(.+) # [group3] other words
''',
r'''
(...??[都都道府県縣]) # [group1] prefecture
(.+) # [group3] other words
''',
r'''
(.+?[市区區町村]) # [group2] municipalities (city/wards/towns/villages)
(.+) # [group3] other words
'''
]
class _JapanesePreprocessing:
def __init__(self, config: QueryConfig) -> None:
self.config = config
def split_phrase(self, phrase: Phrase) -> Phrase:
"""
Split the given text on the first matching pattern.
"""
for pattern in MATCH_PATTERNS:
result = re.match(pattern, phrase.text, re.VERBOSE)
if result is not None:
return Phrase(phrase.ptype, ':'.join(result.groups()))
return phrase
def __call__(self, phrases: List[Phrase]) -> List[Phrase]:
"""Split a Japanese address using japanese_tokenizer.
"""
return [self.split_phrase(p) for p in phrases]
def create(config: QueryConfig) -> QueryProcessingFunc:
""" Create a function of japanese preprocessing.
"""
return _JapanesePreprocessing(config)
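A standalone run of the first pattern (the sample address is invented): prefecture and municipality are carved off and the groups re-joined with ':', which the reworked tokenizer then handles as a soft phrase break:

import re

PREFECTURE_PATTERN = r'''
    (...??[都都道府県縣])   # [group1] prefecture
    (.+?[市区區町村])       # [group2] municipality
    (.+)                    # [group3] other words
'''

m = re.match(PREFECTURE_PATTERN, '東京都渋谷区道玄坂1丁目', re.VERBOSE)
if m:
    print(':'.join(m.groups()))  # 東京都:渋谷区:道玄坂1丁目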

View File

@@ -9,5 +9,12 @@ Module for forward search.
"""
from .geocoder import (ForwardGeocoder as ForwardGeocoder)
from .query import (Phrase as Phrase,
PhraseType as PhraseType)
PHRASE_ANY as PHRASE_ANY,
PHRASE_AMENITY as PHRASE_AMENITY,
PHRASE_STREET as PHRASE_STREET,
PHRASE_CITY as PHRASE_CITY,
PHRASE_COUNTY as PHRASE_COUNTY,
PHRASE_STATE as PHRASE_STATE,
PHRASE_POSTCODE as PHRASE_POSTCODE,
PHRASE_COUNTRY as PHRASE_COUNTRY)
from .query_analyzer_factory import (make_query_analyzer as make_query_analyzer)

View File

@@ -11,7 +11,7 @@ from typing import Optional, List, Tuple, Iterator, Dict
import heapq
from ..types import SearchDetails, DataLayer
from .query import QueryStruct, Token, TokenType, TokenRange, BreakType
from . import query as qmod
from .token_assignment import TokenAssignment
from . import db_search_fields as dbf
from . import db_searches as dbs
@@ -51,7 +51,7 @@ class SearchBuilder:
""" Build the abstract search queries from token assignments.
"""
def __init__(self, query: QueryStruct, details: SearchDetails) -> None:
def __init__(self, query: qmod.QueryStruct, details: SearchDetails) -> None:
self.query = query
self.details = details
@@ -97,7 +97,7 @@ class SearchBuilder:
builder = self.build_poi_search(sdata)
elif assignment.housenumber:
hnr_tokens = self.query.get_tokens(assignment.housenumber,
TokenType.HOUSENUMBER)
qmod.TOKEN_HOUSENUMBER)
builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address)
else:
builder = self.build_special_search(sdata, assignment.address,
@@ -128,7 +128,7 @@ class SearchBuilder:
yield dbs.PoiSearch(sdata)
def build_special_search(self, sdata: dbf.SearchData,
address: List[TokenRange],
address: List[qmod.TokenRange],
is_category: bool) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search queries for searches that do not involve
a named place.
@@ -148,11 +148,10 @@ class SearchBuilder:
[t.token for r in address
for t in self.query.get_partials_list(r)],
lookups.Restrict)]
penalty += 0.2
yield dbs.PostcodeSearch(penalty, sdata)
def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[Token],
address: List[TokenRange]) -> Iterator[dbs.AbstractSearch]:
def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[qmod.Token],
address: List[qmod.TokenRange]) -> Iterator[dbs.AbstractSearch]:
""" Build a simple address search for special entries where the
housenumber is the main name token.
"""
@@ -174,7 +173,7 @@ class SearchBuilder:
list(partials), lookups.LookupAll))
else:
addr_fulls = [t.token for t
in self.query.get_tokens(address[0], TokenType.WORD)]
in self.query.get_tokens(address[0], qmod.TOKEN_WORD)]
if len(addr_fulls) > 5:
return
sdata.lookups.append(
@@ -184,7 +183,7 @@ class SearchBuilder:
yield dbs.PlaceSearch(0.05, sdata, expected_count)
def build_name_search(self, sdata: dbf.SearchData,
name: TokenRange, address: List[TokenRange],
name: qmod.TokenRange, address: List[qmod.TokenRange],
is_category: bool) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search queries for simple name or address searches.
"""
@@ -197,7 +196,7 @@ class SearchBuilder:
sdata.lookups = lookup
yield dbs.PlaceSearch(penalty + name_penalty, sdata, count)
def yield_lookups(self, name: TokenRange, address: List[TokenRange]
def yield_lookups(self, name: qmod.TokenRange, address: List[qmod.TokenRange]
) -> Iterator[Tuple[float, int, List[dbf.FieldLookup]]]:
""" Yield all variants how the given name and address should best
be searched for. This takes into account how frequent the terms
@@ -209,26 +208,26 @@ class SearchBuilder:
addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
addr_tokens = list({t.token for t in addr_partials})
exp_count = min(t.count for t in name_partials.values()) / (2**(len(name_partials) - 1))
exp_count = min(t.count for t in name_partials.values()) / (3**(len(name_partials) - 1))
if (len(name_partials) > 3 or exp_count < 8000):
yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
return
addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 30000
addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 50000
# Partial term too frequent. Try looking up by rare full names first.
name_fulls = self.query.get_tokens(name, TokenType.WORD)
name_fulls = self.query.get_tokens(name, qmod.TOKEN_WORD)
if name_fulls:
fulls_count = sum(t.count for t in name_fulls)
if fulls_count < 50000 or addr_count < 30000:
if fulls_count < 50000 or addr_count < 50000:
yield penalty, fulls_count / (2**len(addr_tokens)), \
self.get_full_name_ranking(name_fulls, addr_partials,
fulls_count > 30000 / max(1, len(addr_tokens)))
# To catch remaining results, look up by name and address
# We only do this if there is a reasonable number of results expected.
exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
exp_count /= 2**len(addr_tokens)
if exp_count < 10000 and addr_count < 20000:
penalty += 0.35 * max(1 if name_fulls else 0.1,
5 - len(name_partials) - len(addr_tokens))
@@ -236,7 +235,7 @@ class SearchBuilder:
self.get_name_address_ranking(list(name_partials.keys()), addr_partials)
def get_name_address_ranking(self, name_tokens: List[int],
addr_partials: List[Token]) -> List[dbf.FieldLookup]:
addr_partials: List[qmod.Token]) -> List[dbf.FieldLookup]:
""" Create a ranking expression looking up by name and address.
"""
lookup = [dbf.FieldLookup('name_vector', name_tokens, lookups.LookupAll)]
@@ -258,23 +257,16 @@ class SearchBuilder:
return lookup
def get_full_name_ranking(self, name_fulls: List[Token], addr_partials: List[Token],
def get_full_name_ranking(self, name_fulls: List[qmod.Token], addr_partials: List[qmod.Token],
use_lookup: bool) -> List[dbf.FieldLookup]:
""" Create a ranking expression with full name terms and
additional address lookup. When 'use_lookup' is true,
address lookups will use the index, provided the terms
do not occur too often.
"""
# At this point drop unindexed partials from the address.
# This might yield wrong results, nothing we can do about that.
if use_lookup:
addr_restrict_tokens = []
addr_lookup_tokens = []
for t in addr_partials:
if t.addr_count > 20000:
addr_restrict_tokens.append(t.token)
else:
addr_lookup_tokens.append(t.token)
addr_lookup_tokens = [t.token for t in addr_partials]
else:
addr_restrict_tokens = [t.token for t in addr_partials]
addr_lookup_tokens = []
@@ -282,11 +274,11 @@ class SearchBuilder:
return dbf.lookup_by_any_name([t.token for t in name_fulls],
addr_restrict_tokens, addr_lookup_tokens)
def get_name_ranking(self, trange: TokenRange,
def get_name_ranking(self, trange: qmod.TokenRange,
db_field: str = 'name_vector') -> dbf.FieldRanking:
""" Create a ranking expression for a name term in the given range.
"""
name_fulls = self.query.get_tokens(trange, TokenType.WORD)
name_fulls = self.query.get_tokens(trange, qmod.TOKEN_WORD)
ranks = [dbf.RankedTokens(t.penalty, [t.token]) for t in name_fulls]
ranks.sort(key=lambda r: r.penalty)
# Fallback, sum of penalty for partials
@@ -294,7 +286,7 @@ class SearchBuilder:
default = sum(t.penalty for t in name_partials) + 0.2
return dbf.FieldRanking(db_field, default, ranks)
def get_addr_ranking(self, trange: TokenRange) -> dbf.FieldRanking:
def get_addr_ranking(self, trange: qmod.TokenRange) -> dbf.FieldRanking:
""" Create a list of ranking expressions for an address term
for the given ranges.
"""
@@ -305,10 +297,10 @@ class SearchBuilder:
while todo:
neglen, pos, rank = heapq.heappop(todo)
for tlist in self.query.nodes[pos].starting:
if tlist.ttype in (TokenType.PARTIAL, TokenType.WORD):
if tlist.ttype in (qmod.TOKEN_PARTIAL, qmod.TOKEN_WORD):
if tlist.end < trange.end:
chgpenalty = PENALTY_WORDCHANGE[self.query.nodes[tlist.end].btype]
if tlist.ttype == TokenType.PARTIAL:
if tlist.ttype == qmod.TOKEN_PARTIAL:
penalty = rank.penalty + chgpenalty \
+ max(t.penalty for t in tlist.tokens)
heapq.heappush(todo, (neglen - 1, tlist.end,
@@ -318,7 +310,7 @@ class SearchBuilder:
heapq.heappush(todo, (neglen - 1, tlist.end,
rank.with_token(t, chgpenalty)))
elif tlist.end == trange.end:
if tlist.ttype == TokenType.PARTIAL:
if tlist.ttype == qmod.TOKEN_PARTIAL:
ranks.append(dbf.RankedTokens(rank.penalty
+ max(t.penalty for t in tlist.tokens),
rank.tokens))
@@ -358,11 +350,11 @@ class SearchBuilder:
if assignment.housenumber:
sdata.set_strings('housenumbers',
self.query.get_tokens(assignment.housenumber,
TokenType.HOUSENUMBER))
qmod.TOKEN_HOUSENUMBER))
if assignment.postcode:
sdata.set_strings('postcodes',
self.query.get_tokens(assignment.postcode,
TokenType.POSTCODE))
qmod.TOKEN_POSTCODE))
if assignment.qualifier:
tokens = self.get_qualifier_tokens(assignment.qualifier)
if not tokens:
@@ -387,23 +379,23 @@ class SearchBuilder:
return sdata
def get_country_tokens(self, trange: TokenRange) -> List[Token]:
def get_country_tokens(self, trange: qmod.TokenRange) -> List[qmod.Token]:
""" Return the list of country tokens for the given range,
optionally filtered by the country list from the details
parameters.
"""
tokens = self.query.get_tokens(trange, TokenType.COUNTRY)
tokens = self.query.get_tokens(trange, qmod.TOKEN_COUNTRY)
if self.details.countries:
tokens = [t for t in tokens if t.lookup_word in self.details.countries]
return tokens
def get_qualifier_tokens(self, trange: TokenRange) -> List[Token]:
def get_qualifier_tokens(self, trange: qmod.TokenRange) -> List[qmod.Token]:
""" Return the list of qualifier tokens for the given range,
optionally filtered by the qualifier list from the details
parameters.
"""
tokens = self.query.get_tokens(trange, TokenType.QUALIFIER)
tokens = self.query.get_tokens(trange, qmod.TOKEN_QUALIFIER)
if self.details.categories:
tokens = [t for t in tokens if t.get_category() in self.details.categories]
@@ -416,7 +408,7 @@ class SearchBuilder:
"""
if assignment.near_item:
tokens: Dict[Tuple[str, str], float] = {}
for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM):
for t in self.query.get_tokens(assignment.near_item, qmod.TOKEN_NEAR_ITEM):
cat = t.get_category()
# The category of a near search will be that of near_item.
# Thus, if search is restricted to a category parameter,
@@ -430,10 +422,11 @@ class SearchBuilder:
PENALTY_WORDCHANGE = {
BreakType.START: 0.0,
BreakType.END: 0.0,
BreakType.PHRASE: 0.0,
BreakType.WORD: 0.1,
BreakType.PART: 0.2,
BreakType.TOKEN: 0.4
qmod.BREAK_START: 0.0,
qmod.BREAK_END: 0.0,
qmod.BREAK_PHRASE: 0.0,
qmod.BREAK_SOFT_PHRASE: 0.0,
qmod.BREAK_WORD: 0.1,
qmod.BREAK_PART: 0.2,
qmod.BREAK_TOKEN: 0.4
}

View File

@@ -581,9 +581,13 @@ class PostcodeSearch(AbstractSearch):
.where((tsearch.c.name_vector + tsearch.c.nameaddress_vector)
.contains(sa.type_coerce(self.lookups[0].tokens,
IntArray)))
# Do NOT add rerank penalties based on the address terms.
# The standard rerank penalty only checks the address vector
# while terms may appear in name and address vector. This would
# lead to overly high penalties.
# We assume that a postcode is precise enough to not require
# additional full name matches.
for ranking in self.rankings:
penalty += ranking.sql_penalty(conn.t.search_name)
penalty += sa.case(*((t.c.postcode == v, p) for v, p in self.postcodes),
else_=1.0)

View File

@@ -133,7 +133,7 @@ class ForwardGeocoder:
"""
assert self.query_analyzer is not None
qwords = [word for phrase in query.source
for word in re.split('[, ]+', phrase.text) if word]
for word in re.split('[-,: ]+', phrase.text) if word]
if not qwords:
return
@@ -146,7 +146,7 @@ class ForwardGeocoder:
distance = 0.0
norm = self.query_analyzer.normalize_text(' '.join((result.display_name,
result.country_code or '')))
words = set((w for w in norm.split(' ') if w))
words = set((w for w in re.split('[-,: ]+', norm) if w))
if not words:
continue
for qword in qwords:
@@ -238,7 +238,7 @@ def _dump_searches(searches: List[AbstractSearch], query: QueryStruct,
if not lk:
return ''
return f"{lk.lookup_type}({lk.column}{tk(lk.tokens)})"
return f"{lk.lookup_type.__name__}({lk.column}{tk(lk.tokens)})"
def fmt_cstr(c: Any) -> str:
if not c:

View File

@@ -7,10 +7,11 @@
"""
Implementation of query analysis for the ICU tokenizer.
"""
from typing import Tuple, Dict, List, Optional, NamedTuple, Iterator, Any, cast
from collections import defaultdict
from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
import dataclasses
import difflib
import re
from itertools import zip_longest
from icu import Transliterator
@@ -23,45 +24,28 @@ from ..connection import SearchConnection
from ..logging import log
from . import query as qmod
from ..query_preprocessing.config import QueryConfig
from ..query_preprocessing.base import QueryProcessingFunc
from .query_analyzer_factory import AbstractQueryAnalyzer
from .postcode_parser import PostcodeParser
DB_TO_TOKEN_TYPE = {
'W': qmod.TokenType.WORD,
'w': qmod.TokenType.PARTIAL,
'H': qmod.TokenType.HOUSENUMBER,
'P': qmod.TokenType.POSTCODE,
'C': qmod.TokenType.COUNTRY
'W': qmod.TOKEN_WORD,
'w': qmod.TOKEN_PARTIAL,
'H': qmod.TOKEN_HOUSENUMBER,
'P': qmod.TOKEN_POSTCODE,
'C': qmod.TOKEN_COUNTRY
}
class QueryPart(NamedTuple):
""" Normalized and transliterated form of a single term in the query.
When the term came out of a split during the transliteration,
the normalized string is the full word before transliteration.
The word number keeps track of the word before transliteration
and can be used to identify partial transliterated terms.
"""
token: str
normalized: str
word_number: int
QueryParts = List[QueryPart]
WordDict = Dict[str, List[qmod.TokenRange]]
def yield_words(terms: List[QueryPart], start: int) -> Iterator[Tuple[str, qmod.TokenRange]]:
""" Return all combinations of words in the terms list after the
given position.
"""
total = len(terms)
for first in range(start, total):
word = terms[first].token
yield word, qmod.TokenRange(first, first + 1)
for last in range(first + 1, min(first + 20, total)):
word = ' '.join((word, terms[last].token))
yield word, qmod.TokenRange(first, last + 1)
PENALTY_IN_TOKEN_BREAK = {
qmod.BREAK_START: 0.5,
qmod.BREAK_END: 0.5,
qmod.BREAK_PHRASE: 0.5,
qmod.BREAK_SOFT_PHRASE: 0.5,
qmod.BREAK_WORD: 0.1,
qmod.BREAK_PART: 0.0,
qmod.BREAK_TOKEN: 0.0
}
@dataclasses.dataclass
@@ -94,25 +78,25 @@ class ICUToken(qmod.Token):
self.penalty += (distance/len(self.lookup_word))
@staticmethod
def from_db_row(row: SaRow) -> 'ICUToken':
def from_db_row(row: SaRow, base_penalty: float = 0.0) -> 'ICUToken':
""" Create a ICUToken from the row of the word table.
"""
count = 1 if row.info is None else row.info.get('count', 1)
addr_count = 1 if row.info is None else row.info.get('addr_count', 1)
penalty = 0.0
penalty = base_penalty
if row.type == 'w':
penalty = 0.3
penalty += 0.3
elif row.type == 'W':
if len(row.word_token) == 1 and row.word_token == row.word:
penalty = 0.2 if row.word.isdigit() else 0.3
penalty += 0.2 if row.word.isdigit() else 0.3
elif row.type == 'H':
penalty = sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
penalty += sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
if all(not c.isdigit() for c in row.word_token):
penalty += 0.2 * (len(row.word_token) - 1)
elif row.type == 'C':
if len(row.word_token) == 1:
penalty = 0.3
penalty += 0.3
if row.info is None:
lookup_word = row.word
@@ -129,60 +113,51 @@ class ICUToken(qmod.Token):
addr_count=max(1, addr_count))
class ICUQueryAnalyzer(AbstractQueryAnalyzer):
""" Converter for query strings into a tokenized query
using the tokens created by an ICU tokenizer.
"""
def __init__(self, conn: SearchConnection) -> None:
self.conn = conn
@dataclasses.dataclass
class ICUAnalyzerConfig:
postcode_parser: PostcodeParser
normalizer: Transliterator
transliterator: Transliterator
preprocessors: List[QueryProcessingFunc]
async def setup(self) -> None:
""" Set up static data structures needed for the analysis.
"""
async def _make_normalizer() -> Any:
rules = await self.conn.get_property('tokenizer_import_normalisation')
return Transliterator.createFromRules("normalization", rules)
@staticmethod
async def create(conn: SearchConnection) -> 'ICUAnalyzerConfig':
rules = await conn.get_property('tokenizer_import_normalisation')
normalizer = Transliterator.createFromRules("normalization", rules)
self.normalizer = await self.conn.get_cached_value('ICUTOK', 'normalizer',
_make_normalizer)
rules = await conn.get_property('tokenizer_import_transliteration')
transliterator = Transliterator.createFromRules("transliteration", rules)
async def _make_transliterator() -> Any:
rules = await self.conn.get_property('tokenizer_import_transliteration')
return Transliterator.createFromRules("transliteration", rules)
self.transliterator = await self.conn.get_cached_value('ICUTOK', 'transliterator',
_make_transliterator)
await self._setup_preprocessing()
if 'word' not in self.conn.t.meta.tables:
sa.Table('word', self.conn.t.meta,
sa.Column('word_id', sa.Integer),
sa.Column('word_token', sa.Text, nullable=False),
sa.Column('type', sa.Text, nullable=False),
sa.Column('word', sa.Text),
sa.Column('info', Json))
async def _setup_preprocessing(self) -> None:
""" Load the rules for preprocessing and set up the handlers.
"""
rules = self.conn.config.load_sub_configuration('icu_tokenizer.yaml',
config='TOKENIZER_CONFIG')
preprocessing_rules = rules.get('query-preprocessing', [])
self.preprocessors = []
preprocessing_rules = conn.config.load_sub_configuration('icu_tokenizer.yaml',
config='TOKENIZER_CONFIG')\
.get('query-preprocessing', [])
preprocessors: List[QueryProcessingFunc] = []
for func in preprocessing_rules:
if 'step' not in func:
raise UsageError("Preprocessing rule is missing the 'step' attribute.")
if not isinstance(func['step'], str):
raise UsageError("'step' attribute must be a simple string.")
module = self.conn.config.load_plugin_module(
module = conn.config.load_plugin_module(
func['step'], 'nominatim_api.query_preprocessing')
self.preprocessors.append(
module.create(QueryConfig(func).set_normalizer(self.normalizer)))
preprocessors.append(
module.create(QueryConfig(func).set_normalizer(normalizer)))
return ICUAnalyzerConfig(PostcodeParser(conn.config),
normalizer, transliterator, preprocessors)
class ICUQueryAnalyzer(AbstractQueryAnalyzer):
""" Converter for query strings into a tokenized query
using the tokens created by an ICU tokenizer.
"""
def __init__(self, conn: SearchConnection, config: ICUAnalyzerConfig) -> None:
self.conn = conn
self.postcode_parser = config.postcode_parser
self.normalizer = config.normalizer
self.transliterator = config.transliterator
self.preprocessors = config.preprocessors
async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
""" Analyze the given list of phrases and return the
@@ -197,26 +172,34 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
if not query.source:
return query
parts, words = self.split_query(query)
log().var_dump('Transliterated query', lambda: _dump_transliterated(query, parts))
self.split_query(query)
log().var_dump('Transliterated query', lambda: query.get_transliterated_query())
words = query.extract_words(base_penalty=PENALTY_IN_TOKEN_BREAK[qmod.BREAK_WORD])
for row in await self.lookup_in_db(list(words.keys())):
for trange in words[row.word_token]:
token = ICUToken.from_db_row(row)
token = ICUToken.from_db_row(row, trange.penalty or 0.0)
if row.type == 'S':
if row.info['op'] in ('in', 'near'):
if trange.start == 0:
query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
else:
if trange.start == 0 and trange.end == query.num_token_slots():
query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
else:
query.add_token(trange, qmod.TokenType.QUALIFIER, token)
query.add_token(trange, qmod.TOKEN_QUALIFIER, token)
else:
query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)
self.add_extra_tokens(query, parts)
self.rerank_tokens(query, parts)
self.add_extra_tokens(query)
for start, end, pc in self.postcode_parser.parse(query):
term = ' '.join(n.term_lookup for n in query.nodes[start + 1:end + 1])
query.add_token(qmod.TokenRange(start, end),
qmod.TOKEN_POSTCODE,
ICUToken(penalty=0.1, token=0, count=1, addr_count=1,
lookup_word=pc, word_token=term,
info=None))
self.rerank_tokens(query)
log().table_dump('Word tokens', _dump_word_tokens(query))
@@ -227,97 +210,93 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
standardized form search will work with. All information removed
at this stage is inevitably lost.
"""
return cast(str, self.normalizer.transliterate(text))
return cast(str, self.normalizer.transliterate(text)).strip('-: ')
def split_query(self, query: qmod.QueryStruct) -> Tuple[QueryParts, WordDict]:
def split_query(self, query: qmod.QueryStruct) -> None:
""" Transliterate the phrases and split them into tokens.
Returns the list of transliterated tokens together with their
normalized form and a dictionary of words for lookup together
with their position.
"""
parts: QueryParts = []
phrase_start = 0
words = defaultdict(list)
wordnr = 0
for phrase in query.source:
query.nodes[-1].ptype = phrase.ptype
for word in phrase.text.split(' '):
phrase_split = re.split('([ :-])', phrase.text)
# The zip construct will give us the pairs of word/break from
# the regular expression split. As the split array ends on the
# final word, we simply use the fillvalue to even out the list and
# add the phrase break at the end.
for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue=','):
if not word:
continue
trans = self.transliterator.transliterate(word)
if trans:
for term in trans.split(' '):
if term:
parts.append(QueryPart(term, word, wordnr))
query.add_node(qmod.BreakType.TOKEN, phrase.ptype)
query.nodes[-1].btype = qmod.BreakType.WORD
wordnr += 1
query.nodes[-1].btype = qmod.BreakType.PHRASE
query.add_node(qmod.BREAK_TOKEN, phrase.ptype,
PENALTY_IN_TOKEN_BREAK[qmod.BREAK_TOKEN],
term, word)
query.nodes[-1].adjust_break(breakchar,
PENALTY_IN_TOKEN_BREAK[breakchar])
for word, wrange in yield_words(parts, phrase_start):
words[word].append(wrange)
phrase_start = len(parts)
query.nodes[-1].btype = qmod.BreakType.END
return parts, words
query.nodes[-1].adjust_break(qmod.BREAK_END, PENALTY_IN_TOKEN_BREAK[qmod.BREAK_END])
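The word/break pairing above is compact enough to deserve a standalone illustration (sample text invented):

import re
from itertools import zip_longest

phrase_split = re.split('([ :-])', 'Rue de la Paix:Paris')
# ['Rue', ' ', 'de', ' ', 'la', ' ', 'Paix', ':', 'Paris']

# Two references to one iterator make zip_longest consume the list
# pairwise; the fillvalue ',' supplies the missing final break.
for word, breakchar in zip_longest(*[iter(phrase_split)] * 2, fillvalue=','):
    print(repr(word), '->', repr(breakchar))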
async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
""" Return the token information from the database for the
given word tokens.
This function excludes postcode tokens.
"""
t = self.conn.t.meta.tables['word']
return await self.conn.execute(t.select().where(t.c.word_token.in_(words)))
return await self.conn.execute(t.select()
.where(t.c.word_token.in_(words))
.where(t.c.type != 'P'))
def add_extra_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
def add_extra_tokens(self, query: qmod.QueryStruct) -> None:
""" Add tokens to query that are not saved in the database.
"""
for part, node, i in zip(parts, query.nodes, range(1000)):
if len(part.token) <= 4 and part[0].isdigit()\
and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
need_hnr = False
for i, node in enumerate(query.nodes):
is_full_token = node.btype not in (qmod.BREAK_TOKEN, qmod.BREAK_PART)
if need_hnr and is_full_token \
and len(node.term_normalized) <= 4 and node.term_normalized.isdigit():
query.add_token(qmod.TokenRange(i-1, i), qmod.TOKEN_HOUSENUMBER,
ICUToken(penalty=0.5, token=0,
count=1, addr_count=1, lookup_word=part.token,
word_token=part.token, info=None))
count=1, addr_count=1,
lookup_word=node.term_lookup,
word_token=node.term_lookup, info=None))
def rerank_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
need_hnr = is_full_token and not node.has_tokens(i+1, qmod.TOKEN_HOUSENUMBER)
def rerank_tokens(self, query: qmod.QueryStruct) -> None:
""" Add penalties to tokens that depend on presence of other token.
"""
for i, node, tlist in query.iter_token_lists():
if tlist.ttype == qmod.TokenType.POSTCODE:
if tlist.ttype == qmod.TOKEN_POSTCODE:
tlen = len(cast(ICUToken, tlist.tokens[0]).word_token)
for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.POSTCODE \
and (repl.ttype != qmod.TokenType.HOUSENUMBER
or len(tlist.tokens[0].lookup_word) > 4):
if repl.end == tlist.end and repl.ttype != qmod.TOKEN_POSTCODE \
and (repl.ttype != qmod.TOKEN_HOUSENUMBER or tlen > 4):
repl.add_penalty(0.39)
elif (tlist.ttype == qmod.TokenType.HOUSENUMBER
elif (tlist.ttype == qmod.TOKEN_HOUSENUMBER
and len(tlist.tokens[0].lookup_word) <= 3):
if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.HOUSENUMBER:
if repl.end == tlist.end and repl.ttype != qmod.TOKEN_HOUSENUMBER:
repl.add_penalty(0.5 - tlist.tokens[0].penalty)
elif tlist.ttype not in (qmod.TokenType.COUNTRY, qmod.TokenType.PARTIAL):
norm = parts[i].normalized
for j in range(i + 1, tlist.end):
if parts[j - 1].word_number != parts[j].word_number:
norm += ' ' + parts[j].normalized
elif tlist.ttype not in (qmod.TOKEN_COUNTRY, qmod.TOKEN_PARTIAL):
norm = ' '.join(n.term_normalized for n in query.nodes[i + 1:tlist.end + 1]
if n.btype != qmod.BREAK_TOKEN)
if not norm:
# Can happen when the token only covers a partial term
norm = query.nodes[i + 1].term_normalized
for token in tlist.tokens:
cast(ICUToken, token).rematch(norm)
def _dump_transliterated(query: qmod.QueryStruct, parts: QueryParts) -> str:
out = query.nodes[0].btype.value
for node, part in zip(query.nodes[1:], parts):
out += part.token + node.btype.value
return out
def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
for node in query.nodes:
yield ['type', 'from', 'to', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
for i, node in enumerate(query.nodes):
for tlist in node.starting:
for token in tlist.tokens:
t = cast(ICUToken, token)
yield [tlist.ttype.name, t.token, t.word_token or '',
yield [tlist.ttype, str(i), str(tlist.end), t.token, t.word_token or '',
t.lookup_word or '', t.penalty, t.count, t.info]
@@ -325,7 +304,17 @@ async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer
""" Create and set up a new query analyzer for a database based
on the ICU tokenizer.
"""
out = ICUQueryAnalyzer(conn)
await out.setup()
async def _get_config() -> ICUAnalyzerConfig:
if 'word' not in conn.t.meta.tables:
sa.Table('word', conn.t.meta,
sa.Column('word_id', sa.Integer),
sa.Column('word_token', sa.Text, nullable=False),
sa.Column('type', sa.Text, nullable=False),
sa.Column('word', sa.Text),
sa.Column('info', Json))
return out
return await ICUAnalyzerConfig.create(conn)
config = await conn.get_cached_value('ICUTOK', 'config', _get_config)
return ICUQueryAnalyzer(conn, config)

View File

@@ -0,0 +1,104 @@
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Handling of arbitrary postcode tokens in tokenized query string.
"""
from typing import Tuple, Set, Dict, List
import re
from collections import defaultdict
import yaml
from ..config import Configuration
from . import query as qmod
class PostcodeParser:
""" Pattern-based parser for postcodes in tokenized queries.
The postcode patterns are read from the country configuration.
The parser currently does not return country restrictions.
"""
def __init__(self, config: Configuration) -> None:
# skip over includes here to avoid loading the complete country name data
yaml.add_constructor('!include', lambda loader, node: [],
Loader=yaml.SafeLoader)
cdata = yaml.safe_load(config.find_config_file('country_settings.yaml')
.read_text(encoding='utf-8'))
unique_patterns: Dict[str, Dict[str, List[str]]] = {}
for cc, data in cdata.items():
if data.get('postcode'):
pat = data['postcode']['pattern'].replace('d', '[0-9]').replace('l', '[A-Z]')
out = data['postcode'].get('output')
if pat not in unique_patterns:
unique_patterns[pat] = defaultdict(list)
unique_patterns[pat][out].append(cc.upper())
self.global_pattern = re.compile(
'(?:(?P<cc>[A-Z][A-Z])(?P<space>[ -]?))?(?P<pc>(?:(?:'
+ ')|(?:'.join(unique_patterns) + '))[:, >].*)')
self.local_patterns = [(re.compile(f"{pat}[:, >]"), list(info.items()))
for pat, info in unique_patterns.items()]
def parse(self, query: qmod.QueryStruct) -> Set[Tuple[int, int, str]]:
""" Parse postcodes in the given list of query tokens taking into
account the list of breaks from the nodes.
The result is a set of tuples of
(start node id, end node id, postcode token).
"""
nodes = query.nodes
outcodes: Set[Tuple[int, int, str]] = set()
terms = [n.term_normalized.upper() + n.btype for n in nodes]
for i in range(query.num_token_slots()):
if nodes[i].btype in '<,: ' and nodes[i + 1].btype != '`' \
and (i == 0 or nodes[i - 1].ptype != qmod.PHRASE_POSTCODE):
if nodes[i].ptype == qmod.PHRASE_ANY:
word = terms[i + 1]
if word[-1] in ' -' and nodes[i + 2].btype != '`' \
and nodes[i + 1].ptype == qmod.PHRASE_ANY:
word += terms[i + 2]
if word[-1] in ' -' and nodes[i + 3].btype != '`' \
and nodes[i + 2].ptype == qmod.PHRASE_ANY:
word += terms[i + 3]
self._match_word(word, i, False, outcodes)
elif nodes[i].ptype == qmod.PHRASE_POSTCODE:
word = terms[i + 1]
for j in range(i + 1, query.num_token_slots()):
if nodes[j].ptype != qmod.PHRASE_POSTCODE:
break
word += terms[j + 1]
self._match_word(word, i, True, outcodes)
return outcodes
def _match_word(self, word: str, pos: int, fullmatch: bool,
outcodes: Set[Tuple[int, int, str]]) -> None:
# Use global pattern to check for presence of any postcode.
m = self.global_pattern.fullmatch(word)
if m:
# If there was a match, check against each pattern separately
# because multiple patterns might be matching at the end.
cc = m.group('cc')
pc_word = m.group('pc')
cc_spaces = len(m.group('space') or '')
for pattern, info in self.local_patterns:
lm = pattern.fullmatch(pc_word) if fullmatch else pattern.match(pc_word)
if lm:
trange = (pos, pos + cc_spaces + sum(c in ' ,-:>' for c in lm.group(0)))
for out, out_ccs in info:
if cc is None or cc in out_ccs:
if out:
outcodes.add((*trange, lm.expand(out)))
else:
outcodes.add((*trange, lm.group(0)[:-1]))
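# A minimal standalone sketch of the 'd'/'l' pattern expansion done in the
# constructor above. The country codes and patterns here are invented for
# illustration, not taken from country_settings.yaml.
import re

_raw = {'XA': 'ddddd', 'XB': 'lll dddd'}  # hypothetical postcode patterns
_compiled = {cc: re.compile(pat.replace('d', '[0-9]').replace('l', '[A-Z]'))
             for cc, pat in _raw.items()}

assert _compiled['XA'].fullmatch('81925')
assert _compiled['XB'].fullmatch('ABC 1234')
assert not _compiled['XA'].fullmatch('8192')  # too short, no match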

View File

@@ -7,88 +7,95 @@
"""
Datastructures for a tokenized query.
"""
from typing import List, Tuple, Optional, Iterator
from typing import Dict, List, Tuple, Optional, Iterator
from abc import ABC, abstractmethod
from collections import defaultdict
import dataclasses
import enum
class BreakType(enum.Enum):
""" Type of break between tokens.
"""
START = '<'
""" Begin of the query. """
END = '>'
""" End of the query. """
PHRASE = ','
""" Break between two phrases. """
WORD = ' '
""" Break between words. """
PART = '-'
""" Break inside a word, for example a hyphen or apostrophe. """
TOKEN = '`'
""" Break created as a result of tokenization.
This may happen in languages without spaces between words.
BreakType = str
""" Type of break between tokens.
"""
BREAK_START = '<'
""" Begin of the query. """
BREAK_END = '>'
""" End of the query. """
BREAK_PHRASE = ','
""" Hard break between two phrases. Address parts cannot cross hard
phrase boundaries."""
BREAK_SOFT_PHRASE = ':'
""" Likely break between two phrases. Address parts should not cross soft
phrase boundaries. Soft breaks can be inserted by a preprocessor
that is analysing the input string.
"""
BREAK_WORD = ' '
""" Break between words. """
BREAK_PART = '-'
""" Break inside a word, for example a hyphen or apostrophe. """
BREAK_TOKEN = '`'
""" Break created as a result of tokenization.
This may happen in languages without spaces between words.
"""
TokenType = str
""" Type of token.
"""
TOKEN_WORD = 'W'
""" Full name of a place. """
TOKEN_PARTIAL = 'w'
""" Word term without breaks, does not necessarily represent a full name. """
TOKEN_HOUSENUMBER = 'H'
""" Housenumber term. """
TOKEN_POSTCODE = 'P'
""" Postal code term. """
TOKEN_COUNTRY = 'C'
""" Country name or reference. """
TOKEN_QUALIFIER = 'Q'
""" Special term used together with name (e.g. _Hotel_ Bellevue). """
TOKEN_NEAR_ITEM = 'N'
""" Special term used as searchable object(e.g. supermarket in ...). """
PhraseType = int
""" Designation of a phrase.
"""
PHRASE_ANY = 0
""" No specific designation (i.e. source is free-form query). """
PHRASE_AMENITY = 1
""" Contains name or type of a POI. """
PHRASE_STREET = 2
""" Contains a street name optionally with a housenumber. """
PHRASE_CITY = 3
""" Contains the postal city. """
PHRASE_COUNTY = 4
""" Contains the equivalent of a county. """
PHRASE_STATE = 5
""" Contains a state or province. """
PHRASE_POSTCODE = 6
""" Contains a postal code. """
PHRASE_COUNTRY = 7
""" Contains the country name or code. """
def _phrase_compatible_with(ptype: PhraseType, ttype: TokenType,
is_full_phrase: bool) -> bool:
""" Check if the given token type can be used with the phrase type.
"""
if ptype == PHRASE_ANY:
return not is_full_phrase or ttype != TOKEN_QUALIFIER
if ptype == PHRASE_AMENITY:
return ttype in (TOKEN_WORD, TOKEN_PARTIAL)\
or (is_full_phrase and ttype == TOKEN_NEAR_ITEM)\
or (not is_full_phrase and ttype == TOKEN_QUALIFIER)
if ptype == PHRASE_STREET:
return ttype in (TOKEN_WORD, TOKEN_PARTIAL, TOKEN_HOUSENUMBER)
if ptype == PHRASE_POSTCODE:
return ttype == TOKEN_POSTCODE
if ptype == PHRASE_COUNTRY:
return ttype == TOKEN_COUNTRY
class TokenType(enum.Enum):
""" Type of token.
"""
WORD = enum.auto()
""" Full name of a place. """
PARTIAL = enum.auto()
""" Word term without breaks, does not necessarily represent a full name. """
HOUSENUMBER = enum.auto()
""" Housenumber term. """
POSTCODE = enum.auto()
""" Postal code term. """
COUNTRY = enum.auto()
""" Country name or reference. """
QUALIFIER = enum.auto()
""" Special term used together with name (e.g. _Hotel_ Bellevue). """
NEAR_ITEM = enum.auto()
""" Special term used as searchable object(e.g. supermarket in ...). """
class PhraseType(enum.Enum):
""" Designation of a phrase.
"""
NONE = 0
""" No specific designation (i.e. source is free-form query). """
AMENITY = enum.auto()
""" Contains name or type of a POI. """
STREET = enum.auto()
""" Contains a street name optionally with a housenumber. """
CITY = enum.auto()
""" Contains the postal city. """
COUNTY = enum.auto()
""" Contains the equivalent of a county. """
STATE = enum.auto()
""" Contains a state or province. """
POSTCODE = enum.auto()
""" Contains a postal code. """
COUNTRY = enum.auto()
""" Contains the country name or code. """
def compatible_with(self, ttype: TokenType,
is_full_phrase: bool) -> bool:
""" Check if the given token type can be used with the phrase type.
"""
if self == PhraseType.NONE:
return not is_full_phrase or ttype != TokenType.QUALIFIER
if self == PhraseType.AMENITY:
return ttype in (TokenType.WORD, TokenType.PARTIAL)\
or (is_full_phrase and ttype == TokenType.NEAR_ITEM)\
or (not is_full_phrase and ttype == TokenType.QUALIFIER)
if self == PhraseType.STREET:
return ttype in (TokenType.WORD, TokenType.PARTIAL, TokenType.HOUSENUMBER)
if self == PhraseType.POSTCODE:
return ttype == TokenType.POSTCODE
if self == PhraseType.COUNTRY:
return ttype == TokenType.COUNTRY
return ttype in (TokenType.WORD, TokenType.PARTIAL)
return ttype in (TOKEN_WORD, TOKEN_PARTIAL)
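# Sanity examples for the compatibility rules above (using the constants
# defined earlier in this file):
#
#   _phrase_compatible_with(PHRASE_STREET, TOKEN_HOUSENUMBER, False)  # True
#   _phrase_compatible_with(PHRASE_POSTCODE, TOKEN_WORD, True)        # False
#   _phrase_compatible_with(PHRASE_CITY, TOKEN_PARTIAL, False)        # True (fallback)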
@dataclasses.dataclass
@@ -116,6 +123,7 @@ class TokenRange:
"""
start: int
end: int
penalty: Optional[float] = None
def __lt__(self, other: 'TokenRange') -> bool:
return self.end <= other.start
@@ -164,11 +172,33 @@ class TokenList:
@dataclasses.dataclass
class QueryNode:
""" A node of the query representing a break between terms.
The node also contains information on the source term
ending at the node. The tokens are created from this information.
"""
btype: BreakType
ptype: PhraseType
penalty: float
""" Penalty for the break at this node.
"""
term_lookup: str
""" Transliterated term following this node.
"""
term_normalized: str
""" Normalised form of term following this node.
When the token resulted from a split during transliteration,
this string contains the complete source term.
"""
starting: List[TokenList] = dataclasses.field(default_factory=list)
def adjust_break(self, btype: BreakType, penalty: float) -> None:
""" Change the break type and penalty for this node.
"""
self.btype = btype
self.penalty = penalty
def has_tokens(self, end: int, *ttypes: TokenType) -> bool:
""" Check if there are tokens of the given types ending at the
given node.
@@ -211,19 +241,22 @@ class QueryStruct:
def __init__(self, source: List[Phrase]) -> None:
self.source = source
self.nodes: List[QueryNode] = \
[QueryNode(BreakType.START, source[0].ptype if source else PhraseType.NONE)]
[QueryNode(BREAK_START, source[0].ptype if source else PHRASE_ANY,
0.0, '', '')]
def num_token_slots(self) -> int:
""" Return the length of the query in vertice steps.
"""
return len(self.nodes) - 1
def add_node(self, btype: BreakType, ptype: PhraseType) -> None:
def add_node(self, btype: BreakType, ptype: PhraseType,
break_penalty: float = 0.0,
term_lookup: str = '', term_normalized: str = '') -> None:
""" Append a new break node with the given break type.
The phrase type denotes the type for any tokens starting
at the node.
"""
self.nodes.append(QueryNode(btype, ptype))
self.nodes.append(QueryNode(btype, ptype, break_penalty, term_lookup, term_normalized))
def add_token(self, trange: TokenRange, ttype: TokenType, token: Token) -> None:
""" Add a token to the query. 'start' and 'end' are the indexes of the
@@ -236,9 +269,9 @@ class QueryStruct:
be added to, then the token is silently dropped.
"""
snode = self.nodes[trange.start]
full_phrase = snode.btype in (BreakType.START, BreakType.PHRASE)\
and self.nodes[trange.end].btype in (BreakType.PHRASE, BreakType.END)
if snode.ptype.compatible_with(ttype, full_phrase):
full_phrase = snode.btype in (BREAK_START, BREAK_PHRASE)\
and self.nodes[trange.end].btype in (BREAK_PHRASE, BREAK_END)
if _phrase_compatible_with(snode.ptype, ttype, full_phrase):
tlist = snode.get_tokens(trange.end, ttype)
if tlist is None:
snode.starting.append(TokenList(trange.end, ttype, [token]))
@@ -258,7 +291,7 @@ class QueryStruct:
going to the subsequent node. Such PARTIAL tokens are
assumed to exist.
"""
return [next(iter(self.get_tokens(TokenRange(i, i+1), TokenType.PARTIAL)))
return [next(iter(self.get_tokens(TokenRange(i, i+1), TOKEN_PARTIAL)))
for i in range(trange.start, trange.end)]
def iter_token_lists(self) -> Iterator[Tuple[int, QueryNode, TokenList]]:
@@ -278,5 +311,44 @@ class QueryStruct:
for tlist in node.starting:
for t in tlist.tokens:
if t.token == token:
return f"[{tlist.ttype.name[0]}]{t.lookup_word}"
return f"[{tlist.ttype}]{t.lookup_word}"
return 'None'
def get_transliterated_query(self) -> str:
""" Return a string representation of the transliterated query
with the character representation of the different break types.
For debugging purposes only.
"""
return ''.join(''.join((n.term_lookup, n.btype)) for n in self.nodes)
def extract_words(self, base_penalty: float = 0.0,
start: int = 0,
endpos: Optional[int] = None) -> Dict[str, List[TokenRange]]:
""" Add all combinations of words that can be formed from the terms
between the given start and endnode. The terms are joined with
spaces for each break. Words can never go across a BREAK_PHRASE.
The functions returns a dictionary of possible words with their
position within the query and a penalty. The penalty is computed
from the base_penalty plus the penalty for each node the word
crosses.
"""
if endpos is None:
endpos = len(self.nodes)
words: Dict[str, List[TokenRange]] = defaultdict(list)
for first in range(start, endpos - 1):
word = self.nodes[first + 1].term_lookup
penalty = base_penalty
words[word].append(TokenRange(first, first + 1, penalty=penalty))
if self.nodes[first + 1].btype != BREAK_PHRASE:
for last in range(first + 2, min(first + 20, endpos)):
word = ' '.join((word, self.nodes[last].term_lookup))
penalty += self.nodes[last - 1].penalty
words[word].append(TokenRange(first, last, penalty=penalty))
if self.nodes[last].btype == BREAK_PHRASE:
break
return words
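# An illustration of the result shape, assuming a query of the two terms
# 'north' and 'road' separated by a word break whose node penalty is 0.1
# (the penalty value is an assumption for the example):
#
#   query.extract_words()
#   # -> {'north':      [TokenRange(0, 1, penalty=0.0)],
#   #     'road':       [TokenRange(1, 2, penalty=0.0)],
#   #     'north road': [TokenRange(0, 2, penalty=0.1)]}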

View File

@@ -24,12 +24,13 @@ class TypedRange:
PENALTY_TOKENCHANGE = {
qmod.BreakType.START: 0.0,
qmod.BreakType.END: 0.0,
qmod.BreakType.PHRASE: 0.0,
qmod.BreakType.WORD: 0.1,
qmod.BreakType.PART: 0.2,
qmod.BreakType.TOKEN: 0.4
qmod.BREAK_START: 0.0,
qmod.BREAK_END: 0.0,
qmod.BREAK_PHRASE: 0.0,
qmod.BREAK_SOFT_PHRASE: 0.0,
qmod.BREAK_WORD: 0.1,
qmod.BREAK_PART: 0.2,
qmod.BREAK_TOKEN: 0.4
}
TypedRangeSeq = List[TypedRange]
@@ -55,17 +56,17 @@ class TokenAssignment:
"""
out = TokenAssignment()
for token in ranges:
if token.ttype == qmod.TokenType.PARTIAL:
if token.ttype == qmod.TOKEN_PARTIAL:
out.address.append(token.trange)
elif token.ttype == qmod.TokenType.HOUSENUMBER:
elif token.ttype == qmod.TOKEN_HOUSENUMBER:
out.housenumber = token.trange
elif token.ttype == qmod.TokenType.POSTCODE:
elif token.ttype == qmod.TOKEN_POSTCODE:
out.postcode = token.trange
elif token.ttype == qmod.TokenType.COUNTRY:
elif token.ttype == qmod.TOKEN_COUNTRY:
out.country = token.trange
elif token.ttype == qmod.TokenType.NEAR_ITEM:
elif token.ttype == qmod.TOKEN_NEAR_ITEM:
out.near_item = token.trange
elif token.ttype == qmod.TokenType.QUALIFIER:
elif token.ttype == qmod.TOKEN_QUALIFIER:
out.qualifier = token.trange
return out
@@ -83,7 +84,7 @@ class _TokenSequence:
self.penalty = penalty
def __str__(self) -> str:
seq = ''.join(f'[{r.trange.start} - {r.trange.end}: {r.ttype.name}]' for r in self.seq)
seq = ''.join(f'[{r.trange.start} - {r.trange.end}: {r.ttype}]' for r in self.seq)
return f'{seq} (dir: {self.direction}, penalty: {self.penalty})'
@property
@@ -104,7 +105,7 @@ class _TokenSequence:
"""
# Country and category must be the final term for left-to-right
return len(self.seq) > 1 and \
self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.NEAR_ITEM)
self.seq[-1].ttype in (qmod.TOKEN_COUNTRY, qmod.TOKEN_NEAR_ITEM)
def appendable(self, ttype: qmod.TokenType) -> Optional[int]:
""" Check if the give token type is appendable to the existing sequence.
@@ -113,23 +114,23 @@ class _TokenSequence:
new direction of the sequence after adding such a type. The
token is not added.
"""
if ttype == qmod.TokenType.WORD:
if ttype == qmod.TOKEN_WORD:
return None
if not self.seq:
# Append unconditionally to the empty list
if ttype == qmod.TokenType.COUNTRY:
if ttype == qmod.TOKEN_COUNTRY:
return -1
if ttype in (qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
if ttype in (qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
return 1
return self.direction
# Name tokens are always acceptable and don't change direction
if ttype == qmod.TokenType.PARTIAL:
if ttype == qmod.TOKEN_PARTIAL:
# qualifiers cannot appear in the middle of the query. They need
# to be near the next phrase.
if self.direction == -1 \
and any(t.ttype == qmod.TokenType.QUALIFIER for t in self.seq[:-1]):
and any(t.ttype == qmod.TOKEN_QUALIFIER for t in self.seq[:-1]):
return None
return self.direction
@@ -137,54 +138,54 @@ class _TokenSequence:
if self.has_types(ttype):
return None
if ttype == qmod.TokenType.HOUSENUMBER:
if ttype == qmod.TOKEN_HOUSENUMBER:
if self.direction == 1:
if len(self.seq) == 1 and self.seq[0].ttype == qmod.TokenType.QUALIFIER:
if len(self.seq) == 1 and self.seq[0].ttype == qmod.TOKEN_QUALIFIER:
return None
if len(self.seq) > 2 \
or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY):
return None # direction left-to-right: housenumber must come before anything
elif (self.direction == -1
or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY)):
or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY)):
return -1 # force direction right-to-left if after other terms
return self.direction
if ttype == qmod.TokenType.POSTCODE:
if ttype == qmod.TOKEN_POSTCODE:
if self.direction == -1:
if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
if self.has_types(qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
return None
return -1
if self.direction == 1:
return None if self.has_types(qmod.TokenType.COUNTRY) else 1
if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
return None if self.has_types(qmod.TOKEN_COUNTRY) else 1
if self.has_types(qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
return 1
return self.direction
if ttype == qmod.TokenType.COUNTRY:
if ttype == qmod.TOKEN_COUNTRY:
return None if self.direction == -1 else 1
if ttype == qmod.TokenType.NEAR_ITEM:
if ttype == qmod.TOKEN_NEAR_ITEM:
return self.direction
if ttype == qmod.TokenType.QUALIFIER:
if ttype == qmod.TOKEN_QUALIFIER:
if self.direction == 1:
if (len(self.seq) == 1
and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.NEAR_ITEM)) \
and self.seq[0].ttype in (qmod.TOKEN_PARTIAL, qmod.TOKEN_NEAR_ITEM)) \
or (len(self.seq) == 2
and self.seq[0].ttype == qmod.TokenType.NEAR_ITEM
and self.seq[1].ttype == qmod.TokenType.PARTIAL):
and self.seq[0].ttype == qmod.TOKEN_NEAR_ITEM
and self.seq[1].ttype == qmod.TOKEN_PARTIAL):
return 1
return None
if self.direction == -1:
return -1
tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.NEAR_ITEM else self.seq
tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TOKEN_NEAR_ITEM else self.seq
if len(tempseq) == 0:
return 1
if len(tempseq) == 1 and self.seq[0].ttype == qmod.TokenType.HOUSENUMBER:
if len(tempseq) == 1 and self.seq[0].ttype == qmod.TOKEN_HOUSENUMBER:
return None
if len(tempseq) > 1 or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
if len(tempseq) > 1 or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY):
return -1
return 0
@@ -204,7 +205,7 @@ class _TokenSequence:
new_penalty = 0.0
else:
last = self.seq[-1]
if btype != qmod.BreakType.PHRASE and last.ttype == ttype:
if btype != qmod.BREAK_PHRASE and last.ttype == ttype:
# extend the existing range
newseq = self.seq[:-1] + [TypedRange(ttype, last.trange.replace_end(end_pos))]
new_penalty = 0.0
@@ -239,18 +240,18 @@ class _TokenSequence:
# housenumbers may not be further than 2 words from the beginning.
# If there are two words in front, give it a penalty.
hnrpos = next((i for i, tr in enumerate(self.seq)
if tr.ttype == qmod.TokenType.HOUSENUMBER),
if tr.ttype == qmod.TOKEN_HOUSENUMBER),
None)
if hnrpos is not None:
if self.direction != -1:
priors = sum(1 for t in self.seq[:hnrpos] if t.ttype == qmod.TokenType.PARTIAL)
priors = sum(1 for t in self.seq[:hnrpos] if t.ttype == qmod.TOKEN_PARTIAL)
if not self._adapt_penalty_from_priors(priors, -1):
return False
if self.direction != 1:
priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TokenType.PARTIAL)
priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TOKEN_PARTIAL)
if not self._adapt_penalty_from_priors(priors, 1):
return False
if any(t.ttype == qmod.TokenType.NEAR_ITEM for t in self.seq):
if any(t.ttype == qmod.TOKEN_NEAR_ITEM for t in self.seq):
self.penalty += 1.0
return True
@@ -268,10 +269,9 @@ class _TokenSequence:
# <address>,<postcode> should give preference to address search
if base.postcode.start == 0:
penalty = self.penalty
self.direction = -1 # name searches are only possible backwards
else:
penalty = self.penalty + 0.1
self.direction = 1 # name searches are only possible forwards
penalty += 0.1 * max(0, len(base.address) - 1)
yield dataclasses.replace(base, penalty=penalty)
def _get_assignments_address_forward(self, base: TokenAssignment,
@@ -281,6 +281,11 @@ class _TokenSequence:
"""
first = base.address[0]
# The postcode must come after the name.
if base.postcode and base.postcode < first:
log().var_dump('skip forward', (base.postcode, first))
return
log().comment('first word = name')
yield dataclasses.replace(base, penalty=self.penalty,
name=first, address=base.address[1:])
@@ -292,7 +297,7 @@ class _TokenSequence:
# * the containing phrase is strictly typed
if (base.housenumber and first.end < base.housenumber.start)\
or (base.qualifier and base.qualifier > first)\
or (query.nodes[first.start].ptype != qmod.PhraseType.NONE):
or (query.nodes[first.start].ptype != qmod.PHRASE_ANY):
return
penalty = self.penalty
@@ -316,7 +321,12 @@ class _TokenSequence:
"""
last = base.address[-1]
if self.direction == -1 or len(base.address) > 1:
# The postcode must come before the name for backward direction.
if base.postcode and base.postcode > last:
log().var_dump('skip backward', (base.postcode, last))
return
if self.direction == -1 or len(base.address) > 1 or base.postcode:
log().comment('last word = name')
yield dataclasses.replace(base, penalty=self.penalty,
name=last, address=base.address[:-1])
@@ -328,7 +338,7 @@ class _TokenSequence:
# * the containing phrase is strictly typed
if (base.housenumber and last.start > base.housenumber.end)\
or (base.qualifier and base.qualifier < last)\
or (query.nodes[last.start].ptype != qmod.PhraseType.NONE):
or (query.nodes[last.start].ptype != qmod.PHRASE_ANY):
return
penalty = self.penalty
@@ -392,7 +402,7 @@ def yield_token_assignments(query: qmod.QueryStruct) -> Iterator[TokenAssignment
another. It does not include penalties for transitions within a
type.
"""
todo = [_TokenSequence([], direction=0 if query.source[0].ptype == qmod.PhraseType.NONE else 1)]
todo = [_TokenSequence([], direction=0 if query.source[0].ptype == qmod.PHRASE_ANY else 1)]
while todo:
state = todo.pop()

View File

@@ -173,7 +173,7 @@ class Geometry(types.UserDefinedType): # type: ignore[type-arg]
def __init__(self, subtype: str = 'Geometry'):
self.subtype = subtype
def get_col_spec(self) -> str:
def get_col_spec(self, **_: Any) -> str:
return f'GEOMETRY({self.subtype}, 4326)'
def bind_processor(self, dialect: 'sa.Dialect') -> Callable[[Any], str]:

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Common json type for different dialects.
@@ -24,6 +24,6 @@ class Json(sa.types.TypeDecorator[Any]):
def load_dialect_impl(self, dialect: SaDialect) -> sa.types.TypeEngine[Any]:
if dialect.name == 'postgresql':
return JSONB(none_as_null=True) # type: ignore[no-untyped-call]
return JSONB(none_as_null=True)
return sqlite_json(none_as_null=True)

View File

@@ -144,7 +144,7 @@ class Point(NamedTuple):
except ValueError as exc:
raise UsageError('Point parameter needs to be numbers.') from exc
if x < -180.0 or x > 180.0 or y < -90.0 or y > 90.0:
if not -180 <= x <= 180 or not -90 <= y <= 90.0:
raise UsageError('Point coordinates invalid.')
return Point(x, y)

View File

@@ -25,8 +25,8 @@ def get_label_tag(category: Tuple[str, str], extratags: Optional[Mapping[str, st
elif rank < 26 and extratags and 'linked_place' in extratags:
label = extratags['linked_place']
elif category == ('boundary', 'administrative'):
label = ADMIN_LABELS.get((country or '', int(rank/2)))\
or ADMIN_LABELS.get(('', int(rank/2)))\
label = ADMIN_LABELS.get((country or '', rank // 2))\
or ADMIN_LABELS.get(('', rank // 2))\
or 'Administrative'
elif category[1] == 'postal_code':
label = 'postcode'

View File

@@ -249,6 +249,9 @@ def format_base_geocodejson(results: Union[ReverseResults, SearchResults],
out.keyval(f"level{line.admin_level}", line.local_name)
out.end_object().next()
if options.get('extratags', False):
out.keyval('extra', result.extratags)
out.end_object().next().end_object().next()
out.key('geometry').raw(result.geometry.get('geojson')

View File

@@ -8,4 +8,4 @@
Version information for the Nominatim API.
"""
NOMINATIM_API_VERSION = '4.5.0'
NOMINATIM_API_VERSION = '5.1.0'

View File

@@ -2,16 +2,15 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Command-line interface to the Nominatim functions for import, update,
database administration and querying.
"""
from typing import Optional, Any
from typing import Optional, List, Mapping
import importlib
import logging
import os
import sys
import argparse
import asyncio
@@ -81,13 +80,14 @@ class CommandlineParser:
parser.set_defaults(command=cmd)
cmd.add_args(parser)
def run(self, **kwargs: Any) -> int:
def run(self, cli_args: Optional[List[str]],
environ: Optional[Mapping[str, str]]) -> int:
""" Parse the command line arguments of the program and execute the
appropriate subcommand.
"""
args = NominatimArgs()
try:
self.parser.parse_args(args=kwargs.get('cli_args'), namespace=args)
self.parser.parse_args(args=cli_args, namespace=args)
except SystemExit:
return 1
@@ -101,23 +101,19 @@ class CommandlineParser:
args.project_dir = Path(args.project_dir).resolve()
if 'cli_args' not in kwargs:
if cli_args is None:
logging.basicConfig(stream=sys.stderr,
format='%(asctime)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=max(4 - args.verbose, 1) * 10)
args.config = Configuration(args.project_dir,
environ=kwargs.get('environ', os.environ))
args.config.set_libdirs(osm2pgsql=kwargs['osm2pgsql_path'])
args.config = Configuration(args.project_dir, environ=environ)
log = logging.getLogger()
log.warning('Using project directory: %s', str(args.project_dir))
try:
ret = args.command.run(args)
return ret
return args.command.run(args)
except UsageError as exception:
if log.isEnabledFor(logging.DEBUG):
raise # use Python's exception printing
@@ -233,9 +229,16 @@ def get_set_parser() -> CommandlineParser:
return parser
def nominatim(**kwargs: Any) -> int:
def nominatim(cli_args: Optional[List[str]] = None,
environ: Optional[Mapping[str, str]] = None) -> int:
"""\
Command-line tools for importing, updating, administrating and
querying the Nominatim database.
'cli_args' is a list of parameters for the command to run. If not given,
sys.argv will be used.
'environ' is the dictionary of environment variables containing the
Nominatim configuration. When None, the os.environ is inherited.
"""
return get_set_parser().run(**kwargs)
return get_set_parser().run(cli_args=cli_args, environ=environ)
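# A hedged usage sketch for driving the CLI programmatically; the import
# path and argument vector are illustrative assumptions:
#
#   from nominatim_db.cli import nominatim   # import path assumed
#   ret = nominatim(cli_args=['--help'], environ={})
#   # returns 1: argparse raises SystemExit for --help, which run() catches
#
# When cli_args is None, sys.argv is parsed instead and the logging setup
# in run() above is applied; when environ is None, os.environ is inherited.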

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Provides custom functions over command-line arguments.
@@ -186,7 +186,7 @@ class NominatimArgs:
from the command line arguments. The resulting dict can be
further customized and then used in `run_osm2pgsql()`.
"""
return dict(osm2pgsql=self.config.OSM2PGSQL_BINARY or self.config.lib_dir.osm2pgsql,
return dict(osm2pgsql=self.config.OSM2PGSQL_BINARY,
osm2pgsql_cache=self.osm2pgsql_cache or default_cache,
osm2pgsql_style=self.config.get_import_style_file(),
osm2pgsql_style_path=self.config.lib_dir.lua,

View File

@@ -122,13 +122,16 @@ class SetupAll:
LOG.warning('Post-process tables')
with connect(args.config.get_libpq_dsn()) as conn:
conn.autocommit = True
await database_import.create_search_indices(conn, args.config,
drop=args.no_updates,
threads=num_threads)
LOG.warning('Create search index for default country names.')
conn.autocommit = False
country_info.create_country_names(conn, tokenizer,
args.config.get_str_list('LANGUAGES'))
if args.no_updates:
conn.autocommit = True
freeze.drop_update_tables(conn)
tokenizer.finalize_import(args.config)
@@ -183,6 +186,7 @@ class SetupAll:
from ..tools import database_import, refresh
with connect(config.get_libpq_dsn()) as conn:
conn.autocommit = True
LOG.warning('Create functions (1st pass)')
refresh.create_functions(conn, config, False, False)
LOG.warning('Create tables')

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Nominatim configuration accessor.
@@ -73,7 +73,6 @@ class Configuration:
self.project_dir = None
class _LibDirs:
osm2pgsql: Path
sql = paths.SQLLIB_DIR
lua = paths.LUALIB_DIR
data = paths.DATA_DIR

View File

@@ -102,10 +102,10 @@ def server_version_tuple(conn: Connection) -> Tuple[int, int]:
Converts correctly for pre-10 and post-10 PostgreSQL versions.
"""
version = conn.info.server_version
if version < 100000:
return (int(version / 10000), int((version % 10000) / 100))
return (int(version / 10000), version % 10000)
major, minor = divmod(version, 10000)
if major < 10:
minor //= 100
return major, minor
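# Worked examples for the conversion above:
#   version ==  90620  ->  divmod -> (9, 620)  ->  (9, 6)
#   version == 150004  ->  divmod -> (15, 4)   ->  (15, 4)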
def postgis_version_tuple(conn: Connection) -> Tuple[int, int]:

View File

@@ -50,8 +50,8 @@ class ProgressLogger:
places_per_sec = self.done_places / done_time
eta = (self.total_places - self.done_places) / places_per_sec
LOG.warning("Done %d in %d @ %.3f per second - %s ETA (seconds): %.2f",
self.done_places, int(done_time),
LOG.warning("Done %d in %.0f @ %.3f per second - %s ETA (seconds): %.2f",
self.done_places, done_time,
places_per_sec, self.name, eta)
self.next_info += int(places_per_sec) * self.log_interval
@@ -68,8 +68,8 @@ class ProgressLogger:
diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
places_per_sec = self.done_places / diff_seconds
LOG.warning("Done %d/%d in %d @ %.3f per second - FINISHED %s\n",
self.done_places, self.total_places, int(diff_seconds),
LOG.warning("Done %d/%d in %.0f @ %.3f per second - FINISHED %s\n",
self.done_places, self.total_places, diff_seconds,
places_per_sec, self.name)
return self.done_places

View File

@@ -25,6 +25,8 @@ class ICUTokenAnalysis:
def __init__(self, norm_rules: str, trans_rules: str,
analysis_rules: Mapping[Optional[str], 'TokenAnalyzerRule']):
# additional break signs are not relevant during name analysis
norm_rules += ";[[:Space:][-:]]+ > ' ';"
self.normalizer = Transliterator.createFromRules("icu_normalization",
norm_rules)
trans_rules += ";[:Space:]+ > ' '"

View File

@@ -121,10 +121,10 @@ class ICUTokenizer(AbstractTokenizer):
SELECT unnest(nameaddress_vector) as id, count(*)
FROM search_name GROUP BY id)
SELECT coalesce(a.id, w.id) as id,
(CASE WHEN w.count is null THEN '{}'::JSONB
(CASE WHEN w.count is null or w.count <= 1 THEN '{}'::JSONB
ELSE jsonb_build_object('count', w.count) END
||
CASE WHEN a.count is null THEN '{}'::JSONB
CASE WHEN a.count is null or a.count <= 1 THEN '{}'::JSONB
ELSE jsonb_build_object('addr_count', a.count) END) as info
FROM word_freq w FULL JOIN addr_freq a ON a.id = w.id;
""")
@@ -134,9 +134,10 @@ class ICUTokenizer(AbstractTokenizer):
drop_tables(conn, 'tmp_word')
cur.execute("""CREATE TABLE tmp_word AS
SELECT word_id, word_token, type, word,
(CASE WHEN wf.info is null THEN word.info
ELSE coalesce(word.info, '{}'::jsonb) || wf.info
END) as info
coalesce(word.info, '{}'::jsonb)
- 'count' - 'addr_count' ||
coalesce(wf.info, '{}'::jsonb)
as info
FROM word LEFT JOIN word_frequencies wf
ON word.word_id = wf.id
""")
@@ -381,76 +382,15 @@ class ICUNameAnalyzer(AbstractAnalyzer):
return postcode.strip().upper()
def update_postcodes_from_db(self) -> None:
""" Update postcode tokens in the word table from the location_postcode
table.
""" Postcode update.
Removes all postcodes from the word table because they are not
needed. Postcodes are recognised by pattern.
"""
assert self.conn is not None
analyzer = self.token_analysis.analysis.get('@postcode')
with self.conn.cursor() as cur:
# First get all postcode names currently in the word table.
cur.execute("SELECT DISTINCT word FROM word WHERE type = 'P'")
word_entries = set((entry[0] for entry in cur))
# Then compute the required postcode names from the postcode table.
needed_entries = set()
cur.execute("SELECT country_code, postcode FROM location_postcode")
for cc, postcode in cur:
info = PlaceInfo({'country_code': cc,
'class': 'place', 'type': 'postcode',
'address': {'postcode': postcode}})
address = self.sanitizer.process_names(info)[1]
for place in address:
if place.kind == 'postcode':
if analyzer is None:
postcode_name = place.name.strip().upper()
variant_base = None
else:
postcode_name = analyzer.get_canonical_id(place)
variant_base = place.get_attr("variant")
if variant_base:
needed_entries.add(f'{postcode_name}@{variant_base}')
else:
needed_entries.add(postcode_name)
break
# Now update the word table.
self._delete_unused_postcode_words(word_entries - needed_entries)
self._add_missing_postcode_words(needed_entries - word_entries)
def _delete_unused_postcode_words(self, tokens: Iterable[str]) -> None:
assert self.conn is not None
if tokens:
with self.conn.cursor() as cur:
cur.execute("DELETE FROM word WHERE type = 'P' and word = any(%s)",
(list(tokens), ))
def _add_missing_postcode_words(self, tokens: Iterable[str]) -> None:
assert self.conn is not None
if not tokens:
return
analyzer = self.token_analysis.analysis.get('@postcode')
terms = []
for postcode_name in tokens:
if '@' in postcode_name:
term, variant = postcode_name.split('@', 2)
term = self._search_normalized(term)
if analyzer is None:
variants = [term]
else:
variants = analyzer.compute_variants(variant)
if term not in variants:
variants.append(term)
else:
variants = [self._search_normalized(postcode_name)]
terms.append((postcode_name, variants))
if terms:
with self.conn.cursor() as cur:
cur.executemany("""SELECT create_postcode_word(%s, %s)""", terms)
cur.execute("DELETE FROM word WHERE type = 'P'")
def update_special_phrases(self, phrases: Iterable[Tuple[str, str, str, str]],
should_replace: bool) -> None:
@@ -645,10 +585,14 @@ class ICUNameAnalyzer(AbstractAnalyzer):
if word_id:
result = self._cache.housenumbers.get(word_id, result)
if result[0] is None:
variants = analyzer.compute_variants(word_id)
varout = analyzer.compute_variants(word_id)
if isinstance(varout, tuple):
variants = varout[0]
else:
variants = varout
if variants:
hid = execute_scalar(self.conn, "SELECT create_analyzed_hnr_id(%s, %s)",
(word_id, list(variants)))
(word_id, variants))
result = hid, variants[0]
self._cache.housenumbers[word_id] = result
@@ -693,13 +637,17 @@ class ICUNameAnalyzer(AbstractAnalyzer):
full, part = self._cache.names.get(token_id, (None, None))
if full is None:
variants = analyzer.compute_variants(word_id)
varset = analyzer.compute_variants(word_id)
if isinstance(varset, tuple):
variants, lookups = varset
else:
variants, lookups = varset, None
if not variants:
continue
with self.conn.cursor() as cur:
cur.execute("SELECT * FROM getorcreate_full_word(%s, %s)",
(token_id, variants))
cur.execute("SELECT * FROM getorcreate_full_word(%s, %s, %s)",
(token_id, variants, lookups))
full, part = cast(Tuple[int, List[int]], cur.fetchone())
self._cache.names[token_id] = (full, part)
@@ -718,32 +666,9 @@ class ICUNameAnalyzer(AbstractAnalyzer):
analyzer = self.token_analysis.analysis.get('@postcode')
if analyzer is None:
postcode_name = item.name.strip().upper()
variant_base = None
return item.name.strip().upper()
else:
postcode_name = analyzer.get_canonical_id(item)
variant_base = item.get_attr("variant")
if variant_base:
postcode = f'{postcode_name}@{variant_base}'
else:
postcode = postcode_name
if postcode not in self._cache.postcodes:
term = self._search_normalized(postcode_name)
if not term:
return None
variants = {term}
if analyzer is not None and variant_base:
variants.update(analyzer.compute_variants(variant_base))
with self.conn.cursor() as cur:
cur.execute("SELECT create_postcode_word(%s, %s)",
(postcode, list(variants)))
self._cache.postcodes.add(postcode)
return postcode_name
return analyzer.get_canonical_id(item)
class _TokenInfo:
@@ -836,5 +761,4 @@ class _TokenCache:
self.names: Dict[str, Tuple[int, List[int]]] = {}
self.partials: Dict[str, int] = {}
self.fulls: Dict[str, List[int]] = {}
self.postcodes: Set[str] = set()
self.housenumbers: Dict[str, Tuple[Optional[int], Optional[str]]] = {}

View File

@@ -7,7 +7,7 @@
"""
Common data types and protocols for analysers.
"""
from typing import Mapping, List, Any
from typing import Mapping, List, Any, Union, Tuple
from ...typing import Protocol
from ...data.place_name import PlaceName
@@ -33,7 +33,7 @@ class Analyzer(Protocol):
for example because the character set in use does not match.
"""
def compute_variants(self, canonical_id: str) -> List[str]:
def compute_variants(self, canonical_id: str) -> Union[List[str], Tuple[List[str], List[str]]]:
""" Compute the transliterated spelling variants for the given
canonical ID.

View File

@@ -2,20 +2,19 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Generic processor for names that creates abbreviation variants.
"""
from typing import Mapping, Dict, Any, Iterable, Iterator, Optional, List, cast
from typing import Mapping, Dict, Any, Iterable, Optional, List, cast, Tuple
import itertools
import datrie
from ...errors import UsageError
from ...data.place_name import PlaceName
from .config_variants import get_variant_config
from .generic_mutation import MutationVariantGenerator
from .simple_trie import SimpleTrie
# Configuration section
@@ -25,8 +24,7 @@ def configure(rules: Mapping[str, Any], normalizer: Any, _: Any) -> Dict[str, An
"""
config: Dict[str, Any] = {}
config['replacements'], config['chars'] = get_variant_config(rules.get('variants'),
normalizer)
config['replacements'], _ = get_variant_config(rules.get('variants'), normalizer)
config['variant_only'] = rules.get('mode', '') == 'variant-only'
# parse mutation rules
@@ -68,12 +66,8 @@ class GenericTokenAnalysis:
self.variant_only = config['variant_only']
# Set up datrie
if config['replacements']:
self.replacements = datrie.Trie(config['chars'])
for src, repllist in config['replacements']:
self.replacements[src] = repllist
else:
self.replacements = None
self.replacements: Optional[SimpleTrie[List[str]]] = \
SimpleTrie(config['replacements']) if config['replacements'] else None
# set up mutation rules
self.mutations = [MutationVariantGenerator(*cfg) for cfg in config['mutations']]
@@ -84,7 +78,7 @@ class GenericTokenAnalysis:
"""
return cast(str, self.norm.transliterate(name.name)).strip()
def compute_variants(self, norm_name: str) -> List[str]:
def compute_variants(self, norm_name: str) -> Tuple[List[str], List[str]]:
""" Compute the spelling variants for the given normalized name
and transliterate the result.
"""
@@ -93,18 +87,20 @@ class GenericTokenAnalysis:
for mutation in self.mutations:
variants = mutation.generate(variants)
return [name for name in self._transliterate_unique_list(norm_name, variants) if name]
def _transliterate_unique_list(self, norm_name: str,
iterable: Iterable[str]) -> Iterator[Optional[str]]:
seen = set()
varset = set(map(str.strip, variants))
if self.variant_only:
seen.add(norm_name)
varset.discard(norm_name)
for variant in map(str.strip, iterable):
if variant not in seen:
seen.add(variant)
yield self.to_ascii.transliterate(variant).strip()
trans = []
norm = []
for var in varset:
t = self.to_ascii.transliterate(var).strip()
if t:
trans.append(t)
norm.append(var)
return trans, norm
def _generate_word_variants(self, norm_name: str) -> Iterable[str]:
baseform = '^ ' + norm_name + ' ^'
@@ -116,10 +112,10 @@ class GenericTokenAnalysis:
pos = 0
force_space = False
while pos < baselen:
full, repl = self.replacements.longest_prefix_item(baseform[pos:],
(None, None))
if full is not None:
done = baseform[startpos:pos]
frm = pos
repl, pos = self.replacements.longest_prefix(baseform, pos)
if repl is not None:
done = baseform[startpos:frm]
partials = [v + done + r
for v, r in itertools.product(partials, repl)
if not force_space or r.startswith(' ')]
@@ -128,11 +124,10 @@ class GenericTokenAnalysis:
# to be helpful. Only use the original term.
startpos = 0
break
startpos = pos + len(full)
if full[-1] == ' ':
startpos -= 1
if baseform[pos - 1] == ' ':
pos -= 1
force_space = True
pos = startpos
startpos = pos
else:
pos += 1
force_space = False

View File

@@ -0,0 +1,84 @@
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Simple dict-based implementation of a trie structure.
"""
from typing import TypeVar, Generic, Tuple, Optional, List, Dict
from collections import defaultdict
T = TypeVar('T')
class SimpleTrie(Generic[T]):
""" A simple read-only trie structure.
This structure supports exactly one lookup operation,
which is longest-prefix lookup.
"""
def __init__(self, data: Optional[List[Tuple[str, T]]] = None) -> None:
self._tree: Dict[str, 'SimpleTrie[T]'] = defaultdict(SimpleTrie[T])
self._value: Optional[T] = None
self._prefix = ''
if data:
for key, value in data:
self._add(key, 0, value)
self._make_compact()
def _add(self, word: str, pos: int, value: T) -> None:
""" (Internal) Add a sub-word to the trie.
The word is added from index 'pos'. If the sub-word to add
is empty, then the trie saves the given value.
"""
if pos < len(word):
self._tree[word[pos]]._add(word, pos + 1, value)
else:
self._value = value
def _make_compact(self) -> None:
""" (Internal) Compress tree where there is exactly one subtree
and no value.
Compression works recursively starting at the leaf.
"""
for t in self._tree.values():
t._make_compact()
if len(self._tree) == 1 and self._value is None:
assert not self._prefix
for k, v in self._tree.items():
self._prefix = k + v._prefix
self._tree = v._tree
self._value = v._value
def longest_prefix(self, word: str, start: int = 0) -> Tuple[Optional[T], int]:
""" Return the longest prefix match for the given word starting at
the position 'start'.
The function returns a tuple with the value for the longest match and
the position of the word after the match. If no match was found at
all, the function returns (None, start).
"""
cur = self
pos = start
result: Tuple[Optional[T], int] = None, start
while True:
if cur._prefix:
if not word.startswith(cur._prefix, pos):
return result
pos += len(cur._prefix)
if cur._value:
result = cur._value, pos
if pos >= len(word) or word[pos] not in cur._tree:
return result
cur = cur._tree[word[pos]]
pos += 1
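# A minimal usage sketch of the trie defined above:
#
#   trie = SimpleTrie([('st ', ['street ', 'saint ']), ('str ', ['strasse '])])
#   trie.longest_prefix('st albans')    # -> (['street ', 'saint '], 3)
#   trie.longest_prefix('str albans')   # -> (['strasse '], 4)
#   trie.longest_prefix('highway')      # -> (None, 0)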

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for executing external programs.
@@ -85,7 +85,7 @@ def _mk_tablespace_options(ttype: str, options: Mapping[str, Any]) -> List[str]:
def _find_osm2pgsql_cmd(cmdline: Optional[str]) -> str:
if cmdline is not None:
if cmdline:
return cmdline
in_path = shutil.which('osm2pgsql')

View File

@@ -108,8 +108,7 @@ async def add_tiger_data(data_dir: str, config: Configuration, threads: int,
async with QueryPool(dsn, place_threads, autocommit=True) as pool:
with tokenizer.name_analyzer() as analyzer:
lines = 0
for row in tar:
for lineno, row in enumerate(tar, 1):
try:
address = dict(street=row['street'], postcode=row['postcode'])
args = ('SRID=4326;' + row['geometry'],
@@ -124,10 +123,8 @@ async def add_tiger_data(data_dir: str, config: Configuration, threads: int,
%s::INT, %s::TEXT, %s::JSONB, %s::TEXT)""",
args)
lines += 1
if lines == 1000:
if not lineno % 1000:
print('.', end='', flush=True)
lines = 0
print('', flush=True)

View File

@@ -30,8 +30,8 @@ class PointsCentroid:
if self.count == 0:
raise ValueError("No points available for centroid.")
return (float(self.sum_x/self.count)/10000000,
float(self.sum_y/self.count)/10000000)
return (self.sum_x / self.count / 10_000_000,
self.sum_y / self.count / 10_000_000)
def __len__(self) -> int:
return self.count
@@ -40,8 +40,8 @@ class PointsCentroid:
if isinstance(other, Collection) and len(other) == 2:
if all(isinstance(p, (float, int)) for p in other):
x, y = other
self.sum_x += int(x * 10000000)
self.sum_y += int(y * 10000000)
self.sum_x += int(x * 10_000_000)
self.sum_y += int(y * 10_000_000)
self.count += 1
return self
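# A standalone sketch of the fixed-point scheme above: coordinates are
# accumulated as integers in units of 1e-7 degrees, so repeated additions
# stay exact and the float division happens only once. The sample points
# are arbitrary.
pts = [(8.5, 47.25), (8.75, 47.75)]
sum_x = sum(int(x * 10_000_000) for x, _ in pts)
sum_y = sum(int(y * 10_000_000) for _, y in pts)
print(sum_x / len(pts) / 10_000_000, sum_y / len(pts) / 10_000_000)
# -> 8.625 47.5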

View File

@@ -55,7 +55,7 @@ def parse_version(version: str) -> NominatimVersion:
return NominatimVersion(*[int(x) for x in parts[:2] + parts[2].split('-')])
NOMINATIM_VERSION = parse_version('4.5.0-0')
NOMINATIM_VERSION = parse_version('5.1.0-0')
POSTGRESQL_REQUIRED_VERSION = (12, 0)
POSTGIS_REQUIRED_VERSION = (3, 0)

View File

@@ -3,9 +3,8 @@
Feature: Searches with postcodes
Various searches involving postcodes
@Fail
Scenario: US 5+4 ZIP codes are shortened to 5 ZIP codes if not found
When sending json search query "36067 1111, us" with address
When sending json search query "36067-1111, us" with address
Then result addresses contain
| postcode |
| 36067 |

View File

@@ -67,3 +67,13 @@ Feature: Structured search queries
Then result addresses contain
| town |
| Vaduz |
#3651
Scenario: Structured search with surrounding extra characters
When sending xml search query "" with address
| street | city | postalcode |
| "19 Am schrägen Weg" | "Vaduz" | "9491" |
Then result addresses contain
| house_number | road |
| 19 | Am Schrägen Weg |

View File

@@ -170,7 +170,7 @@ Feature: Import of postcodes
| object | postcode |
| W93 | 11200 |
Scenario: Postcodes are added to the postcode and word table
Scenario: Postcodes are added to the postcode table
Given the places
| osm | class | type | addr+postcode | addr+housenumber | geometry |
| N34 | place | house | 01982 | 111 |country:de |
@@ -178,7 +178,6 @@ Feature: Import of postcodes
Then location_postcode contains exactly
| country | postcode | geometry |
| de | 01982 | country:de |
And there are word tokens for postcodes 01982
@Fail
@@ -195,7 +194,7 @@ Feature: Import of postcodes
| E45 2 | gb | 23 | 5 |
| Y45 | gb | 21 | 5 |
Scenario: Postcodes outside all countries are not added to the postcode and word table
Scenario: Postcodes outside all countries are not added to the postcode table
Given the places
| osm | class | type | addr+postcode | addr+housenumber | addr+place | geometry |
| N34 | place | house | 01982 | 111 | Null Island | 0 0.00001 |
@@ -205,7 +204,6 @@ Feature: Import of postcodes
When importing
Then location_postcode contains exactly
| country | postcode | geometry |
And there are no word tokens for postcodes 01982
When sending search query "111, 01982 Null Island"
Then results contain
| osm | display_name |

View File

@@ -267,3 +267,34 @@ Feature: Rank assignment
| object | rank_search | rank_address |
| N23:amenity | 30 | 30 |
| N23:place | 16 | 16 |
Scenario: Address rank 25 is only used for addr:place
Given the grid
| 10 | 33 | 34 | 11 |
Given the places
| osm | class | type | name |
| N10 | place | village | vil |
| N11 | place | farm | farm |
And the places
| osm | class | type | name | geometry |
| W1 | highway | residential | RD | 33,11 |
And the places
| osm | class | type | name | addr+farm | geometry |
| W2 | highway | residential | RD2 | farm | 34,11 |
And the places
| osm | class | type | housenr |
| N33 | place | house | 23 |
And the places
| osm | class | type | housenr | addr+place |
| N34 | place | house | 23 | farm |
When importing
Then placex contains
| object | parent_place_id |
| N11 | N10 |
| N33 | W1 |
| N34 | N11 |
And place_addressline contains
| object | address |
| W1 | N10 |
| W2 | N10 |
| W2 | N11 |

View File

@@ -2,7 +2,7 @@
Feature: Update of postcode
Tests for updating of data related to postcodes
Scenario: A new postcode appears in the postcode and word table
Scenario: A new postcode appears in the postcode table
Given the places
| osm | class | type | addr+postcode | addr+housenumber | geometry |
| N34 | place | house | 01982 | 111 |country:de |
@@ -18,9 +18,8 @@ Feature: Update of postcode
| country | postcode | geometry |
| de | 01982 | country:de |
| ch | 4567 | country:ch |
And there are word tokens for postcodes 01982,4567
Scenario: When the last postcode is deleted, it is deleted from postcode and word
Scenario: When the last postcode is deleted, it is deleted from postcode
Given the places
| osm | class | type | addr+postcode | addr+housenumber | geometry |
| N34 | place | house | 01982 | 111 |country:de |
@@ -31,10 +30,8 @@ Feature: Update of postcode
Then location_postcode contains exactly
| country | postcode | geometry |
| ch | 4567 | country:ch |
And there are word tokens for postcodes 4567
And there are no word tokens for postcodes 01982
Scenario: A postcode is not deleted from postcode and word when it exists in another country
Scenario: A postcode is not deleted from postcode when it exists in another country
Given the places
| osm | class | type | addr+postcode | addr+housenumber | geometry |
| N34 | place | house | 01982 | 111 |country:de |
@@ -45,7 +42,6 @@ Feature: Update of postcode
Then location_postcode contains exactly
| country | postcode | geometry |
| fr | 01982 | country:fr |
And there are word tokens for postcodes 01982
Scenario: Updating a postcode is reflected in postcode table
Given the places
@@ -59,7 +55,6 @@ Feature: Update of postcode
Then location_postcode contains exactly
| country | postcode | geometry |
| de | 20453 | country:de |
And there are word tokens for postcodes 20453
Scenario: When changing from a postcode type, the entry appears in placex
When importing
@@ -80,7 +75,6 @@ Feature: Update of postcode
Then location_postcode contains exactly
| country | postcode | geometry |
| de | 20453 | country:de |
And there are word tokens for postcodes 20453
Scenario: When changing to a postcode type, the entry disappears from placex
When importing
@@ -101,7 +95,6 @@ Feature: Update of postcode
Then location_postcode contains exactly
| country | postcode | geometry |
| de | 01982 | country:de |
And there are word tokens for postcodes 01982
Scenario: When a parent is deleted, the postcode gets a new parent
Given the grid with origin DE

View File

@@ -2,43 +2,45 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
import sys
from behave import *
from behave import * # noqa
sys.path.insert(1, str(Path(__file__, '..', '..', '..', 'src').resolve()))
from steps.geometry_factory import GeometryFactory
from steps.nominatim_environment import NominatimEnvironment
from steps.geometry_factory import GeometryFactory # noqa: E402
from steps.nominatim_environment import NominatimEnvironment # noqa: E402
TEST_BASE_DIR = Path(__file__, '..', '..').resolve()
userconfig = {
'REMOVE_TEMPLATE' : False,
'KEEP_TEST_DB' : False,
'DB_HOST' : None,
'DB_PORT' : None,
'DB_USER' : None,
'DB_PASS' : None,
'TEMPLATE_DB' : 'test_template_nominatim',
'TEST_DB' : 'test_nominatim',
'API_TEST_DB' : 'test_api_nominatim',
'API_TEST_FILE' : TEST_BASE_DIR / 'testdb' / 'apidb-test-data.pbf',
'TOKENIZER' : None, # Test with a custom tokenizer
'STYLE' : 'extratags',
'REMOVE_TEMPLATE': False,
'KEEP_TEST_DB': False,
'DB_HOST': None,
'DB_PORT': None,
'DB_USER': None,
'DB_PASS': None,
'TEMPLATE_DB': 'test_template_nominatim',
'TEST_DB': 'test_nominatim',
'API_TEST_DB': 'test_api_nominatim',
'API_TEST_FILE': TEST_BASE_DIR / 'testdb' / 'apidb-test-data.pbf',
'TOKENIZER': None, # Test with a custom tokenizer
'STYLE': 'extratags',
'API_ENGINE': 'falcon'
}
use_step_matcher("re")
use_step_matcher("re") # noqa: F405
def before_all(context):
# logging setup
context.config.setup_logging()
# set up -D options
for k,v in userconfig.items():
for k, v in userconfig.items():
context.config.userdata.setdefault(k, v)
# Nominatim test setup
context.nominatim = NominatimEnvironment(context.config.userdata)
@@ -46,7 +48,7 @@ def before_all(context):
def before_scenario(context, scenario):
if not 'SQLITE' in context.tags \
if 'SQLITE' not in context.tags \
and context.config.userdata['API_TEST_DB'].startswith('sqlite:'):
context.scenario.skip("Not usable with Sqlite database.")
elif 'DB' in context.tags:
@@ -56,6 +58,7 @@ def before_scenario(context, scenario):
elif 'UNKNOWNDB' in context.tags:
context.nominatim.setup_unknown_db()
def after_scenario(context, scenario):
if 'DB' in context.tags:
context.nominatim.teardown_db(context)


@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of assertion functions used for the steps.
@@ -11,20 +11,10 @@ import json
import math
import re
class Almost:
""" Compares a float value with a certain jitter.
"""
def __init__(self, value, offset=0.00001):
self.value = value
self.offset = offset
def __eq__(self, other):
return abs(other - self.value) < self.offset
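The Almost helper removed here is superseded by math.isclose in the table_compare changes further down. A minimal sketch of the equivalent standard-library call, with illustrative values:

    import math

    # Almost(value, offset=0.00001) == other  meant  abs(other - value) < offset;
    # math.isclose expresses the same check via an absolute tolerance.
    assert math.isclose(4.512340, 4.512345, abs_tol=0.00001)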
OSM_TYPE = {'N' : 'node', 'W' : 'way', 'R' : 'relation',
'n' : 'node', 'w' : 'way', 'r' : 'relation',
'node' : 'n', 'way' : 'w', 'relation' : 'r'}
OSM_TYPE = {'N': 'node', 'W': 'way', 'R': 'relation',
'n': 'node', 'w': 'way', 'r': 'relation',
'node': 'n', 'way': 'w', 'relation': 'r'}
class OsmType:
@@ -34,11 +24,9 @@ class OsmType:
def __init__(self, value):
self.value = value
def __eq__(self, other):
return other == self.value or other == OSM_TYPE[self.value]
def __str__(self):
return f"{self.value} or {OSM_TYPE[self.value]}"
@@ -92,7 +80,6 @@ class Bbox:
return str(self.coord)
def check_for_attributes(obj, attrs, presence='present'):
""" Check that the object has the given attributes. 'attrs' is a
string with a comma-separated list of attributes. If 'presence'
@@ -110,4 +97,3 @@ def check_for_attributes(obj, attrs, presence='present'):
else:
assert attr in obj, \
f"No attribute '{attr}'. Full response:\n{_dump_json()}"


@@ -2,261 +2,261 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of aliases for various world coordinates.
"""
ALIASES = {
# Country aliases
'AD': (1.58972, 42.54241),
'AE': (54.61589, 24.82431),
'AF': (65.90264, 34.84708),
'AG': (-61.72430, 17.069),
'AI': (-63.10571, 18.25461),
'AL': (19.84941, 40.21232),
'AM': (44.64229, 40.37821),
'AO': (16.21924, -12.77014),
'AQ': (44.99999, -75.65695),
'AR': (-61.10759, -34.37615),
'AS': (-170.68470, -14.29307),
'AT': (14.25747, 47.36542),
'AU': (138.23155, -23.72068),
'AW': (-69.98255, 12.555),
'AX': (19.91839, 59.81682),
'AZ': (48.38555, 40.61639),
'BA': (17.18514, 44.25582),
'BB': (-59.53342, 13.19),
'BD': (89.75989, 24.34205),
'BE': (4.90078, 50.34682),
'BF': (-0.56743, 11.90471),
'BG': (24.80616, 43.09859),
'BH': (50.52032, 25.94685),
'BI': (29.54561, -2.99057),
'BJ': (2.70062, 10.02792),
'BL': (-62.79349, 17.907),
'BM': (-64.77406, 32.30199),
'BN': (114.52196, 4.28638),
'BO': (-62.02473, -17.77723),
'BQ': (-63.14322, 17.566),
'BR': (-45.77065, -9.58685),
'BS': (-77.60916, 23.8745),
'BT': (90.01350, 27.28137),
'BV': (3.35744, -54.4215),
'BW': (23.51505, -23.48391),
'BY': (26.77259, 53.15885),
'BZ': (-88.63489, 16.33951),
'CA': (-107.74817, 67.12612),
'CC': (96.84420, -12.01734),
'CD': (24.09544, -1.67713),
'CF': (22.58701, 5.98438),
'CG': (15.78875, 0.40388),
'CH': (7.65705, 46.57446),
'CI': (-6.31190, 6.62783),
'CK': (-159.77835, -21.23349),
'CL': (-70.41790, -53.77189),
'CM': (13.26022, 5.94519),
'CN': (96.44285, 38.04260),
'CO': (-72.52951, 2.45174),
'CR': (-83.83314, 9.93514),
'CU': (-80.81673, 21.88852),
'CV': (-24.50810, 14.929),
'CW': (-68.96409, 12.1845),
'CX': (105.62411, -10.48417),
'CY': (32.95922, 35.37010),
'CZ': (16.32098, 49.50692),
'DE': (9.30716, 50.21289),
'DJ': (42.96904, 11.41542),
'DK': (9.18490, 55.98916),
'DM': (-61.00358, 15.65470),
'DO': (-69.62855, 18.58841),
'DZ': (4.24749, 25.79721),
'EC': (-77.45831, -0.98284),
'EE': (23.94288, 58.43952),
'EG': (28.95293, 28.17718),
'EH': (-13.69031, 25.01241),
'ER': (39.01223, 14.96033),
'ES': (-2.59110, 38.79354),
'ET': (38.61697, 7.71399),
'FI': (26.89798, 63.56194),
'FJ': (177.91853, -17.74237),
'FK': (-58.99044, -51.34509),
'FM': (151.95358, 8.5045),
'FO': (-6.60483, 62.10000),
'FR': (0.28410, 47.51045),
'GA': (10.81070, -0.07429),
'GB': (-0.92823, 52.01618),
'GD': (-61.64524, 12.191),
'GE': (44.16664, 42.00385),
'GF': (-53.46524, 3.56188),
'GG': (-2.50580, 49.58543),
'GH': (-0.46348, 7.16051),
'GI': (-5.32053, 36.11066),
'GL': (-33.85511, 74.66355),
'GM': (-16.40960, 13.25),
'GN': (-13.83940, 10.96291),
'GP': (-61.68712, 16.23049),
'GQ': (10.23973, 1.43119),
'GR': (23.17850, 39.06206),
'GS': (-36.49430, -54.43067),
'GT': (-90.74368, 15.20428),
'GU': (144.73362, 13.44413),
'GW': (-14.83525, 11.92486),
'GY': (-58.45167, 5.73698),
'HK': (114.18577, 22.34923),
'HM': (73.68230, -53.22105),
'HN': (-86.95414, 15.23820),
'HR': (17.49966, 45.52689),
'HT': (-73.51925, 18.32492),
'HU': (20.35362, 47.51721),
'ID': (123.34505, -0.83791),
'IE': (-9.00520, 52.87725),
'IL': (35.46314, 32.86165),
'IM': (-4.86740, 54.023),
'IN': (88.67620, 27.86155),
'IO': (71.42743, -6.14349),
'IQ': (42.58109, 34.26103),
'IR': (56.09355, 30.46751),
'IS': (-17.51785, 64.71687),
'IT': (10.42639, 44.87904),
'JE': (-2.19261, 49.12458),
'JM': (-76.84020, 18.3935),
'JO': (36.55552, 30.75741),
'JP': (138.72531, 35.92099),
'KE': (36.90602, 1.08512),
'KG': (76.15571, 41.66497),
'KH': (104.31901, 12.95555),
'KI': (173.63353, 0.139),
'KM': (44.31474, -12.241),
'KN': (-62.69379, 17.2555),
'KP': (126.65575, 39.64575),
'KR': (127.27740, 36.41388),
'KW': (47.30684, 29.69180),
'KY': (-81.07455, 19.29949),
'KZ': (72.00811, 49.88855),
'LA': (102.44391, 19.81609),
'LB': (35.48464, 33.41766),
'LC': (-60.97894, 13.891),
'LI': (9.54693, 47.15934),
'LK': (80.38520, 8.41649),
'LR': (-11.16960, 4.04122),
'LS': (28.66984, -29.94538),
'LT': (24.51735, 55.49293),
'LU': (6.08649, 49.81533),
'LV': (23.51033, 56.67144),
'LY': (15.36841, 28.12177),
'MA': (-4.03061, 33.21696),
'MC': (7.47743, 43.62917),
'MD': (29.61725, 46.66517),
'ME': (19.72291, 43.02441),
'MF': (-63.06666, 18.08102),
'MG': (45.86378, -20.50245),
'MH': (171.94982, 5.983),
'MK': (21.42108, 41.08980),
'ML': (-1.93310, 16.46993),
'MM': (95.54624, 21.09620),
'MN': (99.81138, 48.18615),
'MO': (113.56441, 22.16209),
'MP': (145.21345, 14.14902),
'MQ': (-60.81128, 14.43706),
'MR': (-9.42324, 22.59251),
'MS': (-62.19455, 16.745),
'MT': (14.38363, 35.94467),
'MU': (57.55121, -20.41),
'MV': (73.39292, 4.19375),
'MW': (33.95722, -12.28218),
'MX': (-105.89221, 25.86826),
'MY': (112.71154, 2.10098),
'MZ': (37.58689, -13.72682),
'NA': (16.68569, -21.46572),
'NC': (164.95322, -20.38889),
'NE': (10.06041, 19.08273),
'NF': (167.95718, -29.0645),
'NG': (10.17781, 10.17804),
'NI': (-85.87974, 13.21715),
'NL': (-68.57062, 12.041),
'NO': (23.11556, 70.09934),
'NP': (83.36259, 28.13107),
'NR': (166.93479, -0.5275),
'NU': (-169.84873, -19.05305),
'NZ': (167.97209, -45.13056),
'OM': (56.86055, 20.47413),
'PA': (-79.40160, 8.80656),
'PE': (-78.66540, -7.54711),
'PF': (-145.05719, -16.70862),
'PG': (146.64600, -7.37427),
'PH': (121.48359, 15.09965),
'PK': (72.11347, 31.14629),
'PL': (17.88136, 52.77182),
'PM': (-56.19515, 46.78324),
'PN': (-130.10642, -25.06955),
'PR': (-65.88755, 18.37169),
'PS': (35.39801, 32.24773),
'PT': (-8.45743, 40.11154),
'PW': (134.49645, 7.3245),
'PY': (-59.51787, -22.41281),
'QA': (51.49903, 24.99816),
'RE': (55.77345, -21.36388),
'RO': (26.37632, 45.36120),
'RS': (20.40371, 44.56413),
'RU': (116.44060, 59.06780),
'RW': (29.57882, -1.62404),
'SA': (47.73169, 22.43790),
'SB': (164.63894, -10.23606),
'SC': (46.36566, -9.454),
'SD': (28.14720, 14.56423),
'SE': (15.68667, 60.35568),
'SG': (103.84187, 1.304),
'SH': (-12.28155, -37.11546),
'SI': (14.04738, 46.39085),
'SJ': (15.27552, 79.23365),
'SK': (20.41603, 48.86970),
'SL': (-11.47773, 8.78156),
'SM': (12.46062, 43.94279),
'SN': (-15.37111, 14.99477),
'SO': (46.93383, 9.34094),
'SR': (-55.42864, 4.56985),
'SS': (28.13573, 8.50933),
'ST': (6.61025, 0.2215),
'SV': (-89.36665, 13.43072),
'SX': (-63.15393, 17.9345),
'SY': (38.15513, 35.34221),
'SZ': (31.78263, -26.14244),
'TC': (-71.32554, 21.35),
'TD': (17.42092, 13.46223),
'TF': (137.5, -67.5),
'TG': (1.06983, 7.87677),
'TH': (102.00877, 16.42310),
'TJ': (71.91349, 39.01527),
'TK': (-171.82603, -9.20990),
'TL': (126.22520, -8.72636),
'TM': (57.71603, 39.92534),
'TN': (9.04958, 34.84199),
'TO': (-176.99320, -23.11104),
'TR': (32.82002, 39.86350),
'TT': (-60.70793, 11.1385),
'TV': (178.77499, -9.41685),
'TW': (120.30074, 23.17002),
'TZ': (33.53892, -5.01840),
'UA': (33.44335, 49.30619),
'UG': (32.96523, 2.08584),
'UM': (-169.50993, 16.74605),
'US': (-116.39535, 40.71379),
'UY': (-56.46505, -33.62658),
'UZ': (61.35529, 42.96107),
'VA': (12.33197, 42.04931),
'VC': (-61.09905, 13.316),
'VE': (-64.88323, 7.69849),
'VG': (-64.62479, 18.419),
'VI': (-64.88950, 18.32263),
'VN': (104.20179, 10.27644),
'VU': (167.31919, -15.88687),
'WF': (-176.20781, -13.28535),
'WS': (-172.10966, -13.85093),
'YE': (45.94562, 16.16338),
'YT': (44.93774, -12.60882),
'ZA': (23.19488, -30.43276),
'ZM': (26.38618, -14.39966),
'ZW': (30.12419, -19.86907)
}
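The table maps ISO country codes to one representative (lon, lat) point so that scenarios can write geometries such as country:de instead of raw coordinates. A minimal usage sketch (the WKT assembly is illustrative, not the factory's exact code):

    from geometry_alias import ALIASES

    lon, lat = ALIASES['DE']                # (9.30716, 50.21289)
    wkt = 'POINT({} {})'.format(lon, lat)   # what 'country:de' resolves to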


@@ -2,13 +2,11 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
import os
from steps.geometry_alias import ALIASES
class GeometryFactory:
""" Provides functions to create geometries from coordinates and data grids.
"""
@@ -47,7 +45,6 @@ class GeometryFactory:
return "ST_SetSRID('{}'::geometry, 4326)".format(out)
def mk_wkt_point(self, point):
""" Parse a point description.
The point may either consist of 'x y' coordinates or a number
@@ -65,7 +62,6 @@ class GeometryFactory:
assert pt is not None, "Scenario error: Point '{}' not found in grid".format(geom)
return "{} {}".format(*pt)
def mk_wkt_points(self, geom):
""" Parse a list of points.
The list must be a comma-separated list of points. Points
@@ -73,7 +69,6 @@ class GeometryFactory:
"""
return ','.join([self.mk_wkt_point(x) for x in geom.split(',')])
def set_grid(self, lines, grid_step, origin=(0.0, 0.0)):
""" Replace the grid with one from the given lines.
"""
@@ -87,7 +82,6 @@ class GeometryFactory:
x += grid_step
y += grid_step
def grid_node(self, nodeid):
""" Get the coordinates for the given grid node.
"""


@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Classes wrapping HTTP responses from the Nominatim API.
@@ -11,7 +11,7 @@ import re
import json
import xml.etree.ElementTree as ET
from check_functions import Almost, OsmType, Field, check_for_attributes
from check_functions import OsmType, Field, check_for_attributes
class GenericResponse:
@@ -45,7 +45,6 @@ class GenericResponse:
else:
self.result = [self.result]
def _parse_geojson(self):
self._parse_json()
if self.result:
@@ -76,7 +75,6 @@ class GenericResponse:
new['__' + k] = v
self.result.append(new)
def _parse_geocodejson(self):
self._parse_geojson()
if self.result:
@@ -87,7 +85,6 @@ class GenericResponse:
inner = r.pop('geocoding')
r.update(inner)
def assert_address_field(self, idx, field, value):
""" Check that result rows`idx` has a field `field` with value `value`
in its address. If idx is None, then all results are checked.
@@ -103,7 +100,6 @@ class GenericResponse:
address = self.result[idx]['address']
self.check_row_field(idx, field, value, base=address)
def match_row(self, row, context=None, field=None):
""" Match the result fields against the given behave table row.
"""
@@ -139,7 +135,6 @@ class GenericResponse:
else:
self.check_row_field(i, name, Field(value), base=subdict)
def check_row(self, idx, check, msg):
""" Assert for the condition 'check' and print 'msg' on fail together
with the contents of the failing result.
@@ -154,7 +149,6 @@ class GenericResponse:
assert check, _RowError(self.result[idx])
def check_row_field(self, idx, field, expected, base=None):
""" Check field 'field' of result 'idx' for the expected value
and print a meaningful error if the condition fails.
@@ -172,7 +166,6 @@ class GenericResponse:
f"\nBad value for field '{field}'. Expected: {expected}, got: {value}")
class SearchResponse(GenericResponse):
""" Specialised class for search and lookup responses.
Transforms the xml response in a format similar to json.
@@ -240,7 +233,8 @@ class ReverseResponse(GenericResponse):
assert 'namedetails' not in self.result[0], "More than one namedetails in result"
self.result[0]['namedetails'] = {}
for tag in child:
assert len(tag) == 0, f"Namedetails element '{tag.attrib['desc']}' has subelements"
assert len(tag) == 0, \
f"Namedetails element '{tag.attrib['desc']}' has subelements"
self.result[0]['namedetails'][tag.attrib['desc']] = tag.text
elif child.tag == 'geokml':
assert 'geokml' not in self.result[0], "More than one geokml in result"


@@ -2,10 +2,9 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
import importlib
import tempfile
import psycopg
@@ -13,10 +12,9 @@ from psycopg import sql as pysql
from nominatim_db import cli
from nominatim_db.config import Configuration
from nominatim_db.db.connection import Connection, register_hstore, execute_scalar
from nominatim_db.tools import refresh
from nominatim_db.db.connection import register_hstore, execute_scalar
from nominatim_db.tokenizer import factory as tokenizer_factory
from steps.utils import run_script
class NominatimEnvironment:
""" Collects all functions for the execution of Nominatim functions.
@@ -62,7 +60,6 @@ class NominatimEnvironment:
dbargs['password'] = self.db_pass
return psycopg.connect(**dbargs)
def write_nominatim_config(self, dbname):
""" Set up a custom test configuration that connects to the given
database. This sets up the environment variables so that they can
@@ -101,7 +98,6 @@ class NominatimEnvironment:
self.website_dir = tempfile.TemporaryDirectory()
def get_test_config(self):
cfg = Configuration(Path(self.website_dir.name), environ=self.test_env)
return cfg
@@ -122,14 +118,13 @@ class NominatimEnvironment:
return dsn
def db_drop_database(self, name):
""" Drop the database with the given name.
"""
with self.connect_database('postgres') as conn:
conn.autocommit = True
conn.execute(pysql.SQL('DROP DATABASE IF EXISTS')
+ pysql.Identifier(name))
+ pysql.Identifier(name))
def setup_template_db(self):
""" Setup a template database that already contains common test data.
@@ -153,13 +148,12 @@ class NominatimEnvironment:
'--osm2pgsql-cache', '1',
'--ignore-errors',
'--offline', '--index-noanalyse')
except:
except: # noqa: E722
self.db_drop_database(self.template_db)
raise
self.run_nominatim('refresh', '--functions')
def setup_api_db(self):
""" Setup a test against the API test database.
"""
@@ -184,13 +178,12 @@ class NominatimEnvironment:
csv_path = str(testdata / 'full_en_phrases_test.csv')
self.run_nominatim('special-phrases', '--import-from-csv', csv_path)
except:
except: # noqa: E722
self.db_drop_database(self.api_test_db)
raise
tokenizer_factory.get_tokenizer_for_db(self.get_test_config())
def setup_unknown_db(self):
""" Setup a test against a non-existing database.
"""
@@ -213,7 +206,7 @@ class NominatimEnvironment:
with self.connect_database(self.template_db) as conn:
conn.autocommit = True
conn.execute(pysql.SQL('DROP DATABASE IF EXISTS')
+ pysql.Identifier(self.test_db))
+ pysql.Identifier(self.test_db))
conn.execute(pysql.SQL('CREATE DATABASE {} TEMPLATE = {}').format(
pysql.Identifier(self.test_db),
pysql.Identifier(self.template_db)))
@@ -250,7 +243,6 @@ class NominatimEnvironment:
return False
def reindex_placex(self, db):
""" Run the indexing step until all data in the placex has
been processed. Indexing during updates can produce more data
@@ -259,18 +251,15 @@ class NominatimEnvironment:
"""
self.run_nominatim('index')
def run_nominatim(self, *cmdline):
""" Run the nominatim command-line tool via the library.
"""
if self.website_dir is not None:
cmdline = list(cmdline) + ['--project-dir', self.website_dir.name]
cli.nominatim(osm2pgsql_path=None,
cli_args=cmdline,
cli.nominatim(cli_args=cmdline,
environ=self.test_env)
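After this change the command-line entry point no longer needs an osm2pgsql_path argument; the wrapper simply forwards the argument list and environment. A usage sketch (argument values illustrative):

    from nominatim_db import cli

    cli.nominatim(cli_args=['refresh', '--functions'], environ={})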
def copy_from_place(self, db):
""" Copy data from place to the placex and location_property_osmline
tables invoking the appropriate triggers.
@@ -293,7 +282,6 @@ class NominatimEnvironment:
and osm_type='W'
and ST_GeometryType(geometry) = 'ST_LineString'""")
def create_api_request_func_starlette(self):
import nominatim_api.server.starlette.server
from asgi_lifespan import LifespanManager
@@ -311,7 +299,6 @@ class NominatimEnvironment:
return _request
def create_api_request_func_falcon(self):
import nominatim_api.server.falcon.server
import falcon.testing
@@ -326,6 +313,3 @@ class NominatimEnvironment:
return response.text, response.status_code
return _request


@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper classes for filling the place table.
@@ -10,12 +10,13 @@ Helper classes for filling the place table.
import random
import string
class PlaceColumn:
""" Helper class to collect contents from a behave table row and
insert it into the place table.
"""
def __init__(self, context):
self.columns = {'admin_level' : 15}
self.columns = {'admin_level': 15}
self.context = context
self.geometry = None
@@ -28,9 +29,11 @@ class PlaceColumn:
assert 'osm_type' in self.columns, "osm column missing"
if force_name and 'name' not in self.columns:
self._add_hstore('name', 'name',
''.join(random.choice(string.printable)
for _ in range(int(random.random()*30))))
self._add_hstore(
'name',
'name',
''.join(random.choices(string.printable, k=random.randrange(30))),
)
return self
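random.choices(string.printable, k=n) replaces the explicit loop of single random.choice calls: both build an n-character random name, the new form in one call. A quick sketch:

    import random
    import string

    n = random.randrange(30)   # 0..29, same range as int(random.random() * 30)
    name = ''.join(random.choices(string.printable, k=n))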
@@ -96,7 +99,7 @@ class PlaceColumn:
""" Issue a delete for the given OSM object.
"""
cursor.execute('DELETE FROM place WHERE osm_type = %s and osm_id = %s',
(self.columns['osm_type'] , self.columns['osm_id']))
(self.columns['osm_type'], self.columns['osm_id']))
def db_insert(self, cursor):
""" Insert the collected data into the database.
@@ -104,7 +107,7 @@ class PlaceColumn:
if self.columns['osm_type'] == 'N' and self.geometry is None:
pt = self.context.osm.grid_node(self.columns['osm_id'])
if pt is None:
pt = (random.random()*360 - 180, random.random()*180 - 90)
pt = (random.uniform(-180, 180), random.uniform(-90, 90))
self.geometry = "ST_SetSRID(ST_Point(%f, %f), 4326)" % pt
else:


@@ -2,20 +2,16 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
""" Steps that run queries against the API.
"""
from pathlib import Path
import json
import os
import re
import logging
import asyncio
import xml.etree.ElementTree as ET
from urllib.parse import urlencode
from utils import run_script
from http_responses import GenericResponse, SearchResponse, ReverseResponse, StatusResponse
from check_functions import Bbox, check_for_attributes
from table_compare import NominatimID
@@ -68,7 +64,7 @@ def send_api_query(endpoint, params, fmt, context):
getattr(context, 'http_headers', {})))
@given(u'the HTTP header')
@given('the HTTP header')
def add_http_header(context):
if not hasattr(context, 'http_headers'):
context.http_headers = {}
@@ -77,7 +73,7 @@ def add_http_header(context):
context.http_headers[h] = context.table[0][h]
@when(u'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"(?P<addr> with address)?')
@when(r'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"(?P<addr> with address)?')
def website_search_request(context, fmt, query, addr):
params = {}
if query:
@@ -90,7 +86,7 @@ def website_search_request(context, fmt, query, addr):
context.response = SearchResponse(outp, fmt or 'json', status)
@when('sending v1/reverse at (?P<lat>[\d.-]*),(?P<lon>[\d.-]*)(?: with format (?P<fmt>.+))?')
@when(r'sending v1/reverse at (?P<lat>[\d.-]*),(?P<lon>[\d.-]*)(?: with format (?P<fmt>.+))?')
def api_endpoint_v1_reverse(context, lat, lon, fmt):
params = {}
if lat is not None:
@@ -106,7 +102,7 @@ def api_endpoint_v1_reverse(context, lat, lon, fmt):
context.response = ReverseResponse(outp, fmt or 'xml', status)
@when('sending v1/reverse N(?P<nodeid>\d+)(?: with format (?P<fmt>.+))?')
@when(r'sending v1/reverse N(?P<nodeid>\d+)(?: with format (?P<fmt>.+))?')
def api_endpoint_v1_reverse_from_node(context, nodeid, fmt):
params = {}
params['lon'], params['lat'] = (f'{c:f}' for c in context.osm.grid_node(int(nodeid)))
@@ -115,7 +111,7 @@ def api_endpoint_v1_reverse_from_node(context, nodeid, fmt):
context.response = ReverseResponse(outp, fmt or 'xml', status)
@when(u'sending (?P<fmt>\S+ )?details query for (?P<query>.*)')
@when(r'sending (?P<fmt>\S+ )?details query for (?P<query>.*)')
def website_details_request(context, fmt, query):
params = {}
if query[0] in 'NWR':
@@ -130,38 +126,45 @@ def website_details_request(context, fmt, query):
context.response = GenericResponse(outp, fmt or 'json', status)
@when(u'sending (?P<fmt>\S+ )?lookup query for (?P<query>.*)')
@when(r'sending (?P<fmt>\S+ )?lookup query for (?P<query>.*)')
def website_lookup_request(context, fmt, query):
params = { 'osm_ids' : query }
params = {'osm_ids': query}
outp, status = send_api_query('lookup', params, fmt, context)
context.response = SearchResponse(outp, fmt or 'xml', status)
@when(u'sending (?P<fmt>\S+ )?status query')
@when(r'sending (?P<fmt>\S+ )?status query')
def website_status_request(context, fmt):
params = {}
outp, status = send_api_query('status', params, fmt, context)
context.response = StatusResponse(outp, fmt or 'text', status)
@step(u'(?P<operator>less than|more than|exactly|at least|at most) (?P<number>\d+) results? (?:is|are) returned')
@step(r'(?P<operator>less than|more than|exactly|at least|at most) '
r'(?P<number>\d+) results? (?:is|are) returned')
def validate_result_number(context, operator, number):
context.execute_steps("Then a HTTP 200 is returned")
numres = len(context.response.result)
assert compare(operator, numres, int(number)), \
f"Bad number of results: expected {operator} {number}, got {numres}."
@then(u'a HTTP (?P<status>\d+) is returned')
@then(r'a HTTP (?P<status>\d+) is returned')
def check_http_return_status(context, status):
assert context.response.errorcode == int(status), \
f"Return HTTP status is {context.response.errorcode}."\
f" Full response:\n{context.response.page}"
@then(u'the page contents equals "(?P<text>.+)"')
@then(r'the page contents equals "(?P<text>.+)"')
def check_page_content_equals(context, text):
assert context.response.page == text
@then(u'the result is valid (?P<fmt>\w+)')
@then(r'the result is valid (?P<fmt>\w+)')
def step_impl(context, fmt):
context.execute_steps("Then a HTTP 200 is returned")
if fmt.strip() == 'html':
@@ -178,7 +181,7 @@ def step_impl(context, fmt):
assert context.response.format == fmt
@then(u'a (?P<fmt>\w+) user error is returned')
@then(r'a (?P<fmt>\w+) user error is returned')
def check_page_error(context, fmt):
context.execute_steps("Then a HTTP 400 is returned")
assert context.response.format == fmt
@@ -188,32 +191,34 @@ def check_page_error(context, fmt):
else:
assert re.search(r'({"error":)', context.response.page, re.DOTALL) is not None
@then(u'result header contains')
@then('result header contains')
def check_header_attr(context):
context.execute_steps("Then a HTTP 200 is returned")
for line in context.table:
assert line['attr'] in context.response.header, \
f"Field '{line['attr']}' missing in header. Full header:\n{context.response.header}"
f"Field '{line['attr']}' missing in header. " \
f"Full header:\n{context.response.header}"
value = context.response.header[line['attr']]
assert re.fullmatch(line['value'], value) is not None, \
f"Attribute '{line['attr']}': expected: '{line['value']}', got '{value}'"
@then(u'result header has (?P<neg>not )?attributes (?P<attrs>.*)')
@then('result header has (?P<neg>not )?attributes (?P<attrs>.*)')
def check_header_no_attr(context, neg, attrs):
check_for_attributes(context.response.header, attrs,
'absent' if neg else 'present')
@then(u'results contain(?: in field (?P<field>.*))?')
def step_impl(context, field):
@then(r'results contain(?: in field (?P<field>.*))?')
def results_contain_in_field(context, field):
context.execute_steps("then at least 1 result is returned")
for line in context.table:
context.response.match_row(line, context=context, field=field)
@then(u'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)')
@then(r'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)')
def validate_attributes(context, lid, neg, attrs):
for i in make_todo_list(context, lid):
check_for_attributes(context.response.result[i], attrs,
@@ -221,7 +226,7 @@ def validate_attributes(context, lid, neg, attrs):
@then(u'result addresses contain')
def step_impl(context):
def result_addresses_contain(context):
context.execute_steps("then at least 1 result is returned")
for line in context.table:
@@ -231,8 +236,9 @@ def step_impl(context):
if name != 'ID':
context.response.assert_address_field(idx, name, value)
@then(u'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
def check_address(context, lid, neg, attrs):
@then(r'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
def check_address_has_types(context, lid, neg, attrs):
context.execute_steps(f"then more than {lid} results are returned")
addr_parts = context.response.result[int(lid)]['address']
@@ -243,7 +249,8 @@ def check_address(context, lid, neg, attrs):
else:
assert attr in addr_parts
@then(u'address of result (?P<lid>\d+) (?P<complete>is|contains)')
@then(r'address of result (?P<lid>\d+) (?P<complete>is|contains)')
def check_address(context, lid, complete):
context.execute_steps(f"then more than {lid} results are returned")
@@ -258,7 +265,7 @@ def check_address(context, lid, complete):
assert len(addr_parts) == 0, f"Additional address parts found: {addr_parts!s}"
@then(u'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)')
@then(r'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)')
def check_bounding_box_in_area(context, lid, coords):
expected = Bbox(coords)
@@ -269,7 +276,7 @@ def check_bounding_box_in_area(context, lid, coords):
f"Bbox is not contained in {expected}")
@then(u'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)')
@then(r'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)')
def check_centroid_in_area(context, lid, coords):
expected = Bbox(coords)
@@ -280,7 +287,7 @@ def check_centroid_in_area(context, lid, coords):
f"Centroid is not inside {expected}")
@then(u'there are(?P<neg> no)? duplicates')
@then('there are(?P<neg> no)? duplicates')
def check_for_duplicates(context, neg):
context.execute_steps("then at least 1 result is returned")
@@ -298,4 +305,3 @@ def check_for_duplicates(context, neg):
assert not has_dupe, f"Found duplicate for {dup}"
else:
assert has_dupe, "No duplicates found"


@@ -2,9 +2,8 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
import logging
from itertools import chain
import psycopg
@@ -13,9 +12,9 @@ from psycopg import sql as pysql
from place_inserter import PlaceColumn
from table_compare import NominatimID, DBRow
from nominatim_db.indexer import indexer
from nominatim_db.tokenizer import factory as tokenizer_factory
def check_database_integrity(context):
""" Check some generic constraints on the tables.
"""
@@ -31,10 +30,9 @@ def check_database_integrity(context):
cur.execute("SELECT count(*) FROM word WHERE word_token = ''")
assert cur.fetchone()[0] == 0, "Empty word tokens found in word table"
################################ GIVEN ##################################
# GIVEN ##################################
@given("the (?P<named>named )?places")
def add_data_to_place_table(context, named):
""" Add entries into the place table. 'named places' makes sure that
@@ -46,6 +44,7 @@ def add_data_to_place_table(context, named):
PlaceColumn(context).add_row(row, named is not None).db_insert(cur)
cur.execute('ALTER TABLE place ENABLE TRIGGER place_before_insert')
@given("the relations")
def add_data_to_planet_relations(context):
""" Add entries into the osm2pgsql relation middle table. This is needed
@@ -77,9 +76,11 @@ def add_data_to_planet_relations(context):
else:
members = None
tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings if h.startswith("tags+")])
tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings
if h.startswith("tags+")])
cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off, parts, members, tags)
cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off,
parts, members, tags)
VALUES (%s, %s, %s, %s, %s, %s)""",
(r['id'], last_node, last_way, parts, members, list(tags)))
else:
@@ -99,6 +100,7 @@ def add_data_to_planet_relations(context):
(r['id'], psycopg.types.json.Json(tags),
psycopg.types.json.Json(members)))
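The chain.from_iterable call above flattens the tags+<key> table columns into the alternating key/value list expected by the legacy middle-table format. For example (values illustrative):

    from itertools import chain

    pairs = [('type', 'multipolygon'), ('name', 'Foo')]   # from "tags+type", "tags+name"
    flat = list(chain.from_iterable(pairs))
    # flat == ['type', 'multipolygon', 'name', 'Foo']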
@given("the ways")
def add_data_to_planet_ways(context):
""" Add entries into the osm2pgsql way middle table. This is necessary for
@@ -110,16 +112,18 @@ def add_data_to_planet_ways(context):
json_tags = row is not None and row['value'] != '1'
for r in context.table:
if json_tags:
tags = psycopg.types.json.Json({h[5:]: r[h] for h in r.headings if h.startswith("tags+")})
tags = psycopg.types.json.Json({h[5:]: r[h] for h in r.headings
if h.startswith("tags+")})
else:
tags = list(chain.from_iterable([(h[5:], r[h])
for h in r.headings if h.startswith("tags+")]))
nodes = [ int(x.strip()) for x in r['nodes'].split(',') ]
nodes = [int(x.strip()) for x in r['nodes'].split(',')]
cur.execute("INSERT INTO planet_osm_ways (id, nodes, tags) VALUES (%s, %s, %s)",
(r['id'], nodes, tags))
################################ WHEN ##################################
# WHEN ##################################
@when("importing")
def import_and_index_data_from_place_table(context):
@@ -136,6 +140,7 @@ def import_and_index_data_from_place_table(context):
# itself.
context.log_capture.buffer.clear()
@when("updating places")
def update_place_table(context):
""" Update the place table with the given data. Also runs all triggers
@@ -164,6 +169,7 @@ def update_postcodes(context):
"""
context.nominatim.run_nominatim('refresh', '--postcodes')
@when("marking for delete (?P<oids>.*)")
def delete_places(context, oids):
""" Remove entries from the place table. Multiple ids may be given
@@ -184,7 +190,8 @@ def delete_places(context, oids):
# itself.
context.log_capture.buffer.clear()
################################ THEN ##################################
# THEN ##################################
@then("(?P<table>placex|place) contains(?P<exact> exactly)?")
def check_place_contents(context, table, exact):
@@ -201,7 +208,8 @@ def check_place_contents(context, table, exact):
expected_content = set()
for row in context.table:
nid = NominatimID(row['object'])
query = 'SELECT *, ST_AsText(geometry) as geomtxt, ST_GeometryType(geometry) as geometrytype'
query = """SELECT *, ST_AsText(geometry) as geomtxt,
ST_GeometryType(geometry) as geometrytype """
if table == 'placex':
query += ' ,ST_X(centroid) as cx, ST_Y(centroid) as cy'
query += " FROM %s WHERE {}" % (table, )
@@ -261,17 +269,18 @@ def check_search_name_contents(context, exclude):
if not exclude:
assert len(tokens) >= len(items), \
"No word entry found for {}. Entries found: {!s}".format(value, len(tokens))
f"No word entry found for {value}. Entries found: {len(tokens)}"
for word, token, wid in tokens:
if exclude:
assert wid not in res[name], \
"Found term for {}/{}: {}".format(nid, name, wid)
"Found term for {}/{}: {}".format(nid, name, wid)
else:
assert wid in res[name], \
"Missing term for {}/{}: {}".format(nid, name, wid)
"Missing term for {}/{}: {}".format(nid, name, wid)
elif name != 'object':
assert db_row.contains(name, value), db_row.assert_msg(name, value)
@then("search_name has no entry for (?P<oid>.*)")
def check_search_name_has_entry(context, oid):
""" Check that there is noentry in the search_name table for the given
@@ -283,6 +292,7 @@ def check_search_name_has_entry(context, oid):
assert cur.rowcount == 0, \
"Found {} entries for ID {}".format(cur.rowcount, oid)
@then("location_postcode contains exactly")
def check_location_postcode(context):
""" Check full contents for location_postcode table. Each row represents a table row
@@ -294,21 +304,22 @@ def check_location_postcode(context):
with context.db.cursor() as cur:
cur.execute("SELECT *, ST_AsText(geometry) as geomtxt FROM location_postcode")
assert cur.rowcount == len(list(context.table)), \
"Postcode table has {} rows, expected {}.".format(cur.rowcount, len(list(context.table)))
"Postcode table has {cur.rowcount} rows, expected {len(list(context.table))}."
results = {}
for row in cur:
key = (row['country_code'], row['postcode'])
assert key not in results, "Postcode table has duplicate entry: {}".format(row)
results[key] = DBRow((row['country_code'],row['postcode']), row, context)
results[key] = DBRow((row['country_code'], row['postcode']), row, context)
for row in context.table:
db_row = results.get((row['country'],row['postcode']))
db_row = results.get((row['country'], row['postcode']))
assert db_row is not None, \
f"Missing row for country '{row['country']}' postcode '{row['postcode']}'."
db_row.assert_row(row, ('country', 'postcode'))
@then("there are(?P<exclude> no)? word tokens for postcodes (?P<postcodes>.*)")
def check_word_table_for_postcodes(context, exclude, postcodes):
""" Check that the tokenizer produces postcode tokens for the given
@@ -333,7 +344,8 @@ def check_word_table_for_postcodes(context, exclude, postcodes):
assert len(found) == 0, f"Unexpected postcodes: {found}"
else:
assert set(found) == set(plist), \
f"Missing postcodes {set(plist) - set(found)}. Found: {found}"
f"Missing postcodes {set(plist) - set(found)}. Found: {found}"
@then("place_addressline contains")
def check_place_addressline(context):
@@ -352,11 +364,12 @@ def check_place_addressline(context):
WHERE place_id = %s AND address_place_id = %s""",
(pid, apid))
assert cur.rowcount > 0, \
"No rows found for place %s and address %s" % (row['object'], row['address'])
f"No rows found for place {row['object']} and address {row['address']}."
for res in cur:
DBRow(nid, res, context).assert_row(row, ('address', 'object'))
@then("place_addressline doesn't contain")
def check_place_addressline_exclude(context):
""" Check that the place_addressline doesn't contain any entries for the
@@ -371,9 +384,10 @@ def check_place_addressline_exclude(context):
WHERE place_id = %s AND address_place_id = %s""",
(pid, apid))
assert cur.rowcount == 0, \
"Row found for place %s and address %s" % (row['object'], row['address'])
f"Row found for place {row['object']} and address {row['address']}."
@then("W(?P<oid>\d+) expands to(?P<neg> no)? interpolation")
@then(r"W(?P<oid>\d+) expands to(?P<neg> no)? interpolation")
def check_location_property_osmline(context, oid, neg):
""" Check that the given way is present in the interpolation table.
"""
@@ -392,7 +406,7 @@ def check_location_property_osmline(context, oid, neg):
for i in todo:
row = context.table[i]
if (int(row['start']) == res['startnumber']
and int(row['end']) == res['endnumber']):
and int(row['end']) == res['endnumber']):
todo.remove(i)
break
else:
@@ -402,8 +416,9 @@ def check_location_property_osmline(context, oid, neg):
assert not todo, f"Unmatched lines in table: {list(context.table[i] for i in todo)}"
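The matching loop above relies on Python's for ... else: the else branch runs only when the loop completes without break, i.e. when no expected row matched the interpolation line. A minimal illustration of the pattern:

    for candidate in (1, 2, 3):
        if candidate == 2:
            break          # a match was found; the else branch is skipped
    else:
        raise AssertionError("no candidate matched")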
@then("location_property_osmline contains(?P<exact> exactly)?")
def check_place_contents(context, exact):
def check_osmline_contents(context, exact):
""" Check contents of the interpolation table. Each row represents a table row
and all data must match. Data not present in the expected table may
be arbitrary. The rows are identified via the 'object' column which must
@@ -447,4 +462,3 @@ def check_place_contents(context, exact):
assert expected_content == actual, \
f"Missing entries: {expected_content - actual}\n" \
f"Not expected in table: {actual - expected_content}"


@@ -14,6 +14,7 @@ from nominatim_db.tools.replication import run_osm2pgsql_updates
from geometry_alias import ALIASES
def get_osm2pgsql_options(nominatim_env, fname, append):
return dict(import_file=fname,
osm2pgsql='osm2pgsql',
@@ -25,8 +26,7 @@ def get_osm2pgsql_options(nominatim_env, fname, append):
flatnode_file='',
tablespaces=dict(slim_data='', slim_index='',
main_data='', main_index=''),
append=append
)
append=append)
def write_opl_file(opl, grid):
@@ -41,14 +41,14 @@ def write_opl_file(opl, grid):
if line.startswith('n') and line.find(' x') < 0:
coord = grid.grid_node(int(line[1:].split(' ')[0]))
if coord is None:
coord = (random.random() * 360 - 180,
random.random() * 180 - 90)
coord = (random.uniform(-180, 180), random.uniform(-90, 90))
line += " x%f y%f" % coord
fd.write(line.encode('utf-8'))
fd.write(b'\n')
return fd.name
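The switch from random.random() * 360 - 180 to random.uniform(-180, 180) is behaviour-preserving but reads as what it is: a draw from a coordinate range. A quick sketch:

    import random

    lon = random.uniform(-180, 180)   # same distribution as random.random() * 360 - 180
    lat = random.uniform(-90, 90)     # same as random.random() * 180 - 90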
@given('the lua style file')
def lua_style_file(context):
""" Define a custom style file to use for the import.
@@ -91,7 +91,7 @@ def define_node_grid(context, grid_step, origin):
@when(u'loading osm data')
def load_osm_file(context):
"""
Load the given data into a freshly created test data using osm2pgsql.
Load the given data into a freshly created test database using osm2pgsql.
No further indexing is done.
The data is expected as attached text in OPL format.
@@ -103,13 +103,14 @@ def load_osm_file(context):
finally:
os.remove(fname)
### reintroduce the triggers/indexes we've lost by having osm2pgsql set up place again
# reintroduce the triggers/indexes we've lost by having osm2pgsql set up place again
cur = context.db.cursor()
cur.execute("""CREATE TRIGGER place_before_delete BEFORE DELETE ON place
FOR EACH ROW EXECUTE PROCEDURE place_delete()""")
cur.execute("""CREATE TRIGGER place_before_insert BEFORE INSERT ON place
FOR EACH ROW EXECUTE PROCEDURE place_insert()""")
cur.execute("""CREATE UNIQUE INDEX idx_place_osm_unique on place using btree(osm_id,osm_type,class,type)""")
cur.execute("""CREATE UNIQUE INDEX idx_place_osm_unique ON place
USING btree(osm_id,osm_type,class,type)""")
context.db.commit()
@@ -133,6 +134,7 @@ def update_from_osm_file(context):
finally:
os.remove(fname)
@when('indexing')
def index_database(context):
"""


@@ -2,21 +2,21 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions to facilitate accessing and comparing the content of DB tables.
"""
import math
import re
import json
import psycopg
from psycopg import sql as pysql
from steps.check_functions import Almost
ID_REGEX = re.compile(r"(?P<typ>[NRW])(?P<oid>\d+)(:(?P<cls>\w+))?")
class NominatimID:
""" Splits a unique identifier for places into its components.
As place_ids cannot be used for testing, we use a unique
@@ -147,10 +147,10 @@ class DBRow:
return str(actual) == expected
def _compare_place_id(self, actual, expected):
if expected == '0':
if expected == '0':
return actual == 0
with self.context.db.cursor() as cur:
with self.context.db.cursor() as cur:
return NominatimID(expected).get_place_id(cur) == actual
def _has_centroid(self, expected):
@@ -166,13 +166,15 @@ class DBRow:
else:
x, y = self.context.osm.grid_node(int(expected))
return Almost(float(x)) == self.db_row['cx'] and Almost(float(y)) == self.db_row['cy']
return math.isclose(float(x), self.db_row['cx']) \
and math.isclose(float(y), self.db_row['cy'])
def _has_geometry(self, expected):
geom = self.context.osm.parse_geometry(expected)
with self.context.db.cursor(row_factory=psycopg.rows.tuple_row) as cur:
cur.execute(pysql.SQL("""SELECT ST_Equals(ST_SnapToGrid({}, 0.00001, 0.00001),
ST_SnapToGrid(ST_SetSRID({}::geometry, 4326), 0.00001, 0.00001))""")
cur.execute(pysql.SQL("""
SELECT ST_Equals(ST_SnapToGrid({}, 0.00001, 0.00001),
ST_SnapToGrid(ST_SetSRID({}::geometry, 4326), 0.00001, 0.00001))""")
.format(pysql.SQL(geom),
pysql.Literal(self.db_row['geomtxt'])))
return cur.fetchone()[0]
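_has_geometry declares two geometries equal after snapping both to a 0.00001-degree grid, so sub-tolerance coordinate jitter is ignored. The same idea in plain Python (a sketch of the snapping only, not the real SQL):

    def snap(coord, grid=0.00001):
        return round(coord / grid) * grid

    # differences below the grid size vanish after snapping
    assert snap(9.3071612) == snap(9.3071608)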
@@ -187,7 +189,8 @@ class DBRow:
else:
msg += " No such column."
return msg + "\nFull DB row: {}".format(json.dumps(dict(self.db_row), indent=4, default=str))
return msg + "\nFull DB row: {}".format(json.dumps(dict(self.db_row),
indent=4, default=str))
def _get_actual(self, name):
if '+' in name:


@@ -1,28 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Various smaller helpers for step execution.
"""
import logging
import subprocess
LOG = logging.getLogger(__name__)
def run_script(cmd, **kwargs):
""" Run the given command, check that it is successful and output
when necessary.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
(outp, outerr) = proc.communicate()
outp = outp.decode('utf-8')
outerr = outerr.decode('utf-8').replace('\\n', '\n')
LOG.debug("Run command: %s\n%s\n%s", cmd, outp, outerr)
assert proc.returncode == 0, "Script '{}' failed:\n{}\n{}\n".format(cmd[0], outp, outerr)
return outp, outerr
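The deleted run_script helper predates subprocess.run; a modern equivalent of what it did would look roughly like this (a sketch, not part of this changeset):

    import subprocess

    def run_script(cmd, **kwargs):
        proc = subprocess.run(cmd, capture_output=True, text=True, **kwargs)
        assert proc.returncode == 0, \
            f"Script '{cmd[0]}' failed:\n{proc.stdout}\n{proc.stderr}\n"
        return proc.stdout, proc.stderr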


@@ -2,14 +2,13 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper fixtures for API call tests.
"""
import pytest
import pytest_asyncio
import time
import datetime as dt
import sqlalchemy as sa
@@ -20,27 +19,25 @@ from nominatim_api.search.query_analyzer_factory import make_query_analyzer
from nominatim_db.tools import convert_sqlite
import nominatim_api.logging as loglib
class APITester:
def __init__(self):
self.api = napi.NominatimAPI()
self.async_to_sync(self.api._async_api.setup_database())
def async_to_sync(self, func):
""" Run an asynchronous function until completion using the
internal loop of the API.
"""
return self.api._loop.run_until_complete(func)
def add_data(self, table, data):
""" Insert data into the given table.
"""
sql = getattr(self.api._async_api._tables, table).insert()
self.async_to_sync(self.exec_async(sql, data))
def add_placex(self, **kw):
name = kw.get('name')
if isinstance(name, str):
@@ -50,30 +47,29 @@ class APITester:
geometry = kw.get('geometry', 'POINT(%f %f)' % centroid)
self.add_data('placex',
{'place_id': kw.get('place_id', 1000),
'osm_type': kw.get('osm_type', 'W'),
'osm_id': kw.get('osm_id', 4),
'class_': kw.get('class_', 'highway'),
'type': kw.get('type', 'residential'),
'name': name,
'address': kw.get('address'),
'extratags': kw.get('extratags'),
'parent_place_id': kw.get('parent_place_id'),
'linked_place_id': kw.get('linked_place_id'),
'admin_level': kw.get('admin_level', 15),
'country_code': kw.get('country_code'),
'housenumber': kw.get('housenumber'),
'postcode': kw.get('postcode'),
'wikipedia': kw.get('wikipedia'),
'rank_search': kw.get('rank_search', 30),
'rank_address': kw.get('rank_address', 30),
'importance': kw.get('importance'),
'centroid': 'POINT(%f %f)' % centroid,
'indexed_status': kw.get('indexed_status', 0),
'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'geometry': geometry})
{'place_id': kw.get('place_id', 1000),
'osm_type': kw.get('osm_type', 'W'),
'osm_id': kw.get('osm_id', 4),
'class_': kw.get('class_', 'highway'),
'type': kw.get('type', 'residential'),
'name': name,
'address': kw.get('address'),
'extratags': kw.get('extratags'),
'parent_place_id': kw.get('parent_place_id'),
'linked_place_id': kw.get('linked_place_id'),
'admin_level': kw.get('admin_level', 15),
'country_code': kw.get('country_code'),
'housenumber': kw.get('housenumber'),
'postcode': kw.get('postcode'),
'wikipedia': kw.get('wikipedia'),
'rank_search': kw.get('rank_search', 30),
'rank_address': kw.get('rank_address', 30),
'importance': kw.get('importance'),
'centroid': 'POINT(%f %f)' % centroid,
'indexed_status': kw.get('indexed_status', 0),
'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'geometry': geometry})
def add_address_placex(self, object_id, **kw):
self.add_placex(**kw)
@@ -85,46 +81,42 @@ class APITester:
'fromarea': kw.get('fromarea', False),
'isaddress': kw.get('isaddress', True)})
def add_osmline(self, **kw):
self.add_data('osmline',
{'place_id': kw.get('place_id', 10000),
'osm_id': kw.get('osm_id', 4004),
'parent_place_id': kw.get('parent_place_id'),
'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'startnumber': kw.get('startnumber', 2),
'endnumber': kw.get('endnumber', 6),
'step': kw.get('step', 2),
'address': kw.get('address'),
'postcode': kw.get('postcode'),
'country_code': kw.get('country_code'),
'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})
{'place_id': kw.get('place_id', 10000),
'osm_id': kw.get('osm_id', 4004),
'parent_place_id': kw.get('parent_place_id'),
'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'startnumber': kw.get('startnumber', 2),
'endnumber': kw.get('endnumber', 6),
'step': kw.get('step', 2),
'address': kw.get('address'),
'postcode': kw.get('postcode'),
'country_code': kw.get('country_code'),
'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})
def add_tiger(self, **kw):
self.add_data('tiger',
{'place_id': kw.get('place_id', 30000),
'parent_place_id': kw.get('parent_place_id'),
'startnumber': kw.get('startnumber', 2),
'endnumber': kw.get('endnumber', 6),
'step': kw.get('step', 2),
'postcode': kw.get('postcode'),
'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})
{'place_id': kw.get('place_id', 30000),
'parent_place_id': kw.get('parent_place_id'),
'startnumber': kw.get('startnumber', 2),
'endnumber': kw.get('endnumber', 6),
'step': kw.get('step', 2),
'postcode': kw.get('postcode'),
'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})
def add_postcode(self, **kw):
self.add_data('postcode',
{'place_id': kw.get('place_id', 1000),
'parent_place_id': kw.get('parent_place_id'),
'country_code': kw.get('country_code'),
'postcode': kw.get('postcode'),
'rank_search': kw.get('rank_search', 20),
'rank_address': kw.get('rank_address', 22),
'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'geometry': kw.get('geometry', 'POINT(23 34)')})
{'place_id': kw.get('place_id', 1000),
'parent_place_id': kw.get('parent_place_id'),
'country_code': kw.get('country_code'),
'postcode': kw.get('postcode'),
'rank_search': kw.get('rank_search', 20),
'rank_address': kw.get('rank_address', 22),
'indexed_date': kw.get('indexed_date',
dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
'geometry': kw.get('geometry', 'POINT(23 34)')})
def add_country(self, country_code, geometry):
self.add_data('country_grid',
@@ -132,14 +124,12 @@ class APITester:
'area': 0.1,
'geometry': geometry})
def add_country_name(self, country_code, names, partition=0):
self.add_data('country_name',
{'country_code': country_code,
'name': names,
'partition': partition})
def add_search_name(self, place_id, **kw):
centroid = kw.get('centroid', (23.0, 34.0))
self.add_data('search_name',
@@ -152,7 +142,6 @@ class APITester:
'country_code': kw.get('country_code', 'xx'),
'centroid': 'POINT(%f %f)' % centroid})
def add_class_type_table(self, cls, typ):
self.async_to_sync(
self.exec_async(sa.text(f"""CREATE TABLE place_classtype_{cls}_{typ}
@@ -160,7 +149,6 @@ class APITester:
WHERE class = '{cls}' AND type = '{typ}')
""")))
def add_word_table(self, content):
data = [dict(zip(['word_id', 'word_token', 'type', 'word', 'info'], c))
for c in content]
@@ -176,12 +164,10 @@ class APITester:
self.async_to_sync(_do_sql())
async def exec_async(self, sql, *args, **kwargs):
async with self.api._async_api.begin() as conn:
return await conn.execute(sql, *args, **kwargs)
async def create_tables(self):
async with self.api._async_api._engine.begin() as conn:
await conn.run_sync(self.api._async_api._tables.meta.create_all)
@@ -212,11 +198,12 @@ def frontend(request, event_loop, tmp_path):
db = str(tmp_path / 'test_nominatim_python_unittest.sqlite')
def mkapi(apiobj, options={'reverse'}):
apiobj.add_data('properties',
[{'property': 'tokenizer', 'value': 'icu'},
{'property': 'tokenizer_import_normalisation', 'value': ':: lower();'},
{'property': 'tokenizer_import_transliteration', 'value': "'1' > '/1/'; 'ä' > 'ä '"},
])
apiobj.add_data(
'properties',
[{'property': 'tokenizer', 'value': 'icu'},
{'property': 'tokenizer_import_normalisation', 'value': ':: lower();'},
{'property': 'tokenizer_import_transliteration',
'value': "'1' > '/1/'; 'ä' > 'ä '"}])
async def _do_sql():
async with apiobj.api._async_api.begin() as conn:

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Provides dummy implementations of ASGIAdaptor for testing.
@@ -13,6 +13,7 @@ import nominatim_api.v1.server_glue as glue
from nominatim_api.v1.format import dispatch as formatting
from nominatim_api.config import Configuration
class FakeError(BaseException):
def __init__(self, msg, status):
@@ -22,8 +23,10 @@ class FakeError(BaseException):
def __str__(self):
return f'{self.status} -- {self.msg}'
FakeResponse = namedtuple('FakeResponse', ['status', 'output', 'content_type'])
class FakeAdaptor(glue.ASGIAdaptor):
def __init__(self, params=None, headers=None, config=None):
@@ -31,23 +34,18 @@ class FakeAdaptor(glue.ASGIAdaptor):
self.headers = headers or {}
self._config = config or Configuration(None)
def get(self, name, default=None):
return self.params.get(name, default)
def get_header(self, name, default=None):
return self.headers.get(name, default)
def error(self, msg, status=400):
return FakeError(msg, status)
def create_response(self, status, output, num_results):
return FakeResponse(status, output, self.content_type)
def base_uri(self):
return 'http://test'
@@ -56,5 +54,3 @@ class FakeAdaptor(glue.ASGIAdaptor):
def formatting(self):
return formatting
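A minimal usage sketch of the fake adaptor above (the parameter and header values are hypothetical, purely for illustration):

a = FakeAdaptor(params={'q': 'Berlin'}, headers={'Accept': 'text/html'})
assert a.get('q') == 'Berlin'
assert a.get_header('Accept') == 'text/html'
assert str(a.error('bad request')) == '400 -- bad request'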

View File

@@ -2,21 +2,18 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for normalizing search queries.
"""
from pathlib import Path
import pytest
from icu import Transliterator
import nominatim_api.search.query as qmod
from nominatim_api.query_preprocessing.config import QueryConfig
from nominatim_api.query_preprocessing import normalize
def run_preprocessor_on(query, norm):
normalizer = Transliterator.createFromRules("normalization", norm)
proc = normalize.create(QueryConfig().set_normalizer(normalizer))
@@ -26,9 +23,9 @@ def run_preprocessor_on(query, norm):
def test_normalize_simple():
norm = ':: lower();'
query = [qmod.Phrase(qmod.PhraseType.NONE, 'Hallo')]
query = [qmod.Phrase(qmod.PHRASE_ANY, 'Hallo')]
out = run_preprocessor_on(query, norm)
assert len(out) == 1
assert out == [qmod.Phrase(qmod.PhraseType.NONE, 'hallo')]
assert out == [qmod.Phrase(qmod.PHRASE_ANY, 'hallo')]
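The change running through this and the following test files is mechanical: the BreakType, PhraseType and TokenType enums of nominatim_api.search.query are replaced by module-level constants. A minimal before/after sketch, using only names that appear in this diff:

import nominatim_api.search.query as qmod

# before: qmod.Phrase(qmod.PhraseType.NONE, 'Hallo')
# after:
phrase = qmod.Phrase(qmod.PHRASE_ANY, 'Hallo')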

View File

@@ -0,0 +1,31 @@
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for Japanese phrase splitting.
"""
import pytest
import nominatim_api.search.query as qmod
from nominatim_api.query_preprocessing.config import QueryConfig
from nominatim_api.query_preprocessing import split_japanese_phrases
def run_preprocessor_on(query):
proc = split_japanese_phrases.create(QueryConfig().set_normalizer(None))
return proc(query)
@pytest.mark.parametrize('inp,outp', [('大阪府大阪市大阪', '大阪府:大阪市:大阪'),
('大阪府大阪', '大阪府:大阪'),
('大阪市大阪', '大阪市:大阪')])
def test_split_phrases(inp, outp):
query = [qmod.Phrase(qmod.PHRASE_ANY, inp)]
out = run_preprocessor_on(query)
assert out == [qmod.Phrase(qmod.PHRASE_ANY, outp)]
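A gloss on the first case above (a hedged reading of the test data, not of the implementation): the preprocessor breaks a run-on Japanese address after administrative suffixes such as 府 (prefecture) and 市 (city), with ':' marking the inserted phrase boundaries.

out = run_preprocessor_on([qmod.Phrase(qmod.PHRASE_ANY, '大阪府大阪市大阪')])
assert out == [qmod.Phrase(qmod.PHRASE_ANY, '大阪府:大阪市:大阪')]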

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for tokenized query data structures.
@@ -11,6 +11,7 @@ import pytest
from nominatim_api.search import query
class MyToken(query.Token):
def get_category(self):
@@ -22,42 +23,44 @@ def mktoken(tid: int):
lookup_word='foo')
@pytest.mark.parametrize('ptype,ttype', [('NONE', 'WORD'),
('AMENITY', 'QUALIFIER'),
('STREET', 'PARTIAL'),
('CITY', 'WORD'),
('COUNTRY', 'COUNTRY'),
('POSTCODE', 'POSTCODE')])
@pytest.fixture
def qnode():
return query.QueryNode(query.BREAK_PHRASE, query.PHRASE_ANY, 0.0, '', '')
@pytest.mark.parametrize('ptype,ttype', [(query.PHRASE_ANY, 'W'),
(query.PHRASE_AMENITY, 'Q'),
(query.PHRASE_STREET, 'w'),
(query.PHRASE_CITY, 'W'),
(query.PHRASE_COUNTRY, 'C'),
(query.PHRASE_POSTCODE, 'P')])
def test_phrase_compatible(ptype, ttype):
assert query.PhraseType[ptype].compatible_with(query.TokenType[ttype], False)
assert query._phrase_compatible_with(ptype, ttype, False)
@pytest.mark.parametrize('ptype', ['COUNTRY', 'POSTCODE'])
@pytest.mark.parametrize('ptype', [query.PHRASE_COUNTRY, query.PHRASE_POSTCODE])
def test_phrase_incompatible(ptype):
assert not query.PhraseType[ptype].compatible_with(query.TokenType.PARTIAL, True)
assert not query._phrase_compatible_with(ptype, query.TOKEN_PARTIAL, True)
def test_query_node_empty():
qn = query.QueryNode(query.BreakType.PHRASE, query.PhraseType.NONE)
assert not qn.has_tokens(3, query.TokenType.PARTIAL)
assert qn.get_tokens(3, query.TokenType.WORD) is None
def test_query_node_empty(qnode):
assert not qnode.has_tokens(3, query.TOKEN_PARTIAL)
assert qnode.get_tokens(3, query.TOKEN_WORD) is None
def test_query_node_with_content():
qn = query.QueryNode(query.BreakType.PHRASE, query.PhraseType.NONE)
qn.starting.append(query.TokenList(2, query.TokenType.PARTIAL, [mktoken(100), mktoken(101)]))
qn.starting.append(query.TokenList(2, query.TokenType.WORD, [mktoken(1000)]))
def test_query_node_with_content(qnode):
qnode.starting.append(query.TokenList(2, query.TOKEN_PARTIAL, [mktoken(100), mktoken(101)]))
qnode.starting.append(query.TokenList(2, query.TOKEN_WORD, [mktoken(1000)]))
assert not qn.has_tokens(3, query.TokenType.PARTIAL)
assert not qn.has_tokens(2, query.TokenType.COUNTRY)
assert qn.has_tokens(2, query.TokenType.PARTIAL)
assert qn.has_tokens(2, query.TokenType.WORD)
assert not qnode.has_tokens(3, query.TOKEN_PARTIAL)
assert not qnode.has_tokens(2, query.TOKEN_COUNTRY)
assert qnode.has_tokens(2, query.TOKEN_PARTIAL)
assert qnode.has_tokens(2, query.TOKEN_WORD)
assert qn.get_tokens(3, query.TokenType.PARTIAL) is None
assert qn.get_tokens(2, query.TokenType.COUNTRY) is None
assert len(qn.get_tokens(2, query.TokenType.PARTIAL)) == 2
assert len(qn.get_tokens(2, query.TokenType.WORD)) == 1
assert qnode.get_tokens(3, query.TOKEN_PARTIAL) is None
assert qnode.get_tokens(2, query.TOKEN_COUNTRY) is None
assert len(qnode.get_tokens(2, query.TOKEN_PARTIAL)) == 2
assert len(qnode.get_tokens(2, query.TOKEN_WORD)) == 1
def test_query_struct_empty():
@@ -67,19 +70,19 @@ def test_query_struct_empty():
def test_query_struct_with_tokens():
q = query.QueryStruct([query.Phrase(query.PhraseType.NONE, 'foo bar')])
q.add_node(query.BreakType.WORD, query.PhraseType.NONE)
q.add_node(query.BreakType.END, query.PhraseType.NONE)
q = query.QueryStruct([query.Phrase(query.PHRASE_ANY, 'foo bar')])
q.add_node(query.BREAK_WORD, query.PHRASE_ANY)
q.add_node(query.BREAK_END, query.PHRASE_ANY)
assert q.num_token_slots() == 2
q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
q.add_token(query.TokenRange(1, 2), query.TokenType.PARTIAL, mktoken(2))
q.add_token(query.TokenRange(1, 2), query.TokenType.WORD, mktoken(99))
q.add_token(query.TokenRange(1, 2), query.TokenType.WORD, mktoken(98))
q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
q.add_token(query.TokenRange(1, 2), query.TOKEN_PARTIAL, mktoken(2))
q.add_token(query.TokenRange(1, 2), query.TOKEN_WORD, mktoken(99))
q.add_token(query.TokenRange(1, 2), query.TOKEN_WORD, mktoken(98))
assert q.get_tokens(query.TokenRange(0, 2), query.TokenType.WORD) == []
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.WORD)) == 2
assert q.get_tokens(query.TokenRange(0, 2), query.TOKEN_WORD) == []
assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_WORD)) == 2
partials = q.get_partials_list(query.TokenRange(0, 2))
@@ -91,45 +94,44 @@ def test_query_struct_with_tokens():
def test_query_struct_incompatible_token():
q = query.QueryStruct([query.Phrase(query.PhraseType.COUNTRY, 'foo bar')])
q.add_node(query.BreakType.WORD, query.PhraseType.COUNTRY)
q.add_node(query.BreakType.END, query.PhraseType.NONE)
q = query.QueryStruct([query.Phrase(query.PHRASE_COUNTRY, 'foo bar')])
q.add_node(query.BREAK_WORD, query.PHRASE_COUNTRY)
q.add_node(query.BREAK_END, query.PHRASE_ANY)
q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
q.add_token(query.TokenRange(1, 2), query.TokenType.COUNTRY, mktoken(100))
q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
q.add_token(query.TokenRange(1, 2), query.TOKEN_COUNTRY, mktoken(100))
assert q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL) == []
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.COUNTRY)) == 1
assert q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL) == []
assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_COUNTRY)) == 1
def test_query_struct_amenity_single_word():
q = query.QueryStruct([query.Phrase(query.PhraseType.AMENITY, 'bar')])
q.add_node(query.BreakType.END, query.PhraseType.NONE)
q = query.QueryStruct([query.Phrase(query.PHRASE_AMENITY, 'bar')])
q.add_node(query.BREAK_END, query.PHRASE_ANY)
q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
q.add_token(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM, mktoken(2))
q.add_token(query.TokenRange(0, 1), query.TokenType.QUALIFIER, mktoken(3))
q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
q.add_token(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM, mktoken(2))
q.add_token(query.TokenRange(0, 1), query.TOKEN_QUALIFIER, mktoken(3))
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 0
assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_QUALIFIER)) == 0
def test_query_struct_amenity_two_words():
q = query.QueryStruct([query.Phrase(query.PhraseType.AMENITY, 'foo bar')])
q.add_node(query.BreakType.WORD, query.PhraseType.AMENITY)
q.add_node(query.BreakType.END, query.PhraseType.NONE)
q = query.QueryStruct([query.Phrase(query.PHRASE_AMENITY, 'foo bar')])
q.add_node(query.BREAK_WORD, query.PHRASE_AMENITY)
q.add_node(query.BREAK_END, query.PHRASE_ANY)
for trange in [(0, 1), (1, 2)]:
q.add_token(query.TokenRange(*trange), query.TokenType.PARTIAL, mktoken(1))
q.add_token(query.TokenRange(*trange), query.TokenType.NEAR_ITEM, mktoken(2))
q.add_token(query.TokenRange(*trange), query.TokenType.QUALIFIER, mktoken(3))
q.add_token(query.TokenRange(*trange), query.TOKEN_PARTIAL, mktoken(1))
q.add_token(query.TokenRange(*trange), query.TOKEN_NEAR_ITEM, mktoken(2))
q.add_token(query.TokenRange(*trange), query.TOKEN_QUALIFIER, mktoken(3))
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 0
assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 1
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.NEAR_ITEM)) == 0
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.QUALIFIER)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM)) == 0
assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_QUALIFIER)) == 1
assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_PARTIAL)) == 1
assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_NEAR_ITEM)) == 0
assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_QUALIFIER)) == 1
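The old/new pairs in this diff also reveal the values behind the new constants: the single-letter codes double as the word-table type codes. A hedged summary, recoverable from the replaced lines alone (the canonical definitions live in nominatim_api.search.query):

TOKEN_CODES = {
    'W': 'TOKEN_WORD',         # full word
    'w': 'TOKEN_PARTIAL',      # partial term
    'H': 'TOKEN_HOUSENUMBER',
    'P': 'TOKEN_POSTCODE',
    'C': 'TOKEN_COUNTRY',
    'Q': 'TOKEN_QUALIFIER',
}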

View File

@@ -9,38 +9,39 @@ Tests for creating abstract searches from token assignments.
"""
import pytest
from nominatim_api.search.query import Token, TokenRange, BreakType, PhraseType, TokenType, QueryStruct, Phrase
from nominatim_api.search.query import Token, TokenRange, QueryStruct, Phrase
import nominatim_api.search.query as qmod
from nominatim_api.search.db_search_builder import SearchBuilder
from nominatim_api.search.token_assignment import TokenAssignment
from nominatim_api.types import SearchDetails
import nominatim_api.search.db_searches as dbs
class MyToken(Token):
def get_category(self):
return 'this', 'that'
def make_query(*args):
q = QueryStruct([Phrase(PhraseType.NONE, '')])
q = QueryStruct([Phrase(qmod.PHRASE_ANY, '')])
for _ in range(max(inner[0] for tlist in args for inner in tlist)):
q.add_node(BreakType.WORD, PhraseType.NONE)
q.add_node(BreakType.END, PhraseType.NONE)
q.add_node(qmod.BREAK_WORD, qmod.PHRASE_ANY)
q.add_node(qmod.BREAK_END, qmod.PHRASE_ANY)
for start, tlist in enumerate(args):
for end, ttype, tinfo in tlist:
for tid, word in tinfo:
q.add_token(TokenRange(start, end), ttype,
MyToken(penalty=0.5 if ttype == TokenType.PARTIAL else 0.0,
MyToken(penalty=0.5 if ttype == qmod.TOKEN_PARTIAL else 0.0,
token=tid, count=1, addr_count=1,
lookup_word=word))
return q
def test_country_search():
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
@@ -54,7 +55,7 @@ def test_country_search():
def test_country_search_with_country_restriction():
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'en,fr'}))
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
@@ -68,7 +69,7 @@ def test_country_search_with_country_restriction():
def test_country_search_with_conflicting_country_restriction():
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'fr'}))
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
@@ -77,7 +78,7 @@ def test_country_search_with_conflicting_country_restriction():
def test_postcode_search_simple():
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])])
q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1))))
@@ -93,8 +94,8 @@ def test_postcode_search_simple():
def test_postcode_with_country():
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
[(2, TokenType.COUNTRY, [(1, 'xx')])])
q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
[(2, qmod.TOKEN_COUNTRY, [(1, 'xx')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
@@ -111,8 +112,8 @@ def test_postcode_with_country():
def test_postcode_with_address():
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
[(2, TokenType.PARTIAL, [(100, 'word')])])
q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
[(2, qmod.TOKEN_PARTIAL, [(100, 'word')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
@@ -129,9 +130,9 @@ def test_postcode_with_address():
def test_postcode_with_address_with_full_word():
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
[(2, TokenType.PARTIAL, [(100, 'word')]),
(2, TokenType.WORD, [(1, 'full')])])
q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
[(2, qmod.TOKEN_PARTIAL, [(100, 'word')]),
(2, qmod.TOKEN_WORD, [(1, 'full')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
@@ -150,7 +151,7 @@ def test_postcode_with_address_with_full_word():
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1', 'bounded_viewbox': True},
{'near': '10,10'}])
def test_near_item_only(kwargs):
q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])])
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(2, 'foo')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
@@ -166,7 +167,7 @@ def test_near_item_only(kwargs):
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1'},
{}])
def test_near_item_skipped(kwargs):
q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])])
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(2, 'foo')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
@@ -175,8 +176,8 @@ def test_near_item_skipped(kwargs):
def test_name_only_search():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])])
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
@@ -194,9 +195,9 @@ def test_name_only_search():
def test_name_with_qualifier():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])],
[(2, TokenType.QUALIFIER, [(55, 'hotel')])])
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, qmod.TOKEN_WORD, [(100, 'a')])],
[(2, qmod.TOKEN_QUALIFIER, [(55, 'hotel')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
@@ -215,9 +216,9 @@ def test_name_with_qualifier():
def test_name_with_housenumber_search():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])],
[(2, TokenType.HOUSENUMBER, [(66, '66')])])
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, qmod.TOKEN_WORD, [(100, 'a')])],
[(2, qmod.TOKEN_HOUSENUMBER, [(66, '66')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
@@ -235,13 +236,12 @@ def test_name_with_housenumber_search():
def test_name_and_address():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])],
[(2, TokenType.PARTIAL, [(2, 'b')]),
(2, TokenType.WORD, [(101, 'b')])],
[(3, TokenType.PARTIAL, [(3, 'c')]),
(3, TokenType.WORD, [(102, 'c')])]
)
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, qmod.TOKEN_WORD, [(100, 'a')])],
[(2, qmod.TOKEN_PARTIAL, [(2, 'b')]),
(2, qmod.TOKEN_WORD, [(101, 'b')])],
[(3, qmod.TOKEN_PARTIAL, [(3, 'c')]),
(3, qmod.TOKEN_WORD, [(102, 'c')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
@@ -260,14 +260,13 @@ def test_name_and_address():
def test_name_and_complex_address():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])],
[(2, TokenType.PARTIAL, [(2, 'b')]),
(3, TokenType.WORD, [(101, 'bc')])],
[(3, TokenType.PARTIAL, [(3, 'c')])],
[(4, TokenType.PARTIAL, [(4, 'd')]),
(4, TokenType.WORD, [(103, 'd')])]
)
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, qmod.TOKEN_WORD, [(100, 'a')])],
[(2, qmod.TOKEN_PARTIAL, [(2, 'b')]),
(3, qmod.TOKEN_WORD, [(101, 'bc')])],
[(3, qmod.TOKEN_PARTIAL, [(3, 'c')])],
[(4, qmod.TOKEN_PARTIAL, [(4, 'd')]),
(4, qmod.TOKEN_WORD, [(103, 'd')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
@@ -286,9 +285,9 @@ def test_name_and_complex_address():
def test_name_only_near_search():
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])])
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails())
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
@@ -302,8 +301,8 @@ def test_name_only_near_search():
def test_name_only_search_with_category():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])])
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
@@ -316,9 +315,9 @@ def test_name_only_search_with_category():
def test_name_with_near_item_search_with_category_mismatch():
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])])
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
@@ -328,9 +327,9 @@ def test_name_with_near_item_search_with_category_mismatch():
def test_name_with_near_item_search_with_category_match():
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])])
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'),
('this', 'that')]}))
@@ -345,9 +344,9 @@ def test_name_with_near_item_search_with_category_match():
def test_name_with_qualifier_search_with_category_mismatch():
q = make_query([(1, TokenType.QUALIFIER, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])])
q = make_query([(1, qmod.TOKEN_QUALIFIER, [(88, 'g')])],
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
@@ -357,9 +356,9 @@ def test_name_with_qualifier_search_with_category_mismatch():
def test_name_with_qualifier_search_with_category_match():
q = make_query([(1, TokenType.QUALIFIER, [(88, 'g')])],
[(2, TokenType.PARTIAL, [(1, 'a')]),
(2, TokenType.WORD, [(100, 'a')])])
q = make_query([(1, qmod.TOKEN_QUALIFIER, [(88, 'g')])],
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(2, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'),
('this', 'that')]}))
@@ -374,8 +373,8 @@ def test_name_with_qualifier_search_with_category_match():
def test_name_only_search_with_countries():
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
(1, TokenType.WORD, [(100, 'a')])])
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
(1, qmod.TOKEN_WORD, [(100, 'a')])])
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'de,en'}))
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
@@ -391,19 +390,19 @@ def test_name_only_search_with_countries():
def make_counted_searches(name_part, name_full, address_part, address_full,
num_address_parts=1):
q = QueryStruct([Phrase(PhraseType.NONE, '')])
q = QueryStruct([Phrase(qmod.PHRASE_ANY, '')])
for i in range(1 + num_address_parts):
q.add_node(BreakType.WORD, PhraseType.NONE)
q.add_node(BreakType.END, PhraseType.NONE)
q.add_node(qmod.BREAK_WORD, qmod.PHRASE_ANY)
q.add_node(qmod.BREAK_END, qmod.PHRASE_ANY)
q.add_token(TokenRange(0, 1), TokenType.PARTIAL,
q.add_token(TokenRange(0, 1), qmod.TOKEN_PARTIAL,
MyToken(0.5, 1, name_part, 1, 'name_part'))
q.add_token(TokenRange(0, 1), TokenType.WORD,
q.add_token(TokenRange(0, 1), qmod.TOKEN_WORD,
MyToken(0, 101, name_full, 1, 'name_full'))
for i in range(num_address_parts):
q.add_token(TokenRange(i + 1, i + 2), TokenType.PARTIAL,
q.add_token(TokenRange(i + 1, i + 2), qmod.TOKEN_PARTIAL,
MyToken(0.5, 2, address_part, 1, 'address_part'))
q.add_token(TokenRange(i + 1, i + 2), TokenType.WORD,
q.add_token(TokenRange(i + 1, i + 2), qmod.TOKEN_WORD,
MyToken(0, 102, address_full, 1, 'address_full'))
builder = SearchBuilder(q, SearchDetails())
@@ -422,8 +421,8 @@ def test_infrequent_partials_in_name():
assert len(search.lookups) == 2
assert len(search.rankings) == 2
assert set((l.column, l.lookup_type.__name__) for l in search.lookups) == \
{('name_vector', 'LookupAll'), ('nameaddress_vector', 'Restrict')}
assert set((s.column, s.lookup_type.__name__) for s in search.lookups) == \
{('name_vector', 'LookupAll'), ('nameaddress_vector', 'Restrict')}
def test_frequent_partials_in_name_and_address():
@@ -434,10 +433,10 @@ def test_frequent_partials_in_name_and_address():
assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
searches.sort(key=lambda s: s.penalty)
assert set((l.column, l.lookup_type.__name__) for l in searches[0].lookups) == \
{('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')}
assert set((l.column, l.lookup_type.__name__) for l in searches[1].lookups) == \
{('nameaddress_vector', 'LookupAll'), ('name_vector', 'LookupAll')}
assert set((s.column, s.lookup_type.__name__) for s in searches[0].lookups) == \
{('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')}
assert set((s.column, s.lookup_type.__name__) for s in searches[1].lookups) == \
{('nameaddress_vector', 'LookupAll'), ('name_vector', 'LookupAll')}
def test_too_frequent_partials_in_name_and_address():
@@ -448,5 +447,5 @@ def test_too_frequent_partials_in_name_and_address():
assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
searches.sort(key=lambda s: s.penalty)
assert set((l.column, l.lookup_type.__name__) for l in searches[0].lookups) == \
{('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')}
assert set((s.column, s.lookup_type.__name__) for s in searches[0].lookups) == \
{('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')}
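A hedged note on the strategy names asserted above, as implemented in nominatim_api.search.db_search_lookups (the variable rename from l to s in the set comprehensions is purely cosmetic, avoiding the easily misread name l):

# LookupAll  - all tokens of the term list must appear in the index column
# LookupAny  - at least one token must appear
# Restrict   - tokens only narrow down results found via the other lookups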

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for query analyzer for ICU tokenizer.
@@ -11,11 +11,13 @@ import pytest
import pytest_asyncio
from nominatim_api import NominatimAPIAsync
from nominatim_api.search.query import Phrase, PhraseType, TokenType, BreakType
from nominatim_api.search.query import Phrase
import nominatim_api.search.query as qmod
import nominatim_api.search.icu_tokenizer as tok
from nominatim_api.logging import set_log_output, get_and_disable
async def add_word(conn, word_id, word_token, wtype, word, info = None):
async def add_word(conn, word_id, word_token, wtype, word, info=None):
t = conn.t.meta.tables['word']
await conn.execute(t.insert(), {'word_id': word_id,
'word_token': word_token,
@@ -25,7 +27,8 @@ async def add_word(conn, word_id, word_token, wtype, word, info = None):
def make_phrase(query):
return [Phrase(PhraseType.NONE, s) for s in query.split(',')]
return [Phrase(qmod.PHRASE_ANY, s) for s in query.split(',')]
@pytest_asyncio.fixture
async def conn(table_factory):
@@ -62,7 +65,7 @@ async def test_single_phrase_with_unknown_terms(conn):
query = await ana.analyze_query(make_phrase('foo BAR'))
assert len(query.source) == 1
assert query.source[0].ptype == PhraseType.NONE
assert query.source[0].ptype == qmod.PHRASE_ANY
assert query.source[0].text == 'foo bar'
assert query.num_token_slots() == 2
@@ -96,17 +99,15 @@ async def test_splitting_in_transliteration(conn):
assert query.num_token_slots() == 2
assert query.nodes[0].starting
assert query.nodes[1].starting
assert query.nodes[1].btype == BreakType.TOKEN
assert query.nodes[1].btype == qmod.BREAK_TOKEN
@pytest.mark.asyncio
@pytest.mark.parametrize('term,order', [('23456', ['POSTCODE', 'HOUSENUMBER', 'WORD', 'PARTIAL']),
('3', ['HOUSENUMBER', 'POSTCODE', 'WORD', 'PARTIAL'])
])
@pytest.mark.parametrize('term,order', [('23456', ['P', 'H', 'W', 'w']),
('3', ['H', 'W', 'w'])])
async def test_penalty_postcodes_and_housenumbers(conn, term, order):
ana = await tok.create_query_analyzer(conn)
await add_word(conn, 1, term, 'P', None)
await add_word(conn, 2, term, 'H', term)
await add_word(conn, 3, term, 'w', term)
await add_word(conn, 4, term, 'W', term)
@@ -115,11 +116,12 @@ async def test_penalty_postcodes_and_housenumbers(conn, term, order):
assert query.num_token_slots() == 1
torder = [(tl.tokens[0].penalty, tl.ttype.name) for tl in query.nodes[0].starting]
torder = [(tl.tokens[0].penalty, tl.ttype) for tl in query.nodes[0].starting]
torder.sort()
assert [t[1] for t in torder] == order
@pytest.mark.asyncio
async def test_category_words_only_at_beginning(conn):
ana = await tok.create_query_analyzer(conn)
@@ -131,7 +133,7 @@ async def test_category_words_only_at_beginning(conn):
assert query.num_token_slots() == 3
assert len(query.nodes[0].starting) == 1
assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM
assert query.nodes[0].starting[0].ttype == qmod.TOKEN_NEAR_ITEM
assert not query.nodes[2].starting
@@ -145,7 +147,7 @@ async def test_freestanding_qualifier_words_become_category(conn):
assert query.num_token_slots() == 1
assert len(query.nodes[0].starting) == 1
assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM
assert query.nodes[0].starting[0].ttype == qmod.TOKEN_NEAR_ITEM
@pytest.mark.asyncio
@@ -158,9 +160,9 @@ async def test_qualifier_words(conn):
query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo'))
assert query.num_token_slots() == 5
assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.QUALIFIER}
assert set(t.ttype for t in query.nodes[2].starting) == {TokenType.QUALIFIER}
assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.QUALIFIER}
assert set(t.ttype for t in query.nodes[0].starting) == {qmod.TOKEN_QUALIFIER}
assert set(t.ttype for t in query.nodes[2].starting) == {qmod.TOKEN_QUALIFIER}
assert set(t.ttype for t in query.nodes[4].starting) == {qmod.TOKEN_QUALIFIER}
@pytest.mark.asyncio
@@ -172,14 +174,16 @@ async def test_add_unknown_housenumbers(conn):
query = await ana.analyze_query(make_phrase('466 23 99834 34a'))
assert query.num_token_slots() == 4
assert query.nodes[0].starting[0].ttype == TokenType.HOUSENUMBER
assert query.nodes[0].starting[0].ttype == qmod.TOKEN_HOUSENUMBER
assert len(query.nodes[0].starting[0].tokens) == 1
assert query.nodes[0].starting[0].tokens[0].token == 0
assert query.nodes[1].starting[0].ttype == TokenType.HOUSENUMBER
assert query.nodes[1].starting[0].ttype == qmod.TOKEN_HOUSENUMBER
assert len(query.nodes[1].starting[0].tokens) == 1
assert query.nodes[1].starting[0].tokens[0].token == 1
assert not query.nodes[2].starting
assert not query.nodes[3].starting
assert query.nodes[2].has_tokens(3, qmod.TOKEN_POSTCODE)
assert not query.nodes[2].has_tokens(3, qmod.TOKEN_HOUSENUMBER)
assert not query.nodes[2].has_tokens(4, qmod.TOKEN_HOUSENUMBER)
assert not query.nodes[3].has_tokens(4, qmod.TOKEN_HOUSENUMBER)
@pytest.mark.asyncio

View File

@@ -0,0 +1,171 @@
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for parsing of postcodes in queries.
"""
import re
from itertools import zip_longest
import pytest
from nominatim_api.search.postcode_parser import PostcodeParser
from nominatim_api.search.query import QueryStruct, PHRASE_ANY, PHRASE_POSTCODE, PHRASE_STREET
@pytest.fixture
def pc_config(project_env):
country_file = project_env.project_dir / 'country_settings.yaml'
country_file.write_text(r"""
ab:
postcode:
pattern: "ddddd ll"
ba:
postcode:
pattern: "ddddd"
de:
postcode:
pattern: "ddddd"
gr:
postcode:
pattern: "(ddd) ?(dd)"
output: \1 \2
in:
postcode:
pattern: "(ddd) ?(ddd)"
output: \1\2
mc:
postcode:
pattern: "980dd"
mz:
postcode:
pattern: "(dddd)(?:-dd)?"
bn:
postcode:
pattern: "(ll) ?(dddd)"
output: \1\2
ky:
postcode:
pattern: "(d)-(dddd)"
output: KY\1-\2
gb:
postcode:
pattern: "(l?ld[A-Z0-9]?) ?(dll)"
output: \1 \2
""")
return project_env
def mk_query(inp):
query = QueryStruct([])
phrase_split = re.split(r"([ ,:'-])", inp)
for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue='>'):
query.add_node(breakchar, PHRASE_ANY, 0.1, word, word)
return query
@pytest.mark.parametrize('query,pos', [('45325 Berlin', 0),
('45325:Berlin', 0),
('45325,Berlin', 0),
('Berlin 45325', 1),
('Berlin,45325', 1),
('Berlin:45325', 1),
('Hansastr,45325 Berlin', 1),
('Hansastr 45325 Berlin', 1)])
def test_simple_postcode(pc_config, query, pos):
parser = PostcodeParser(pc_config)
result = parser.parse(mk_query(query))
assert result == {(pos, pos + 1, '45325'), (pos, pos + 1, '453 25')}
@pytest.mark.parametrize('query', ['EC1R 3HF', 'ec1r 3hf'])
def test_postcode_matching_case_insensitive(pc_config, query):
parser = PostcodeParser(pc_config)
assert parser.parse(mk_query(query)) == {(0, 2, 'EC1R 3HF')}
def test_contained_postcode(pc_config):
parser = PostcodeParser(pc_config)
assert parser.parse(mk_query('12345 dx')) == {(0, 1, '12345'), (0, 1, '123 45'),
(0, 2, '12345 DX')}
@pytest.mark.parametrize('query,frm,to', [('345987', 0, 1), ('345 987', 0, 2),
('Aina 345 987', 1, 3),
('Aina 23 345 987 ff', 2, 4)])
def test_postcode_with_space(pc_config, query, frm, to):
parser = PostcodeParser(pc_config)
result = parser.parse(mk_query(query))
assert result == {(frm, to, '345987')}
def test_overlapping_postcode(pc_config):
parser = PostcodeParser(pc_config)
assert parser.parse(mk_query('123 456 78')) == {(0, 2, '123456'), (1, 3, '456 78')}
@pytest.mark.parametrize('query', ['45325-Berlin', "45325'Berlin",
'Berlin-45325', "Berlin'45325", '45325Berlin',
'345-987', "345'987", '345,987', '345:987'])
def test_not_a_postcode(pc_config, query):
parser = PostcodeParser(pc_config)
assert not parser.parse(mk_query(query))
@pytest.mark.parametrize('query', ['ba 12233', 'ba-12233'])
def test_postcode_with_country_prefix(pc_config, query):
parser = PostcodeParser(pc_config)
assert (0, 2, '12233') in parser.parse(mk_query(query))
def test_postcode_with_joined_country_prefix(pc_config):
parser = PostcodeParser(pc_config)
assert parser.parse(mk_query('ba12233')) == {(0, 1, '12233')}
def test_postcode_with_non_matching_country_prefix(pc_config):
parser = PostcodeParser(pc_config)
assert not parser.parse(mk_query('ky12233'))
def test_postcode_inside_postcode_phrase(pc_config):
parser = PostcodeParser(pc_config)
query = QueryStruct([])
query.nodes[-1].ptype = PHRASE_STREET
query.add_node(',', PHRASE_STREET, 0.1, '12345', '12345')
query.add_node(',', PHRASE_POSTCODE, 0.1, 'xz', 'xz')
query.add_node('>', PHRASE_POSTCODE, 0.1, '4444', '4444')
assert parser.parse(query) == {(2, 3, '4444')}
def test_partial_postcode_in_postcode_phrase(pc_config):
parser = PostcodeParser(pc_config)
query = QueryStruct([])
query.nodes[-1].ptype = PHRASE_POSTCODE
query.add_node(' ', PHRASE_POSTCODE, 0.1, '2224', '2224')
query.add_node('>', PHRASE_POSTCODE, 0.1, '12345', '12345')
assert not parser.parse(query)
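For reading the fixture above: in the country settings, 'd' stands for a digit and 'l' for a letter, while parenthesised groups feed the optional output template (this follows the Nominatim postcode configuration format; the regex expansion below is an illustrative assumption). The 'gb' pattern therefore accepts 'EC1R3HF' and reformats it with a space:

import re

# "(l?ld[A-Z0-9]?) ?(dll)" with d -> [0-9] and l -> [A-Z]
# (queries are upper-cased before matching):
GB_PATTERN = re.compile(r'([A-Z]?[A-Z][0-9][A-Z0-9]?) ?([0-9][A-Z][A-Z])')
m = GB_PATTERN.fullmatch('EC1R3HF')
assert m is not None
assert '{} {}'.format(*m.groups()) == 'EC1R 3HF'   # output template r'\1 \2'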

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test data types for search queries.
@@ -11,14 +11,15 @@ import pytest
import nominatim_api.search.query as nq
def test_token_range_equal():
assert nq.TokenRange(2, 3) == nq.TokenRange(2, 3)
assert not (nq.TokenRange(2, 3) != nq.TokenRange(2, 3))
@pytest.mark.parametrize('lop,rop', [((1, 2), (3, 4)),
((3, 4), (3, 5)),
((10, 12), (11, 12))])
((3, 4), (3, 5)),
((10, 12), (11, 12))])
def test_token_range_unequal(lop, rop):
assert not (nq.TokenRange(*lop) == nq.TokenRange(*rop))
assert nq.TokenRange(*lop) != nq.TokenRange(*rop)
@@ -28,17 +29,17 @@ def test_token_range_lt():
assert nq.TokenRange(1, 3) < nq.TokenRange(10, 12)
assert nq.TokenRange(5, 6) < nq.TokenRange(7, 8)
assert nq.TokenRange(1, 4) < nq.TokenRange(4, 5)
assert not(nq.TokenRange(5, 6) < nq.TokenRange(5, 6))
assert not(nq.TokenRange(10, 11) < nq.TokenRange(4, 5))
assert not (nq.TokenRange(5, 6) < nq.TokenRange(5, 6))
assert not (nq.TokenRange(10, 11) < nq.TokenRange(4, 5))
def test_token_range_gt():
assert nq.TokenRange(3, 4) > nq.TokenRange(1, 2)
assert nq.TokenRange(100, 200) > nq.TokenRange(10, 11)
assert nq.TokenRange(10, 11) > nq.TokenRange(4, 10)
assert not(nq.TokenRange(5, 6) > nq.TokenRange(5, 6))
assert not(nq.TokenRange(1, 2) > nq.TokenRange(3, 4))
assert not(nq.TokenRange(4, 10) > nq.TokenRange(3, 5))
assert not (nq.TokenRange(5, 6) > nq.TokenRange(5, 6))
assert not (nq.TokenRange(1, 2) > nq.TokenRange(3, 4))
assert not (nq.TokenRange(4, 10) > nq.TokenRange(3, 5))
def test_token_range_unimplemented_ops():
@@ -46,3 +47,19 @@ def test_token_range_unimplemented_ops():
nq.TokenRange(1, 3) <= nq.TokenRange(10, 12)
with pytest.raises(TypeError):
nq.TokenRange(1, 3) >= nq.TokenRange(10, 12)
def test_query_extract_words():
q = nq.QueryStruct([])
q.add_node(nq.BREAK_WORD, nq.PHRASE_ANY, 0.1, '12', '')
q.add_node(nq.BREAK_TOKEN, nq.PHRASE_ANY, 0.0, 'ab', '')
q.add_node(nq.BREAK_PHRASE, nq.PHRASE_ANY, 0.0, '12', '')
q.add_node(nq.BREAK_END, nq.PHRASE_ANY, 0.5, 'hallo', '')
words = q.extract_words(base_penalty=1.0)
assert set(words.keys()) \
== {'12', 'ab', 'hallo', '12 ab', 'ab 12', '12 ab 12'}
assert sorted(words['12']) == [nq.TokenRange(0, 1, 1.0), nq.TokenRange(2, 3, 1.0)]
assert words['12 ab'] == [nq.TokenRange(0, 2, 1.1)]
assert words['hallo'] == [nq.TokenRange(3, 4, 1.0)]
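A hedged reading of the expected values: extract_words only combines tokens within a phrase (the BREAK_PHRASE before 'hallo' keeps it out of any multi-token word), and the penalty of a combined word is base_penalty plus the penalty of every break it spans:

# '12' alone:  1.0 (base_penalty, no break crossed)
# '12 ab':     1.0 + 0.1 (the break after the first '12') = 1.1
assert words['12 ab'] == [nq.TokenRange(0, 2, 1.1)]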

View File

@@ -2,18 +2,17 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for query analyzer creation.
"""
from pathlib import Path
import pytest
from nominatim_api.search.query_analyzer_factory import make_query_analyzer
from nominatim_api.search.icu_tokenizer import ICUQueryAnalyzer
@pytest.mark.asyncio
async def test_import_icu_tokenizer(table_factory, api):
table_factory('nominatim_properties',

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for running the country searcher.
@@ -48,6 +48,7 @@ def test_find_from_placex(apiobj, frontend):
assert results[0].place_id == 55
assert results[0].accuracy == 0.8
def test_find_from_fallback_countries(apiobj, frontend):
apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
apiobj.add_country_name('ro', {'name': 'România'})
@@ -87,7 +88,6 @@ class TestCountryParameters:
apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
apiobj.add_country_name('ro', {'name': 'România'})
@pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
napi.GeometryFormat.KML,
napi.GeometryFormat.SVG,
@@ -100,7 +100,6 @@ class TestCountryParameters:
assert len(results) == 1
assert geom.name.lower() in results[0].geometry
@pytest.mark.parametrize('pid,rids', [(76, [55]), (55, [])])
def test_exclude_place_id(self, apiobj, frontend, pid, rids):
results = run_search(apiobj, frontend, 0.5, ['yw', 'ro'],
@@ -108,7 +107,6 @@ class TestCountryParameters:
assert [r.place_id for r in results] == rids
@pytest.mark.parametrize('viewbox,rids', [((9, 9, 11, 11), [55]),
((-10, -10, -3, -3), [])])
def test_bounded_viewbox_in_placex(self, apiobj, frontend, viewbox, rids):
@@ -118,9 +116,8 @@ class TestCountryParameters:
assert [r.place_id for r in results] == rids
@pytest.mark.parametrize('viewbox,numres', [((0, 0, 1, 1), 1),
((-10, -10, -3, -3), 0)])
((-10, -10, -3, -3), 0)])
def test_bounded_viewbox_in_fallback(self, apiobj, frontend, viewbox, numres):
results = run_search(apiobj, frontend, 0.5, ['ro'],
details=SearchDetails.from_kwargs({'viewbox': viewbox,

View File

@@ -2,7 +2,7 @@
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for running the near searcher.
@@ -12,8 +12,8 @@ import pytest
import nominatim_api as napi
from nominatim_api.types import SearchDetails
from nominatim_api.search.db_searches import NearSearch, PlaceSearch
from nominatim_api.search.db_search_fields import WeightedStrings, WeightedCategories,\
FieldLookup, FieldRanking, RankedTokens
from nominatim_api.search.db_search_fields import WeightedStrings, WeightedCategories, \
FieldLookup
from nominatim_api.search.db_search_lookups import LookupAll
@@ -80,7 +80,6 @@ class TestNearSearch:
apiobj.add_search_name(101, names=[56], country_code='mx',
centroid=(-10.3, 56.9))
def test_near_in_placex(self, apiobj, frontend):
apiobj.add_placex(place_id=22, class_='amenity', type='bank',
centroid=(5.6001, 4.2994))
@@ -91,7 +90,6 @@ class TestNearSearch:
assert [r.place_id for r in results] == [22]
def test_multiple_types_near_in_placex(self, apiobj, frontend):
apiobj.add_placex(place_id=22, class_='amenity', type='bank',
importance=0.002,
@@ -105,7 +103,6 @@ class TestNearSearch:
assert [r.place_id for r in results] == [22, 23]
def test_near_in_classtype(self, apiobj, frontend):
apiobj.add_placex(place_id=22, class_='amenity', type='bank',
centroid=(5.6, 4.34))
@@ -118,7 +115,6 @@ class TestNearSearch:
assert [r.place_id for r in results] == [22]
@pytest.mark.parametrize('cc,rid', [('us', 22), ('mx', 23)])
def test_restrict_by_country(self, apiobj, frontend, cc, rid):
apiobj.add_placex(place_id=22, class_='amenity', type='bank',
@@ -138,7 +134,6 @@ class TestNearSearch:
assert [r.place_id for r in results] == [rid]
@pytest.mark.parametrize('excluded,rid', [(22, 122), (122, 22)])
def test_exclude_place_by_id(self, apiobj, frontend, excluded, rid):
apiobj.add_placex(place_id=22, class_='amenity', type='bank',
@@ -148,13 +143,11 @@ class TestNearSearch:
centroid=(5.6001, 4.2994),
country_code='us')
results = run_search(apiobj, frontend, 0.1, [('amenity', 'bank')],
details=SearchDetails(excluded=[excluded]))
assert [r.place_id for r in results] == [rid]
@pytest.mark.parametrize('layer,rids', [(napi.DataLayer.POI, [22]),
(napi.DataLayer.MANMADE, [])])
def test_with_layer(self, apiobj, frontend, layer, rids):

Some files were not shown because too many files have changed in this diff.