forked from hans/Nominatim

Compare commits: typo-Token...v5.1.0 (118 commits)
| SHA1 |
| --- |
| fce279226f |
| 54d895c4ce |
| 896a1c9d12 |
| 32728d6c89 |
| bfd1c83cb0 |
| bbadc62371 |
| 5c9d3ca8d2 |
| be4ba370ef |
| 3cb183ffb0 |
| 58ef032a2b |
| 1705bb5f57 |
| f2aa15778f |
| efe65c3e49 |
| 51847ebfeb |
| 46579f08e4 |
| d4994a152b |
| 00b3ace3cf |
| 522bc942cf |
| d6e749d621 |
| 13cfb7efe2 |
| 35baf77b18 |
| 7e68613cc7 |
| b1fc721f4b |
| d400fd5f76 |
| e4295dba10 |
| 9419c5adb2 |
| 2c61fe08a0 |
| 7b3c725f2a |
| edc5ada625 |
| 72d3360fa2 |
| 0ffe384c57 |
| 9dad5edeb6 |
| d86d491f2e |
| 3026c333ca |
| ad84bbdec7 |
| f5755a7a82 |
| cd08956c61 |
| 12f5719184 |
| 78f839fbd3 |
| c70dfccaca |
| 4cc788f69e |
| 5a245e33e0 |
| 6ff51712fe |
| c431e0e45d |
| c2d62a59cb |
| cd64788a58 |
| 800a41721a |
| 1b44fe2555 |
| 6b0d58d9fd |
| afb89f9c7a |
| 6712627d5e |
| 434fbbfd18 |
| 921db8bb2f |
| a574b98e4a |
| b2af358f66 |
| e67ae701ac |
| fc1c6261ed |
| 6759edfb5d |
| e362a965e1 |
| eff60ba6be |
| 157414a053 |
| 18d4996bec |
| 13db4c9731 |
| f567ea89cc |
| 3e718e40d9 |
| 49bd18b048 |
| 31412e0674 |
| 4577669213 |
| 9bf1428d81 |
| b56edf3d0a |
| abc911079e |
| adabfee3be |
| 46c4446dc2 |
| add9244a2f |
| 96d7a8e8f6 |
| 55c3176957 |
| e29823e28f |
| 97ed168996 |
| 9b8ef97d4b |
| 4f3c88f0c1 |
| 7781186f3c |
| f78686edb8 |
| e330cd3162 |
| 671af4cff2 |
| e612b7d550 |
| 0b49d01703 |
| f6bc8e153f |
| f143ecaf1c |
| 6730c8bac8 |
| ee8915f2b6 |
| 5475bf7b9c |
| 95e2d8c846 |
| 7552818866 |
| db3991af74 |
| 4523b9aaed |
| 8b1cabebd6 |
| 0cf636a80c |
| c2cb6722fe |
| f8337bedb2 |
| efc09a5cfc |
| 86ad9efa8a |
| d984100e23 |
| 499110f549 |
| 267e5dac0d |
| 32d3eb46d5 |
| c8a0dc8af1 |
| 14ecfc7834 |
| cad44eb00c |
| f76dbb0a16 |
| 8dd218a1d0 |
| 501e13483e |
| b1d25e404f |
| 71fceb6854 |
| a06e123d70 |
| df6f70d223 |
| bea9249e38 |
| 1e4677b668 |
| 7f909dbbd8 |
.flake8 (3 changes)

@@ -6,3 +6,6 @@ extend-ignore =
     E711
 per-file-ignores =
     __init__.py: F401
+    test/python/utils/test_json_writer.py: E131
+    test/python/conftest.py: E402
+    test/bdd/*: F821
.github/actions/setup-postgresql/action.yml (4 changes, vendored)

@@ -11,10 +11,8 @@ runs:
   steps:
     - name: Remove existing PostgreSQL
       run: |
+        sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y
         sudo apt-get purge -yq postgresql*
-        sudo apt install curl ca-certificates gnupg
-        curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg >/dev/null
-        sudo sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
         sudo apt-get update -qq

       shell: bash
.github/workflows/ci-tests.yml (10 changes, vendored)

@@ -37,10 +37,10 @@ jobs:
     needs: create-archive
     strategy:
       matrix:
-        flavour: ["ubuntu-20", "ubuntu-24"]
+        flavour: ["ubuntu-22", "ubuntu-24"]
         include:
-          - flavour: ubuntu-20
-            ubuntu: 20
+          - flavour: ubuntu-22
+            ubuntu: 22
             postgresql: 12
             lua: '5.1'
             dependencies: pip

@@ -81,7 +81,7 @@ jobs:
           sudo make install
           cd ../..
           rm -rf osm2pgsql-build
-        if: matrix.ubuntu == '20'
+        if: matrix.ubuntu == '22'
         env:
           LUA_VERSION: ${{ matrix.lua }}

@@ -100,7 +100,7 @@ jobs:
         run: ./venv/bin/pip install -U flake8

       - name: Python linting
-        run: ../venv/bin/python -m flake8 src
+        run: ../venv/bin/python -m flake8 src test/python test/bdd
         working-directory: Nominatim

       - name: Install mypy and typechecking info
@@ -87,7 +87,6 @@ Checklist for releases:
 * [ ] increase versions in
   * `src/nominatim_api/version.py`
   * `src/nominatim_db/version.py`
-  * CMakeLists.txt
 * [ ] update `ChangeLog` (copy information from patch releases from release branch)
 * [ ] complete `docs/admin/Migration.md`
 * [ ] update EOL dates in `SECURITY.md`
ChangeLog (44 changes)

@@ -1,3 +1,47 @@
+5.1.0
+ * replace datrie with simple internal trie implementation
+ * add pattern-based postcode parser for queries,
+   postcodes no longer need to be present in OSM to be found
+ * take variants into account when computing token similarity
+ * add extratags output to geocodejson format
+ * fix default layer setting used for structured queries
+ * update abbreviation lists for Russian and English
+   (thanks @shoorick, @IvanShift, @mhsrn21)
+ * fix variant generation for Norwegian
+ * fix normalization around space-like characters
+ * improve postcode search and handling of postcodes in queries
+ * reorganise internal query structure and get rid of slow enums
+ * enable code linting for tests
+ * various code modernisations in test code (thanks @eumiro)
+ * remove setting osm2pgsql location via config.lib_dir
+ * make SQL functions parallel safe as far as possible (thanks @otbutz)
+ * various fixes and improvements to documentation (thanks @TuringVerified)
+
+5.0.0
+ * increase required versions for PostgreSQL (12+), PostGIS (3.0+)
+ * remove installation via cmake and debundle osm2pgsql
+ * remove deprecated PHP frontend
+ * remove deprecated legacy tokenizer
+ * add configurable pre-processing of queries
+ * add query pre-processor to split up Japanese addresses
+ * rewrite of osm2pgsql style implementation
+   (also adds support for osm2pgsql-themepark)
+ * reduce the number of SQL queries needed to complete a 'lookup' call
+ * improve computation of centroid for lines with only two points
+ * improve bbox output for postcode areas
+ * improve result order by returning the largest object when other things are
+   equal
+ * add fallback for reverse geocoding to default country tables
+ * exclude postcode areas from reverse geocoding
+ * disable search endpoint when database is reverse-only (regression)
+ * minor performance improvements to area split algorithm
+ * switch table and index creation to use autocommit mode to avoid deadlocks
+ * drop overly long ways during import
+ * restrict automatic migrations to versions 4.3+
+ * switch linting from pylint to flake8
+ * switch tests to use a wikimedia test file in the new CSV style
+ * various fixes and improvements to documentation
+
 4.5.0
  * allow building Nominatim as a pip package
  * make osm2pgsql building optional
Makefile (2 changes)

@@ -24,7 +24,7 @@ pytest:
	pytest test/python

 lint:
-	flake8 src
+	flake8 src test/python test/bdd

 bdd:
	cd test/bdd; behave -DREMOVE_TEMPLATE=1
SECURITY.md

@@ -9,10 +9,11 @@ versions.

 | Version | End of support for security updates |
 | ------- | ----------------------------------- |
+| 5.1.x   | 2027-04-01                          |
+| 5.0.x   | 2027-02-06                          |
 | 4.5.x   | 2026-09-12                          |
 | 4.4.x   | 2026-03-07                          |
 | 4.3.x   | 2025-09-07                          |
-| 4.2.x   | 2024-11-24                          |

 ## Reporting a Vulnerability

@@ -37,7 +37,6 @@ Furthermore the following Python libraries are required:
 * [Jinja2](https://palletsprojects.com/p/jinja/)
 * [PyICU](https://pypi.org/project/PyICU/)
 * [PyYaml](https://pyyaml.org/) (5.1+)
-* [datrie](https://github.com/pytries/datrie)

 These will be installed automatically when using pip installation.

docs/admin/Migration.md

@@ -9,19 +9,15 @@ the following steps:
 * Update the frontend: `pip install -U nominatim-api`
 * (optionally) Restart updates

-If you are still using CMake for the installation of Nominatim, then you
-need to update the software in one step before migrating the database.
-It is not recommended to do this while the machine is serving requests.
-
 Below you find additional migrations and hints about other structural and
 breaking changes. **Please read them before running the migration.**

 !!! note
     If you are migrating from a version <4.3, you need to install 4.3
-    first and migrate to 4.3 first. Then you can migrate to the current
+    and migrate to 4.3 first. Then you can migrate to the current
     version. It is strongly recommended to do a reimport instead.

-## 4.5.0 -> master
+## 4.5.0 -> 5.0.0

 ### PHP frontend removed

@@ -33,6 +29,42 @@ needed. It currently emits a warning and does otherwise nothing. It will be
 removed in later versions of Nominatim. So make sure you remove it from your
 scripts.

+### CMake building removed
+
+Nominatim can now only be installed via pip. Please follow the installation
+instructions for the current version to change to pip.
+
+### osm2pgsql no longer vendored in
+
+Nominatim no longer ships its own version of osm2pgsql. Please install a
+stock version of osm2pgsql from your distribution. See the
+[installation instruction for osm2pgsql](https://osm2pgsql.org/doc/install.html)
+for details. A minimum version of 1.8 is required. The current stable versions
+of Ubuntu and Debian already ship with an appropriate version. For older
+installations, you may have to compile a newer osm2pgsql yourself.
+
+### Legacy tokenizer removed
+
+The `legacy` tokenizer is no longer enabled. This tokenizer has been superseded
+by the `ICU` tokenizer a long time ago. In the unlikely case that your database
+still uses the `legacy` tokenizer, you must reimport your database.
+
+### osm2pgsql style overhauled
+
+There are some fundamental changes to how customized osm2pgsql styles should
+be written. The changes are mostly backwards compatible, i.e. custom styles
+should still work with the new implementation. The only exception is a
+customization of the `process_tags()` function. This function is no longer
+considered public and neither are the helper functions used in it.
+They currently still work but will be removed at some point. If you have
+been making changes to `process_tags`, please review your style and try
+to switch to the new convenience functions.
+
+For more information on the changes, see the
+[pull request](https://github.com/osm-search/Nominatim/pull/3615)
+and read the new
+[customization documentation](https://nominatim.org/release-docs/latest/customize/Import-Styles/).
+
 ## 4.4.0 -> 4.5.0

 ### New structure for Python packages
@@ -68,10 +68,10 @@ the update interval no new data has been published yet, it will go to sleep
 until the next expected update and only then attempt to download the next batch.

 The one-time mode is particularly useful if you want to run updates continuously
-but need to schedule other work in between updates. For example, the main
-service at osm.org uses it, to regularly recompute postcodes -- a process that
-must not be run while updates are in progress. Its update script
-looks like this:
+but need to schedule other work in between updates. For example, you might
+want to regularly recompute postcodes -- a process that
+must not be run while updates are in progress. An update script refreshing
+postcodes regularly might look like this:

 ```sh
 #!/bin/bash

@@ -109,17 +109,19 @@ Unit=nominatim-updates.service
 WantedBy=multi-user.target
 ```

-And then a similar service definition: `/etc/systemd/system/nominatim-updates.service`:
+`OnUnitActiveSec` defines how often the individual update command is run.
+
+Then add a service definition for the timer in `/etc/systemd/system/nominatim-updates.service`:

 ```
 [Unit]
 Description=Single updates of Nominatim

 [Service]
-WorkingDirectory=/srv/nominatim
-ExecStart=nominatim replication --once
-StandardOutput=append:/var/log/nominatim-updates.log
-StandardError=append:/var/log/nominatim-updates.error.log
+WorkingDirectory=/srv/nominatim-project
+ExecStart=/srv/nominatim-venv/bin/nominatim replication --once
+StandardOutput=journald
+StandardError=inherit
 User=nominatim
 Group=nominatim
 Type=simple

@@ -128,9 +130,9 @@ Type=simple
 WantedBy=multi-user.target
 ```

-Replace the `WorkingDirectory` with your project directory. Also adapt user and
-group names as required. `OnUnitActiveSec` defines how often the individual
-update command is run.
+Replace the `WorkingDirectory` with your project directory. `ExecStart` points
+to the nominatim binary that was installed in your virtualenv earlier.
+Finally, you might need to adapt user and group names as required.

 Now activate the service and start the updates:

@@ -140,12 +142,13 @@ sudo systemctl enable nominatim-updates.timer
 sudo systemctl start nominatim-updates.timer
 ```

-You can stop future data updates, while allowing any current, in-progress
+You can stop future data updates while allowing any current, in-progress
 update steps to finish, by running `sudo systemctl stop
 nominatim-updates.timer` and waiting until `nominatim-updates.service` isn't
-running (`sudo systemctl is-active nominatim-updates.service`). Current output
-from the update can be seen like above (`systemctl status
-nominatim-updates.service`).
+running (`sudo systemctl is-active nominatim-updates.service`).
+
+To check the output from the update process, use journalctl: `journalctl -u
+nominatim-updates.service`.

 #### Catch-up mode

@@ -155,13 +158,13 @@ all changes from the server until the database is up-to-date. The catch-up mode
 still respects the parameter `NOMINATIM_REPLICATION_MAX_DIFF`. It downloads and
 applies the changes in appropriate batches until all is done.

-The catch-up mode is foremost useful to bring the database up to speed after the
+The catch-up mode is foremost useful to bring the database up to date after the
 initial import. Given that the service usually is not in production at this
 point, you can temporarily be a bit more generous with the batch size and
 number of threads you use for the updates by running catch-up like this:

 ```
-cd /srv/nominatim
+cd /srv/nominatim-project
 NOMINATIM_REPLICATION_MAX_DIFF=5000 nominatim replication --catch-up --threads 15
 ```

@@ -173,13 +176,13 @@ replication catch-up at whatever interval you desire.
 When running scheduled updates with catch-up, it is a good idea to choose
 a replication source with an update frequency that is an order of magnitude
 lower. For example, if you want to update once a day, use an hourly updated
-source. This makes sure that you don't miss an entire day of updates when
+source. This ensures that you don't miss an entire day of updates when
 the source is unexpectedly late to publish its update.

 If you want to use the source with the same update frequency (e.g. a daily
 updated source with daily updates), use the
-continuous update mode. It ensures to re-request the newest update until it
-is published.
+once mode together with a frequently run systemd script as described above.
+It ensures that the newest update is re-requested until it has been published.

 #### Continuous updates

@@ -197,36 +200,3 @@ parameters:

 The update application keeps running forever and retrieves and applies
 new updates from the server as they are published.
-
-You can run this command as a simple systemd service. Create a service
-description like that in `/etc/systemd/system/nominatim-updates.service`:
-
-```
-[Unit]
-Description=Continuous updates of Nominatim
-
-[Service]
-WorkingDirectory=/srv/nominatim
-ExecStart=nominatim replication
-StandardOutput=append:/var/log/nominatim-updates.log
-StandardError=append:/var/log/nominatim-updates.error.log
-User=nominatim
-Group=nominatim
-Type=simple
-
-[Install]
-WantedBy=multi-user.target
-```
-
-Replace the `WorkingDirectory` with your project directory. Also adapt user
-and group names as required.
-
-Now activate the service and start the updates:
-
-```
-sudo systemctl daemon-reload
-sudo systemctl enable nominatim-updates
-sudo systemctl start nominatim-updates
-```
@@ -106,8 +106,11 @@ The following feature attributes are implemented:
 * `name` - localised name of the place
 * `housenumber`, `street`, `locality`, `district`, `postcode`, `city`,
   `county`, `state`, `country` -
-  provided when it can be determined from the address
+  provided when it can be determined from the address (only with `addressdetails=1`)
 * `admin` - list of localised names of administrative boundaries (only with `addressdetails=1`)
+* `extra` - dictionary with additional useful tags like `website` or `maxspeed`
+  (only with `extratags=1`)
+

 Use `polygon_geojson` to output the full geometry of the object instead
 of the centroid.
@@ -60,7 +60,7 @@ The _main tags_ classify what kind of place the OSM object represents. One
 OSM object can have more than one main tag. In such case one database entry
 is created for each main tag. _Name tags_ represent searchable names of the
 place. _Address tags_ are used to compute the address hierarchy of the place.
-Address are used for searching and for creating a display name of the place.
+Address tags are used for searching and for creating a display name of the place.
 _Extra tags_ are any tags that are not directly related to search but
 contain interesting additional information.

@@ -76,7 +76,7 @@ in which category.

 The flex style offers a number of functions to set the classification of
 each OSM tag. Most of these functions can also take a preset string instead
-of a tag descriptions. These presets describe common configurations that
+of a tag description. These presets describe common configurations that
 are also used in the definition of the predefined styles. This section
 lists the configuration functions and the accepted presets.

@@ -95,7 +95,7 @@ Any other string is matched exactly against tag keys.
 takes a lua table parameter which defines for keys and key/value
 combinations, how they are classified.

-The following classifications are recognised:
+The following classifications are recognized:

 | classification | meaning |
 | :-------------- | :------ |

@@ -133,7 +133,7 @@ the same.
 In this example an object with a `boundary` tag will only be included
 when it has a value of `administrative`. Objects with `highway` tags are
 always included with two exceptions: the troll tag `highway=no` is
-deleted on the spot and when the value is `street_lamp` then the object
+deleted on the spot. And when the value is `street_lamp` then the object
 must have a name, too. Finally, if a `landuse` tag is present then
 it will be used independently of the concrete value when neither boundary
 nor highway tags were found and the object is named.

@@ -143,7 +143,7 @@ the same.
 | Name | Description |
 | :----- | :---------- |
 | admin | Basic tag set collecting places and administrative boundaries. This set is needed also to ensure proper address computation and should therefore always be present. You can disable selected place types like `place=locality` after adding this set, if they are not relevant for your use case. |
-| all_boundaries | Extends the set of recognised boundaries and places to all available ones. |
+| all_boundaries | Extends the set of recognized boundaries and places to all available ones. |
 | natural | Tags for natural features like rivers and mountain peaks. |
 | street/default | Tags for streets. Major streets are always included, minor ones only when they have a name. |
 | street/car | Tags for all streets that can be used by a motor vehicle. |

@@ -229,7 +229,7 @@ in turn take precedence over prefix matches.
 | Name | Description |
 | :----- | :---------- |
 | metatags | Tags with meta information about the OSM tag like source, notes and import sources. |
-| name | Non-names that describe in fact properties or name parts. These names can throw off search and should always be removed. |
+| name | Non-names that actually describe properties or name parts. These names can throw off search and should always be removed. |
 | address | Extra `addr:*` tags that are not useful for Nominatim. |

@@ -305,7 +305,7 @@ the database independently of the presence of other main tags.
 `set_name_tags()` overwrites the current configuration, while
 `modify_name_tags()` replaces the fields that are given. (Be aware that
 the fields are replaced as a whole. `main = {'foo_name'}` will cause
-`foo_name` to become the only recognised primary name. Any previously
+`foo_name` to become the only recognized primary name. Any previously
 defined primary names are forgotten.)

 !!! example

@@ -326,9 +326,9 @@ defined primary names are forgotten.)

 | Name | Description |
 | :----- | :---------- |
-| core | Basic set of recognised names for all places. |
+| core | Basic set of recognized names for all places. |
 | address | Additional names useful when indexing full addresses. |
-| poi | Extended set of recognised names for pois. Use on top of the core set. |
+| poi | Extended set of recognized names for pois. Use on top of the core set. |

 ### Address tags

@@ -376,8 +376,8 @@ the fields are replaced as a whole.)

 | Name | Description |
 | :----- | :---------- |
-| core | Basic set of tags needed to recognise address relationship for any place. Always include this. |
-| houses | Additional set of tags needed to recognise proper addresses |
+| core | Basic set of tags needed to recognize address relationship for any place. Always include this. |
+| houses | Additional set of tags needed to recognize proper addresses |

 ### Handling of unclassified tags

@@ -514,7 +514,7 @@ Themepark topics offer two configuration options:

 The customization functions described in the
 [Changing recognized tags](#changing-the-recognized-tags) section
-are available from the theme. To access the theme you need to explicitly initialise it.
+are available from the theme. To access the theme you need to explicitly initialize it.

 !!! Example
     ``` lua

@@ -568,7 +568,7 @@ gazetteer output.

 ## Changing the style of existing databases

-There is normally no issue changing the style of a database that is already
+There is usually no issue changing the style of a database that is already
 imported and now kept up-to-date with change files. Just be aware that any
 change in the style applies to updates only. If you want to change the data
 that is already in the database, then a reimport is necessary.
@@ -336,7 +336,7 @@ NOMINATIM_TABLESPACE_SEARCH_INDEX
 NOMINATIM_TABLESPACE_OSM_DATA
 : Raw OSM data cache used for import and updates.

-NOMINATIM_TABLESPACE_OSM_DATA
+NOMINATIM_TABLESPACE_OSM_INDEX
 : Indexes on the raw OSM data cache.

 NOMINATIM_TABLESPACE_PLACE_DATA
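The `NOMINATIM_TABLESPACE_*` settings only name tablespaces; the tablespaces themselves must already exist in the PostgreSQL cluster. A minimal sketch of how they could be set up (names and paths are placeholders, not part of the diff):

```sql
-- Illustration only: create tablespaces that settings such as
-- NOMINATIM_TABLESPACE_OSM_DATA and NOMINATIM_TABLESPACE_OSM_INDEX
-- could then point to. Names and locations are placeholders.
CREATE TABLESPACE osmdata LOCATION '/fast-disk/nominatim/osm-data';
CREATE TABLESPACE osmindex LOCATION '/fast-disk/nominatim/osm-index';
```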
@@ -50,7 +50,7 @@ queries. This happens in two stages:
    as during the import process but may involve other processing like,
    for example, word break detection.
 2. The **token analysis** step breaks down the query parts into tokens,
-   looks them up in the database and assignes them possible functions and
+   looks them up in the database and assigns them possible functions and
    probabilities.

 Query processing can be further customized while the rest of the analysis
@@ -69,9 +69,9 @@ To set up the virtual environment with all necessary packages run:
 ```sh
 virtualenv ~/nominatim-dev-venv
 ~/nominatim-dev-venv/bin/pip install\
-    psutil psycopg[binary] PyICU SQLAlchemy \
-    python-dotenv jinja2 pyYAML datrie behave \
-    mkdocs mkdocstrings mkdocs-gen-files pytest pytest-asyncio flake8 \
+    psutil 'psycopg[binary]' PyICU SQLAlchemy \
+    python-dotenv jinja2 pyYAML behave \
+    mkdocs 'mkdocstrings[python]' mkdocs-gen-files pytest pytest-asyncio flake8 \
     types-jinja2 types-markupsafe types-psutil types-psycopg2 \
     types-pygments types-pyyaml types-requests types-ujson \
     types-urllib3 typing-extensions uvicorn falcon starlette \
@@ -60,13 +60,19 @@ The order of phrases matters to Nominatim when doing further processing.
 Thus, while you may split or join phrases, you should not reorder them
 unless you really know what you are doing.

-Phrase types (`nominatim_api.search.PhraseType`) can further help narrowing
-down how the tokens in the phrase are interpreted. The following phrase types
-are known:
+Phrase types can further help narrowing down how the tokens in the phrase
+are interpreted. The following phrase types are known:

-::: nominatim_api.search.PhraseType
-    options:
-        heading_level: 6
+| Name | Description |
+|----------------|-------------|
+| PHRASE_ANY | No specific designation (i.e. source is free-form query) |
+| PHRASE_AMENITY | Contains name or type of a POI |
+| PHRASE_STREET | Contains a street name optionally with a housenumber |
+| PHRASE_CITY | Contains the postal city |
+| PHRASE_COUNTY | Contains the equivalent of a county |
+| PHRASE_STATE | Contains a state or province |
+| PHRASE_POSTCODE| Contains a postal code |
+| PHRASE_COUNTRY | Contains the country name or code |

 ## Custom sanitizer modules
@@ -95,7 +95,7 @@ Nominatim expects two files containing the Python part of the implementation:

 * `src/nominatim_db/tokenizer/<NAME>_tokenizer.py` contains the tokenizer
   code used during import and
-* `src/nominatim_api/search/NAME>_tokenizer.py` has the code used during
+* `src/nominatim_api/search/<NAME>_tokenizer.py` has the code used during
   query time.

 `<NAME>` is a unique name for the tokenizer consisting of only lower-case
@@ -425,7 +425,7 @@ function Place:write_row(k, v)
     if self.geometry == nil then
         self.geometry = self.geom_func(self.object)
     end
-    if self.geometry:is_null() then
+    if self.geometry == nil or self.geometry:is_null() then
         return 0
     end

@@ -608,6 +608,9 @@ function module.process_way(object)

     if geom:is_null() then
         geom = o:as_linestring()
+        if geom:is_null() or geom:length() > 30 then
+            return nil
+        end
     end

     return geom
@@ -8,7 +8,6 @@
 {% include('functions/utils.sql') %}
 {% include('functions/ranking.sql') %}
 {% include('functions/importance.sql') %}
-{% include('functions/address_lookup.sql') %}
 {% include('functions/interpolation.sql') %}

 {% if 'place' in db.tables %}
functions/address_lookup.sql (deleted, 334 lines)

@@ -1,334 +0,0 @@
--- SPDX-License-Identifier: GPL-2.0-only
---
--- This file is part of Nominatim. (https://nominatim.org)
---
--- Copyright (C) 2022 by the Nominatim developer community.
--- For a full list of authors see the git log.
-
--- Functions for returning address information for a place.
-
-DROP TYPE IF EXISTS addressline CASCADE;
-CREATE TYPE addressline as (
-  place_id BIGINT,
-  osm_type CHAR(1),
-  osm_id BIGINT,
-  name HSTORE,
-  class TEXT,
-  type TEXT,
-  place_type TEXT,
-  admin_level INTEGER,
-  fromarea BOOLEAN,
-  isaddress BOOLEAN,
-  rank_address INTEGER,
-  distance FLOAT
-);
-
-
-CREATE OR REPLACE FUNCTION get_name_by_language(name hstore, languagepref TEXT[])
-  RETURNS TEXT
-  AS $$
-DECLARE
-  result TEXT;
-BEGIN
-  IF name is null THEN
-    RETURN null;
-  END IF;
-
-  FOR j IN 1..array_upper(languagepref,1) LOOP
-    IF name ? languagepref[j] THEN
-      result := trim(name->languagepref[j]);
-      IF result != '' THEN
-        return result;
-      END IF;
-    END IF;
-  END LOOP;
-
-  -- as a fallback - take the last element since it is the default name
-  RETURN trim((avals(name))[array_length(avals(name), 1)]);
-END;
-$$
-LANGUAGE plpgsql IMMUTABLE;
-
-
---housenumber only needed for tiger data
-CREATE OR REPLACE FUNCTION get_address_by_language(for_place_id BIGINT,
-                                                   housenumber INTEGER,
-                                                   languagepref TEXT[])
-  RETURNS TEXT
-  AS $$
-DECLARE
-  result TEXT[];
-  currresult TEXT;
-  prevresult TEXT;
-  location RECORD;
-BEGIN
-
-  result := '{}';
-  prevresult := '';
-
-  FOR location IN
-    SELECT name,
-           CASE WHEN place_id = for_place_id THEN 99 ELSE rank_address END as rank_address
-    FROM get_addressdata(for_place_id, housenumber)
-    WHERE isaddress order by rank_address desc
-  LOOP
-    currresult := trim(get_name_by_language(location.name, languagepref));
-    IF currresult != prevresult AND currresult IS NOT NULL
-       AND result[(100 - location.rank_address)] IS NULL
-    THEN
-      result[(100 - location.rank_address)] := currresult;
-      prevresult := currresult;
-    END IF;
-  END LOOP;
-
-  RETURN array_to_string(result,', ');
-END;
-$$
-LANGUAGE plpgsql STABLE;
-
-DROP TYPE IF EXISTS addressdata_place;
-CREATE TYPE addressdata_place AS (
-  place_id BIGINT,
-  country_code VARCHAR(2),
-  housenumber TEXT,
-  postcode TEXT,
-  class TEXT,
-  type TEXT,
-  name HSTORE,
-  address HSTORE,
-  centroid GEOMETRY
-);
-
--- Compute the list of address parts for the given place.
---
--- If in_housenumber is greator or equal 0, look for an interpolation.
-CREATE OR REPLACE FUNCTION get_addressdata(in_place_id BIGINT, in_housenumber INTEGER)
-  RETURNS setof addressline
-  AS $$
-DECLARE
-  place addressdata_place;
-  location RECORD;
-  country RECORD;
-  current_rank_address INTEGER;
-  location_isaddress BOOLEAN;
-BEGIN
-  -- The place in question might not have a direct entry in place_addressline.
-  -- Look for the parent of such places then and save it in place.
-
-  -- first query osmline (interpolation lines)
-  IF in_housenumber >= 0 THEN
-    SELECT parent_place_id as place_id, country_code,
-           in_housenumber as housenumber, postcode,
-           'place' as class, 'house' as type,
-           null as name, null as address,
-           ST_Centroid(linegeo) as centroid
-      INTO place
-      FROM location_property_osmline
-      WHERE place_id = in_place_id
-            AND in_housenumber between startnumber and endnumber;
-  END IF;
-
-  --then query tiger data
-  {% if config.get_bool('USE_US_TIGER_DATA') %}
-  IF place IS NULL AND in_housenumber >= 0 THEN
-    SELECT parent_place_id as place_id, 'us' as country_code,
-           in_housenumber as housenumber, postcode,
-           'place' as class, 'house' as type,
-           null as name, null as address,
-           ST_Centroid(linegeo) as centroid
-      INTO place
-      FROM location_property_tiger
-      WHERE place_id = in_place_id
-            AND in_housenumber between startnumber and endnumber;
-  END IF;
-  {% endif %}
-
-  -- postcode table
-  IF place IS NULL THEN
-    SELECT parent_place_id as place_id, country_code,
-           null::text as housenumber, postcode,
-           'place' as class, 'postcode' as type,
-           null as name, null as address,
-           null as centroid
-      INTO place
-      FROM location_postcode
-      WHERE place_id = in_place_id;
-  END IF;
-
-  -- POI objects in the placex table
-  IF place IS NULL THEN
-    SELECT parent_place_id as place_id, country_code,
-           coalesce(address->'housenumber',
-                    address->'streetnumber',
-                    address->'conscriptionnumber')::text as housenumber,
-           postcode,
-           class, type,
-           name, address,
-           centroid
-      INTO place
-      FROM placex
-      WHERE place_id = in_place_id and rank_search > 27;
-  END IF;
-
-  -- If place is still NULL at this point then the object has its own
-  -- entry in place_address line. However, still check if there is not linked
-  -- place we should be using instead.
-  IF place IS NULL THEN
-    select coalesce(linked_place_id, place_id) as place_id, country_code,
-           null::text as housenumber, postcode,
-           class, type,
-           null as name, address,
-           null as centroid
-      INTO place
-      FROM placex where place_id = in_place_id;
-  END IF;
-
---RAISE WARNING '% % % %',searchcountrycode, searchhousenumber, searchpostcode;
-
-  -- --- Return the record for the base entry.
-
-  current_rank_address := 1000;
-  FOR location IN
-    SELECT placex.place_id, osm_type, osm_id, name,
-           coalesce(extratags->'linked_place', extratags->'place') as place_type,
-           class, type, admin_level,
-           CASE WHEN rank_address = 0 THEN 100
-                WHEN rank_address = 11 THEN 5
-                ELSE rank_address END as rank_address,
-           country_code
-      FROM placex
-      WHERE place_id = place.place_id
-  LOOP
-    --RAISE WARNING '%',location;
-    -- mix in default names for countries
-    IF location.rank_address = 4 and place.country_code is not NULL THEN
-      FOR country IN
-        SELECT coalesce(name, ''::hstore) as name FROM country_name
-          WHERE country_code = place.country_code LIMIT 1
-      LOOP
-        place.name := country.name || place.name;
-      END LOOP;
-    END IF;
-
-    IF location.rank_address < 4 THEN
-      -- no country locations for ranks higher than country
-      place.country_code := NULL::varchar(2);
-    ELSEIF place.country_code IS NULL AND location.country_code IS NOT NULL THEN
-      place.country_code := location.country_code;
-    END IF;
-
-    RETURN NEXT ROW(location.place_id, location.osm_type, location.osm_id,
-                    location.name, location.class, location.type,
-                    location.place_type,
-                    location.admin_level, true,
-                    location.type not in ('postcode', 'postal_code'),
-                    location.rank_address, 0)::addressline;
-
-    current_rank_address := location.rank_address;
-  END LOOP;
-
-  -- --- Return records for address parts.
-
-  FOR location IN
-    SELECT placex.place_id, osm_type, osm_id, name, class, type,
-           coalesce(extratags->'linked_place', extratags->'place') as place_type,
-           admin_level, fromarea, isaddress,
-           CASE WHEN rank_address = 11 THEN 5 ELSE rank_address END as rank_address,
-           distance, country_code, postcode
-      FROM place_addressline join placex on (address_place_id = placex.place_id)
-      WHERE place_addressline.place_id IN (place.place_id, in_place_id)
-            AND linked_place_id is null
-            AND (placex.country_code IS NULL OR place.country_code IS NULL
-                 OR placex.country_code = place.country_code)
-      ORDER BY rank_address desc,
-               (place_addressline.place_id = in_place_id) desc,
-               (CASE WHEN coalesce((avals(name) && avals(place.address)), False) THEN 2
-                     WHEN isaddress THEN 0
-                     WHEN fromarea
-                          and place.centroid is not null
-                          and ST_Contains(geometry, place.centroid) THEN 1
-                     ELSE -1 END) desc,
-               fromarea desc, distance asc, rank_search desc
-  LOOP
-    -- RAISE WARNING '%',location;
-    location_isaddress := location.rank_address != current_rank_address;
-
-    IF place.country_code IS NULL AND location.country_code IS NOT NULL THEN
-      place.country_code := location.country_code;
-    END IF;
-    IF location.type in ('postcode', 'postal_code')
-       AND place.postcode is not null
-    THEN
-      -- If the place had a postcode assigned, take this one only
-      -- into consideration when it is an area and the place does not have
-      -- a postcode itself.
-      IF location.fromarea AND location_isaddress
-         AND (place.address is null or not place.address ? 'postcode')
-      THEN
-        place.postcode := null; -- remove the less exact postcode
-      ELSE
-        location_isaddress := false;
-      END IF;
-    END IF;
-    RETURN NEXT ROW(location.place_id, location.osm_type, location.osm_id,
-                    location.name, location.class, location.type,
-                    location.place_type,
-                    location.admin_level, location.fromarea,
-                    location_isaddress,
-                    location.rank_address,
-                    location.distance)::addressline;
-
-    current_rank_address := location.rank_address;
-  END LOOP;
-
-  -- If no country was included yet, add the name information from country_name.
-  IF current_rank_address > 4 THEN
-    FOR location IN
-      SELECT name || coalesce(derived_name, ''::hstore) as name FROM country_name
-        WHERE country_code = place.country_code LIMIT 1
-    LOOP
-      --RAISE WARNING '% % %',current_rank_address,searchcountrycode,countryname;
-      RETURN NEXT ROW(null, null, null, location.name, 'place', 'country', NULL,
-                      null, true, true, 4, 0)::addressline;
-    END LOOP;
-  END IF;
-
-  -- Finally add some artificial rows.
-  IF place.country_code IS NOT NULL THEN
-    location := ROW(null, null, null, hstore('ref', place.country_code),
-                    'place', 'country_code', null, null, true, false, 4, 0)::addressline;
-    RETURN NEXT location;
-  END IF;
-
-  IF place.name IS NOT NULL THEN
-    location := ROW(in_place_id, null, null, place.name, place.class,
-                    place.type, null, null, true, true, 29, 0)::addressline;
-    RETURN NEXT location;
-  END IF;
-
-  IF place.housenumber IS NOT NULL THEN
-    location := ROW(null, null, null, hstore('ref', place.housenumber),
-                    'place', 'house_number', null, null, true, true, 28, 0)::addressline;
-    RETURN NEXT location;
-  END IF;
-
-  IF place.address is not null and place.address ? '_unlisted_place' THEN
-    RETURN NEXT ROW(null, null, null, hstore('name', place.address->'_unlisted_place'),
-                    'place', 'locality', null, null, true, true, 25, 0)::addressline;
-  END IF;
-
-  IF place.postcode is not null THEN
-    location := ROW(null, null, null, hstore('ref', place.postcode), 'place',
-                    'postcode', null, null, false, true, 5, 0)::addressline;
-    RETURN NEXT location;
-  ELSEIF place.address is not null and place.address ? 'postcode'
-         and not place.address->'postcode' SIMILAR TO '%(,|;)%' THEN
-    location := ROW(null, null, null, hstore('ref', place.address->'postcode'), 'place',
-                    'postcode', null, null, false, true, 5, 0)::addressline;
-    RETURN NEXT location;
-  END IF;
-
-  RETURN;
-END;
-$$
-LANGUAGE plpgsql STABLE;
@@ -65,7 +65,7 @@ BEGIN
   RETURN NULL;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;

 {% else %}

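For context, `PARALLEL SAFE` tells the PostgreSQL planner that a function may be executed inside parallel worker processes; functions default to `PARALLEL UNSAFE`, which forces any query calling them into a serial plan. A minimal, self-contained sketch of the labelling pattern (a generic function, not taken from Nominatim):

```sql
-- A function that neither modifies the database nor relies on
-- per-backend state qualifies as PARALLEL SAFE. Marking it keeps
-- queries that call it eligible for parallel execution.
CREATE OR REPLACE FUNCTION weighted_rank(rank INTEGER, distance FLOAT)
  RETURNS FLOAT
  AS $$
BEGIN
  RETURN rank + distance * 0.1;
END;
$$
LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
```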
@@ -78,7 +78,7 @@ SELECT convert_from(CAST(E'\\x' || array_to_string(ARRAY(
     FROM regexp_matches($1, '%[0-9a-f][0-9a-f]|.', 'gi') AS r(m)
   ), '') AS bytea), 'UTF8');
 $$
-LANGUAGE SQL IMMUTABLE STRICT;
+LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;


 CREATE OR REPLACE FUNCTION catch_decode_url_part(p varchar)

@@ -91,7 +91,7 @@ EXCEPTION
   WHEN others THEN return null;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE STRICT;
+LANGUAGE plpgsql IMMUTABLE STRICT PARALLEL SAFE;


 CREATE OR REPLACE FUNCTION get_wikipedia_match(extratags HSTORE, country_code varchar(2))
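The two functions touched above implement percent-decoding in SQL: the first rebuilds the raw byte string from `%xx` escapes and reinterprets it as UTF8, and `catch_decode_url_part()` wraps it to swallow decoding errors. A standalone illustration of the underlying conversion, using only PostgreSQL built-ins (the input literal is a made-up example):

```sql
-- '\x436166c3a9' is the hex form of the bytes 43 61 66 C3 A9,
-- i.e. 'C', 'a', 'f' plus the UTF8 sequence for 'é'. Casting the
-- string to bytea and converting from UTF8 yields 'Café'.
SELECT convert_from(CAST(E'\\x436166c3a9' AS bytea), 'UTF8');
```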
@@ -139,7 +139,7 @@ BEGIN
   RETURN NULL;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;

 {% endif %}

@@ -203,5 +203,5 @@ BEGIN
   RETURN result;
 END;
 $$
-LANGUAGE plpgsql;
+LANGUAGE plpgsql PARALLEL SAFE;

@@ -34,7 +34,7 @@ BEGIN
   RETURN in_address;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;


@@ -70,7 +70,7 @@ BEGIN
   RETURN parent_place_id;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;


 CREATE OR REPLACE FUNCTION reinsert_interpolation(way_id BIGINT, addr HSTORE,
@@ -17,28 +17,6 @@ CREATE TYPE nearfeaturecentr AS (
   centroid GEOMETRY
 );

--- feature intersects geometry
--- for areas and linestrings they must touch at least along a line
-CREATE OR REPLACE FUNCTION is_relevant_geometry(de9im TEXT, geom_type TEXT)
-RETURNS BOOLEAN
-AS $$
-BEGIN
-  IF substring(de9im from 1 for 2) != 'FF' THEN
-    RETURN TRUE;
-  END IF;
-
-  IF geom_type = 'ST_Point' THEN
-    RETURN substring(de9im from 4 for 1) = '0';
-  END IF;
-
-  IF geom_type in ('ST_LineString', 'ST_MultiLineString') THEN
-    RETURN substring(de9im from 4 for 1) = '1';
-  END IF;
-
-  RETURN substring(de9im from 4 for 1) = '2';
-END
-$$ LANGUAGE plpgsql IMMUTABLE;
-
 CREATE OR REPLACE function getNearFeatures(in_partition INTEGER, feature GEOMETRY,
                                            feature_centroid GEOMETRY,
                                            maxrank INTEGER)
@@ -59,7 +37,12 @@ BEGIN
              isguess, postcode, centroid
         FROM location_area_large_{{ partition }}
         WHERE geometry && feature
-          AND is_relevant_geometry(ST_Relate(geometry, feature), ST_GeometryType(feature))
+          AND CASE WHEN ST_Dimension(feature) = 0
+                     THEN _ST_Covers(geometry, feature)
+                   WHEN ST_Dimension(feature) = 2
+                     THEN ST_Relate(geometry, feature, 'T********')
+                   ELSE ST_NPoints(ST_Intersection(geometry, feature)) > 1
+              END
           AND rank_address < maxrank
           -- Postcodes currently still use rank_search to define for which
           -- features they are relevant.
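This inlines what `is_relevant_geometry()` used to derive from the DE-9IM string: a point feature must be covered by the candidate area, an areal feature must share interior with it (relate pattern `T********`), and a linear feature must intersect it in more than a single point. An illustrative session with hand-made geometries, using only standard PostGIS functions (not part of the patch):

```sql
-- Area feature: interiors intersect, so DE-9IM pattern 'T********' matches.
SELECT ST_Relate('POLYGON((0 0,4 0,4 4,0 4,0 0))'::geometry,
                 'POLYGON((2 2,6 2,6 6,2 6,2 2))'::geometry,
                 'T********');                                  -- true

-- Point feature: it must lie inside or on the boundary of the area
-- (_ST_Covers is the index-less variant also used in the diff above).
SELECT _ST_Covers('POLYGON((0 0,4 0,4 4,0 4,0 0))'::geometry,
                  'POINT(1 1)'::geometry);                      -- true

-- Line feature: the intersection must be more than a single point.
SELECT ST_NPoints(ST_Intersection(
         'POLYGON((0 0,4 0,4 4,0 4,0 0))'::geometry,
         'LINESTRING(-1 2,5 2)'::geometry)) > 1;                -- true
```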
@@ -75,7 +58,7 @@ BEGIN
   RAISE EXCEPTION 'Unknown partition %', in_partition;
 END
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;


 CREATE OR REPLACE FUNCTION get_address_place(in_partition SMALLINT, feature GEOMETRY,

@@ -104,7 +87,7 @@ BEGIN
   RAISE EXCEPTION 'Unknown partition %', in_partition;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;


 create or replace function deleteLocationArea(in_partition INTEGER, in_place_id BIGINT, in_rank_search INTEGER) RETURNS BOOLEAN AS $$
@@ -142,14 +125,16 @@ BEGIN
|
|||||||
|
|
||||||
IF in_rank_search <= 4 and not in_estimate THEN
|
IF in_rank_search <= 4 and not in_estimate THEN
|
||||||
INSERT INTO location_area_country (place_id, country_code, geometry)
|
INSERT INTO location_area_country (place_id, country_code, geometry)
|
||||||
values (in_place_id, in_country_code, in_geometry);
|
(SELECT in_place_id, in_country_code, geom
|
||||||
|
FROM split_geometry(in_geometry) as geom);
|
||||||
RETURN TRUE;
|
RETURN TRUE;
|
||||||
END IF;
|
END IF;
|
||||||
|
|
||||||
{% for partition in db.partitions %}
|
{% for partition in db.partitions %}
|
||||||
IF in_partition = {{ partition }} THEN
|
IF in_partition = {{ partition }} THEN
|
||||||
INSERT INTO location_area_large_{{ partition }} (partition, place_id, country_code, keywords, rank_search, rank_address, isguess, postcode, centroid, geometry)
|
INSERT INTO location_area_large_{{ partition }} (partition, place_id, country_code, keywords, rank_search, rank_address, isguess, postcode, centroid, geometry)
|
||||||
values (in_partition, in_place_id, in_country_code, in_keywords, in_rank_search, in_rank_address, in_estimate, postcode, in_centroid, in_geometry);
|
(SELECT in_partition, in_place_id, in_country_code, in_keywords, in_rank_search, in_rank_address, in_estimate, postcode, in_centroid, geom
|
||||||
|
FROM split_geometry(in_geometry) as geom);
|
||||||
RETURN TRUE;
|
RETURN TRUE;
|
||||||
END IF;
|
END IF;
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
@@ -187,7 +172,7 @@ BEGIN
|
|||||||
RAISE EXCEPTION 'Unknown partition %', in_partition;
|
RAISE EXCEPTION 'Unknown partition %', in_partition;
|
||||||
END
|
END
|
||||||
$$
|
$$
|
||||||
LANGUAGE plpgsql STABLE;
|
LANGUAGE plpgsql STABLE PARALLEL SAFE;
|
||||||
|
|
||||||
CREATE OR REPLACE FUNCTION getNearestNamedPlacePlaceId(in_partition INTEGER,
|
CREATE OR REPLACE FUNCTION getNearestNamedPlacePlaceId(in_partition INTEGER,
|
||||||
point GEOMETRY,
|
point GEOMETRY,
|
||||||
@@ -217,7 +202,7 @@ BEGIN
|
|||||||
RAISE EXCEPTION 'Unknown partition %', in_partition;
|
RAISE EXCEPTION 'Unknown partition %', in_partition;
|
||||||
END
|
END
|
||||||
$$
|
$$
|
||||||
LANGUAGE plpgsql STABLE;
|
LANGUAGE plpgsql STABLE PARALLEL SAFE;
|
||||||
|
|
||||||
create or replace function insertSearchName(
|
create or replace function insertSearchName(
|
||||||
in_partition INTEGER, in_place_id BIGINT, in_name_vector INTEGER[],
|
in_partition INTEGER, in_place_id BIGINT, in_name_vector INTEGER[],
|
||||||
@@ -325,7 +310,7 @@ BEGIN
|
|||||||
RAISE EXCEPTION 'Unknown partition %', in_partition;
|
RAISE EXCEPTION 'Unknown partition %', in_partition;
|
||||||
END
|
END
|
||||||
$$
|
$$
|
||||||
LANGUAGE plpgsql STABLE;
|
LANGUAGE plpgsql STABLE PARALLEL SAFE;
|
||||||
|
|
||||||
CREATE OR REPLACE FUNCTION getNearestParallelRoadFeature(in_partition INTEGER,
|
CREATE OR REPLACE FUNCTION getNearestParallelRoadFeature(in_partition INTEGER,
|
||||||
line GEOMETRY)
|
line GEOMETRY)
|
||||||
@@ -369,4 +354,4 @@ BEGIN
|
|||||||
RAISE EXCEPTION 'Unknown partition %', in_partition;
|
RAISE EXCEPTION 'Unknown partition %', in_partition;
|
||||||
END
|
END
|
||||||
$$
|
$$
|
||||||
LANGUAGE plpgsql STABLE;
|
LANGUAGE plpgsql STABLE PARALLEL SAFE;
|
||||||
|
|||||||
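The deleted is_relevant_geometry() helper classified a precomputed DE-9IM string; the rewritten getNearFeatures() asks PostGIS the equivalent question directly, branching on the feature's dimension: point features must be covered by the area, area features must share interior with it, and line features must intersect it in more than a single point. A hedged sketch of the area branch with made-up coordinates:

    -- 'T********' demands intersecting interiors, so two polygons that merely
    -- touch along an edge do not qualify; these two overlap and return true.
    SELECT ST_Relate(
             ST_GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))'),
             ST_GeomFromText('POLYGON((1 1, 1 3, 3 3, 3 1, 1 1))'),
             'T********');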
@@ -109,7 +109,7 @@ BEGIN
   RETURN result;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION find_associated_street(poi_osm_type CHAR(1),
@@ -200,7 +200,7 @@ BEGIN
   RETURN result;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 
 -- Find the parent road of a POI.
@@ -286,7 +286,7 @@ BEGIN
   RETURN parent_place_id;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 -- Try to find a linked place for the given object.
 CREATE OR REPLACE FUNCTION find_linked_place(bnd placex)
@@ -404,7 +404,7 @@ BEGIN
   RETURN NULL;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION create_poi_search_terms(obj_place_id BIGINT,
@@ -29,7 +29,7 @@ BEGIN
   RETURN 0.02;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 -- Return an approximate update radius according to the search rank.
@@ -60,7 +60,7 @@ BEGIN
   RETURN 0;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 -- Compute a base address rank from the extent of the given geometry.
 --
@@ -107,7 +107,7 @@ BEGIN
   RETURN 23;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 -- Guess a ranking for postcodes from country and postcode format.
@@ -167,7 +167,7 @@ BEGIN
 
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 -- Get standard search and address rank for an object.
@@ -236,7 +236,7 @@ BEGIN
   END IF;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 CREATE OR REPLACE FUNCTION get_addr_tag_rank(key TEXT, country TEXT,
                                              OUT from_rank SMALLINT,
@@ -283,7 +283,7 @@ BEGIN
   END LOOP;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION weigh_search(search_vector INT[],
@@ -304,4 +304,4 @@ BEGIN
   RETURN def_weight;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
@@ -24,7 +24,7 @@ BEGIN
   RETURN ST_PointOnSurface(place);
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION geometry_sector(partition INTEGER, place GEOMETRY)
@@ -34,7 +34,7 @@ BEGIN
   RETURN (partition*1000000) + (500-ST_X(place)::INTEGER)*1000 + (500-ST_Y(place)::INTEGER);
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 
@@ -60,7 +60,7 @@ BEGIN
   RETURN r;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 -- Return the node members with a given label from a relation member list
 -- as a set.
@@ -88,7 +88,7 @@ BEGIN
   RETURN;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION get_rel_node_members(members JSONB, memberLabels TEXT[])
@@ -107,7 +107,7 @@ BEGIN
   RETURN;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 -- Copy 'name' to or from the default language.
@@ -136,7 +136,7 @@ BEGIN
   END IF;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 -- Find the nearest artificial postcode for the given geometry.
@@ -172,7 +172,7 @@ BEGIN
   RETURN outcode;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION get_country_code(place geometry)
@@ -233,7 +233,7 @@ BEGIN
   RETURN NULL;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION get_country_language_code(search_country_code VARCHAR(2))
@@ -251,7 +251,7 @@ BEGIN
   RETURN NULL;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION get_partition(in_country_code VARCHAR(10))
@@ -268,7 +268,7 @@ BEGIN
   RETURN 0;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 
 -- Find the parent of an address with addr:street/addr:place tag.
@@ -299,7 +299,7 @@ BEGIN
   RETURN parent_place_id;
 END;
 $$
-LANGUAGE plpgsql STABLE;
+LANGUAGE plpgsql STABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION delete_location(OLD_place_id BIGINT)
@@ -337,7 +337,7 @@ BEGIN
                        ST_Project(geom::geography, radius, 3.9269908)::geometry));
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION add_location(place_id BIGINT, country_code varchar(2),
@@ -348,8 +348,6 @@ CREATE OR REPLACE FUNCTION add_location(place_id BIGINT, country_code varchar(2)
   RETURNS BOOLEAN
   AS $$
 DECLARE
-  locationid INTEGER;
-  secgeo GEOMETRY;
   postcode TEXT;
 BEGIN
   PERFORM deleteLocationArea(partition, place_id, rank_search);
@@ -360,18 +358,19 @@ BEGIN
     postcode := upper(trim (in_postcode));
   END IF;
 
-  IF ST_GeometryType(geometry) in ('ST_Polygon','ST_MultiPolygon') THEN
-    FOR secgeo IN select split_geometry(geometry) AS geom LOOP
-      PERFORM insertLocationAreaLarge(partition, place_id, country_code, keywords, rank_search, rank_address, false, postcode, centroid, secgeo);
-    END LOOP;
+  IF ST_Dimension(geometry) = 2 THEN
+    RETURN insertLocationAreaLarge(partition, place_id, country_code, keywords,
+                                   rank_search, rank_address, false, postcode,
+                                   centroid, geometry);
 
-  ELSEIF ST_GeometryType(geometry) = 'ST_Point' THEN
-    secgeo := place_node_fuzzy_area(geometry, rank_search);
-    PERFORM insertLocationAreaLarge(partition, place_id, country_code, keywords, rank_search, rank_address, true, postcode, centroid, secgeo);
 
   END IF;
 
-  RETURN true;
+  IF ST_Dimension(geometry) = 0 THEN
+    RETURN insertLocationAreaLarge(partition, place_id, country_code, keywords,
+                                   rank_search, rank_address, true, postcode,
+                                   centroid, place_node_fuzzy_area(geometry, rank_search));
+  END IF;
+
+  RETURN false;
 END;
 $$
 LANGUAGE plpgsql;
@@ -394,19 +393,21 @@ DECLARE
   geo RECORD;
   area FLOAT;
   remainingdepth INTEGER;
-  added INTEGER;
 BEGIN
 
 -- RAISE WARNING 'quad_split_geometry: maxarea=%, depth=%',maxarea,maxdepth;
 
-  IF (ST_GeometryType(geometry) not in ('ST_Polygon','ST_MultiPolygon') OR NOT ST_IsValid(geometry)) THEN
+  IF not ST_IsValid(geometry) THEN
+    RETURN;
+  END IF;
+
+  IF ST_Dimension(geometry) != 2 OR maxdepth <= 1 THEN
     RETURN NEXT geometry;
     RETURN;
   END IF;
 
   remainingdepth := maxdepth - 1;
   area := ST_AREA(geometry);
-  IF remainingdepth < 1 OR area < maxarea THEN
+  IF area < maxarea THEN
     RETURN NEXT geometry;
     RETURN;
   END IF;
@@ -426,7 +427,6 @@ BEGIN
   xmid := (xmin+xmax)/2;
   ymid := (ymin+ymax)/2;
 
-  added := 0;
   FOR seg IN 1..4 LOOP
 
     IF seg = 1 THEN
@@ -442,23 +442,20 @@ BEGIN
       secbox := ST_SetSRID(ST_MakeBox2D(ST_Point(xmid,ymid),ST_Point(xmax,ymax)),4326);
     END IF;
 
-    IF st_intersects(geometry, secbox) THEN
-      secgeo := st_intersection(geometry, secbox);
-      IF NOT ST_IsEmpty(secgeo) AND ST_GeometryType(secgeo) in ('ST_Polygon','ST_MultiPolygon') THEN
-        FOR geo IN select quad_split_geometry(secgeo, maxarea, remainingdepth) as geom LOOP
-          IF NOT ST_IsEmpty(geo.geom) AND ST_GeometryType(geo.geom) in ('ST_Polygon','ST_MultiPolygon') THEN
-            added := added + 1;
-            RETURN NEXT geo.geom;
-          END IF;
-        END LOOP;
-      END IF;
+    secgeo := st_intersection(geometry, secbox);
+    IF NOT ST_IsEmpty(secgeo) AND ST_Dimension(secgeo) = 2 THEN
+      FOR geo IN SELECT quad_split_geometry(secgeo, maxarea, remainingdepth) as geom LOOP
+        IF NOT ST_IsEmpty(geo.geom) AND ST_Dimension(geo.geom) = 2 THEN
+          RETURN NEXT geo.geom;
+        END IF;
+      END LOOP;
     END IF;
   END LOOP;
 
   RETURN;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION split_geometry(geometry GEOMETRY)
@@ -467,14 +464,26 @@ CREATE OR REPLACE FUNCTION split_geometry(geometry GEOMETRY)
 DECLARE
   geo RECORD;
 BEGIN
-  -- 10000000000 is ~~ 1x1 degree
-  FOR geo IN select quad_split_geometry(geometry, 0.25, 20) as geom LOOP
-    RETURN NEXT geo.geom;
-  END LOOP;
+  IF ST_GeometryType(geometry) = 'ST_MultiPolygon'
+     and ST_Area(geometry) * 10 > ST_Area(Box2D(geometry))
+  THEN
+    FOR geo IN
+      SELECT quad_split_geometry(g, 0.25, 20) as geom
+        FROM (SELECT (ST_Dump(geometry)).geom::geometry(Polygon, 4326) AS g) xx
+    LOOP
+      RETURN NEXT geo.geom;
+    END LOOP;
+  ELSE
+    FOR geo IN
+      SELECT quad_split_geometry(geometry, 0.25, 20) as geom
+    LOOP
+      RETURN NEXT geo.geom;
+    END LOOP;
+  END IF;
   RETURN;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 CREATE OR REPLACE FUNCTION simplify_large_polygons(geometry GEOMETRY)
   RETURNS GEOMETRY
@@ -488,7 +497,7 @@ BEGIN
   RETURN geometry;
 END;
 $$
-LANGUAGE plpgsql IMMUTABLE;
+LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION place_force_delete(placeid BIGINT)
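With this rework, the insert paths in the partition functions shown earlier push whole geometries through split_geometry(), which now hands oversized multipolygons to quad_split_geometry() polygon part by polygon part; anything larger than 0.25 square degrees is quartered recursively up to depth 20. A hedged usage sketch against a synthetic envelope (the input geometry is invented, not taken from the diff):

    -- A 2x2 degree box comes back as several tiles, each at most 0.25 deg² big.
    SELECT ST_Area(geom) AS piece_area
      FROM split_geometry(ST_MakeEnvelope(0, 0, 2, 2, 4326)) AS geom;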
@@ -12,7 +12,7 @@ CREATE OR REPLACE FUNCTION token_get_name_search_tokens(info JSONB)
   RETURNS INTEGER[]
 AS $$
   SELECT (info->>'names')::INTEGER[]
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 -- Get tokens for matching the place name against others.
@@ -22,7 +22,7 @@ CREATE OR REPLACE FUNCTION token_get_name_match_tokens(info JSONB)
   RETURNS INTEGER[]
 AS $$
   SELECT (info->>'names')::INTEGER[]
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 -- Return the housenumber tokens applicable for the place.
@@ -30,7 +30,7 @@ CREATE OR REPLACE FUNCTION token_get_housenumber_search_tokens(info JSONB)
   RETURNS INTEGER[]
 AS $$
   SELECT (info->>'hnr_tokens')::INTEGER[]
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 -- Return the housenumber in the form that it can be matched during search.
@@ -38,77 +38,77 @@ CREATE OR REPLACE FUNCTION token_normalized_housenumber(info JSONB)
   RETURNS TEXT
 AS $$
   SELECT info->>'hnr';
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_is_street_address(info JSONB)
   RETURNS BOOLEAN
 AS $$
   SELECT info->>'street' is not null or info->>'place' is null;
-$$ LANGUAGE SQL IMMUTABLE;
+$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_has_addr_street(info JSONB)
   RETURNS BOOLEAN
 AS $$
   SELECT info->>'street' is not null and info->>'street' != '{}';
-$$ LANGUAGE SQL IMMUTABLE;
+$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_has_addr_place(info JSONB)
   RETURNS BOOLEAN
 AS $$
   SELECT info->>'place' is not null;
-$$ LANGUAGE SQL IMMUTABLE;
+$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_matches_street(info JSONB, street_tokens INTEGER[])
   RETURNS BOOLEAN
 AS $$
   SELECT (info->>'street')::INTEGER[] && street_tokens
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_matches_place(info JSONB, place_tokens INTEGER[])
   RETURNS BOOLEAN
 AS $$
   SELECT (info->>'place')::INTEGER[] <@ place_tokens
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_addr_place_search_tokens(info JSONB)
   RETURNS INTEGER[]
 AS $$
   SELECT (info->>'place')::INTEGER[]
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_get_address_keys(info JSONB)
   RETURNS SETOF TEXT
 AS $$
   SELECT * FROM jsonb_object_keys(info->'addr');
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_get_address_search_tokens(info JSONB, key TEXT)
   RETURNS INTEGER[]
 AS $$
   SELECT (info->'addr'->>key)::INTEGER[];
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_matches_address(info JSONB, key TEXT, tokens INTEGER[])
   RETURNS BOOLEAN
 AS $$
   SELECT (info->'addr'->>key)::INTEGER[] <@ tokens;
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 CREATE OR REPLACE FUNCTION token_get_postcode(info JSONB)
   RETURNS TEXT
 AS $$
   SELECT info->>'postcode';
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 
 -- Return token info that should be saved permanently in the database.
@@ -116,7 +116,7 @@ CREATE OR REPLACE FUNCTION token_strip_info(info JSONB)
   RETURNS JSONB
 AS $$
   SELECT NULL::JSONB;
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
 
 --------------- private functions ----------------------------------------------
 
@@ -128,16 +128,14 @@ DECLARE
   partial_terms TEXT[] = '{}'::TEXT[];
   term TEXT;
   term_id INTEGER;
-  term_count INTEGER;
 BEGIN
   SELECT min(word_id) INTO full_token
     FROM word WHERE word = norm_term and type = 'W';
 
   IF full_token IS NULL THEN
     full_token := nextval('seq_word');
-    INSERT INTO word (word_id, word_token, type, word, info)
-      SELECT full_token, lookup_term, 'W', norm_term,
-             json_build_object('count', 0)
+    INSERT INTO word (word_id, word_token, type, word)
+      SELECT full_token, lookup_term, 'W', norm_term
        FROM unnest(lookup_terms) as lookup_term;
   END IF;
 
@@ -150,14 +148,67 @@ BEGIN
 
   partial_tokens := '{}'::INT[];
   FOR term IN SELECT unnest(partial_terms) LOOP
-    SELECT min(word_id), max(info->>'count') INTO term_id, term_count
+    SELECT min(word_id) INTO term_id
      FROM word WHERE word_token = term and type = 'w';
 
    IF term_id IS NULL THEN
      term_id := nextval('seq_word');
-      term_count := 0;
-      INSERT INTO word (word_id, word_token, type, info)
-        VALUES (term_id, term, 'w', json_build_object('count', term_count));
+      INSERT INTO word (word_id, word_token, type)
+        VALUES (term_id, term, 'w');
+    END IF;
+
+    partial_tokens := array_merge(partial_tokens, ARRAY[term_id]);
+  END LOOP;
+END;
+$$
+LANGUAGE plpgsql;
+
+
+CREATE OR REPLACE FUNCTION getorcreate_full_word(norm_term TEXT,
+                                                 lookup_terms TEXT[],
+                                                 lookup_norm_terms TEXT[],
+                                                 OUT full_token INT,
+                                                 OUT partial_tokens INT[])
+  AS $$
+DECLARE
+  partial_terms TEXT[] = '{}'::TEXT[];
+  term TEXT;
+  term_id INTEGER;
+BEGIN
+  SELECT min(word_id) INTO full_token
+    FROM word WHERE word = norm_term and type = 'W';
+
+  IF full_token IS NULL THEN
+    full_token := nextval('seq_word');
+    IF lookup_norm_terms IS NULL THEN
+      INSERT INTO word (word_id, word_token, type, word)
+        SELECT full_token, lookup_term, 'W', norm_term
+          FROM unnest(lookup_terms) as lookup_term;
+    ELSE
+      INSERT INTO word (word_id, word_token, type, word, info)
+        SELECT full_token, t.lookup, 'W', norm_term,
+               CASE WHEN norm_term = t.norm THEN null
+                    ELSE json_build_object('lookup', t.norm) END
+          FROM unnest(lookup_terms, lookup_norm_terms) as t(lookup, norm);
+    END IF;
+  END IF;
+
+  FOR term IN SELECT unnest(string_to_array(unnest(lookup_terms), ' ')) LOOP
+    term := trim(term);
+    IF NOT (ARRAY[term] <@ partial_terms) THEN
+      partial_terms := partial_terms || term;
+    END IF;
+  END LOOP;
+
+  partial_tokens := '{}'::INT[];
+  FOR term IN SELECT unnest(partial_terms) LOOP
+    SELECT min(word_id) INTO term_id
+      FROM word WHERE word_token = term and type = 'w';
+
+    IF term_id IS NULL THEN
+      term_id := nextval('seq_word');
+      INSERT INTO word (word_id, word_token, type)
+        VALUES (term_id, term, 'w');
    END IF;
 
    partial_tokens := array_merge(partial_tokens, ARRAY[term_id]);
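The added getorcreate_full_word() overload threads a normalized form per lookup variant into the word table, storing it under the 'lookup' key of the info column only when it differs from the canonical term. A hedged call sketch; the argument values are invented, and the word table plus the seq_word sequence are assumed to exist from the surrounding Nominatim schema:

    -- Returns the id of the full word and the ids of its partial terms.
    SELECT full_token, partial_tokens
      FROM getorcreate_full_word('main street',
                                 ARRAY['main st', 'main str'],
                                 ARRAY['main street', 'main street']);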
@@ -3,7 +3,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Helper script for development to run nominatim from the source directory.
@@ -15,4 +15,4 @@ sys.path.insert(1, str((Path(__file__) / '..' / 'src').resolve()))
 
 from nominatim_db import cli
 
-exit(cli.nominatim(module_dir=None, osm2pgsql_path=None))
+exit(cli.nominatim())
@@ -19,7 +19,6 @@ dependencies = [
     "python-dotenv",
     "jinja2",
     "pyYAML>=5.1",
-    "datrie",
     "psutil",
     "PyICU"
 ]
@@ -2,4 +2,4 @@
 
 from nominatim_db import cli
 
-exit(cli.nominatim(osm2pgsql_path=None))
+exit(cli.nominatim())
@@ -23,8 +23,8 @@
     "allotments" : 22,
     "neighbourhood" : [20, 22],
     "quarter" : [20, 22],
-    "isolated_dwelling" : [22, 20],
-    "farm" : [22, 20],
+    "isolated_dwelling" : [22, 25],
+    "farm" : [22, 25],
     "city_block" : 25,
     "mountain_pass" : 25,
     "square" : 25,
@@ -216,6 +216,14 @@
     }
   }
 },
+{ "countries" : ["sa"],
+  "tags" : {
+    "place" : {
+      "province" : 12,
+      "municipality" : 18
+    }
+  }
+},
 { "countries" : ["sk"],
   "tags" : {
     "boundary" : {
@@ -1809,7 +1809,8 @@ us:
     languages: en
     names: !include country-names/us.yaml
     postcode:
-      pattern: "ddddd"
+      pattern: "(ddddd)(?:-dddd)?"
+      output: \1
 
 
 # Uruguay (Uruguay)
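In these country settings a d stands for a single digit, so the widened US rule also accepts ZIP+4 postcodes while output: \1 normalizes them back to the five-digit base. A hedged restatement of the same rule as a plain PostgreSQL regular expression (the sample value is invented):

    -- Both '98107' and '98107-4512' normalize to '98107'.
    SELECT regexp_replace('98107-4512', '^(\d{5})(?:-\d{4})?$', '\1');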
@@ -4,7 +4,7 @@
     - aparcament -> aparc
     - apartament -> apmt
     - apartat -> apt
     - àtic -> àt
     - autopista -> auto
     - autopista -> autop
     - autovia -> autov
@@ -19,7 +19,6 @@
     - biblioteca -> bibl
     - bloc -> bl
     - carrer -> c
-    - carrer -> c/
     - carreró -> cró
     - carretera -> ctra
     - cantonada -> cant
@@ -58,7 +57,6 @@
     - número -> n
     - sense número -> s/n
     - parada -> par
-    - parcel·la -> parc
     - passadís -> pdís
     - passatge -> ptge
     - passeig -> pg
@@ -1,438 +1,393 @@
 # Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#English
+# Source: https://pe.usps.com/text/pub28/28apc_002.htm
 - lang: en
   words:
     - Access -> Accs
     - Air Force Base -> AFB
     - Air National Guard Base -> ANGB
     - Airport -> Aprt
-    - Alley -> Al
-    - Alley -> All
-    - Alley -> Ally
-    - Alley -> Aly
+    - Alley -> Al,All,Ally,Aly
     - Alleyway -> Alwy
     - Amble -> Ambl
+    - Anex -> Anx
     - Apartments -> Apts
-    - Approach -> Apch
-    - Approach -> App
+    - Approach -> Apch,App
     - Arcade -> Arc
     - Arterial -> Artl
     - Artery -> Arty
-    - Avenue -> Av
-    - Avenue -> Ave
+    - Avenue -> Av,Ave
     - Back -> Bk
     - Banan -> Ba
-    - Basin -> Basn
-    - Basin -> Bsn
+    - Basin -> Basn,Bsn
+    - Bayou -> Byu
     - Beach -> Bch
-    - Bend -> Bend
     - Bend -> Bnd
     - Block -> Blk
+    - Bluff -> Blf
+    - Bluffs -> Blfs
     - Boardwalk -> Bwlk
-    - Boulevard -> Blvd
-    - Boulevard -> Bvd
+    - Bottom -> Btm
+    - Boulevard -> Blvd,Bvd
     - Boundary -> Bdy
     - Bowl -> Bl
     - Brace -> Br
     - Brae -> Br
-    - Brae -> Brae
+    - Branch -> Br
     - Break -> Brk
-    - Bridge -> Bdge
-    - Bridge -> Br
-    - Bridge -> Brdg
-    - Bridge -> Bri
-    - Broadway -> Bdwy
-    - Broadway -> Bway
-    - Broadway -> Bwy
+    - Bridge$ -> Bdge,Br,Brdg,Brg,Bri
+    - Broadway -> Bdwy,Bway,Bwy
     - Brook -> Brk
+    - Brooks -> Brks
     - Brow -> Brw
-    - Brow -> Brow
-    - Buildings -> Bldgs
-    - Buildings -> Bldngs
+    - Buildings -> Bldgs,Bldngs
     - Business -> Bus
-    - Bypass -> Bps
-    - Bypass -> Byp
-    - Bypass -> Bypa
+    - Burg -> Bg
+    - Burgs -> Bgs
+    - Bypass -> Bps,Byp,Bypa
     - Byway -> Bywy
+    - Camp -> Cp
+    - Canyon -> Cyn
+    - Cape -> Cpe
     - Caravan -> Cvn
-    - Causeway -> Caus
-    - Causeway -> Cswy
-    - Causeway -> Cway
-    - Center -> Cen
-    - Center -> Ctr
+    - Causeway -> Caus,Cswy,Cway
+    - Center,Centre -> Cen,Ctr
+    - Centers -> Ctrs
     - Central -> Ctrl
-    - Centre -> Cen
-    - Centre -> Ctr
     - Centreway -> Cnwy
     - Chase -> Ch
     - Church -> Ch
     - Circle -> Cir
-    - Circuit -> Cct
-    - Circuit -> Ci
-    - Circus -> Crc
-    - Circus -> Crcs
+    - Circles -> Cirs
+    - Circuit -> Cct,Ci
+    - Circus -> Crc,Crcs
     - City -> Cty
+    - Cliff -> Clf
+    - Cliffs -> Clfs
     - Close -> Cl
-    - Common -> Cmn
-    - Common -> Comm
+    - Club -> Clb
+    - Common -> Cmn,Comm
+    - Commons -> Cmns
     - Community -> Comm
     - Concourse -> Cnc
     - Concourse -> Con
     - Copse -> Cps
-    - Corner -> Cnr
-    - Corner -> Crn
+    - Corner -> Cor,Cnr,Crn
+    - Corners -> Cors
     - Corso -> Cso
     - Cottages -> Cotts
     - County -> Co
     - County Road -> CR
     - County Route -> CR
-    - Court -> Crt
-    - Court -> Ct
+    - Course -> Crse
+    - Court -> Crt,Ct
+    - Courts -> Cts
     - Courtyard -> Cyd
     - Courtyard -> Ctyd
-    - Cove -> Ce
-    - Cove -> Cov
-    - Cove -> Cove
-    - Cove -> Cv
-    - Creek -> Ck
-    - Creek -> Cr
-    - Creek -> Crk
+    - Cove$ -> Ce,Cov,Cv
+    - Coves -> Cvs
+    - Creek$ -> Ck,Cr,Crk
     - Crescent -> Cr
     - Crescent -> Cres
-    - Crest -> Crst
-    - Crest -> Cst
+    - Crest -> Crst,Cst
     - Croft -> Cft
-    - Cross -> Cs
-    - Cross -> Crss
-    - Crossing -> Crsg
-    - Crossing -> Csg
-    - Crossing -> Xing
-    - Crossroad -> Crd
+    - Cross -> Cs,Crss
+    - Crossing -> Crsg,Csg,Xing
+    - Crossroad -> Crd,Xrd
+    - Crossroads -> Xrds
     - Crossway -> Cowy
-    - Cul-de-sac -> Cds
-    - Cul-de-sac -> Csac
-    - Curve -> Cve
+    - Cul-de-sac -> Cds,Csac
+    - Curve -> Cve,Curv
     - Cutting -> Cutt
     - Dale -> Dle
-    - Dale -> Dale
+    - Dam -> Dm
     - Deviation -> Devn
-    - Dip -> Dip
     - Distributor -> Dstr
+    - Divide -> Dv
     - Down -> Dn
     - Downs -> Dn
-    - Drive -> Dr
-    - Drive -> Drv
-    - Drive -> Dv
+    - Drive -> Dr,Drv,Dv
+    - Drives -> Drs
     - Drive-In => Drive-In # prevent abbreviation here
-    - Driveway -> Drwy
-    - Driveway -> Dvwy
-    - Driveway -> Dwy
+    - Driveway -> Drwy,Dvwy,Dwy
     - East -> E
     - Edge -> Edg
-    - Edge -> Edge
     - Elbow -> Elb
-    - End -> End
     - Entrance -> Ent
     - Esplanade -> Esp
     - Estate -> Est
-    - Expressway -> Exp
-    - Expressway -> Expy
-    - Expressway -> Expwy
-    - Expressway -> Xway
+    - Estates -> Ests
+    - Expressway -> Exp,Expy,Expwy,Xway
     - Extension -> Ex
-    - Fairway -> Fawy
-    - Fairway -> Fy
+    - Extensions -> Exts
+    - Fairway -> Fawy,Fy
+    - Falls -> Fls
     - Father -> Fr
-    - Ferry -> Fy
-    - Field -> Fd
+    - Ferry -> Fy,Fry
+    - Field -> Fd,Fld
+    - Fields -> Flds
     - Fire Track -> Ftrk
     - Firetrail -> Fit
-    - Flat -> Fl
-    - Flat -> Flat
+    - Flat -> Fl,Flt
+    - Flats -> Flts
     - Follow -> Folw
     - Footway -> Ftwy
+    - Ford -> Frd
+    - Fords -> Frds
     - Foreshore -> Fshr
+    - Forest -> Frst
     - Forest Service Road -> FSR
+    - Forge -> Frg
+    - Forges -> Frgs
     - Formation -> Form
+    - Fork -> Frk
+    - Forks -> Frks
     - Fort -> Ft
-    - Freeway -> Frwy
-    - Freeway -> Fwy
+    - Freeway -> Frwy,Fwy
     - Front -> Frnt
-    - Frontage -> Fr
-    - Frontage -> Frtg
-    - Gap -> Gap
+    - Frontage -> Fr,Frtg
     - Garden -> Gdn
-    - Gardens -> Gdn
-    - Gardens -> Gdns
-    - Gate -> Ga
-    - Gate -> Gte
-    - Gates -> Ga
-    - Gates -> Gte
-    - Gateway -> Gwy
+    - Gardens -> Gdn,Gdns
+    - Gate,Gates -> Ga,Gte
+    - Gateway -> Gwy,Gtwy
     - George -> Geo
-    - Glade -> Gl
-    - Glade -> Gld
-    - Glade -> Glde
+    - Glade$ -> Gl,Gld,Glde
     - Glen -> Gln
-    - Glen -> Glen
+    - Glens -> Glns
     - Grange -> Gra
-    - Green -> Gn
-    - Green -> Grn
+    - Green -> Gn,Grn
+    - Greens -> Grns
     - Ground -> Grnd
-    - Grove -> Gr
-    - Grove -> Gro
+    - Grove$ -> Gr,Gro,Grv
+    - Groves -> Grvs
     - Grovet -> Gr
     - Gully -> Gly
-    - Harbor -> Hbr
-    - Harbour -> Hbr
+    - Harbor -> Hbr,Harbour
+    - Harbors -> Hbrs
+    - Harbour -> Hbr,Harbor
     - Haven -> Hvn
     - Head -> Hd
     - Heads -> Hd
-    - Heights -> Hgts
-    - Heights -> Ht
-    - Heights -> Hts
+    - Heights -> Hgts,Ht,Hts
     - High School -> HS
-    - Highroad -> Hird
-    - Highroad -> Hrd
+    - Highroad -> Hird,Hrd
     - Highway -> Hwy
-    - Hill -> Hill
     - Hill -> Hl
-    - Hills -> Hl
-    - Hills -> Hls
+    - Hills -> Hl,Hls
+    - Hollow -> Holw
     - Hospital -> Hosp
-    - House -> Ho
-    - House -> Hse
+    - House -> Ho,Hse
     - Industrial -> Ind
+    - Inlet -> Inlt
     - Interchange -> Intg
     - International -> Intl
-    - Island -> I
-    - Island -> Is
-    - Junction -> Jctn
-    - Junction -> Jnc
+    - Island -> I,Is
+    - Islands -> Iss
+    - Junction -> Jct,Jctn,Jnc
+    - Junctions -> Jcts
     - Junior -> Jr
-    - Key -> Key
+    - Key -> Ky
+    - Keys -> Kys
+    - Knoll -> Knl
+    - Knolls -> Knls
     - Lagoon -> Lgn
-    - Lakes -> L
-    - Landing -> Ldg
-    - Lane -> La
-    - Lane -> Lane
-    - Lane -> Ln
+    - Lake -> Lk
+    - Lakes -> L,Lks
+    - Landing -> Ldg,Lndg
+    - Lane -> La,Ln
     - Laneway -> Lnwy
-    - Line -> Line
+    - Light -> Lgt
+    - Lights -> Lgts
     - Line -> Ln
-    - Link -> Link
     - Link -> Lk
-    - Little -> Lit
-    - Little -> Lt
+    - Little -> Lit,Lt
+    - Loaf -> Lf
+    - Lock -> Lck
+    - Locks -> Lcks
     - Lodge -> Ldg
     - Lookout -> Lkt
-    - Loop -> Loop
     - Loop -> Lp
-    - Lower -> Low
-    - Lower -> Lr
-    - Lower -> Lwr
-    - Mall -> Mall
+    - Lower -> Low,Lr,Lwr
     - Mall -> Ml
     - Manor -> Mnr
+    - Manors -> Mnrs
     - Mansions -> Mans
     - Market -> Mkt
     - Meadow -> Mdw
-    - Meadows -> Mdw
-    - Meadows -> Mdws
+    - Meadows -> Mdw,Mdws
     - Mead -> Md
-    - Meander -> Mdr
-    - Meander -> Mndr
-    - Meander -> Mr
+    - Meander -> Mdr,Mndr,Mr
     - Medical -> Med
     - Memorial -> Mem
-    - Mews -> Mews
     - Mews -> Mw
     - Middle -> Mid
     - Middle School -> MS
     - Mile -> Mi
     - Military -> Mil
-    - Motorway -> Mtwy
-    - Motorway -> Mwy
+    - Mill -> Ml
+    - Mills -> Mls
+    - Mission -> Msn
+    - Motorway -> Mtwy,Mwy
     - Mount -> Mt
     - Mountain -> Mtn
-    - Mountains -> Mtn
+    - Mountains$ -> Mtn,Mtns
     - Municipal -> Mun
     - Museum -> Mus
     - National Park -> NP
     - National Recreation Area -> NRA
     - National Wildlife Refuge Area -> NWRA
+    - Neck -> Nck
     - Nook -> Nk
-    - Nook -> Nook
     - North -> N
     - Northeast -> NE
     - Northwest -> NW
-    - Outlook -> Out
-    - Outlook -> Otlk
+    - Orchard -> Orch
+    - Outlook -> Out,Otlk
+    - Overpass -> Opas
     - Parade -> Pde
     - Paradise -> Pdse
-    - Park -> Park
     - Park -> Pk
     - Parklands -> Pkld
-    - Parkway -> Pkwy
-    - Parkway -> Pky
-    - Parkway -> Pwy
-    - Pass -> Pass
+    - Parkway -> Pkwy,Pky,Pwy
+    - Parkways -> Pkwy
     - Pass -> Ps
     - Passage -> Psge
-    - Path -> Path
-    - Pathway -> Phwy
-    - Pathway -> Pway
-    - Pathway -> Pwy
+    - Pathway -> Phwy,Pway,Pwy
     - Piazza -> Piaz
     - Pike -> Pk
+    - Pine -> Pne
+    - Pines -> Pnes
     - Place -> Pl
-    - Plain -> Pl
-    - Plains -> Pl
+    - Plain -> Pl,Pln
+    - Plains -> Pl,Plns
     - Plateau -> Plat
-    - Plaza -> Pl
-    - Plaza -> Plz
-    - Plaza -> Plza
+    - Plaza -> Pl,Plz,Plza
     - Pocket -> Pkt
-    - Point -> Pnt
-    - Point -> Pt
-    - Port -> Port
-    - Port -> Pt
+    - Point -> Pnt,Pt
+    - Points -> Pts
+    - Port -> Prt,Pt
+    - Ports -> Prts
     - Post Office -> PO
+    - Prairie -> Pr
     - Precinct -> Pct
-    - Promenade -> Prm
-    - Promenade -> Prom
-    - Quad -> Quad
+    - Promenade -> Prm,Prom
     - Quadrangle -> Qdgl
-    - Quadrant -> Qdrt
-    - Quadrant -> Qd
+    - Quadrant -> Qdrt,Qd
     - Quay -> Qy
     - Quays -> Qy
     - Quays -> Qys
+    - Radial -> Radl
     - Ramble -> Ra
     - Ramble -> Rmbl
-    - Range -> Rge
-    - Range -> Rnge
+    - Ranch -> Rnch
+    - Range -> Rge,Rnge
+    - Rapid -> Rpd
+    - Rapids -> Rpds
     - Reach -> Rch
     - Reservation -> Res
     - Reserve -> Res
     - Reservoir -> Res
-    - Rest -> Rest
     - Rest -> Rst
-    - Retreat -> Rt
-    - Retreat -> Rtt
+    - Retreat -> Rt,Rtt
     - Return -> Rtn
-    - Ridge -> Rdg
-    - Ridge -> Rdge
+    - Ridge -> Rdg,Rdge
+    - Ridges -> Rdgs
     - Ridgeway -> Rgwy
     - Right of Way -> Rowy
     - Rise -> Ri
-    - Rise -> Rise
-    - River -> R
-    - River -> Riv
-    - River -> Rvr
+    - ^River -> R,Riv,Rvr
+    - River$ -> R,Riv,Rvr
     - Riverway -> Rvwy
     - Riviera -> Rvra
     - Road -> Rd
     - Roads -> Rds
     - Roadside -> Rdsd
-    - Roadway -> Rdwy
-    - Roadway -> Rdy
-    - Robert -> Robt
+    - Roadway -> Rdwy,Rdy
     - Rocks -> Rks
     - Ronde -> Rnde
     - Rosebowl -> Rsbl
     - Rotary -> Rty
     - Round -> Rnd
-    - Route -> Rt
-    - Route -> Rte
-    - Row -> Row
-    - Rue -> Rue
-    - Run -> Run
+    - Route -> Rt,Rte
     - Saint -> St
     - Saints -> SS
     - Senior -> Sr
-    - Serviceway -> Swy
-    - Serviceway -> Svwy
+    - Serviceway -> Swy,Svwy
+    - Shoal -> Shl
+    - Shore -> Shr
+    - Shores -> Shrs
     - Shunt -> Shun
     - Siding -> Sdng
     - Sister -> Sr
+    - Skyway -> Skwy
     - Slope -> Slpe
     - Sound -> Snd
-    - South -> S
-    - South -> Sth
+    - South -> S,Sth
     - Southeast -> SE
     - Southwest -> SW
-    - Spur -> Spur
+    - Spring -> Spg
+    - Springs -> Spgs
+    - Spurs -> Spur
     - Square -> Sq
+    - Squares -> Sqs
     - Stairway -> Strwy
-    - State Highway -> SH
-    - State Highway -> SHwy
+    - State Highway -> SH,SHwy
     - State Route -> SR
-    - Station -> Sta
-    - Station -> Stn
-    - Strand -> Sd
-    - Strand -> Stra
+    - Station -> Sta,Stn
+    - Strand -> Sd,Stra
+    - Stravenue -> Stra
+    - Stream -> Strm
     - Street -> St
+    - Streets -> Sts
     - Strip -> Strp
     - Subway -> Sbwy
+    - Summit -> Smt
     - Tarn -> Tn
-    - Tarn -> Tarn
     - Terminal -> Term
-    - Terrace -> Tce
-    - Terrace -> Ter
-    - Terrace -> Terr
-    - Thoroughfare -> Thfr
-    - Thoroughfare -> Thor
-    - Tollway -> Tlwy
-    - Tollway -> Twy
-    - Top -> Top
-    - Tor -> Tor
+    - Terrace -> Tce,Ter,Terr
+    - Thoroughfare -> Thfr,Thor
+    - Throughway -> Trwy
+    - Tollway -> Tlwy,Twy
     - Towers -> Twrs
     - Township -> Twp
     - Trace -> Trce
-    - Track -> Tr
-    - Track -> Trk
+    - Track -> Tr,Trak,Trk
+    - Trafficway -> Trfy
     - Trail -> Trl
     - Trailer -> Trlr
     - Triangle -> Tri
     - Trunkway -> Tkwy
-    - Tunnel -> Tun
-    - Turn -> Tn
-    - Turn -> Trn
-    - Turn -> Turn
-    - Turnpike -> Tpk
-    - Turnpike -> Tpke
-    - Underpass -> Upas
-    - Underpass -> Ups
-    - University -> Uni
-    - University -> Univ
+    - Tunnel -> Tun,Tunl
+    - Turn -> Tn,Trn
+    - Turnpike -> Tpk,Tpke
+    - Underpass -> Upas,Ups
+    - Union -> Un
+    - Unions -> Uns
+    - University -> Uni,Univ
     - Upper -> Up
     - Upper -> Upr
     - Vale -> Va
-    - Vale -> Vale
+    - Valley -> Vly
     - Valley -> Vy
-    - Viaduct -> Vdct
-    - Viaduct -> Via
-    - Viaduct -> Viad
+    - Valleys -> Vlys
+    - Viaduct$ -> Vdct,Via,Viad
     - View -> Vw
-    - View -> View
-    - Village -> Vill
+    - Views -> Vws
+    - Village -> Vill,Vlg
+    - Villages -> Vlgs
     - Villas -> Vlls
-    - Vista -> Vst
-    - Vista -> Vsta
-    - Walk -> Walk
-    - Walk -> Wk
-    - Walk -> Wlk
-    - Walkway -> Wkwy
-    - Walkway -> Wky
+    - Ville -> Vl
+    - Vista -> Vis,Vst,Vsta
+    - Walk -> Wk,Wlk
+    - Walks -> Walk
+    - Walkway -> Wkwy,Wky
     - Waters -> Wtr
-    - Way -> Way
     - Way -> Wy
+    - Well -> Wl
+    - Wells -> Wls
     - West -> W
     - Wharf -> Whrf
     - William -> Wm
     - Wynd -> Wyn
-    - Wynd -> Wynd
-    - Yard -> Yard
     - Yard -> Yd
 - lang: en
   country: ca
@@ -30,7 +30,6 @@
     - Bloque -> Blq
     - Bulevar -> Blvr
     - Boulevard -> Blvd
-    - Calle -> C/
     - Calle -> C
     - Calle -> Cl
     - Calleja -> Cllja
@@ -3,20 +3,16 @@
   words:
     - Abbaye -> ABE
     - Agglomération -> AGL
-    - Aire -> AIRE
     - Aires -> AIRE
     - Allée -> ALL
-    - Allée -> All
     - Allées -> ALL
     - Ancien chemin -> ACH
     - Ancienne route -> ART
     - Anciennes routes -> ART
-    - Anse -> ANSE
     - Arcade -> ARC
     - Arcades -> ARC
     - Autoroute -> AUT
     - Avenue -> AV
-    - Avenue -> Av
     - Barrière -> BRE
     - Barrières -> BRE
     - Bas chemin -> BCH
@@ -28,16 +24,11 @@
     - Berges -> BER
     - Bois -> BOIS
     - Boucle -> BCLE
-    - Boulevard -> Bd
     - Boulevard -> BD
     - Bourg -> BRG
     - Butte -> BUT
-    - Cité -> CITE
     - Cités -> CITE
-    - Côte -> COTE
     - Côteau -> COTE
-    - Cale -> CALE
-    - Camp -> CAMP
     - Campagne -> CGNE
     - Camping -> CPG
     - Carreau -> CAU
@@ -56,17 +47,13 @@
     - Chaussées -> CHS
     - Chemin -> Ch
     - Chemin -> CHE
-    - Chemin -> Che
     - Chemin vicinal -> CHV
     - Cheminement -> CHEM
     - Cheminements -> CHEM
     - Chemins -> CHE
     - Chemins vicinaux -> CHV
-    - Chez -> CHEZ
     - Château -> CHT
     - Cloître -> CLOI
-    - Clos -> CLOS
-    - Col -> COL
     - Colline -> COLI
     - Collines -> COLI
     - Contour -> CTR
@@ -74,9 +61,7 @@
     - Corniches -> COR
     - Cottage -> COTT
     - Cottages -> COTT
-    - Cour -> COUR
     - Cours -> CRS
-    - Cours -> Crs
     - Darse -> DARS
     - Degré -> DEG
     - Degrés -> DEG
@@ -87,11 +72,8 @@
     - Domaine -> DOM
     - Domaines -> DOM
     - Écluse -> ECL
-    - Écluse -> ÉCL
     - Écluses -> ECL
-    - Écluses -> ÉCL
     - Église -> EGL
-    - Église -> ÉGL
     - Enceinte -> EN
     - Enclave -> ENV
     - Enclos -> ENC
@@ -100,21 +82,16 @@
     - Espace -> ESPA
     - Esplanade -> ESP
     - Esplanades -> ESP
-    - Étang -> ETANG
-    - Étang -> ÉTANG
     - Faubourg -> FG
-    - Faubourg -> Fg
     - Ferme -> FRM
     - Fermes -> FRM
     - Fontaine -> FON
-    - Fort -> FORT
     - Forum -> FORM
     - Fosse -> FOS
     - Fosses -> FOS
     - Foyer -> FOYR
     - Galerie -> GAL
     - Galeries -> GAL
-    - Gare -> GARE
     - Garenne -> GARN
     - Grand boulevard -> GBD
     - Grand ensemble -> GDEN
@@ -134,13 +111,9 @@
     - Haut chemin -> HCH
     - Hauts chemins -> HCH
     - Hippodrome -> HIP
-    - HLM -> HLM
-    - Île -> ILE
-    - Île -> ÎLE
     - Immeuble -> IMM
     - Immeubles -> IMM
     - Impasse -> IMP
-    - Impasse -> Imp
     - Impasses -> IMP
     - Jardin -> JARD
     - Jardins -> JARD
@@ -150,13 +123,11 @@
     - Lieu-dit -> LD
     - Lotissement -> LOT
     - Lotissements -> LOT
-    - Mail -> MAIL
     - Maison forestière -> MF
     - Manoir -> MAN
     - Marche -> MAR
     - Marches -> MAR
     - Maréchal -> MAL
-    - Mas -> MAS
     - Monseigneur -> Mgr
     - Mont -> Mt
     - Montée -> MTE
@@ -168,13 +139,9 @@
     - Métro -> MÉT
     - Nouvelle route -> NTE
     - Palais -> PAL
-    - Parc -> PARC
-    - Parcs -> PARC
     - Parking -> PKG
     - Parvis -> PRV
     - Passage -> PAS
-    - Passage -> Pas
-    - Passage -> Pass
     - Passage à niveau -> PN
     - Passe -> PASS
     - Passerelle -> PLE
@@ -191,19 +158,14 @@
     - Petite rue -> PTR
     - Petites allées -> PTA
     - Place -> PL
-    - Place -> Pl
     - Placis -> PLCI
     - Plage -> PLAG
     - Plages -> PLAG
     - Plaine -> PLN
-    - Plan -> PLAN
     - Plateau -> PLT
     - Plateaux -> PLT
     - Pointe -> PNT
-    - Pont -> PONT
-    - Ponts -> PONT
     - Porche -> PCH
-    - Port -> PORT
     - Porte -> PTE
     - Portique -> PORQ
     - Portiques -> PORQ
@@ -211,25 +173,19 @@
     - Pourtour -> POUR
     - Presqu’île -> PRQ
     - Promenade -> PROM
-    - Promenade -> Prom
-    - Pré -> PRE
-    - Pré -> PRÉ
     - Périphérique -> PERI
|
- Périphérique -> PERI
|
||||||
- Péristyle -> PSTY
|
- Péristyle -> PSTY
|
||||||
- Quai -> QU
|
- Quai -> QU
|
||||||
- Quai -> Qu
|
|
||||||
- Quartier -> QUA
|
- Quartier -> QUA
|
||||||
- Raccourci -> RAC
|
- Raccourci -> RAC
|
||||||
- Raidillon -> RAID
|
- Raidillon -> RAID
|
||||||
- Rampe -> RPE
|
- Rampe -> RPE
|
||||||
- Rempart -> REM
|
- Rempart -> REM
|
||||||
- Roc -> ROC
|
|
||||||
- Rocade -> ROC
|
- Rocade -> ROC
|
||||||
- Rond point -> RPT
|
- Rond point -> RPT
|
||||||
- Roquet -> ROQT
|
- Roquet -> ROQT
|
||||||
- Rotonde -> RTD
|
- Rotonde -> RTD
|
||||||
- Route -> RTE
|
- Route -> RTE
|
||||||
- Route -> Rte
|
|
||||||
- Routes -> RTE
|
- Routes -> RTE
|
||||||
- Rue -> R
|
- Rue -> R
|
||||||
- Rue -> R
|
- Rue -> R
|
||||||
@@ -245,7 +201,6 @@
|
|||||||
- Sentier -> SEN
|
- Sentier -> SEN
|
||||||
- Sentiers -> SEN
|
- Sentiers -> SEN
|
||||||
- Square -> SQ
|
- Square -> SQ
|
||||||
- Square -> Sq
|
|
||||||
- Stade -> STDE
|
- Stade -> STDE
|
||||||
- Station -> STA
|
- Station -> STA
|
||||||
- Terrain -> TRN
|
- Terrain -> TRN
|
||||||
@@ -254,13 +209,11 @@
|
|||||||
- Terre plein -> TPL
|
- Terre plein -> TPL
|
||||||
- Tertre -> TRT
|
- Tertre -> TRT
|
||||||
- Tertres -> TRT
|
- Tertres -> TRT
|
||||||
- Tour -> TOUR
|
|
||||||
- Traverse -> TRA
|
- Traverse -> TRA
|
||||||
- Vallon -> VAL
|
- Vallon -> VAL
|
||||||
- Vallée -> VAL
|
- Vallée -> VAL
|
||||||
- Venelle -> VEN
|
- Venelle -> VEN
|
||||||
- Venelles -> VEN
|
- Venelles -> VEN
|
||||||
- Via -> VIA
|
|
||||||
- Vieille route -> VTE
|
- Vieille route -> VTE
|
||||||
- Vieux chemin -> VCHE
|
- Vieux chemin -> VCHE
|
||||||
- Villa -> VLA
|
- Villa -> VLA
|
||||||
@@ -269,7 +222,6 @@
|
|||||||
- Villas -> VLA
|
- Villas -> VLA
|
||||||
- Voie -> VOI
|
- Voie -> VOI
|
||||||
- Voies -> VOI
|
- Voies -> VOI
|
||||||
- Zone -> ZONE
|
|
||||||
- Zone artisanale -> ZA
|
- Zone artisanale -> ZA
|
||||||
- Zone d'aménagement concerté -> ZAC
|
- Zone d'aménagement concerté -> ZAC
|
||||||
- Zone d'aménagement différé -> ZAD
|
- Zone d'aménagement différé -> ZAD
|
||||||
@@ -289,7 +241,6 @@
|
|||||||
- Esplanade -> ESPL
|
- Esplanade -> ESPL
|
||||||
- Passage -> PASS
|
- Passage -> PASS
|
||||||
- Plateau -> PLAT
|
- Plateau -> PLAT
|
||||||
- Rang -> RANG
|
|
||||||
- Rond-point -> RDPT
|
- Rond-point -> RDPT
|
||||||
- Sentier -> SENT
|
- Sentier -> SENT
|
||||||
- Subdivision -> SUBDIV
|
- Subdivision -> SUBDIV
|
||||||
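The rules above follow the ICU tokenizer's variant syntax: the full form on the left of `->` gains the terms on the right as additional search variants, and comma-separated lists on either side expand pairwise. A minimal sketch of that expansion (illustrative only, not the actual Nominatim parser; the second input is hypothetical):

```python
from itertools import product

def expand(rule: str) -> list[tuple[str, str]]:
    """Expand 'full1, full2 -> abbr1, abbr2' into (full form, variant) pairs."""
    left, right = rule.split('->')
    fulls = [w.strip() for w in left.split(',')]
    abbrs = [w.strip() for w in right.split(',')]
    return list(product(fulls, abbrs))

print(expand('Avenue -> AV'))
# [('Avenue', 'AV')]
print(expand('Rue, Ruelle -> R, RLE'))  # hypothetical rule, for illustration
# [('Rue', 'R'), ('Rue', 'RLE'), ('Ruelle', 'R'), ('Ruelle', 'RLE')]
```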
@@ -29,7 +29,6 @@
     - Prima -> I
     - Primo -> I
     - Primo -> 1
-    - Primo -> 1°
     - Quarta -> IV
     - Quarto -> IV
     - Quattro -> IV
@@ -1,11 +1,10 @@
 # Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#Norsk_-_Norwegian
-- lang: no
+- lang: "no"
   words:
     # convert between Nynorsk and Bookmal here
-    - vei, veg => v,vn,vei,veg
-    - veien, vegen -> v,vn,veien,vegen
-    - gate -> g,gt
+    - ~vei, ~veg -> v,vei,veg
+    - ~veien, ~vegen -> vn,veien,vegen
     # convert between the two female forms
-    - gaten, gata => g,gt,gaten,gata
+    - gate, gaten, gata -> g,gt
     - plass, plassen -> pl
     - sving, svingen -> sv
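Quoting `no` here is not cosmetic: YAML 1.1 parsers read the bare scalar `no` as boolean false, which would break the language lookup. A minimal check with PyYAML, which implements the YAML 1.1 boolean forms:

```python
import yaml

print(yaml.safe_load('lang: no'))    # {'lang': False} - bare `no` parses as a boolean
print(yaml.safe_load('lang: "no"'))  # {'lang': 'no'}  - the quoted form stays a string
```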
@@ -1,14 +1,128 @@
 # Source: https://wiki.openstreetmap.org/wiki/Name_finder:Abbreviations#.D0.A0.D1.83.D1.81.D1.81.D0.BA.D0.B8.D0.B9_-_Russian
+# Source: https://www.plantarium.ru/page/help/topic/abbreviations.html
+# Source: https://dic.academic.ru/dic.nsf/ruwiki/1871310
 - lang: ru
   words:
+    - Академик, Академика -> Ак
+    - акционерное общество -> АО
     - аллея -> ал
+    - архипелаг -> арх
+    - атомная электростанция -> АЭС
+    - аэродром -> аэрд
+    - аэропорт -> аэрп
+    - Башкирский, Башкирская, Башкирское, Башкирские -> Баш, Башк, Башкир
+    - Белый, Белая, Белое. Белые -> Бел
+    - болото -> бол
+    - больница -> больн
+    - Большой, Большая, Большое, Большие -> Б, Бол
+    - брод -> бр
     - бульвар -> бул
+    - бухта -> бух
+    - бывший, бывшая, бывшее, бывшие -> бывш
+    - Великий, Великая, Великое, Великие -> Вел
+    - Верхний, Верхняя, Верхнее, Верхние -> В, Верх
+    - водокачка -> вдкч
+    - водопад -> вдп
+    - водохранилище -> вдхр
+    - вокзал -> вкз, вокз
+    - Восточный, Восточная, Восточное, Восточные -> В, Вост
+    - вулкан -> влк
+    - гидроэлектростанция -> ГЭС
+    - гора -> г
+    - город -> г
+    - дворец культуры, дом культуры -> ДК
+    - дворец спорта -> ДС
+    - деревня -> д, дер
+    - детский оздоровительный лагерь -> ДОЛ
+    - дом -> д
+    - дом отдыха -> Д О
+    - железная дорога -> ж д
+    - железнодорожный, железнодорожная, железнодорожное -> ж-д
+    - железобетонных изделий -> ЖБИ
+    - жилой комплекс -> ЖК
+    - завод -> з-д
+    - закрытое административно-территориальное образование -> ЗАТО
+    - залив -> зал
+    - Западный, Западная, Западное, Западные -> З, Зап, Запад
+    - заповедник -> запов
+    - имени -> им
+    - институт -> инст
+    - исправительная колония -> ИК
+    - километр -> км
+    - Красный, Красная, Красное, Красные -> Кр, Крас
+    - лагерь -> лаг
+    - Левый, Левая,Левое, Левые -> Л, Лев
+    - ледник -> ледн
+    - лесничество -> леснич
+    - лесной, лесная, лесное -> лес
+    - линия электропередачи -> ЛЭП
+    - Малый, Малая, Малое, Малые -> М, Мал
+    - Мордовский, Мордовская, Мордовское, Мордовские -> Мордов
+    - морской, морская, морское -> мор
+    - Московский, Московская, Московское, Московские -> Мос, Моск
+    - мыс -> м
     - набережная -> наб
+    - Нижний, Нижняя, Нижнее, Нижние -> Ниж, Н
+    - Новый, Новая, Новое, Новые -> Нов, Н
+    - обгонный пункт -> обг п
+    - область -> обл
+    - озеро -> оз
+    - особо охраняемая природная территория -> ООПТ
+    - остановочный пункт -> о п
+    - остров -> о
+    - острова -> о-ва
+    - парк культуры и отдыха -> ПКиО
+    - перевал -> пер
     - переулок -> пер
+    - пещера -> пещ
+    - пионерский лагерь -> пионерлаг
+    - платформа -> пл, платф
     - площадь -> пл
+    - подсобное хозяйство -> подсоб хоз
+    - полуостров -> п-ов
+    - посёлок -> пос, п
+    - посёлок городского типа -> п г т, пгт
+    - Правый, Правая, Правое, Правые -> П, Пр, Прав
     - проезд -> пр
     - проспект -> просп
-    - шоссе -> ш
+    - пруд -> пр
+    - пустыня -> пуст
+    - разъезд -> рзд
+    - район -> р-н
+    - резинотехнических изделий -> РТИ
+    - река -> р
+    - речной, речная, речное -> реч, речн
+    - Российский, Российская, Российское, Российские -> Рос
+    - Русский, Русская, Русское, Русские -> Рус, Русск
+    - ручей -> руч
+    - садовое некоммерческое товарищество -> СНТ
+    - садовые участки -> сад уч
+    - санаторий -> сан
+    - сарай -> сар
+    - Северный, Северная, Северное, Северные -> С, Сев
+    - село -> с
+    - Сибирский, Сибирская, Сибирское, Сибирские -> Сиб
+    - Советский, Советская, Советское, Советские -> Сов
+    - совхоз -> свх
+    - Сортировочный, Сортировочная, Сортировочное, Сортировочные -> Сорт
+    - станция -> ст
+    - Старый, Старая, Среднее, Средние -> Ср
+    - Татарский, Татарская, Татарское, Татарские -> Тат, Татар
+    - теплоэлекстростанция -> ТЭС
+    - теплоэлектроцентраль -> ТЭЦ
+    - техникум -> техн
+    - тоннель, туннель -> тун
     - тупик -> туп
     - улица -> ул
-    - область -> обл
+    - Уральский, Уральская, Уральское, Уральские -> Ур, Урал
+    - урочище -> ур
+    - хозяйство -> хоз, хоз-во
+    - хребет -> хр
+    - хутор -> хут
+    - Чёрный, Чёрная, Чёрное, Чёрные -> Черн
+    - Чувашский, Чувашская, Чувашское, Чувашские -> Чуваш
+    - шахта -> шах
+    - школа -> шк
+    - шоссе -> ш
+    - элеватор -> элев
+    - Южный, Южная, Южное, Южные -> Ю, Юж, Южн
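Note that several of the new Russian entries map different full forms onto the same abbreviation (both проезд and пруд abbreviate to пр, both переулок and перевал to пер), so any reverse lookup from abbreviation to full form is inherently ambiguous. A small sketch of that ambiguity with illustrative data:

```python
from collections import defaultdict

rules = {
    'проезд': ['пр'],
    'пруд': ['пр'],
    'переулок': ['пер'],
    'перевал': ['пер'],
}

# Build the reverse index: one abbreviation may stand for several full forms.
reverse = defaultdict(set)
for full, abbrs in rules.items():
    for abbr in abbrs:
        reverse[abbr].add(full)

print(sorted(reverse['пр']))   # ['проезд', 'пруд']
print(sorted(reverse['пер']))  # ['перевал', 'переулок']
```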
@@ -1,4 +1,5 @@
 query-preprocessing:
+    - step: split_japanese_phrases
     - step: normalize
 normalization:
     - ":: lower ()"
@@ -9,16 +10,17 @@ normalization:
     - "'nº' > 'no'"
     - "ª > a"
     - "º > o"
-    - "[[:Punctuation:][:Symbol:]\u02bc] > ' '"
+    - "[[:Punctuation:][:Symbol:][\u02bc] - [-:]]+ > '-'"
     - "ß > 'ss'" # German szet is unambiguously equal to double ss
-    - "[^[:alnum:] [:Canonical_Combining_Class=Virama:] [:Space:]] >"
+    - "[^[:alnum:] [:Canonical_Combining_Class=Virama:] [:Space:] [-:]] >"
     - "[:Lm:] >"
     - ":: [[:Number:]] Latin ()"
     - ":: [[:Number:]] Ascii ();"
     - ":: [[:Number:]] NFD ();"
     - "[[:Nonspacing Mark:] [:Cf:]] >;"
-    - "[:Space:]+ > ' '"
+    - "[-:]?[:Space:]+[-:]? > ' '"
 transliteration:
+    - "[-:] > ' '"
     - ":: Latin ()"
     - !include icu-rules/extended-unicode-to-asccii.yaml
     - ":: Ascii ()"
@@ -44,7 +46,7 @@ sanitizers:
     - step: strip-brace-terms
     - step: tag-analyzer-by-language
       filter-kind: [".*name.*"]
-      whitelist: [bg,ca,cs,da,de,el,en,es,et,eu,fi,fr,gl,hu,it,ja,mg,ms,nl,no,pl,pt,ro,ru,sk,sl,sv,tr,uk,vi]
+      whitelist: [bg,ca,cs,da,de,el,en,es,et,eu,fi,fr,gl,hu,it,ja,mg,ms,nl,"no",pl,pt,ro,ru,sk,sl,sv,tr,uk,vi]
       use-defaults: all
       mode: append
     - step: tag-japanese
@@ -156,7 +158,7 @@ token-analysis:
       mode: variant-only
       variants:
           - !include icu-rules/variants-nl.yaml
-    - id: no
+    - id: "no"
      analyzer: generic
      mode: variant-only
      variants:
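The normalization change above stops flattening punctuation to plain spaces: runs of punctuation and symbols (other than `-` and `:`) now become a single `-`, while `-` and `:` survive normalization as soft phrase breaks and are only turned into spaces by the new transliteration rule. A rough plain-`re` approximation of the intent (an assumption for illustration, not the ICU engine itself):

```python
import re

def normalize_breaks(text: str) -> str:
    """Approximate the new rules: punctuation runs become '-',
    and whitespace swallows any adjacent '-' or ':'."""
    text = text.lower()
    text = re.sub(r"[^\w\s:-]+", "-", text)     # punctuation/symbol runs -> '-'
    text = re.sub(r"[-:]?\s+[-:]?", " ", text)  # space eats neighbouring -/:
    return text.strip("-: ")

print(normalize_breaks("Baker St., London"))  # "baker st london"
print(normalize_breaks("Karl-Marx-Str."))     # "karl-marx-str" (soft breaks kept)
```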
@@ -26,7 +26,7 @@ from .connection import SearchConnection
 from .status import get_status, StatusResult
 from .lookup import get_places, get_detailed_place
 from .reverse import ReverseGeocoder
-from .search import ForwardGeocoder, Phrase, PhraseType, make_query_analyzer
+from . import search as nsearch
 from . import types as ntyp
 from .results import DetailedResult, ReverseResult, SearchResults

@@ -207,7 +207,7 @@ class NominatimAPIAsync:
         async with self.begin() as conn:
             conn.set_query_timeout(self.query_timeout)
             if details.keywords:
-                await make_query_analyzer(conn)
+                await nsearch.make_query_analyzer(conn)
             return await get_detailed_place(conn, place, details)

     async def lookup(self, places: Sequence[ntyp.PlaceRef], **params: Any) -> SearchResults:
@@ -219,7 +219,7 @@ class NominatimAPIAsync:
         async with self.begin() as conn:
             conn.set_query_timeout(self.query_timeout)
             if details.keywords:
-                await make_query_analyzer(conn)
+                await nsearch.make_query_analyzer(conn)
             return await get_places(conn, places, details)

     async def reverse(self, coord: ntyp.AnyPoint, **params: Any) -> Optional[ReverseResult]:
@@ -237,7 +237,7 @@ class NominatimAPIAsync:
         async with self.begin() as conn:
             conn.set_query_timeout(self.query_timeout)
             if details.keywords:
-                await make_query_analyzer(conn)
+                await nsearch.make_query_analyzer(conn)
             geocoder = ReverseGeocoder(conn, details,
                                        self.reverse_restrict_to_country_area)
             return await geocoder.lookup(coord)
@@ -251,10 +251,10 @@ class NominatimAPIAsync:

         async with self.begin() as conn:
             conn.set_query_timeout(self.query_timeout)
-            geocoder = ForwardGeocoder(conn, ntyp.SearchDetails.from_kwargs(params),
+            geocoder = nsearch.ForwardGeocoder(conn, ntyp.SearchDetails.from_kwargs(params),
                                        self.config.get_int('REQUEST_TIMEOUT')
                                        if self.config.REQUEST_TIMEOUT else None)
-            phrases = [Phrase(PhraseType.NONE, p.strip()) for p in query.split(',')]
+            phrases = [nsearch.Phrase(nsearch.PHRASE_ANY, p.strip()) for p in query.split(',')]
             return await geocoder.lookup(phrases)

     async def search_address(self, amenity: Optional[str] = None,
@@ -271,22 +271,22 @@
             conn.set_query_timeout(self.query_timeout)
             details = ntyp.SearchDetails.from_kwargs(params)

-            phrases: List[Phrase] = []
+            phrases: List[nsearch.Phrase] = []

             if amenity:
-                phrases.append(Phrase(PhraseType.AMENITY, amenity))
+                phrases.append(nsearch.Phrase(nsearch.PHRASE_AMENITY, amenity))
             if street:
-                phrases.append(Phrase(PhraseType.STREET, street))
+                phrases.append(nsearch.Phrase(nsearch.PHRASE_STREET, street))
             if city:
-                phrases.append(Phrase(PhraseType.CITY, city))
+                phrases.append(nsearch.Phrase(nsearch.PHRASE_CITY, city))
             if county:
-                phrases.append(Phrase(PhraseType.COUNTY, county))
+                phrases.append(nsearch.Phrase(nsearch.PHRASE_COUNTY, county))
             if state:
-                phrases.append(Phrase(PhraseType.STATE, state))
+                phrases.append(nsearch.Phrase(nsearch.PHRASE_STATE, state))
             if postalcode:
-                phrases.append(Phrase(PhraseType.POSTCODE, postalcode))
+                phrases.append(nsearch.Phrase(nsearch.PHRASE_POSTCODE, postalcode))
             if country:
-                phrases.append(Phrase(PhraseType.COUNTRY, country))
+                phrases.append(nsearch.Phrase(nsearch.PHRASE_COUNTRY, country))

             if not phrases:
                 raise UsageError('Nothing to search for.')
@@ -304,14 +304,14 @@ class NominatimAPIAsync:
             else:
                 details.restrict_min_max_rank(4, 4)

-            if 'layers' not in params:
+            if details.layers is None:
                 details.layers = ntyp.DataLayer.ADDRESS
                 if amenity:
                     details.layers |= ntyp.DataLayer.POI

-            geocoder = ForwardGeocoder(conn, details,
+            geocoder = nsearch.ForwardGeocoder(conn, details,
                                        self.config.get_int('REQUEST_TIMEOUT')
                                        if self.config.REQUEST_TIMEOUT else None)
             return await geocoder.lookup(phrases)

     async def search_category(self, categories: List[Tuple[str, str]],
@@ -328,15 +328,15 @@ class NominatimAPIAsync:
         async with self.begin() as conn:
             conn.set_query_timeout(self.query_timeout)
             if near_query:
-                phrases = [Phrase(PhraseType.NONE, p) for p in near_query.split(',')]
+                phrases = [nsearch.Phrase(nsearch.PHRASE_ANY, p) for p in near_query.split(',')]
             else:
                 phrases = []
             if details.keywords:
-                await make_query_analyzer(conn)
+                await nsearch.make_query_analyzer(conn)

-            geocoder = ForwardGeocoder(conn, details,
+            geocoder = nsearch.ForwardGeocoder(conn, details,
                                        self.config.get_int('REQUEST_TIMEOUT')
                                        if self.config.REQUEST_TIMEOUT else None)
             return await geocoder.lookup_pois(categories, phrases)

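With the `PhraseType` enum gone, callers reach the phrase-type constants through the module namespace. A hedged usage sketch (the top-level import path is assumed from the package layout; the names follow the diff above):

```python
from nominatim_api import search as nsearch

# Build structured phrases the way search_address() now does internally.
phrases = [nsearch.Phrase(nsearch.PHRASE_STREET, 'Baker Street'),
           nsearch.Phrase(nsearch.PHRASE_CITY, 'London'),
           nsearch.Phrase(nsearch.PHRASE_COUNTRY, 'gb')]
```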
@@ -27,5 +27,5 @@ def create(config: QueryConfig) -> QueryProcessingFunc:

     return lambda phrases: list(
                filter(lambda p: p.text,
-                      (Phrase(p.ptype, cast(str, normalizer.transliterate(p.text)))
+                      (Phrase(p.ptype, cast(str, normalizer.transliterate(p.text)).strip('-: '))
                        for p in phrases)))
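`str.strip` with a character set removes any leading or trailing run of those characters, which is exactly what the added `.strip('-: ')` does to separators left over by the new break-preserving normalization; interior separators survive:

```python
print('-baker street: '.strip('-: '))  # 'baker street'
print('::st-martin--'.strip('-: '))    # 'st-martin' (inner hyphen is kept)
```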
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2025 by the Nominatim developer community.
+# For a full list of authors see the git log.
+"""
+This file divides Japanese addresses into three categories:
+prefecture, municipality, and other.
+The division is not strict but simple using these keywords.
+"""
+from typing import List
+import re
+
+from .config import QueryConfig
+from .base import QueryProcessingFunc
+from ..search.query import Phrase
+
+MATCH_PATTERNS = [
+    r'''
+        (...??[都都道府県縣])  # [group1] prefecture
+        (.+?[市区區町村])      # [group2] municipalities (city/wards/towns/villages)
+        (.+)                   # [group3] other words
+    ''',
+    r'''
+        (...??[都都道府県縣])  # [group1] prefecture
+        (.+)                   # [group3] other words
+    ''',
+    r'''
+        (.+?[市区區町村])      # [group2] municipalities (city/wards/towns/villages)
+        (.+)                   # [group3] other words
+    '''
+]
+
+
+class _JapanesePreprocessing:
+
+    def __init__(self, config: QueryConfig) -> None:
+        self.config = config
+
+    def split_phrase(self, phrase: Phrase) -> Phrase:
+        """
+        This function performs a division on the given text using a regular expression.
+        """
+        for pattern in MATCH_PATTERNS:
+            result = re.match(pattern, phrase.text, re.VERBOSE)
+            if result is not None:
+                return Phrase(phrase.ptype, ':'.join(result.groups()))
+
+        return phrase
+
+    def __call__(self, phrases: List[Phrase]) -> List[Phrase]:
+        """Split a Japanese address using japanese_tokenizer.
+        """
+        return [self.split_phrase(p) for p in phrases]
+
+
+def create(config: QueryConfig) -> QueryProcessingFunc:
+    """ Create a function of japanese preprocessing.
+    """
+    return _JapanesePreprocessing(config)
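The first pattern can be exercised directly; for a typical address the three groups come out as prefecture, municipality, and remainder, joined with `:` as in `split_phrase` above:

```python
import re

PATTERN = r'''
    (...??[都都道府県縣])  # prefecture
    (.+?[市区區町村])      # municipality
    (.+)                   # remainder
'''

m = re.match(PATTERN, '東京都千代田区丸の内1丁目', re.VERBOSE)
print(m.groups())            # ('東京都', '千代田区', '丸の内1丁目')
print(':'.join(m.groups()))  # '東京都:千代田区:丸の内1丁目'
```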
@@ -9,5 +9,12 @@ Module for forward search.
 """
 from .geocoder import (ForwardGeocoder as ForwardGeocoder)
 from .query import (Phrase as Phrase,
-                    PhraseType as PhraseType)
+                    PHRASE_ANY as PHRASE_ANY,
+                    PHRASE_AMENITY as PHRASE_AMENITY,
+                    PHRASE_STREET as PHRASE_STREET,
+                    PHRASE_CITY as PHRASE_CITY,
+                    PHRASE_COUNTY as PHRASE_COUNTY,
+                    PHRASE_STATE as PHRASE_STATE,
+                    PHRASE_POSTCODE as PHRASE_POSTCODE,
+                    PHRASE_COUNTRY as PHRASE_COUNTRY)
 from .query_analyzer_factory import (make_query_analyzer as make_query_analyzer)
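The `name as name` import form marks explicit re-exports: under mypy's `--no-implicit-reexport` (implied by `--strict`), only names imported this way or listed in `__all__` count as part of the module's public API. A two-line illustration:

```python
from .query import Phrase as Phrase  # explicit re-export, visible to importers
from .query import QueryStruct       # implicit; hidden under --no-implicit-reexport
```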
@@ -11,7 +11,7 @@ from typing import Optional, List, Tuple, Iterator, Dict
 import heapq

 from ..types import SearchDetails, DataLayer
-from .query import QueryStruct, Token, TokenType, TokenRange, BreakType
+from . import query as qmod
 from .token_assignment import TokenAssignment
 from . import db_search_fields as dbf
 from . import db_searches as dbs
@@ -51,7 +51,7 @@ class SearchBuilder:
     """ Build the abstract search queries from token assignments.
     """

-    def __init__(self, query: QueryStruct, details: SearchDetails) -> None:
+    def __init__(self, query: qmod.QueryStruct, details: SearchDetails) -> None:
         self.query = query
         self.details = details

@@ -97,7 +97,7 @@ class SearchBuilder:
             builder = self.build_poi_search(sdata)
         elif assignment.housenumber:
             hnr_tokens = self.query.get_tokens(assignment.housenumber,
-                                               TokenType.HOUSENUMBER)
+                                               qmod.TOKEN_HOUSENUMBER)
             builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address)
         else:
             builder = self.build_special_search(sdata, assignment.address,
@@ -128,7 +128,7 @@ class SearchBuilder:
         yield dbs.PoiSearch(sdata)

     def build_special_search(self, sdata: dbf.SearchData,
-                             address: List[TokenRange],
+                             address: List[qmod.TokenRange],
                              is_category: bool) -> Iterator[dbs.AbstractSearch]:
         """ Build abstract search queries for searches that do not involve
             a named place.
@@ -148,11 +148,10 @@ class SearchBuilder:
                                              [t.token for r in address
                                               for t in self.query.get_partials_list(r)],
                                              lookups.Restrict)]
-            penalty += 0.2
         yield dbs.PostcodeSearch(penalty, sdata)

-    def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[Token],
-                                 address: List[TokenRange]) -> Iterator[dbs.AbstractSearch]:
+    def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[qmod.Token],
+                                 address: List[qmod.TokenRange]) -> Iterator[dbs.AbstractSearch]:
         """ Build a simple address search for special entries where the
             housenumber is the main name token.
         """
@@ -174,7 +173,7 @@ class SearchBuilder:
                                                  list(partials), lookups.LookupAll))
         else:
             addr_fulls = [t.token for t
-                          in self.query.get_tokens(address[0], TokenType.WORD)]
+                          in self.query.get_tokens(address[0], qmod.TOKEN_WORD)]
             if len(addr_fulls) > 5:
                 return
             sdata.lookups.append(
@@ -184,7 +183,7 @@ class SearchBuilder:
         yield dbs.PlaceSearch(0.05, sdata, expected_count)

     def build_name_search(self, sdata: dbf.SearchData,
-                          name: TokenRange, address: List[TokenRange],
+                          name: qmod.TokenRange, address: List[qmod.TokenRange],
                           is_category: bool) -> Iterator[dbs.AbstractSearch]:
         """ Build abstract search queries for simple name or address searches.
         """
@@ -197,7 +196,7 @@ class SearchBuilder:
                 sdata.lookups = lookup
                 yield dbs.PlaceSearch(penalty + name_penalty, sdata, count)

-    def yield_lookups(self, name: TokenRange, address: List[TokenRange]
+    def yield_lookups(self, name: qmod.TokenRange, address: List[qmod.TokenRange]
                       ) -> Iterator[Tuple[float, int, List[dbf.FieldLookup]]]:
         """ Yield all variants how the given name and address should best
             be searched for. This takes into account how frequent the terms
@@ -209,26 +208,26 @@ class SearchBuilder:
         addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
         addr_tokens = list({t.token for t in addr_partials})

-        exp_count = min(t.count for t in name_partials.values()) / (2**(len(name_partials) - 1))
+        exp_count = min(t.count for t in name_partials.values()) / (3**(len(name_partials) - 1))

         if (len(name_partials) > 3 or exp_count < 8000):
             yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
             return

-        addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 30000
+        addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 50000
         # Partial term to frequent. Try looking up by rare full names first.
-        name_fulls = self.query.get_tokens(name, TokenType.WORD)
+        name_fulls = self.query.get_tokens(name, qmod.TOKEN_WORD)
         if name_fulls:
             fulls_count = sum(t.count for t in name_fulls)

-            if fulls_count < 50000 or addr_count < 30000:
+            if fulls_count < 50000 or addr_count < 50000:
                 yield penalty, fulls_count / (2**len(addr_tokens)), \
                     self.get_full_name_ranking(name_fulls, addr_partials,
                                                fulls_count > 30000 / max(1, len(addr_tokens)))

         # To catch remaining results, lookup by name and address
         # We only do this if there is a reasonable number of results expected.
-        exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
+        exp_count /= 2**len(addr_tokens)
         if exp_count < 10000 and addr_count < 20000:
             penalty += 0.35 * max(1 if name_fulls else 0.1,
                                   5 - len(name_partials) - len(addr_tokens))
@@ -236,7 +235,7 @@ class SearchBuilder:
                 self.get_name_address_ranking(list(name_partials.keys()), addr_partials)

     def get_name_address_ranking(self, name_tokens: List[int],
-                                 addr_partials: List[Token]) -> List[dbf.FieldLookup]:
+                                 addr_partials: List[qmod.Token]) -> List[dbf.FieldLookup]:
         """ Create a ranking expression looking up by name and address.
         """
         lookup = [dbf.FieldLookup('name_vector', name_tokens, lookups.LookupAll)]
@@ -258,23 +257,16 @@ class SearchBuilder:

         return lookup

-    def get_full_name_ranking(self, name_fulls: List[Token], addr_partials: List[Token],
+    def get_full_name_ranking(self, name_fulls: List[qmod.Token], addr_partials: List[qmod.Token],
                               use_lookup: bool) -> List[dbf.FieldLookup]:
         """ Create a ranking expression with full name terms and
             additional address lookup. When 'use_lookup' is true, then
             address lookups will use the index, when the occurrences are not
             too many.
         """
-        # At this point drop unindexed partials from the address.
-        # This might yield wrong results, nothing we can do about that.
         if use_lookup:
             addr_restrict_tokens = []
-            addr_lookup_tokens = []
-            for t in addr_partials:
-                if t.addr_count > 20000:
-                    addr_restrict_tokens.append(t.token)
-                else:
-                    addr_lookup_tokens.append(t.token)
+            addr_lookup_tokens = [t.token for t in addr_partials]
         else:
             addr_restrict_tokens = [t.token for t in addr_partials]
             addr_lookup_tokens = []
@@ -282,11 +274,11 @@ class SearchBuilder:
         return dbf.lookup_by_any_name([t.token for t in name_fulls],
                                       addr_restrict_tokens, addr_lookup_tokens)

-    def get_name_ranking(self, trange: TokenRange,
+    def get_name_ranking(self, trange: qmod.TokenRange,
                          db_field: str = 'name_vector') -> dbf.FieldRanking:
         """ Create a ranking expression for a name term in the given range.
         """
-        name_fulls = self.query.get_tokens(trange, TokenType.WORD)
+        name_fulls = self.query.get_tokens(trange, qmod.TOKEN_WORD)
         ranks = [dbf.RankedTokens(t.penalty, [t.token]) for t in name_fulls]
         ranks.sort(key=lambda r: r.penalty)
         # Fallback, sum of penalty for partials
@@ -294,7 +286,7 @@ class SearchBuilder:
         default = sum(t.penalty for t in name_partials) + 0.2
         return dbf.FieldRanking(db_field, default, ranks)

-    def get_addr_ranking(self, trange: TokenRange) -> dbf.FieldRanking:
+    def get_addr_ranking(self, trange: qmod.TokenRange) -> dbf.FieldRanking:
         """ Create a list of ranking expressions for an address term
             for the given ranges.
         """
@@ -305,10 +297,10 @@ class SearchBuilder:
         while todo:
             neglen, pos, rank = heapq.heappop(todo)
             for tlist in self.query.nodes[pos].starting:
-                if tlist.ttype in (TokenType.PARTIAL, TokenType.WORD):
+                if tlist.ttype in (qmod.TOKEN_PARTIAL, qmod.TOKEN_WORD):
                     if tlist.end < trange.end:
                         chgpenalty = PENALTY_WORDCHANGE[self.query.nodes[tlist.end].btype]
-                        if tlist.ttype == TokenType.PARTIAL:
+                        if tlist.ttype == qmod.TOKEN_PARTIAL:
                             penalty = rank.penalty + chgpenalty \
                                       + max(t.penalty for t in tlist.tokens)
                             heapq.heappush(todo, (neglen - 1, tlist.end,
@@ -318,7 +310,7 @@ class SearchBuilder:
                             heapq.heappush(todo, (neglen - 1, tlist.end,
                                                   rank.with_token(t, chgpenalty)))
                     elif tlist.end == trange.end:
-                        if tlist.ttype == TokenType.PARTIAL:
+                        if tlist.ttype == qmod.TOKEN_PARTIAL:
                             ranks.append(dbf.RankedTokens(rank.penalty
                                                           + max(t.penalty for t in tlist.tokens),
                                                           rank.tokens))
@@ -358,11 +350,11 @@ class SearchBuilder:
         if assignment.housenumber:
             sdata.set_strings('housenumbers',
                               self.query.get_tokens(assignment.housenumber,
-                                                    TokenType.HOUSENUMBER))
+                                                    qmod.TOKEN_HOUSENUMBER))
         if assignment.postcode:
             sdata.set_strings('postcodes',
                               self.query.get_tokens(assignment.postcode,
-                                                    TokenType.POSTCODE))
+                                                    qmod.TOKEN_POSTCODE))
         if assignment.qualifier:
             tokens = self.get_qualifier_tokens(assignment.qualifier)
             if not tokens:
@@ -387,23 +379,23 @@ class SearchBuilder:

         return sdata

-    def get_country_tokens(self, trange: TokenRange) -> List[Token]:
+    def get_country_tokens(self, trange: qmod.TokenRange) -> List[qmod.Token]:
         """ Return the list of country tokens for the given range,
             optionally filtered by the country list from the details
             parameters.
         """
-        tokens = self.query.get_tokens(trange, TokenType.COUNTRY)
+        tokens = self.query.get_tokens(trange, qmod.TOKEN_COUNTRY)
         if self.details.countries:
             tokens = [t for t in tokens if t.lookup_word in self.details.countries]

         return tokens

-    def get_qualifier_tokens(self, trange: TokenRange) -> List[Token]:
+    def get_qualifier_tokens(self, trange: qmod.TokenRange) -> List[qmod.Token]:
         """ Return the list of qualifier tokens for the given range,
             optionally filtered by the qualifier list from the details
             parameters.
         """
-        tokens = self.query.get_tokens(trange, TokenType.QUALIFIER)
+        tokens = self.query.get_tokens(trange, qmod.TOKEN_QUALIFIER)
         if self.details.categories:
             tokens = [t for t in tokens if t.get_category() in self.details.categories]

@@ -416,7 +408,7 @@ class SearchBuilder:
         """
         if assignment.near_item:
             tokens: Dict[Tuple[str, str], float] = {}
-            for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM):
+            for t in self.query.get_tokens(assignment.near_item, qmod.TOKEN_NEAR_ITEM):
                 cat = t.get_category()
                 # The category of a near search will be that of near_item.
                 # Thus, if search is restricted to a category parameter,
@@ -430,10 +422,11 @@ class SearchBuilder:


 PENALTY_WORDCHANGE = {
-    BreakType.START: 0.0,
-    BreakType.END: 0.0,
-    BreakType.PHRASE: 0.0,
-    BreakType.WORD: 0.1,
-    BreakType.PART: 0.2,
-    BreakType.TOKEN: 0.4
+    qmod.BREAK_START: 0.0,
+    qmod.BREAK_END: 0.0,
+    qmod.BREAK_PHRASE: 0.0,
+    qmod.BREAK_SOFT_PHRASE: 0.0,
+    qmod.BREAK_WORD: 0.1,
+    qmod.BREAK_PART: 0.2,
+    qmod.BREAK_TOKEN: 0.4
 }
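Note the estimator change in `yield_lookups` above: moving the divisor from `2**(len(name_partials) - 1)` to `3**(len(name_partials) - 1)` makes the expected result count fall faster as partial terms are added, so more multi-word queries qualify for the cheap name-only lookup. Worked numbers:

```python
min_count = 10000  # frequency of the rarest partial term
for n_partials in (1, 2, 3):
    old = min_count / (2**(n_partials - 1))
    new = min_count / (3**(n_partials - 1))
    print(n_partials, old, round(new, 1))
# 1 10000.0 10000.0
# 2 5000.0 3333.3
# 3 2500.0 1111.1
```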
@@ -581,9 +581,13 @@ class PostcodeSearch(AbstractSearch):
                            .where((tsearch.c.name_vector + tsearch.c.nameaddress_vector)
                                   .contains(sa.type_coerce(self.lookups[0].tokens,
                                                            IntArray)))
+        # Do NOT add rerank penalties based on the address terms.
+        # The standard rerank penalty only checks the address vector
+        # while terms may appear in name and address vector. This would
+        # lead to overly high penalties.
+        # We assume that a postcode is precise enough to not require
+        # additional full name matches.

-        for ranking in self.rankings:
-            penalty += ranking.sql_penalty(conn.t.search_name)
         penalty += sa.case(*((t.c.postcode == v, p) for v, p in self.postcodes),
                            else_=1.0)

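The penalty expression kept above builds a SQL CASE from the postcode candidates. A minimal standalone sketch of the same `sa.case` construct (SQLAlchemy 2.x positional whens; the table and values are illustrative):

```python
import sqlalchemy as sa

t = sa.table('postcode', sa.column('postcode'))
postcodes = [('AB1', 0.0), ('AB1 2', 0.1)]  # (candidate value, penalty)

# One WHEN branch per candidate; anything else costs the full penalty.
penalty = sa.case(*((t.c.postcode == v, p) for v, p in postcodes),
                  else_=1.0)
print(penalty)  # CASE WHEN postcode.postcode = :postcode_1 THEN :param_1 ... ELSE :param_2 END
```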
@@ -133,7 +133,7 @@ class ForwardGeocoder:
         """
         assert self.query_analyzer is not None
         qwords = [word for phrase in query.source
-                  for word in re.split('[, ]+', phrase.text) if word]
+                  for word in re.split('[-,: ]+', phrase.text) if word]
         if not qwords:
             return

@@ -146,7 +146,7 @@ class ForwardGeocoder:
             distance = 0.0
             norm = self.query_analyzer.normalize_text(' '.join((result.display_name,
                                                                 result.country_code or '')))
-            words = set((w for w in norm.split(' ') if w))
+            words = set((w for w in re.split('[-,: ]+', norm) if w))
             if not words:
                 continue
             for qword in qwords:
@@ -238,7 +238,7 @@ def _dump_searches(searches: List[AbstractSearch], query: QueryStruct,
         if not lk:
             return ''

-        return f"{lk.lookup_type}({lk.column}{tk(lk.tokens)})"
+        return f"{lk.lookup_type.__name__}({lk.column}{tk(lk.tokens)})"

     def fmt_cstr(c: Any) -> str:
         if not c:
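The widened split pattern treats hyphens and colons like commas and blanks when comparing query words against result names, so hyphenated names break into the same word units on both sides:

```python
import re

q = 'Karl-Marx-Straße, Berlin'
print(q.split(' '))            # ['Karl-Marx-Straße,', 'Berlin']
print(re.split('[-,: ]+', q))  # ['Karl', 'Marx', 'Straße', 'Berlin']
```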
@@ -7,10 +7,11 @@
 """
 Implementation of query analysis for the ICU tokenizer.
 """
-from typing import Tuple, Dict, List, Optional, NamedTuple, Iterator, Any, cast
-from collections import defaultdict
+from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
 import dataclasses
 import difflib
+import re
+from itertools import zip_longest

 from icu import Transliterator

@@ -23,45 +24,28 @@ from ..connection import SearchConnection
 from ..logging import log
 from . import query as qmod
 from ..query_preprocessing.config import QueryConfig
+from ..query_preprocessing.base import QueryProcessingFunc
 from .query_analyzer_factory import AbstractQueryAnalyzer
+from .postcode_parser import PostcodeParser


 DB_TO_TOKEN_TYPE = {
-    'W': qmod.TokenType.WORD,
-    'w': qmod.TokenType.PARTIAL,
-    'H': qmod.TokenType.HOUSENUMBER,
-    'P': qmod.TokenType.POSTCODE,
-    'C': qmod.TokenType.COUNTRY
+    'W': qmod.TOKEN_WORD,
+    'w': qmod.TOKEN_PARTIAL,
+    'H': qmod.TOKEN_HOUSENUMBER,
+    'P': qmod.TOKEN_POSTCODE,
+    'C': qmod.TOKEN_COUNTRY
 }

-
-class QueryPart(NamedTuple):
-    """ Normalized and transliterated form of a single term in the query.
-        When the term came out of a split during the transliteration,
-        the normalized string is the full word before transliteration.
-        The word number keeps track of the word before transliteration
-        and can be used to identify partial transliterated terms.
-    """
-    token: str
-    normalized: str
-    word_number: int
-
-
-QueryParts = List[QueryPart]
-WordDict = Dict[str, List[qmod.TokenRange]]
-
-
-def yield_words(terms: List[QueryPart], start: int) -> Iterator[Tuple[str, qmod.TokenRange]]:
-    """ Return all combinations of words in the terms list after the
-        given position.
-    """
-    total = len(terms)
-    for first in range(start, total):
-        word = terms[first].token
-        yield word, qmod.TokenRange(first, first + 1)
-        for last in range(first + 1, min(first + 20, total)):
-            word = ' '.join((word, terms[last].token))
-            yield word, qmod.TokenRange(first, last + 1)
+PENALTY_IN_TOKEN_BREAK = {
+    qmod.BREAK_START: 0.5,
+    qmod.BREAK_END: 0.5,
+    qmod.BREAK_PHRASE: 0.5,
+    qmod.BREAK_SOFT_PHRASE: 0.5,
+    qmod.BREAK_WORD: 0.1,
+    qmod.BREAK_PART: 0.0,
+    qmod.BREAK_TOKEN: 0.0
+}


 @dataclasses.dataclass
@@ -94,25 +78,25 @@ class ICUToken(qmod.Token):
         self.penalty += (distance/len(self.lookup_word))

     @staticmethod
-    def from_db_row(row: SaRow) -> 'ICUToken':
+    def from_db_row(row: SaRow, base_penalty: float = 0.0) -> 'ICUToken':
         """ Create a ICUToken from the row of the word table.
         """
         count = 1 if row.info is None else row.info.get('count', 1)
         addr_count = 1 if row.info is None else row.info.get('addr_count', 1)

-        penalty = 0.0
+        penalty = base_penalty
         if row.type == 'w':
-            penalty = 0.3
+            penalty += 0.3
         elif row.type == 'W':
             if len(row.word_token) == 1 and row.word_token == row.word:
-                penalty = 0.2 if row.word.isdigit() else 0.3
+                penalty += 0.2 if row.word.isdigit() else 0.3
         elif row.type == 'H':
-            penalty = sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
+            penalty += sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
             if all(not c.isdigit() for c in row.word_token):
                 penalty += 0.2 * (len(row.word_token) - 1)
         elif row.type == 'C':
             if len(row.word_token) == 1:
-                penalty = 0.3
+                penalty += 0.3

         if row.info is None:
             lookup_word = row.word
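With `base_penalty`, the break-type penalty collected during tokenization (see `PENALTY_IN_TOKEN_BREAK` above) now seeds the token penalty instead of being discarded. Worked numbers for a partial term sitting at a word break:

```python
base = 0.1              # PENALTY_IN_TOKEN_BREAK[qmod.BREAK_WORD]
row_type_penalty = 0.3  # the 'w' (partial term) surcharge from from_db_row
print(base + row_type_penalty)  # 0.4 total token penalty
```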
@@ -129,60 +113,51 @@ class ICUToken(qmod.Token):
|
|||||||
addr_count=max(1, addr_count))
|
addr_count=max(1, addr_count))
|
||||||
|
|
||||||
|
|
||||||
class ICUQueryAnalyzer(AbstractQueryAnalyzer):
|
@dataclasses.dataclass
|
||||||
""" Converter for query strings into a tokenized query
|
class ICUAnalyzerConfig:
|
||||||
using the tokens created by a ICU tokenizer.
|
postcode_parser: PostcodeParser
|
||||||
"""
|
normalizer: Transliterator
|
||||||
def __init__(self, conn: SearchConnection) -> None:
|
transliterator: Transliterator
|
||||||
self.conn = conn
|
preprocessors: List[QueryProcessingFunc]
|
||||||
|
|
||||||
async def setup(self) -> None:
|
@staticmethod
|
||||||
""" Set up static data structures needed for the analysis.
|
async def create(conn: SearchConnection) -> 'ICUAnalyzerConfig':
|
||||||
"""
|
rules = await conn.get_property('tokenizer_import_normalisation')
|
||||||
async def _make_normalizer() -> Any:
|
normalizer = Transliterator.createFromRules("normalization", rules)
|
||||||
rules = await self.conn.get_property('tokenizer_import_normalisation')
|
|
||||||
return Transliterator.createFromRules("normalization", rules)
|
|
||||||
|
|
||||||
self.normalizer = await self.conn.get_cached_value('ICUTOK', 'normalizer',
|
rules = await conn.get_property('tokenizer_import_transliteration')
|
||||||
_make_normalizer)
|
transliterator = Transliterator.createFromRules("transliteration", rules)
|
||||||
|
|
||||||
async def _make_transliterator() -> Any:
|
preprocessing_rules = conn.config.load_sub_configuration('icu_tokenizer.yaml',
|
||||||
rules = await self.conn.get_property('tokenizer_import_transliteration')
|
config='TOKENIZER_CONFIG')\
|
||||||
return Transliterator.createFromRules("transliteration", rules)
|
.get('query-preprocessing', [])
|
||||||
|
|
||||||
self.transliterator = await self.conn.get_cached_value('ICUTOK', 'transliterator',
|
|
||||||
_make_transliterator)
|
|
||||||
|
|
||||||
await self._setup_preprocessing()
|
|
||||||
|
|
||||||
if 'word' not in self.conn.t.meta.tables:
|
|
||||||
sa.Table('word', self.conn.t.meta,
|
|
||||||
sa.Column('word_id', sa.Integer),
|
|
||||||
sa.Column('word_token', sa.Text, nullable=False),
|
|
||||||
sa.Column('type', sa.Text, nullable=False),
|
|
||||||
sa.Column('word', sa.Text),
|
|
||||||
sa.Column('info', Json))
|
|
||||||
|
|
||||||
async def _setup_preprocessing(self) -> None:
|
|
||||||
""" Load the rules for preprocessing and set up the handlers.
|
|
||||||
"""
|
|
||||||
|
|
||||||
rules = self.conn.config.load_sub_configuration('icu_tokenizer.yaml',
|
|
||||||
config='TOKENIZER_CONFIG')
|
|
||||||
preprocessing_rules = rules.get('query-preprocessing', [])
|
|
||||||
|
|
||||||
self.preprocessors = []
|
|
||||||
|
|
||||||
|
preprocessors: List[QueryProcessingFunc] = []
|
||||||
for func in preprocessing_rules:
|
for func in preprocessing_rules:
|
||||||
if 'step' not in func:
|
if 'step' not in func:
|
||||||
raise UsageError("Preprocessing rule is missing the 'step' attribute.")
|
raise UsageError("Preprocessing rule is missing the 'step' attribute.")
|
||||||
if not isinstance(func['step'], str):
|
if not isinstance(func['step'], str):
|
||||||
raise UsageError("'step' attribute must be a simple string.")
|
raise UsageError("'step' attribute must be a simple string.")
|
||||||
|
|
||||||
module = self.conn.config.load_plugin_module(
|
module = conn.config.load_plugin_module(
|
||||||
func['step'], 'nominatim_api.query_preprocessing')
|
func['step'], 'nominatim_api.query_preprocessing')
|
||||||
self.preprocessors.append(
|
preprocessors.append(
|
||||||
module.create(QueryConfig(func).set_normalizer(self.normalizer)))
|
module.create(QueryConfig(func).set_normalizer(normalizer)))
|
||||||
|
|
||||||
|
return ICUAnalyzerConfig(PostcodeParser(conn.config),
|
||||||
|
normalizer, transliterator, preprocessors)
|
||||||
|
|
||||||
|
|
||||||
|
class ICUQueryAnalyzer(AbstractQueryAnalyzer):
|
||||||
|
""" Converter for query strings into a tokenized query
|
||||||
|
using the tokens created by a ICU tokenizer.
|
||||||
|
"""
|
||||||
|
def __init__(self, conn: SearchConnection, config: ICUAnalyzerConfig) -> None:
|
||||||
|
self.conn = conn
|
||||||
|
self.postcode_parser = config.postcode_parser
|
||||||
|
self.normalizer = config.normalizer
|
||||||
|
self.transliterator = config.transliterator
|
||||||
|
self.preprocessors = config.preprocessors
|
||||||
|
|
||||||
async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
|
async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
|
||||||
""" Analyze the given list of phrases and return the
|
""" Analyze the given list of phrases and return the
|
||||||
@@ -197,26 +172,34 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
         if not query.source:
             return query

-        parts, words = self.split_query(query)
-        log().var_dump('Transliterated query', lambda: _dump_transliterated(query, parts))
+        self.split_query(query)
+        log().var_dump('Transliterated query', lambda: query.get_transliterated_query())
+        words = query.extract_words(base_penalty=PENALTY_IN_TOKEN_BREAK[qmod.BREAK_WORD])

         for row in await self.lookup_in_db(list(words.keys())):
             for trange in words[row.word_token]:
-                token = ICUToken.from_db_row(row)
+                token = ICUToken.from_db_row(row, trange.penalty or 0.0)
                 if row.type == 'S':
                     if row.info['op'] in ('in', 'near'):
                         if trange.start == 0:
-                            query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
+                            query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
                     else:
                         if trange.start == 0 and trange.end == query.num_token_slots():
-                            query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
+                            query.add_token(trange, qmod.TOKEN_NEAR_ITEM, token)
                         else:
-                            query.add_token(trange, qmod.TokenType.QUALIFIER, token)
+                            query.add_token(trange, qmod.TOKEN_QUALIFIER, token)
                 else:
                     query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)

-        self.add_extra_tokens(query, parts)
-        self.rerank_tokens(query, parts)
+        self.add_extra_tokens(query)
+        for start, end, pc in self.postcode_parser.parse(query):
+            term = ' '.join(n.term_lookup for n in query.nodes[start + 1:end + 1])
+            query.add_token(qmod.TokenRange(start, end),
+                            qmod.TOKEN_POSTCODE,
+                            ICUToken(penalty=0.1, token=0, count=1, addr_count=1,
+                                     lookup_word=pc, word_token=term,
+                                     info=None))
+        self.rerank_tokens(query)

         log().table_dump('Word tokens', _dump_word_tokens(query))

@@ -227,97 +210,93 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
             standardized form search will work with. All information removed
             at this stage is inevitably lost.
         """
-        return cast(str, self.normalizer.transliterate(text))
+        return cast(str, self.normalizer.transliterate(text)).strip('-: ')

-    def split_query(self, query: qmod.QueryStruct) -> Tuple[QueryParts, WordDict]:
+    def split_query(self, query: qmod.QueryStruct) -> None:
         """ Transliterate the phrases and split them into tokens.
-
-            Returns the list of transliterated tokens together with their
-            normalized form and a dictionary of words for lookup together
-            with their position.
         """
-        parts: QueryParts = []
-        phrase_start = 0
-        words = defaultdict(list)
-        wordnr = 0
         for phrase in query.source:
             query.nodes[-1].ptype = phrase.ptype
-            for word in phrase.text.split(' '):
+            phrase_split = re.split('([ :-])', phrase.text)
+            # The zip construct will give us the pairs of word/break from
+            # the regular expression split. As the split array ends on the
+            # final word, we simply use the fillvalue to even out the list and
+            # add the phrase break at the end.
+            for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue=','):
+                if not word:
+                    continue
                 trans = self.transliterator.transliterate(word)
                 if trans:
                     for term in trans.split(' '):
                         if term:
-                            parts.append(QueryPart(term, word, wordnr))
-                            query.add_node(qmod.BreakType.TOKEN, phrase.ptype)
-                    query.nodes[-1].btype = qmod.BreakType.WORD
-                    wordnr += 1
-            query.nodes[-1].btype = qmod.BreakType.PHRASE
-
-            for word, wrange in yield_words(parts, phrase_start):
-                words[word].append(wrange)
-
-            phrase_start = len(parts)
-        query.nodes[-1].btype = qmod.BreakType.END
-
-        return parts, words
+                            query.add_node(qmod.BREAK_TOKEN, phrase.ptype,
+                                           PENALTY_IN_TOKEN_BREAK[qmod.BREAK_TOKEN],
+                                           term, word)
+                    query.nodes[-1].adjust_break(breakchar,
+                                                 PENALTY_IN_TOKEN_BREAK[breakchar])
+
+        query.nodes[-1].adjust_break(qmod.BREAK_END, PENALTY_IN_TOKEN_BREAK[qmod.BREAK_END])

     async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
         """ Return the token information from the database for the
             given word tokens.
+
+            This function excludes postcode tokens
         """
         t = self.conn.t.meta.tables['word']
-        return await self.conn.execute(t.select().where(t.c.word_token.in_(words)))
+        return await self.conn.execute(t.select()
+                                       .where(t.c.word_token.in_(words))
+                                       .where(t.c.type != 'P'))

-    def add_extra_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
+    def add_extra_tokens(self, query: qmod.QueryStruct) -> None:
         """ Add tokens to query that are not saved in the database.
         """
-        for part, node, i in zip(parts, query.nodes, range(1000)):
-            if len(part.token) <= 4 and part[0].isdigit()\
-               and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
-                query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
-                                ICUToken(penalty=0.5, token=0,
-                                         count=1, addr_count=1, lookup_word=part.token,
-                                         word_token=part.token, info=None))
+        need_hnr = False
+        for i, node in enumerate(query.nodes):
+            is_full_token = node.btype not in (qmod.BREAK_TOKEN, qmod.BREAK_PART)
+            if need_hnr and is_full_token \
+                    and len(node.term_normalized) <= 4 and node.term_normalized.isdigit():
+                query.add_token(qmod.TokenRange(i-1, i), qmod.TOKEN_HOUSENUMBER,
+                                ICUToken(penalty=0.5, token=0,
+                                         count=1, addr_count=1,
+                                         lookup_word=node.term_lookup,
+                                         word_token=node.term_lookup, info=None))
+
+            need_hnr = is_full_token and not node.has_tokens(i+1, qmod.TOKEN_HOUSENUMBER)

-    def rerank_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
+    def rerank_tokens(self, query: qmod.QueryStruct) -> None:
         """ Add penalties to tokens that depend on presence of other token.
         """
         for i, node, tlist in query.iter_token_lists():
-            if tlist.ttype == qmod.TokenType.POSTCODE:
+            if tlist.ttype == qmod.TOKEN_POSTCODE:
+                tlen = len(cast(ICUToken, tlist.tokens[0]).word_token)
                 for repl in node.starting:
-                    if repl.end == tlist.end and repl.ttype != qmod.TokenType.POSTCODE \
-                       and (repl.ttype != qmod.TokenType.HOUSENUMBER
-                            or len(tlist.tokens[0].lookup_word) > 4):
+                    if repl.end == tlist.end and repl.ttype != qmod.TOKEN_POSTCODE \
+                       and (repl.ttype != qmod.TOKEN_HOUSENUMBER or tlen > 4):
                         repl.add_penalty(0.39)
-            elif (tlist.ttype == qmod.TokenType.HOUSENUMBER
+            elif (tlist.ttype == qmod.TOKEN_HOUSENUMBER
                   and len(tlist.tokens[0].lookup_word) <= 3):
                 if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
                     for repl in node.starting:
-                        if repl.end == tlist.end and repl.ttype != qmod.TokenType.HOUSENUMBER:
+                        if repl.end == tlist.end and repl.ttype != qmod.TOKEN_HOUSENUMBER:
                             repl.add_penalty(0.5 - tlist.tokens[0].penalty)
-            elif tlist.ttype not in (qmod.TokenType.COUNTRY, qmod.TokenType.PARTIAL):
-                norm = parts[i].normalized
-                for j in range(i + 1, tlist.end):
-                    if parts[j - 1].word_number != parts[j].word_number:
-                        norm += ' ' + parts[j].normalized
+            elif tlist.ttype not in (qmod.TOKEN_COUNTRY, qmod.TOKEN_PARTIAL):
+                norm = ' '.join(n.term_normalized for n in query.nodes[i + 1:tlist.end + 1]
+                                if n.btype != qmod.BREAK_TOKEN)
+                if not norm:
+                    # Can happen when the token only covers a partial term
+                    norm = query.nodes[i + 1].term_normalized
                 for token in tlist.tokens:
                     cast(ICUToken, token).rematch(norm)


-def _dump_transliterated(query: qmod.QueryStruct, parts: QueryParts) -> str:
-    out = query.nodes[0].btype.value
-    for node, part in zip(query.nodes[1:], parts):
-        out += part.token + node.btype.value
-    return out
-
-
 def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
-    yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
-    for node in query.nodes:
+    yield ['type', 'from', 'to', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
+    for i, node in enumerate(query.nodes):
         for tlist in node.starting:
             for token in tlist.tokens:
                 t = cast(ICUToken, token)
-                yield [tlist.ttype.name, t.token, t.word_token or '',
+                yield [tlist.ttype, str(i), str(tlist.end), t.token, t.word_token or '',
                        t.lookup_word or '', t.penalty, t.count, t.info]

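A note on the `zip_longest` idiom in the rewritten `split_query()` above: pairing two references to the same iterator walks the `re.split()` result two elements at a time. A standalone sketch with a made-up sample phrase, using only the standard library:

    import re
    from itertools import zip_longest

    # re.split() with a capturing group keeps the separators, so the result
    # alternates word, break, word, break, ... and always ends on a word.
    phrase_split = re.split('([ :-])', 'rue de-la gare')
    # -> ['rue', ' ', 'de', '-', 'la', ' ', 'gare']

    # Two references to the same iterator consume the list pairwise; the
    # fillvalue ',' supplies the phrase break for the final word.
    for word, breakchar in zip_longest(*[iter(phrase_split)] * 2, fillvalue=','):
        print(repr(word), repr(breakchar))
    # 'rue' ' '
    # 'de' '-'
    # 'la' ' '
    # 'gare' ','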
@@ -325,7 +304,17 @@ async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer
     """ Create and set up a new query analyzer for a database based
         on the ICU tokenizer.
     """
-    out = ICUQueryAnalyzer(conn)
-    await out.setup()
+    async def _get_config() -> ICUAnalyzerConfig:
+        if 'word' not in conn.t.meta.tables:
+            sa.Table('word', conn.t.meta,
+                     sa.Column('word_id', sa.Integer),
+                     sa.Column('word_token', sa.Text, nullable=False),
+                     sa.Column('type', sa.Text, nullable=False),
+                     sa.Column('word', sa.Text),
+                     sa.Column('info', Json))

-    return out
+        return await ICUAnalyzerConfig.create(conn)
+
+    config = await conn.get_cached_value('ICUTOK', 'config', _get_config)
+
+    return ICUQueryAnalyzer(conn, config)

src/nominatim_api/search/postcode_parser.py (new file, 104 lines)
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2025 by the Nominatim developer community.
+# For a full list of authors see the git log.
+"""
+Handling of arbitrary postcode tokens in tokenized query string.
+"""
+from typing import Tuple, Set, Dict, List
+import re
+from collections import defaultdict
+
+import yaml
+
+from ..config import Configuration
+from . import query as qmod
+
+
+class PostcodeParser:
+    """ Pattern-based parser for postcodes in tokenized queries.
+
+        The postcode patterns are read from the country configuration.
+        The parser does currently not return country restrictions.
+    """
+
+    def __init__(self, config: Configuration) -> None:
+        # skip over includes here to avoid loading the complete country name data
+        yaml.add_constructor('!include', lambda loader, node: [],
+                             Loader=yaml.SafeLoader)
+        cdata = yaml.safe_load(config.find_config_file('country_settings.yaml')
+                                     .read_text(encoding='utf-8'))
+
+        unique_patterns: Dict[str, Dict[str, List[str]]] = {}
+        for cc, data in cdata.items():
+            if data.get('postcode'):
+                pat = data['postcode']['pattern'].replace('d', '[0-9]').replace('l', '[A-Z]')
+                out = data['postcode'].get('output')
+                if pat not in unique_patterns:
+                    unique_patterns[pat] = defaultdict(list)
+                unique_patterns[pat][out].append(cc.upper())
+
+        self.global_pattern = re.compile(
+            '(?:(?P<cc>[A-Z][A-Z])(?P<space>[ -]?))?(?P<pc>(?:(?:'
+            + ')|(?:'.join(unique_patterns) + '))[:, >].*)')
+
+        self.local_patterns = [(re.compile(f"{pat}[:, >]"), list(info.items()))
+                               for pat, info in unique_patterns.items()]
+
+    def parse(self, query: qmod.QueryStruct) -> Set[Tuple[int, int, str]]:
+        """ Parse postcodes in the given list of query tokens taking into
+            account the list of breaks from the nodes.
+
+            The result is a sequence of tuples with
+            [start node id, end node id, postcode token]
+        """
+        nodes = query.nodes
+        outcodes: Set[Tuple[int, int, str]] = set()
+
+        terms = [n.term_normalized.upper() + n.btype for n in nodes]
+        for i in range(query.num_token_slots()):
+            if nodes[i].btype in '<,: ' and nodes[i + 1].btype != '`' \
+               and (i == 0 or nodes[i - 1].ptype != qmod.PHRASE_POSTCODE):
+                if nodes[i].ptype == qmod.PHRASE_ANY:
+                    word = terms[i + 1]
+                    if word[-1] in ' -' and nodes[i + 2].btype != '`' \
+                       and nodes[i + 1].ptype == qmod.PHRASE_ANY:
+                        word += terms[i + 2]
+                        if word[-1] in ' -' and nodes[i + 3].btype != '`' \
+                           and nodes[i + 2].ptype == qmod.PHRASE_ANY:
+                            word += terms[i + 3]
+
+                    self._match_word(word, i, False, outcodes)
+                elif nodes[i].ptype == qmod.PHRASE_POSTCODE:
+                    word = terms[i + 1]
+                    for j in range(i + 1, query.num_token_slots()):
+                        if nodes[j].ptype != qmod.PHRASE_POSTCODE:
+                            break
+                        word += terms[j + 1]
+
+                    self._match_word(word, i, True, outcodes)
+
+        return outcodes
+
+    def _match_word(self, word: str, pos: int, fullmatch: bool,
+                    outcodes: Set[Tuple[int, int, str]]) -> None:
+        # Use global pattern to check for presence of any postcode.
+        m = self.global_pattern.fullmatch(word)
+        if m:
+            # If there was a match, check against each pattern separately
+            # because multiple patterns might be matching at the end.
+            cc = m.group('cc')
+            pc_word = m.group('pc')
+            cc_spaces = len(m.group('space') or '')
+            for pattern, info in self.local_patterns:
+                lm = pattern.fullmatch(pc_word) if fullmatch else pattern.match(pc_word)
+                if lm:
+                    trange = (pos, pos + cc_spaces + sum(c in ' ,-:>' for c in lm.group(0)))
+                    for out, out_ccs in info:
+                        if cc is None or cc in out_ccs:
+                            if out:
+                                outcodes.add((*trange, lm.expand(out)))
+                            else:
+                                outcodes.add((*trange, lm.group(0)[:-1]))
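To illustrate how the new parser turns the `country_settings.yaml` pattern notation into regular expressions: `d` stands for a digit, `l` for a letter, and a trailing character class ties the match to a break in the tokenized query. A minimal sketch with an assumed five-digit pattern (the real patterns come from the configuration file):

    import re

    # Assumed sample pattern in the country-settings notation: five digits.
    pattern = 'ddddd'
    regex = pattern.replace('d', '[0-9]').replace('l', '[A-Z]')

    # As in PostcodeParser.__init__: the postcode must be followed by one of
    # the break characters that terminate a term in the tokenized query.
    local = re.compile(f"{regex}[:, >]")

    print(bool(local.match('80331 ')))   # True - five digits up to a word break
    print(bool(local.match('8033A ')))   # False - letter where a digit is required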
@@ -7,88 +7,95 @@
 """
 Datastructures for a tokenized query.
 """
-from typing import List, Tuple, Optional, Iterator
+from typing import Dict, List, Tuple, Optional, Iterator
 from abc import ABC, abstractmethod
+from collections import defaultdict
 import dataclasses
-import enum


-class BreakType(enum.Enum):
+BreakType = str
 """ Type of break between tokens.
 """
-    START = '<'
+BREAK_START = '<'
 """ Begin of the query. """
-    END = '>'
+BREAK_END = '>'
 """ End of the query. """
-    PHRASE = ','
-    """ Break between two phrases. """
-    WORD = ' '
-    """ Break between words. """
-    PART = '-'
-    """ Break inside a word, for example a hyphen or apostrophe. """
-    TOKEN = '`'
-    """ Break created as a result of tokenization.
-        This may happen in languages without spaces between words.
-    """
+BREAK_PHRASE = ','
+""" Hard break between two phrases. Address parts cannot cross hard
+    phrase boundaries."""
+BREAK_SOFT_PHRASE = ':'
+""" Likely break between two phrases. Address parts should not cross soft
+    phrase boundaries. Soft breaks can be inserted by a preprocessor
+    that is analysing the input string.
+"""
+BREAK_WORD = ' '
+""" Break between words. """
+BREAK_PART = '-'
+""" Break inside a word, for example a hyphen or apostrophe. """
+BREAK_TOKEN = '`'
+""" Break created as a result of tokenization.
+    This may happen in languages without spaces between words.
+"""
+
+
+TokenType = str
+""" Type of token.
+"""
+TOKEN_WORD = 'W'
+""" Full name of a place. """
+TOKEN_PARTIAL = 'w'
+""" Word term without breaks, does not necessarily represent a full name. """
+TOKEN_HOUSENUMBER = 'H'
+""" Housenumber term. """
+TOKEN_POSTCODE = 'P'
+""" Postal code term. """
+TOKEN_COUNTRY = 'C'
+""" Country name or reference. """
+TOKEN_QUALIFIER = 'Q'
+""" Special term used together with name (e.g. _Hotel_ Bellevue). """
+TOKEN_NEAR_ITEM = 'N'
+""" Special term used as searchable object (e.g. supermarket in ...). """
+
+
+PhraseType = int
+""" Designation of a phrase.
+"""
+PHRASE_ANY = 0
+""" No specific designation (i.e. source is free-form query). """
+PHRASE_AMENITY = 1
+""" Contains name or type of a POI. """
+PHRASE_STREET = 2
+""" Contains a street name optionally with a housenumber. """
+PHRASE_CITY = 3
+""" Contains the postal city. """
+PHRASE_COUNTY = 4
+""" Contains the equivalent of a county. """
+PHRASE_STATE = 5
+""" Contains a state or province. """
+PHRASE_POSTCODE = 6
+""" Contains a postal code. """
+PHRASE_COUNTRY = 7
+""" Contains the country name or code. """
+
+
+def _phrase_compatible_with(ptype: PhraseType, ttype: TokenType,
+                            is_full_phrase: bool) -> bool:
+    """ Check if the given token type can be used with the phrase type.
     """
+    if ptype == PHRASE_ANY:
+        return not is_full_phrase or ttype != TOKEN_QUALIFIER
+    if ptype == PHRASE_AMENITY:
+        return ttype in (TOKEN_WORD, TOKEN_PARTIAL)\
+            or (is_full_phrase and ttype == TOKEN_NEAR_ITEM)\
+            or (not is_full_phrase and ttype == TOKEN_QUALIFIER)
+    if ptype == PHRASE_STREET:
+        return ttype in (TOKEN_WORD, TOKEN_PARTIAL, TOKEN_HOUSENUMBER)
+    if ptype == PHRASE_POSTCODE:
+        return ttype == TOKEN_POSTCODE
+    if ptype == PHRASE_COUNTRY:
+        return ttype == TOKEN_COUNTRY
+
+    return ttype in (TOKEN_WORD, TOKEN_PARTIAL)
-class TokenType(enum.Enum):
-    """ Type of token.
-    """
-    WORD = enum.auto()
-    """ Full name of a place. """
-    PARTIAL = enum.auto()
-    """ Word term without breaks, does not necessarily represent a full name. """
-    HOUSENUMBER = enum.auto()
-    """ Housenumber term. """
-    POSTCODE = enum.auto()
-    """ Postal code term. """
-    COUNTRY = enum.auto()
-    """ Country name or reference. """
-    QUALIFIER = enum.auto()
-    """ Special term used together with name (e.g. _Hotel_ Bellevue). """
-    NEAR_ITEM = enum.auto()
-    """ Special term used as searchable object (e.g. supermarket in ...). """
-
-
-class PhraseType(enum.Enum):
-    """ Designation of a phrase.
-    """
-    NONE = 0
-    """ No specific designation (i.e. source is free-form query). """
-    AMENITY = enum.auto()
-    """ Contains name or type of a POI. """
-    STREET = enum.auto()
-    """ Contains a street name optionally with a housenumber. """
-    CITY = enum.auto()
-    """ Contains the postal city. """
-    COUNTY = enum.auto()
-    """ Contains the equivalent of a county. """
-    STATE = enum.auto()
-    """ Contains a state or province. """
-    POSTCODE = enum.auto()
-    """ Contains a postal code. """
-    COUNTRY = enum.auto()
-    """ Contains the country name or code. """
-
-    def compatible_with(self, ttype: TokenType,
-                        is_full_phrase: bool) -> bool:
-        """ Check if the given token type can be used with the phrase type.
-        """
-        if self == PhraseType.NONE:
-            return not is_full_phrase or ttype != TokenType.QUALIFIER
-        if self == PhraseType.AMENITY:
-            return ttype in (TokenType.WORD, TokenType.PARTIAL)\
-                or (is_full_phrase and ttype == TokenType.NEAR_ITEM)\
-                or (not is_full_phrase and ttype == TokenType.QUALIFIER)
-        if self == PhraseType.STREET:
-            return ttype in (TokenType.WORD, TokenType.PARTIAL, TokenType.HOUSENUMBER)
-        if self == PhraseType.POSTCODE:
-            return ttype == TokenType.POSTCODE
-        if self == PhraseType.COUNTRY:
-            return ttype == TokenType.COUNTRY
-
-        return ttype in (TokenType.WORD, TokenType.PARTIAL)


 @dataclasses.dataclass
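Usage sketch for the module-level helper that replaces the former `PhraseType.compatible_with()` method (assuming the module is importable as `nominatim_api.search.query`; the helper is private, so this is for illustration only):

    from nominatim_api.search.query import (_phrase_compatible_with,
                                            PHRASE_STREET, PHRASE_POSTCODE,
                                            TOKEN_HOUSENUMBER, TOKEN_WORD)

    # A housenumber token may appear in a street phrase ...
    print(_phrase_compatible_with(PHRASE_STREET, TOKEN_HOUSENUMBER, False))  # True
    # ... but a name token never fits a strictly typed postcode phrase.
    print(_phrase_compatible_with(PHRASE_POSTCODE, TOKEN_WORD, False))       # False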
@@ -116,6 +123,7 @@ class TokenRange:
     """
     start: int
     end: int
+    penalty: Optional[float] = None

     def __lt__(self, other: 'TokenRange') -> bool:
         return self.end <= other.start
@@ -164,11 +172,33 @@ class TokenList:
 @dataclasses.dataclass
 class QueryNode:
     """ A node of the query representing a break between terms.
+
+        The node also contains information on the source term
+        ending at the node. The tokens are created from this information.
     """
     btype: BreakType
     ptype: PhraseType
+
+    penalty: float
+    """ Penalty for the break at this node.
+    """
+    term_lookup: str
+    """ Transliterated term following this node.
+    """
+    term_normalized: str
+    """ Normalised form of term following this node.
+        When the token resulted from a split during transliteration,
+        then this string contains the complete source term.
+    """
+
     starting: List[TokenList] = dataclasses.field(default_factory=list)

+    def adjust_break(self, btype: BreakType, penalty: float) -> None:
+        """ Change the break type and penalty for this node.
+        """
+        self.btype = btype
+        self.penalty = penalty
+
     def has_tokens(self, end: int, *ttypes: TokenType) -> bool:
         """ Check if there are tokens of the given types ending at the
             given node.
@@ -211,19 +241,22 @@ class QueryStruct:
     def __init__(self, source: List[Phrase]) -> None:
         self.source = source
         self.nodes: List[QueryNode] = \
-            [QueryNode(BreakType.START, source[0].ptype if source else PhraseType.NONE)]
+            [QueryNode(BREAK_START, source[0].ptype if source else PHRASE_ANY,
+                       0.0, '', '')]

     def num_token_slots(self) -> int:
         """ Return the length of the query in vertex steps.
         """
         return len(self.nodes) - 1

-    def add_node(self, btype: BreakType, ptype: PhraseType) -> None:
+    def add_node(self, btype: BreakType, ptype: PhraseType,
+                 break_penalty: float = 0.0,
+                 term_lookup: str = '', term_normalized: str = '') -> None:
         """ Append a new break node with the given break type.
             The phrase type denotes the type for any tokens starting
             at the node.
         """
-        self.nodes.append(QueryNode(btype, ptype))
+        self.nodes.append(QueryNode(btype, ptype, break_penalty, term_lookup, term_normalized))

     def add_token(self, trange: TokenRange, ttype: TokenType, token: Token) -> None:
         """ Add a token to the query. 'start' and 'end' are the indexes of the
@@ -236,9 +269,9 @@ class QueryStruct:
             be added to, then the token is silently dropped.
         """
         snode = self.nodes[trange.start]
-        full_phrase = snode.btype in (BreakType.START, BreakType.PHRASE)\
-                      and self.nodes[trange.end].btype in (BreakType.PHRASE, BreakType.END)
-        if snode.ptype.compatible_with(ttype, full_phrase):
+        full_phrase = snode.btype in (BREAK_START, BREAK_PHRASE)\
+                      and self.nodes[trange.end].btype in (BREAK_PHRASE, BREAK_END)
+        if _phrase_compatible_with(snode.ptype, ttype, full_phrase):
             tlist = snode.get_tokens(trange.end, ttype)
             if tlist is None:
                 snode.starting.append(TokenList(trange.end, ttype, [token]))
@@ -258,7 +291,7 @@ class QueryStruct:
             going to the subsequent node. Such PARTIAL tokens are
             assumed to exist.
         """
-        return [next(iter(self.get_tokens(TokenRange(i, i+1), TokenType.PARTIAL)))
+        return [next(iter(self.get_tokens(TokenRange(i, i+1), TOKEN_PARTIAL)))
                 for i in range(trange.start, trange.end)]

     def iter_token_lists(self) -> Iterator[Tuple[int, QueryNode, TokenList]]:
@@ -278,5 +311,44 @@ class QueryStruct:
                 for tlist in node.starting:
                     for t in tlist.tokens:
                         if t.token == token:
-                            return f"[{tlist.ttype.name[0]}]{t.lookup_word}"
+                            return f"[{tlist.ttype}]{t.lookup_word}"
         return 'None'
+
+    def get_transliterated_query(self) -> str:
+        """ Return a string representation of the transliterated query
+            with the character representation of the different break types.
+
+            For debugging purposes only.
+        """
+        return ''.join(''.join((n.term_lookup, n.btype)) for n in self.nodes)
+
+    def extract_words(self, base_penalty: float = 0.0,
+                      start: int = 0,
+                      endpos: Optional[int] = None) -> Dict[str, List[TokenRange]]:
+        """ Add all combinations of words that can be formed from the terms
+            between the given start and endnode. The terms are joined with
+            spaces for each break. Words can never go across a BREAK_PHRASE.
+
+            The function returns a dictionary of possible words with their
+            position within the query and a penalty. The penalty is computed
+            from the base_penalty plus the penalty for each node the word
+            crosses.
+        """
+        if endpos is None:
+            endpos = len(self.nodes)
+
+        words: Dict[str, List[TokenRange]] = defaultdict(list)
+
+        for first in range(start, endpos - 1):
+            word = self.nodes[first + 1].term_lookup
+            penalty = base_penalty
+            words[word].append(TokenRange(first, first + 1, penalty=penalty))
+            if self.nodes[first + 1].btype != BREAK_PHRASE:
+                for last in range(first + 2, min(first + 20, endpos)):
+                    word = ' '.join((word, self.nodes[last].term_lookup))
+                    penalty += self.nodes[last - 1].penalty
+                    words[word].append(TokenRange(first, last, penalty=penalty))
+                    if self.nodes[last].btype == BREAK_PHRASE:
+                        break
+
+        return words

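The combination logic of `extract_words()` is easiest to see in isolation. The following sketch reimplements the inner loop on plain lists (assumed sample terms and per-break penalties) and produces the same kind of word dictionary, keyed by the joined term with its node range and accumulated penalty:

    from collections import defaultdict

    terms = ['alte', 'post', 'strasse']   # assumed transliterated terms
    node_penalty = [0.1, 0.1]             # penalty of the break after each term

    words = defaultdict(list)
    for first in range(len(terms)):
        word, penalty = terms[first], 0.0
        words[word].append((first, first + 1, penalty))
        for last in range(first + 1, len(terms)):
            word = ' '.join((word, terms[last]))
            penalty += node_penalty[last - 1]
            words[word].append((first, last + 1, penalty))

    print(words['alte post'])           # [(0, 2, 0.1)]
    print(words['alte post strasse'])   # [(0, 3, 0.2)]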
@@ -24,12 +24,13 @@ class TypedRange:

 PENALTY_TOKENCHANGE = {
-    qmod.BreakType.START: 0.0,
-    qmod.BreakType.END: 0.0,
-    qmod.BreakType.PHRASE: 0.0,
-    qmod.BreakType.WORD: 0.1,
-    qmod.BreakType.PART: 0.2,
-    qmod.BreakType.TOKEN: 0.4
+    qmod.BREAK_START: 0.0,
+    qmod.BREAK_END: 0.0,
+    qmod.BREAK_PHRASE: 0.0,
+    qmod.BREAK_SOFT_PHRASE: 0.0,
+    qmod.BREAK_WORD: 0.1,
+    qmod.BREAK_PART: 0.2,
+    qmod.BREAK_TOKEN: 0.4
 }

 TypedRangeSeq = List[TypedRange]
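One practical payoff of the switch from `BreakType` enum members to one-character strings: the break character captured by the splitter regex can index the penalty tables directly, which is exactly what `PENALTY_IN_TOKEN_BREAK[breakchar]` in `split_query()` relies on. A tiny sketch with the values from the table above:

    PENALTY_TOKENCHANGE = {'<': 0.0, '>': 0.0, ',': 0.0, ':': 0.0,
                           ' ': 0.1, '-': 0.2, '`': 0.4}

    breakchar = '-'   # as captured by re.split('([ :-])', ...)
    print(PENALTY_TOKENCHANGE[breakchar])   # 0.2 - no enum lookup needed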
@@ -55,17 +56,17 @@ class TokenAssignment:
         """
         out = TokenAssignment()
         for token in ranges:
-            if token.ttype == qmod.TokenType.PARTIAL:
+            if token.ttype == qmod.TOKEN_PARTIAL:
                 out.address.append(token.trange)
-            elif token.ttype == qmod.TokenType.HOUSENUMBER:
+            elif token.ttype == qmod.TOKEN_HOUSENUMBER:
                 out.housenumber = token.trange
-            elif token.ttype == qmod.TokenType.POSTCODE:
+            elif token.ttype == qmod.TOKEN_POSTCODE:
                 out.postcode = token.trange
-            elif token.ttype == qmod.TokenType.COUNTRY:
+            elif token.ttype == qmod.TOKEN_COUNTRY:
                 out.country = token.trange
-            elif token.ttype == qmod.TokenType.NEAR_ITEM:
+            elif token.ttype == qmod.TOKEN_NEAR_ITEM:
                 out.near_item = token.trange
-            elif token.ttype == qmod.TokenType.QUALIFIER:
+            elif token.ttype == qmod.TOKEN_QUALIFIER:
                 out.qualifier = token.trange
         return out

@@ -83,7 +84,7 @@ class _TokenSequence:
         self.penalty = penalty

     def __str__(self) -> str:
-        seq = ''.join(f'[{r.trange.start} - {r.trange.end}: {r.ttype.name}]' for r in self.seq)
+        seq = ''.join(f'[{r.trange.start} - {r.trange.end}: {r.ttype}]' for r in self.seq)
         return f'{seq} (dir: {self.direction}, penalty: {self.penalty})'

     @property
@@ -104,7 +105,7 @@ class _TokenSequence:
         """
         # Country and category must be the final term for left-to-right
         return len(self.seq) > 1 and \
-            self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.NEAR_ITEM)
+            self.seq[-1].ttype in (qmod.TOKEN_COUNTRY, qmod.TOKEN_NEAR_ITEM)

     def appendable(self, ttype: qmod.TokenType) -> Optional[int]:
         """ Check if the given token type is appendable to the existing sequence.
@@ -113,23 +114,23 @@ class _TokenSequence:
             new direction of the sequence after adding such a type. The
             token is not added.
         """
-        if ttype == qmod.TokenType.WORD:
+        if ttype == qmod.TOKEN_WORD:
             return None

         if not self.seq:
             # Append unconditionally to the empty list
-            if ttype == qmod.TokenType.COUNTRY:
+            if ttype == qmod.TOKEN_COUNTRY:
                 return -1
-            if ttype in (qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
+            if ttype in (qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
                 return 1
             return self.direction

         # Name tokens are always acceptable and don't change direction
-        if ttype == qmod.TokenType.PARTIAL:
+        if ttype == qmod.TOKEN_PARTIAL:
             # qualifiers cannot appear in the middle of the query. They need
             # to be near the next phrase.
             if self.direction == -1 \
-               and any(t.ttype == qmod.TokenType.QUALIFIER for t in self.seq[:-1]):
+               and any(t.ttype == qmod.TOKEN_QUALIFIER for t in self.seq[:-1]):
                 return None
             return self.direction

@@ -137,54 +138,54 @@ class _TokenSequence:
         if self.has_types(ttype):
             return None

-        if ttype == qmod.TokenType.HOUSENUMBER:
+        if ttype == qmod.TOKEN_HOUSENUMBER:
             if self.direction == 1:
-                if len(self.seq) == 1 and self.seq[0].ttype == qmod.TokenType.QUALIFIER:
+                if len(self.seq) == 1 and self.seq[0].ttype == qmod.TOKEN_QUALIFIER:
                     return None
                 if len(self.seq) > 2 \
-                   or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
+                   or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY):
                     return None  # direction left-to-right: housenumber must come before anything
             elif (self.direction == -1
-                  or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY)):
+                  or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY)):
                 return -1  # force direction right-to-left if after other terms

             return self.direction

-        if ttype == qmod.TokenType.POSTCODE:
+        if ttype == qmod.TOKEN_POSTCODE:
             if self.direction == -1:
-                if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
+                if self.has_types(qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
                     return None
                 return -1
             if self.direction == 1:
-                return None if self.has_types(qmod.TokenType.COUNTRY) else 1
-            if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
+                return None if self.has_types(qmod.TOKEN_COUNTRY) else 1
+            if self.has_types(qmod.TOKEN_HOUSENUMBER, qmod.TOKEN_QUALIFIER):
                 return 1
             return self.direction

-        if ttype == qmod.TokenType.COUNTRY:
+        if ttype == qmod.TOKEN_COUNTRY:
             return None if self.direction == -1 else 1

-        if ttype == qmod.TokenType.NEAR_ITEM:
+        if ttype == qmod.TOKEN_NEAR_ITEM:
             return self.direction

-        if ttype == qmod.TokenType.QUALIFIER:
+        if ttype == qmod.TOKEN_QUALIFIER:
             if self.direction == 1:
                 if (len(self.seq) == 1
-                        and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.NEAR_ITEM)) \
+                        and self.seq[0].ttype in (qmod.TOKEN_PARTIAL, qmod.TOKEN_NEAR_ITEM)) \
                    or (len(self.seq) == 2
-                       and self.seq[0].ttype == qmod.TokenType.NEAR_ITEM
-                       and self.seq[1].ttype == qmod.TokenType.PARTIAL):
+                       and self.seq[0].ttype == qmod.TOKEN_NEAR_ITEM
+                       and self.seq[1].ttype == qmod.TOKEN_PARTIAL):
                     return 1
                 return None
             if self.direction == -1:
                 return -1

-            tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.NEAR_ITEM else self.seq
+            tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TOKEN_NEAR_ITEM else self.seq
             if len(tempseq) == 0:
                 return 1
-            if len(tempseq) == 1 and self.seq[0].ttype == qmod.TokenType.HOUSENUMBER:
+            if len(tempseq) == 1 and self.seq[0].ttype == qmod.TOKEN_HOUSENUMBER:
                 return None
-            if len(tempseq) > 1 or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
+            if len(tempseq) > 1 or self.has_types(qmod.TOKEN_POSTCODE, qmod.TOKEN_COUNTRY):
                 return -1
             return 0

@@ -204,7 +205,7 @@ class _TokenSequence:
             new_penalty = 0.0
         else:
             last = self.seq[-1]
-            if btype != qmod.BreakType.PHRASE and last.ttype == ttype:
+            if btype != qmod.BREAK_PHRASE and last.ttype == ttype:
                 # extend the existing range
                 newseq = self.seq[:-1] + [TypedRange(ttype, last.trange.replace_end(end_pos))]
                 new_penalty = 0.0
@@ -239,18 +240,18 @@ class _TokenSequence:
         # housenumbers may not be further than 2 words from the beginning.
         # If there are two words in front, give it a penalty.
         hnrpos = next((i for i, tr in enumerate(self.seq)
-                       if tr.ttype == qmod.TokenType.HOUSENUMBER),
+                       if tr.ttype == qmod.TOKEN_HOUSENUMBER),
                       None)
         if hnrpos is not None:
             if self.direction != -1:
-                priors = sum(1 for t in self.seq[:hnrpos] if t.ttype == qmod.TokenType.PARTIAL)
+                priors = sum(1 for t in self.seq[:hnrpos] if t.ttype == qmod.TOKEN_PARTIAL)
                 if not self._adapt_penalty_from_priors(priors, -1):
                     return False
             if self.direction != 1:
-                priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TokenType.PARTIAL)
+                priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TOKEN_PARTIAL)
                 if not self._adapt_penalty_from_priors(priors, 1):
                     return False
-        if any(t.ttype == qmod.TokenType.NEAR_ITEM for t in self.seq):
+        if any(t.ttype == qmod.TOKEN_NEAR_ITEM for t in self.seq):
             self.penalty += 1.0

         return True
@@ -268,10 +269,9 @@ class _TokenSequence:
         # <address>,<postcode> should give preference to address search
         if base.postcode.start == 0:
             penalty = self.penalty
-            self.direction = -1  # name searches are only possible backwards
         else:
             penalty = self.penalty + 0.1
-            self.direction = 1  # name searches are only possible forwards
+        penalty += 0.1 * max(0, len(base.address) - 1)
         yield dataclasses.replace(base, penalty=penalty)

     def _get_assignments_address_forward(self, base: TokenAssignment,
@@ -281,6 +281,11 @@ class _TokenSequence:
         """
         first = base.address[0]

+        # The postcode must come after the name.
+        if base.postcode and base.postcode < first:
+            log().var_dump('skip forward', (base.postcode, first))
+            return
+
         log().comment('first word = name')
         yield dataclasses.replace(base, penalty=self.penalty,
                                   name=first, address=base.address[1:])
@@ -292,7 +297,7 @@ class _TokenSequence:
         # * the containing phrase is strictly typed
         if (base.housenumber and first.end < base.housenumber.start)\
            or (base.qualifier and base.qualifier > first)\
-           or (query.nodes[first.start].ptype != qmod.PhraseType.NONE):
+           or (query.nodes[first.start].ptype != qmod.PHRASE_ANY):
             return

         penalty = self.penalty
@@ -316,7 +321,12 @@ class _TokenSequence:
         """
         last = base.address[-1]

-        if self.direction == -1 or len(base.address) > 1:
+        # The postcode must come before the name for backward direction.
+        if base.postcode and base.postcode > last:
+            log().var_dump('skip backward', (base.postcode, last))
+            return
+
+        if self.direction == -1 or len(base.address) > 1 or base.postcode:
             log().comment('last word = name')
             yield dataclasses.replace(base, penalty=self.penalty,
                                       name=last, address=base.address[:-1])
@@ -328,7 +338,7 @@ class _TokenSequence:
         # * the containing phrase is strictly typed
         if (base.housenumber and last.start > base.housenumber.end)\
            or (base.qualifier and base.qualifier < last)\
-           or (query.nodes[last.start].ptype != qmod.PhraseType.NONE):
+           or (query.nodes[last.start].ptype != qmod.PHRASE_ANY):
             return

         penalty = self.penalty
@@ -392,7 +402,7 @@ def yield_token_assignments(query: qmod.QueryStruct) -> Iterator[TokenAssignment
         another. It does not include penalties for transitions within a
         type.
     """
-    todo = [_TokenSequence([], direction=0 if query.source[0].ptype == qmod.PhraseType.NONE else 1)]
+    todo = [_TokenSequence([], direction=0 if query.source[0].ptype == qmod.PHRASE_ANY else 1)]

     while todo:
         state = todo.pop()

@@ -173,7 +173,7 @@ class Geometry(types.UserDefinedType):  # type: ignore[type-arg]
     def __init__(self, subtype: str = 'Geometry'):
         self.subtype = subtype

-    def get_col_spec(self) -> str:
+    def get_col_spec(self, **_: Any) -> str:
         return f'GEOMETRY({self.subtype}, 4326)'

     def bind_processor(self, dialect: 'sa.Dialect') -> Callable[[Any], str]:

@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Common json type for different dialects.
@@ -24,6 +24,6 @@ class Json(sa.types.TypeDecorator[Any]):

     def load_dialect_impl(self, dialect: SaDialect) -> sa.types.TypeEngine[Any]:
         if dialect.name == 'postgresql':
-            return JSONB(none_as_null=True)  # type: ignore[no-untyped-call]
+            return JSONB(none_as_null=True)

         return sqlite_json(none_as_null=True)

@@ -144,7 +144,7 @@ class Point(NamedTuple):
         except ValueError as exc:
             raise UsageError('Point parameter needs to be numbers.') from exc

-        if x < -180.0 or x > 180.0 or y < -90.0 or y > 90.0:
+        if not -180 <= x <= 180 or not -90 <= y <= 90.0:
             raise UsageError('Point coordinates invalid.')

         return Point(x, y)

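The rewritten bounds check uses Python's chained comparisons, which read like the mathematical interval condition. The same check in isolation:

    x, y = -200.0, 45.0
    print(not -180 <= x <= 180 or not -90 <= y <= 90.0)   # True - x is out of range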
@@ -25,8 +25,8 @@ def get_label_tag(category: Tuple[str, str], extratags: Optional[Mapping[str, st
     elif rank < 26 and extratags and 'linked_place' in extratags:
         label = extratags['linked_place']
     elif category == ('boundary', 'administrative'):
-        label = ADMIN_LABELS.get((country or '', int(rank/2)))\
-                or ADMIN_LABELS.get(('', int(rank/2)))\
+        label = ADMIN_LABELS.get((country or '', rank // 2))\
+                or ADMIN_LABELS.get(('', rank // 2))\
                 or 'Administrative'
     elif category[1] == 'postal_code':
         label = 'postcode'

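For the non-negative admin ranks used here, `int(rank/2)` and `rank // 2` produce the same value; floor division simply avoids the intermediate float:

    rank = 25
    print(int(rank / 2), rank // 2)   # 12 12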
@@ -249,6 +249,9 @@ def format_base_geocodejson(results: Union[ReverseResults, SearchResults],
                 out.keyval(f"level{line.admin_level}", line.local_name)
             out.end_object().next()

+        if options.get('extratags', False):
+            out.keyval('extra', result.extratags)
+
         out.end_object().next().end_object().next()

         out.key('geometry').raw(result.geometry.get('geojson')

@@ -8,4 +8,4 @@
 Version information for the Nominatim API.
 """

-NOMINATIM_API_VERSION = '4.5.0'
+NOMINATIM_API_VERSION = '5.1.0'

@@ -2,16 +2,15 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Command-line interface to the Nominatim functions for import, update,
 database administration and querying.
 """
-from typing import Optional, Any
+from typing import Optional, List, Mapping
 import importlib
 import logging
-import os
 import sys
 import argparse
 import asyncio
@@ -81,13 +80,14 @@ class CommandlineParser:
         parser.set_defaults(command=cmd)
         cmd.add_args(parser)

-    def run(self, **kwargs: Any) -> int:
+    def run(self, cli_args: Optional[List[str]],
+            environ: Optional[Mapping[str, str]]) -> int:
         """ Parse the command line arguments of the program and execute the
             appropriate subcommand.
         """
         args = NominatimArgs()
         try:
-            self.parser.parse_args(args=kwargs.get('cli_args'), namespace=args)
+            self.parser.parse_args(args=cli_args, namespace=args)
         except SystemExit:
             return 1

@@ -101,23 +101,19 @@ class CommandlineParser:

         args.project_dir = Path(args.project_dir).resolve()

-        if 'cli_args' not in kwargs:
+        if cli_args is None:
             logging.basicConfig(stream=sys.stderr,
                                 format='%(asctime)s: %(message)s',
                                 datefmt='%Y-%m-%d %H:%M:%S',
                                 level=max(4 - args.verbose, 1) * 10)

-        args.config = Configuration(args.project_dir,
-                                    environ=kwargs.get('environ', os.environ))
-        args.config.set_libdirs(osm2pgsql=kwargs['osm2pgsql_path'])
+        args.config = Configuration(args.project_dir, environ=environ)

         log = logging.getLogger()
         log.warning('Using project directory: %s', str(args.project_dir))

         try:
-            ret = args.command.run(args)
-
-            return ret
+            return args.command.run(args)
         except UsageError as exception:
             if log.isEnabledFor(logging.DEBUG):
                 raise  # use Python's exception printing
@@ -233,9 +229,16 @@ def get_set_parser() -> CommandlineParser:
     return parser


-def nominatim(**kwargs: Any) -> int:
+def nominatim(cli_args: Optional[List[str]] = None,
+              environ: Optional[Mapping[str, str]] = None) -> int:
     """\
     Command-line tools for importing, updating, administrating and
     querying the Nominatim database.
+
+    'cli_args' is a list of parameters for the command to run. If not given,
+    sys.argv will be used.
+
+    'environ' is the dictionary of environment variables containing the
+    Nominatim configuration. When None, os.environ is inherited.
     """
-    return get_set_parser().run(**kwargs)
+    return get_set_parser().run(cli_args=cli_args, environ=environ)

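With the explicit keyword signature, embedding the CLI becomes straightforward. A usage sketch (the import path and environment variable follow the usual Nominatim layout but are assumptions here; the subcommand is just an example):

    from nominatim_db.cli import nominatim

    exit_code = nominatim(
        cli_args=['admin', '--check-database'],
        environ={'NOMINATIM_DATABASE_DSN': 'pgsql:dbname=nominatim'})
    print(exit_code)   # 0 on success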
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Provides custom functions over command-line arguments.
@@ -186,7 +186,7 @@ class NominatimArgs:
             from the command line arguments. The resulting dict can be
             further customized and then used in `run_osm2pgsql()`.
         """
-        return dict(osm2pgsql=self.config.OSM2PGSQL_BINARY or self.config.lib_dir.osm2pgsql,
+        return dict(osm2pgsql=self.config.OSM2PGSQL_BINARY,
                     osm2pgsql_cache=self.osm2pgsql_cache or default_cache,
                     osm2pgsql_style=self.config.get_import_style_file(),
                     osm2pgsql_style_path=self.config.lib_dir.lua,

@@ -122,13 +122,16 @@ class SetupAll:

             LOG.warning('Post-process tables')
             with connect(args.config.get_libpq_dsn()) as conn:
+                conn.autocommit = True
                 await database_import.create_search_indices(conn, args.config,
                                                              drop=args.no_updates,
                                                              threads=num_threads)
                 LOG.warning('Create search index for default country names.')
+                conn.autocommit = False
                 country_info.create_country_names(conn, tokenizer,
                                                   args.config.get_str_list('LANGUAGES'))
                 if args.no_updates:
+                    conn.autocommit = True
                     freeze.drop_update_tables(conn)
             tokenizer.finalize_import(args.config)

@@ -183,6 +186,7 @@ class SetupAll:
         from ..tools import database_import, refresh

         with connect(config.get_libpq_dsn()) as conn:
+            conn.autocommit = True
             LOG.warning('Create functions (1st pass)')
             refresh.create_functions(conn, config, False, False)
             LOG.warning('Create tables')

@@ -2,7 +2,7 @@
|
|||||||
#
|
#
|
||||||
# This file is part of Nominatim. (https://nominatim.org)
|
# This file is part of Nominatim. (https://nominatim.org)
|
||||||
#
|
#
|
||||||
# Copyright (C) 2024 by the Nominatim developer community.
|
# Copyright (C) 2025 by the Nominatim developer community.
|
||||||
# For a full list of authors see the git log.
|
# For a full list of authors see the git log.
|
||||||
"""
|
"""
|
||||||
Nominatim configuration accessor.
|
Nominatim configuration accessor.
|
||||||
@@ -73,7 +73,6 @@ class Configuration:
|
|||||||
self.project_dir = None
|
self.project_dir = None
|
||||||
|
|
||||||
class _LibDirs:
|
class _LibDirs:
|
||||||
osm2pgsql: Path
|
|
||||||
sql = paths.SQLLIB_DIR
|
sql = paths.SQLLIB_DIR
|
||||||
lua = paths.LUALIB_DIR
|
lua = paths.LUALIB_DIR
|
||||||
data = paths.DATA_DIR
|
data = paths.DATA_DIR
|
||||||
@@ -102,10 +102,10 @@ def server_version_tuple(conn: Connection) -> Tuple[int, int]:
         Converts correctly for pre-10 and post-10 PostgreSQL versions.
     """
     version = conn.info.server_version
-    if version < 100000:
-        return (int(version / 10000), int((version % 10000) / 100))
-    return (int(version / 10000), version % 10000)
+    major, minor = divmod(version, 10000)
+    if major < 10:
+        minor //= 100
+    return major, minor


 def postgis_version_tuple(conn: Connection) -> Tuple[int, int]:
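Note on the new arithmetic: libpq reports the server version as one integer, `major * 10000 + minor` from PostgreSQL 10 on (150002 for 15.2) and `major * 10000 + minor * 100 + patch` before that (90623 for 9.6.23). A minimal sketch of the decoding, assuming that integer format:

```python
# Decoding conn.info.server_version integers; example values only.
def decode_server_version(version: int) -> tuple:
    major, minor = divmod(version, 10000)  # 150002 -> (15, 2)
    if major < 10:
        minor //= 100                      # 90623 -> (9, 6); patch level dropped
    return (major, minor)

assert decode_server_version(150002) == (15, 2)
assert decode_server_version(90623) == (9, 6)
```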
@@ -50,8 +50,8 @@ class ProgressLogger:
             places_per_sec = self.done_places / done_time
             eta = (self.total_places - self.done_places) / places_per_sec

-            LOG.warning("Done %d in %d @ %.3f per second - %s ETA (seconds): %.2f",
-                        self.done_places, int(done_time),
+            LOG.warning("Done %d in %.0f @ %.3f per second - %s ETA (seconds): %.2f",
+                        self.done_places, done_time,
                         places_per_sec, self.name, eta)

             self.next_info += int(places_per_sec) * self.log_interval
@@ -68,8 +68,8 @@ class ProgressLogger:
         diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
         places_per_sec = self.done_places / diff_seconds

-        LOG.warning("Done %d/%d in %d @ %.3f per second - FINISHED %s\n",
-                    self.done_places, self.total_places, int(diff_seconds),
+        LOG.warning("Done %d/%d in %.0f @ %.3f per second - FINISHED %s\n",
+                    self.done_places, self.total_places, diff_seconds,
                     places_per_sec, self.name)

         return self.done_places
@@ -25,6 +25,8 @@ class ICUTokenAnalysis:

     def __init__(self, norm_rules: str, trans_rules: str,
                  analysis_rules: Mapping[Optional[str], 'TokenAnalyzerRule']):
+        # additional break signs are not relevant during name analysis
+        norm_rules += ";[[:Space:][-:]]+ > ' ';"
         self.normalizer = Transliterator.createFromRules("icu_normalization",
                                                          norm_rules)
         trans_rules += ";[:Space:]+ > ' '"
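Note: the appended normalization rule folds runs of whitespace, hyphens and colons into a single space before analysis. A hedged sketch of the rule in isolation (requires PyICU; the input string is invented for illustration):

```python
from icu import Transliterator

# Same break-sign rule as appended to norm_rules above.
rule = "[[:Space:][-:]]+ > ' ';"
tr = Transliterator.createFromRules("fold-breaks", rule)
assert tr.transliterate("Marie-Curie - Str") == "Marie Curie Str"
```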
@@ -121,10 +121,10 @@ class ICUTokenizer(AbstractTokenizer):
                                 SELECT unnest(nameaddress_vector) as id, count(*)
                                 FROM search_name GROUP BY id)
                 SELECT coalesce(a.id, w.id) as id,
-                       (CASE WHEN w.count is null THEN '{}'::JSONB
+                       (CASE WHEN w.count is null or w.count <= 1 THEN '{}'::JSONB
                         ELSE jsonb_build_object('count', w.count) END
                        ||
-                       CASE WHEN a.count is null THEN '{}'::JSONB
+                       CASE WHEN a.count is null or a.count <= 1 THEN '{}'::JSONB
                         ELSE jsonb_build_object('addr_count', a.count) END) as info
                 FROM word_freq w FULL JOIN addr_freq a ON a.id = w.id;
                 """)
@@ -134,9 +134,10 @@ class ICUTokenizer(AbstractTokenizer):
             drop_tables(conn, 'tmp_word')
             cur.execute("""CREATE TABLE tmp_word AS
                             SELECT word_id, word_token, type, word,
-                                   (CASE WHEN wf.info is null THEN word.info
-                                    ELSE coalesce(word.info, '{}'::jsonb) || wf.info
-                                    END) as info
+                                   coalesce(word.info, '{}'::jsonb)
+                                   - 'count' - 'addr_count' ||
+                                   coalesce(wf.info, '{}'::jsonb)
+                                   as info
                             FROM word LEFT JOIN word_frequencies wf
                                  ON word.word_id = wf.id
                         """)
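Note: the JSONB expression first strips any stale `count`/`addr_count` keys from the existing `word.info` and then merges in the freshly computed frequencies. A Python analogy with hypothetical payloads (the `lookup` key is invented):

```python
word_info = {'lookup': 'foo', 'count': 3}      # existing word.info (may be null)
wf_info = {'count': 1523, 'addr_count': 208}   # new word_frequencies.info (may be null)

merged = {k: v for k, v in (word_info or {}).items()
          if k not in ('count', 'addr_count')}  # ... - 'count' - 'addr_count'
merged.update(wf_info or {})                    # ... || coalesce(wf.info, '{}')
assert merged == {'lookup': 'foo', 'count': 1523, 'addr_count': 208}
```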
@@ -381,76 +382,15 @@ class ICUNameAnalyzer(AbstractAnalyzer):
         return postcode.strip().upper()

     def update_postcodes_from_db(self) -> None:
-        """ Update postcode tokens in the word table from the location_postcode
-            table.
+        """ Postcode update.
+
+            Removes all postcodes from the word table because they are not
+            needed. Postcodes are recognised by pattern.
         """
         assert self.conn is not None
-        analyzer = self.token_analysis.analysis.get('@postcode')

         with self.conn.cursor() as cur:
-            # First get all postcode names currently in the word table.
-            cur.execute("SELECT DISTINCT word FROM word WHERE type = 'P'")
-            word_entries = set((entry[0] for entry in cur))
-
-            # Then compute the required postcode names from the postcode table.
-            needed_entries = set()
-            cur.execute("SELECT country_code, postcode FROM location_postcode")
-            for cc, postcode in cur:
-                info = PlaceInfo({'country_code': cc,
-                                  'class': 'place', 'type': 'postcode',
-                                  'address': {'postcode': postcode}})
-                address = self.sanitizer.process_names(info)[1]
-                for place in address:
-                    if place.kind == 'postcode':
-                        if analyzer is None:
-                            postcode_name = place.name.strip().upper()
-                            variant_base = None
-                        else:
-                            postcode_name = analyzer.get_canonical_id(place)
-                            variant_base = place.get_attr("variant")
-
-                        if variant_base:
-                            needed_entries.add(f'{postcode_name}@{variant_base}')
-                        else:
-                            needed_entries.add(postcode_name)
-                        break
-
-        # Now update the word table.
-        self._delete_unused_postcode_words(word_entries - needed_entries)
-        self._add_missing_postcode_words(needed_entries - word_entries)
-
-    def _delete_unused_postcode_words(self, tokens: Iterable[str]) -> None:
-        assert self.conn is not None
-        if tokens:
-            with self.conn.cursor() as cur:
-                cur.execute("DELETE FROM word WHERE type = 'P' and word = any(%s)",
-                            (list(tokens), ))
-
-    def _add_missing_postcode_words(self, tokens: Iterable[str]) -> None:
-        assert self.conn is not None
-        if not tokens:
-            return
-
-        analyzer = self.token_analysis.analysis.get('@postcode')
-        terms = []
-
-        for postcode_name in tokens:
-            if '@' in postcode_name:
-                term, variant = postcode_name.split('@', 2)
-                term = self._search_normalized(term)
-                if analyzer is None:
-                    variants = [term]
-                else:
-                    variants = analyzer.compute_variants(variant)
-                    if term not in variants:
-                        variants.append(term)
-            else:
-                variants = [self._search_normalized(postcode_name)]
-            terms.append((postcode_name, variants))
-
-        if terms:
-            with self.conn.cursor() as cur:
-                cur.executemany("""SELECT create_postcode_word(%s, %s)""", terms)
+            cur.execute("DELETE FROM word WHERE type = 'P'")

     def update_special_phrases(self, phrases: Iterable[Tuple[str, str, str, str]],
                                should_replace: bool) -> None:
@@ -645,10 +585,14 @@ class ICUNameAnalyzer(AbstractAnalyzer):
             if word_id:
                 result = self._cache.housenumbers.get(word_id, result)
                 if result[0] is None:
-                    variants = analyzer.compute_variants(word_id)
+                    varout = analyzer.compute_variants(word_id)
+                    if isinstance(varout, tuple):
+                        variants = varout[0]
+                    else:
+                        variants = varout
                     if variants:
                         hid = execute_scalar(self.conn, "SELECT create_analyzed_hnr_id(%s, %s)",
-                                             (word_id, list(variants)))
+                                             (word_id, variants))
                         result = hid, variants[0]
                         self._cache.housenumbers[word_id] = result

@@ -693,13 +637,17 @@ class ICUNameAnalyzer(AbstractAnalyzer):

             full, part = self._cache.names.get(token_id, (None, None))
             if full is None:
-                variants = analyzer.compute_variants(word_id)
+                varset = analyzer.compute_variants(word_id)
+                if isinstance(varset, tuple):
+                    variants, lookups = varset
+                else:
+                    variants, lookups = varset, None
                 if not variants:
                     continue

                 with self.conn.cursor() as cur:
-                    cur.execute("SELECT * FROM getorcreate_full_word(%s, %s)",
-                                (token_id, variants))
+                    cur.execute("SELECT * FROM getorcreate_full_word(%s, %s, %s)",
+                                (token_id, variants, lookups))
                     full, part = cast(Tuple[int, List[int]], cur.fetchone())

                 self._cache.names[token_id] = (full, part)
@@ -718,32 +666,9 @@ class ICUNameAnalyzer(AbstractAnalyzer):
         analyzer = self.token_analysis.analysis.get('@postcode')

         if analyzer is None:
-            postcode_name = item.name.strip().upper()
-            variant_base = None
+            return item.name.strip().upper()
         else:
-            postcode_name = analyzer.get_canonical_id(item)
-            variant_base = item.get_attr("variant")
-
-        if variant_base:
-            postcode = f'{postcode_name}@{variant_base}'
-        else:
-            postcode = postcode_name
-
-        if postcode not in self._cache.postcodes:
-            term = self._search_normalized(postcode_name)
-            if not term:
-                return None
-
-            variants = {term}
-            if analyzer is not None and variant_base:
-                variants.update(analyzer.compute_variants(variant_base))
-
-            with self.conn.cursor() as cur:
-                cur.execute("SELECT create_postcode_word(%s, %s)",
-                            (postcode, list(variants)))
-            self._cache.postcodes.add(postcode)
-
-        return postcode_name
+            return analyzer.get_canonical_id(item)


 class _TokenInfo:
@@ -836,5 +761,4 @@ class _TokenCache:
         self.names: Dict[str, Tuple[int, List[int]]] = {}
         self.partials: Dict[str, int] = {}
         self.fulls: Dict[str, List[int]] = {}
-        self.postcodes: Set[str] = set()
         self.housenumbers: Dict[str, Tuple[Optional[int], Optional[str]]] = {}
@@ -7,7 +7,7 @@
 """
 Common data types and protocols for analysers.
 """
-from typing import Mapping, List, Any
+from typing import Mapping, List, Any, Union, Tuple

 from ...typing import Protocol
 from ...data.place_name import PlaceName
@@ -33,7 +33,7 @@ class Analyzer(Protocol):
             for example because the character set in use does not match.
         """

-    def compute_variants(self, canonical_id: str) -> List[str]:
+    def compute_variants(self, canonical_id: str) -> Union[List[str], Tuple[List[str], List[str]]]:
         """ Compute the transliterated spelling variants for the given
             canonical ID.

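Note: with the widened return type, a caller that may see either shape can normalize it the way the `ICUNameAnalyzer` hunks above do. A sketch, where `analyzer` is any object implementing the protocol:

```python
varout = analyzer.compute_variants(canonical_id)
if isinstance(varout, tuple):
    variants, lookups = varout           # (transliterated variants, lookup terms)
else:
    variants, lookups = varout, None     # legacy shape: plain list of variants
```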
@@ -2,20 +2,19 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Generic processor for names that creates abbreviation variants.
 """
-from typing import Mapping, Dict, Any, Iterable, Iterator, Optional, List, cast
+from typing import Mapping, Dict, Any, Iterable, Optional, List, cast, Tuple
 import itertools

-import datrie
-
 from ...errors import UsageError
 from ...data.place_name import PlaceName
 from .config_variants import get_variant_config
 from .generic_mutation import MutationVariantGenerator
+from .simple_trie import SimpleTrie

 # Configuration section
@@ -25,8 +24,7 @@ def configure(rules: Mapping[str, Any], normalizer: Any, _: Any) -> Dict[str, Any]:
     """
     config: Dict[str, Any] = {}

-    config['replacements'], config['chars'] = get_variant_config(rules.get('variants'),
-                                                                 normalizer)
+    config['replacements'], _ = get_variant_config(rules.get('variants'), normalizer)
     config['variant_only'] = rules.get('mode', '') == 'variant-only'

     # parse mutation rules
@@ -68,12 +66,8 @@ class GenericTokenAnalysis:
         self.variant_only = config['variant_only']

         # Set up datrie
-        if config['replacements']:
-            self.replacements = datrie.Trie(config['chars'])
-            for src, repllist in config['replacements']:
-                self.replacements[src] = repllist
-        else:
-            self.replacements = None
+        self.replacements: Optional[SimpleTrie[List[str]]] = \
+            SimpleTrie(config['replacements']) if config['replacements'] else None

         # set up mutation rules
         self.mutations = [MutationVariantGenerator(*cfg) for cfg in config['mutations']]
@@ -84,7 +78,7 @@ class GenericTokenAnalysis:
         """
         return cast(str, self.norm.transliterate(name.name)).strip()

-    def compute_variants(self, norm_name: str) -> List[str]:
+    def compute_variants(self, norm_name: str) -> Tuple[List[str], List[str]]:
         """ Compute the spelling variants for the given normalized name
             and transliterate the result.
         """
@@ -93,18 +87,20 @@ class GenericTokenAnalysis:
         for mutation in self.mutations:
             variants = mutation.generate(variants)

-        return [name for name in self._transliterate_unique_list(norm_name, variants) if name]
+        varset = set(map(str.strip, variants))

-    def _transliterate_unique_list(self, norm_name: str,
-                                   iterable: Iterable[str]) -> Iterator[Optional[str]]:
-        seen = set()
         if self.variant_only:
-            seen.add(norm_name)
+            varset.discard(norm_name)

-        for variant in map(str.strip, iterable):
-            if variant not in seen:
-                seen.add(variant)
-                yield self.to_ascii.transliterate(variant).strip()
+        trans = []
+        norm = []
+
+        for var in varset:
+            t = self.to_ascii.transliterate(var).strip()
+            if t:
+                trans.append(t)
+                norm.append(var)
+
+        return trans, norm

     def _generate_word_variants(self, norm_name: str) -> Iterable[str]:
         baseform = '^ ' + norm_name + ' ^'
@@ -116,10 +112,10 @@ class GenericTokenAnalysis:
         pos = 0
         force_space = False
         while pos < baselen:
-            full, repl = self.replacements.longest_prefix_item(baseform[pos:],
-                                                               (None, None))
-            if full is not None:
-                done = baseform[startpos:pos]
+            frm = pos
+            repl, pos = self.replacements.longest_prefix(baseform, pos)
+            if repl is not None:
+                done = baseform[startpos:frm]
                 partials = [v + done + r
                             for v, r in itertools.product(partials, repl)
                             if not force_space or r.startswith(' ')]
@@ -128,11 +124,10 @@ class GenericTokenAnalysis:
                     # to be helpful. Only use the original term.
                     startpos = 0
                     break
-                startpos = pos + len(full)
-                if full[-1] == ' ':
-                    startpos -= 1
+                if baseform[pos - 1] == ' ':
+                    pos -= 1
                     force_space = True
-                pos = startpos
+                startpos = pos
             else:
                 pos += 1
                 force_space = False
src/nominatim_db/tokenizer/token_analysis/simple_trie.py  (new file, 84 lines)
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2025 by the Nominatim developer community.
+# For a full list of authors see the git log.
+"""
+Simple dict-based implementation of a trie structure.
+"""
+from typing import TypeVar, Generic, Tuple, Optional, List, Dict
+from collections import defaultdict
+
+T = TypeVar('T')
+
+
+class SimpleTrie(Generic[T]):
+    """ A simple read-only trie structure.
+        This structure supports exactly one lookup operation,
+        which is longest-prefix lookup.
+    """
+
+    def __init__(self, data: Optional[List[Tuple[str, T]]] = None) -> None:
+        self._tree: Dict[str, 'SimpleTrie[T]'] = defaultdict(SimpleTrie[T])
+        self._value: Optional[T] = None
+        self._prefix = ''
+
+        if data:
+            for key, value in data:
+                self._add(key, 0, value)
+
+            self._make_compact()
+
+    def _add(self, word: str, pos: int, value: T) -> None:
+        """ (Internal) Add a sub-word to the trie.
+            The word is added from index 'pos'. If the sub-word to add
+            is empty, then the trie saves the given value.
+        """
+        if pos < len(word):
+            self._tree[word[pos]]._add(word, pos + 1, value)
+        else:
+            self._value = value
+
+    def _make_compact(self) -> None:
+        """ (Internal) Compress tree where there is exactly one subtree
+            and no value.
+
+            Compression works recursively starting at the leaf.
+        """
+        for t in self._tree.values():
+            t._make_compact()
+
+        if len(self._tree) == 1 and self._value is None:
+            assert not self._prefix
+            for k, v in self._tree.items():
+                self._prefix = k + v._prefix
+                self._tree = v._tree
+                self._value = v._value
+
+    def longest_prefix(self, word: str, start: int = 0) -> Tuple[Optional[T], int]:
+        """ Return the longest prefix match for the given word starting at
+            the position 'start'.
+
+            The function returns a tuple with the value for the longest match and
+            the position of the word after the match. If no match was found at
+            all, the function returns (None, start).
+        """
+        cur = self
+        pos = start
+        result: Tuple[Optional[T], int] = None, start
+
+        while True:
+            if cur._prefix:
+                if not word.startswith(cur._prefix, pos):
+                    return result
+                pos += len(cur._prefix)
+
+            if cur._value:
+                result = cur._value, pos
+
+            if pos >= len(word) or word[pos] not in cur._tree:
+                return result
+
+            cur = cur._tree[word[pos]]
+            pos += 1
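Note: a small usage sketch for the new `SimpleTrie` (keys and values invented; like the replacement rules in `_generate_word_variants`, terms are padded with spaces):

```python
trie = SimpleTrie([(' st ', [' street ']),
                   (' rd ', [' road '])])

value, endpos = trie.longest_prefix(' st mary', 0)
assert value == [' street '] and endpos == 4    # the match consumed ' st '
assert trie.longest_prefix('xyz') == (None, 0)  # no prefix matched
```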
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Helper functions for executing external programs.
@@ -85,7 +85,7 @@ def _mk_tablespace_options(ttype: str, options: Mapping[str, Any]) -> List[str]:


 def _find_osm2pgsql_cmd(cmdline: Optional[str]) -> str:
-    if cmdline is not None:
+    if cmdline:
         return cmdline

     in_path = shutil.which('osm2pgsql')
@@ -108,8 +108,7 @@ async def add_tiger_data(data_dir: str, config: Configuration, threads: int,

     async with QueryPool(dsn, place_threads, autocommit=True) as pool:
         with tokenizer.name_analyzer() as analyzer:
-            lines = 0
-            for row in tar:
+            for lineno, row in enumerate(tar, 1):
                 try:
                     address = dict(street=row['street'], postcode=row['postcode'])
                     args = ('SRID=4326;' + row['geometry'],
@@ -124,10 +123,8 @@ async def add_tiger_data(data_dir: str, config: Configuration, threads: int,
                             %s::INT, %s::TEXT, %s::JSONB, %s::TEXT)""",
                         args)

-                lines += 1
-                if lines == 1000:
+                if not lineno % 1000:
                     print('.', end='', flush=True)
-                    lines = 0

     print('', flush=True)

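Note: the separate line counter is replaced by `enumerate`. A minimal sketch of the progress-dot pattern (`rows` and `process` are stand-ins):

```python
for lineno, row in enumerate(rows, 1):
    process(row)
    if not lineno % 1000:              # every 1000th row
        print('.', end='', flush=True)
print('', flush=True)
```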
@@ -30,8 +30,8 @@ class PointsCentroid:
         if self.count == 0:
             raise ValueError("No points available for centroid.")

-        return (float(self.sum_x/self.count)/10000000,
-                float(self.sum_y/self.count)/10000000)
+        return (self.sum_x / self.count / 10_000_000,
+                self.sum_y / self.count / 10_000_000)

     def __len__(self) -> int:
         return self.count
@@ -40,8 +40,8 @@ class PointsCentroid:
         if isinstance(other, Collection) and len(other) == 2:
             if all(isinstance(p, (float, int)) for p in other):
                 x, y = other
-                self.sum_x += int(x * 10000000)
-                self.sum_y += int(y * 10000000)
+                self.sum_x += int(x * 10_000_000)
+                self.sum_y += int(y * 10_000_000)
                 self.count += 1
             return self

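Note: coordinates are accumulated as integers scaled by 10_000_000 (roughly OSM's coordinate precision), so summing many points does not pile up float rounding error; the underscored literals only improve readability. A sketch of the round trip:

```python
points = [(8.1234567, 47.5), (8.1234569, 47.5)]
sum_x = sum(int(x * 10_000_000) for x, _ in points)
sum_y = sum(int(y * 10_000_000) for _, y in points)
centroid = (sum_x / len(points) / 10_000_000,
            sum_y / len(points) / 10_000_000)
# centroid ≈ (8.1234568, 47.5)
```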
@@ -55,7 +55,7 @@ def parse_version(version: str) -> NominatimVersion:
     return NominatimVersion(*[int(x) for x in parts[:2] + parts[2].split('-')])


-NOMINATIM_VERSION = parse_version('4.5.0-0')
+NOMINATIM_VERSION = parse_version('5.1.0-0')

 POSTGRESQL_REQUIRED_VERSION = (12, 0)
 POSTGIS_REQUIRED_VERSION = (3, 0)
@@ -3,9 +3,8 @@
 Feature: Searches with postcodes
     Various searches involving postcodes

-    @Fail
     Scenario: US 5+4 ZIP codes are shortened to 5 ZIP codes if not found
-        When sending json search query "36067 1111, us" with address
+        When sending json search query "36067-1111, us" with address
         Then result addresses contain
           | postcode |
           | 36067 |
@@ -67,3 +67,13 @@ Feature: Structured search queries
         Then result addresses contain
           | town |
           | Vaduz |
+
+    #3651
+    Scenario: Structured search with surrounding extra characters
+        When sending xml search query "" with address
+          | street | city | postalcode |
+          | "19 Am schrägen Weg" | "Vaduz" | "9491" |
+        Then result addresses contain
+          | house_number | road |
+          | 19 | Am Schrägen Weg |
+
@@ -170,7 +170,7 @@ Feature: Import of postcodes
           | object | postcode |
           | W93 | 11200 |

-    Scenario: Postcodes are added to the postcode and word table
+    Scenario: Postcodes are added to the postcode
         Given the places
           | osm | class | type | addr+postcode | addr+housenumber | geometry |
           | N34 | place | house | 01982 | 111 |country:de |
@@ -178,7 +178,6 @@ Feature: Import of postcodes
         Then location_postcode contains exactly
           | country | postcode | geometry |
           | de | 01982 | country:de |
-        And there are word tokens for postcodes 01982


     @Fail
@@ -195,7 +194,7 @@ Feature: Import of postcodes
           | E45 2 | gb | 23 | 5 |
           | Y45 | gb | 21 | 5 |

-    Scenario: Postcodes outside all countries are not added to the postcode and word table
+    Scenario: Postcodes outside all countries are not added to the postcode table
         Given the places
           | osm | class | type | addr+postcode | addr+housenumber | addr+place | geometry |
           | N34 | place | house | 01982 | 111 | Null Island | 0 0.00001 |
@@ -205,7 +204,6 @@ Feature: Import of postcodes
         When importing
         Then location_postcode contains exactly
           | country | postcode | geometry |
-        And there are no word tokens for postcodes 01982
         When sending search query "111, 01982 Null Island"
         Then results contain
           | osm | display_name |
@@ -267,3 +267,34 @@ Feature: Rank assignment
           | object | rank_search | rank_address |
           | N23:amenity | 30 | 30 |
           | N23:place | 16 | 16 |
+
+    Scenario: Address rank 25 is only used for addr:place
+        Given the grid
+          | 10 | 33 | 34 | 11 |
+        Given the places
+          | osm | class | type | name |
+          | N10 | place | village | vil |
+          | N11 | place | farm | farm |
+        And the places
+          | osm | class | type | name | geometry |
+          | W1 | highway | residential | RD | 33,11 |
+        And the places
+          | osm | class | type | name | addr+farm | geometry |
+          | W2 | highway | residential | RD2 | farm | 34,11 |
+        And the places
+          | osm | class | type | housenr |
+          | N33 | place | house | 23 |
+        And the places
+          | osm | class | type | housenr | addr+place |
+          | N34 | place | house | 23 | farm |
+        When importing
+        Then placex contains
+          | object | parent_place_id |
+          | N11 | N10 |
+          | N33 | W1 |
+          | N34 | N11 |
+        And place_addressline contains
+          | object | address |
+          | W1 | N10 |
+          | W2 | N10 |
+          | W2 | N11 |
@@ -2,7 +2,7 @@
 Feature: Update of postcode
     Tests for updating of data related to postcodes

-    Scenario: A new postcode appears in the postcode and word table
+    Scenario: A new postcode appears in the postcode table
         Given the places
           | osm | class | type | addr+postcode | addr+housenumber | geometry |
           | N34 | place | house | 01982 | 111 |country:de |
@@ -18,9 +18,8 @@ Feature: Update of postcode
           | country | postcode | geometry |
           | de | 01982 | country:de |
           | ch | 4567 | country:ch |
-        And there are word tokens for postcodes 01982,4567

-    Scenario: When the last postcode is deleted, it is deleted from postcode and word
+    Scenario: When the last postcode is deleted, it is deleted from postcode
         Given the places
           | osm | class | type | addr+postcode | addr+housenumber | geometry |
           | N34 | place | house | 01982 | 111 |country:de |
@@ -31,10 +30,8 @@ Feature: Update of postcode
         Then location_postcode contains exactly
           | country | postcode | geometry |
           | ch | 4567 | country:ch |
-        And there are word tokens for postcodes 4567
-        And there are no word tokens for postcodes 01982

-    Scenario: A postcode is not deleted from postcode and word when it exist in another country
+    Scenario: A postcode is not deleted from postcode when it exist in another country
         Given the places
           | osm | class | type | addr+postcode | addr+housenumber | geometry |
           | N34 | place | house | 01982 | 111 |country:de |
@@ -45,7 +42,6 @@ Feature: Update of postcode
         Then location_postcode contains exactly
           | country | postcode | geometry |
           | fr | 01982 | country:fr |
-        And there are word tokens for postcodes 01982

     Scenario: Updating a postcode is reflected in postcode table
         Given the places
@@ -59,7 +55,6 @@ Feature: Update of postcode
         Then location_postcode contains exactly
           | country | postcode | geometry |
           | de | 20453 | country:de |
-        And there are word tokens for postcodes 20453

     Scenario: When changing from a postcode type, the entry appears in placex
         When importing
@@ -80,7 +75,6 @@ Feature: Update of postcode
         Then location_postcode contains exactly
           | country | postcode | geometry |
           | de | 20453 | country:de |
-        And there are word tokens for postcodes 20453

     Scenario: When changing to a postcode type, the entry disappears from placex
         When importing
@@ -101,7 +95,6 @@ Feature: Update of postcode
         Then location_postcode contains exactly
           | country | postcode | geometry |
           | de | 01982 | country:de |
-        And there are word tokens for postcodes 01982

     Scenario: When a parent is deleted, the postcode gets a new parent
         Given the grid with origin DE
@@ -2,43 +2,45 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 from pathlib import Path
 import sys

-from behave import *
+from behave import *  # noqa

 sys.path.insert(1, str(Path(__file__, '..', '..', '..', 'src').resolve()))

-from steps.geometry_factory import GeometryFactory
-from steps.nominatim_environment import NominatimEnvironment
+from steps.geometry_factory import GeometryFactory  # noqa: E402
+from steps.nominatim_environment import NominatimEnvironment  # noqa: E402

 TEST_BASE_DIR = Path(__file__, '..', '..').resolve()

 userconfig = {
-    'REMOVE_TEMPLATE' : False,
-    'KEEP_TEST_DB' : False,
-    'DB_HOST' : None,
-    'DB_PORT' : None,
-    'DB_USER' : None,
-    'DB_PASS' : None,
-    'TEMPLATE_DB' : 'test_template_nominatim',
-    'TEST_DB' : 'test_nominatim',
-    'API_TEST_DB' : 'test_api_nominatim',
-    'API_TEST_FILE' : TEST_BASE_DIR / 'testdb' / 'apidb-test-data.pbf',
-    'TOKENIZER' : None,                 # Test with a custom tokenizer
-    'STYLE' : 'extratags',
+    'REMOVE_TEMPLATE': False,
+    'KEEP_TEST_DB': False,
+    'DB_HOST': None,
+    'DB_PORT': None,
+    'DB_USER': None,
+    'DB_PASS': None,
+    'TEMPLATE_DB': 'test_template_nominatim',
+    'TEST_DB': 'test_nominatim',
+    'API_TEST_DB': 'test_api_nominatim',
+    'API_TEST_FILE': TEST_BASE_DIR / 'testdb' / 'apidb-test-data.pbf',
+    'TOKENIZER': None,                 # Test with a custom tokenizer
+    'STYLE': 'extratags',
     'API_ENGINE': 'falcon'
 }

-use_step_matcher("re")
+use_step_matcher("re")  # noqa: F405


 def before_all(context):
     # logging setup
     context.config.setup_logging()
     # set up -D options
-    for k,v in userconfig.items():
+    for k, v in userconfig.items():
         context.config.userdata.setdefault(k, v)
     # Nominatim test setup
     context.nominatim = NominatimEnvironment(context.config.userdata)
@@ -46,7 +48,7 @@ def before_all(context):


 def before_scenario(context, scenario):
-    if not 'SQLITE' in context.tags \
+    if 'SQLITE' not in context.tags \
        and context.config.userdata['API_TEST_DB'].startswith('sqlite:'):
         context.scenario.skip("Not usable with Sqlite database.")
     elif 'DB' in context.tags:
@@ -56,6 +58,7 @@ def before_scenario(context, scenario):
     elif 'UNKNOWNDB' in context.tags:
         context.nominatim.setup_unknown_db()

+
 def after_scenario(context, scenario):
     if 'DB' in context.tags:
         context.nominatim.teardown_db(context)
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2023 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Collection of assertion functions used for the steps.
@@ -11,20 +11,10 @@ import json
 import math
 import re

-class Almost:
-    """ Compares a float value with a certain jitter.
-    """
-    def __init__(self, value, offset=0.00001):
-        self.value = value
-        self.offset = offset
-
-    def __eq__(self, other):
-        return abs(other - self.value) < self.offset
-
-OSM_TYPE = {'N' : 'node', 'W' : 'way', 'R' : 'relation',
-            'n' : 'node', 'w' : 'way', 'r' : 'relation',
-            'node' : 'n', 'way' : 'w', 'relation' : 'r'}
+OSM_TYPE = {'N': 'node', 'W': 'way', 'R': 'relation',
+            'n': 'node', 'w': 'way', 'r': 'relation',
+            'node': 'n', 'way': 'w', 'relation': 'r'}


 class OsmType:
@@ -34,11 +24,9 @@ class OsmType:
     def __init__(self, value):
         self.value = value

-
     def __eq__(self, other):
         return other == self.value or other == OSM_TYPE[self.value]

-
     def __str__(self):
         return f"{self.value} or {OSM_TYPE[self.value]}"

@@ -92,7 +80,6 @@ class Bbox:
         return str(self.coord)


-
 def check_for_attributes(obj, attrs, presence='present'):
     """ Check that the object has the given attributes. 'attrs' is a
         string with a comma-separated list of attributes. If 'presence'
@@ -110,4 +97,3 @@ def check_for_attributes(obj, attrs, presence='present'):
     else:
         assert attr in obj, \
             f"No attribute '{attr}'. Full response:\n{_dump_json()}"
-
@@ -2,261 +2,261 @@
|
|||||||
#
|
#
|
||||||
# This file is part of Nominatim. (https://nominatim.org)
|
# This file is part of Nominatim. (https://nominatim.org)
|
||||||
#
|
#
|
||||||
# Copyright (C) 2022 by the Nominatim developer community.
|
# Copyright (C) 2025 by the Nominatim developer community.
|
||||||
# For a full list of authors see the git log.
|
# For a full list of authors see the git log.
|
||||||
"""
|
"""
|
||||||
Collection of aliases for various world coordinates.
|
Collection of aliases for various world coordinates.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
ALIASES = {
|
ALIASES = {
|
||||||
# Country aliases
|
# Country aliases
|
||||||
'AD': (1.58972, 42.54241),
|
'AD': (1.58972, 42.54241),
|
||||||
'AE': (54.61589, 24.82431),
|
'AE': (54.61589, 24.82431),
|
||||||
'AF': (65.90264, 34.84708),
|
'AF': (65.90264, 34.84708),
|
||||||
'AG': (-61.72430, 17.069),
|
'AG': (-61.72430, 17.069),
|
||||||
'AI': (-63.10571, 18.25461),
|
'AI': (-63.10571, 18.25461),
|
||||||
'AL': (19.84941, 40.21232),
|
'AL': (19.84941, 40.21232),
|
||||||
'AM': (44.64229, 40.37821),
|
'AM': (44.64229, 40.37821),
|
||||||
'AO': (16.21924, -12.77014),
|
'AO': (16.21924, -12.77014),
|
||||||
'AQ': (44.99999, -75.65695),
|
'AQ': (44.99999, -75.65695),
|
||||||
'AR': (-61.10759, -34.37615),
|
'AR': (-61.10759, -34.37615),
|
||||||
'AS': (-170.68470, -14.29307),
|
'AS': (-170.68470, -14.29307),
|
||||||
'AT': (14.25747, 47.36542),
|
'AT': (14.25747, 47.36542),
|
||||||
'AU': (138.23155, -23.72068),
|
'AU': (138.23155, -23.72068),
|
||||||
'AW': (-69.98255, 12.555),
|
'AW': (-69.98255, 12.555),
|
||||||
'AX': (19.91839, 59.81682),
|
'AX': (19.91839, 59.81682),
|
||||||
'AZ': (48.38555, 40.61639),
|
'AZ': (48.38555, 40.61639),
|
||||||
'BA': (17.18514, 44.25582),
|
'BA': (17.18514, 44.25582),
|
||||||
'BB': (-59.53342, 13.19),
|
'BB': (-59.53342, 13.19),
|
||||||
'BD': (89.75989, 24.34205),
|
'BD': (89.75989, 24.34205),
|
||||||
'BE': (4.90078, 50.34682),
|
'BE': (4.90078, 50.34682),
|
||||||
'BF': (-0.56743, 11.90471),
|
'BF': (-0.56743, 11.90471),
|
||||||
'BG': (24.80616, 43.09859),
|
'BG': (24.80616, 43.09859),
|
||||||
'BH': (50.52032, 25.94685),
|
'BH': (50.52032, 25.94685),
|
||||||
'BI': (29.54561, -2.99057),
|
'BI': (29.54561, -2.99057),
|
||||||
'BJ': (2.70062, 10.02792),
|
'BJ': (2.70062, 10.02792),
|
||||||
'BL': (-62.79349, 17.907),
|
'BL': (-62.79349, 17.907),
|
||||||
'BM': (-64.77406, 32.30199),
|
'BM': (-64.77406, 32.30199),
|
||||||
'BN': (114.52196, 4.28638),
|
'BN': (114.52196, 4.28638),
|
||||||
'BO': (-62.02473, -17.77723),
|
'BO': (-62.02473, -17.77723),
|
||||||
'BQ': (-63.14322, 17.566),
|
'BQ': (-63.14322, 17.566),
|
||||||
'BR': (-45.77065, -9.58685),
|
'BR': (-45.77065, -9.58685),
|
||||||
'BS': (-77.60916, 23.8745),
|
'BS': (-77.60916, 23.8745),
|
||||||
'BT': (90.01350, 27.28137),
|
'BT': (90.01350, 27.28137),
|
||||||
'BV': (3.35744, -54.4215),
|
'BV': (3.35744, -54.4215),
|
||||||
'BW': (23.51505, -23.48391),
|
'BW': (23.51505, -23.48391),
|
||||||
'BY': (26.77259, 53.15885),
|
'BY': (26.77259, 53.15885),
|
||||||
'BZ': (-88.63489, 16.33951),
|
'BZ': (-88.63489, 16.33951),
|
||||||
'CA': (-107.74817, 67.12612),
|
'CA': (-107.74817, 67.12612),
|
||||||
'CC': (96.84420, -12.01734),
|
'CC': (96.84420, -12.01734),
|
||||||
'CD': (24.09544, -1.67713),
|
'CD': (24.09544, -1.67713),
|
||||||
'CF': (22.58701, 5.98438),
|
'CF': (22.58701, 5.98438),
|
||||||
'CG': (15.78875, 0.40388),
|
'CG': (15.78875, 0.40388),
|
||||||
'CH': (7.65705, 46.57446),
|
'CH': (7.65705, 46.57446),
|
||||||
'CI': (-6.31190, 6.62783),
|
'CI': (-6.31190, 6.62783),
|
||||||
'CK': (-159.77835, -21.23349),
|
'CK': (-159.77835, -21.23349),
|
||||||
'CL': (-70.41790, -53.77189),
|
'CL': (-70.41790, -53.77189),
|
||||||
'CM': (13.26022, 5.94519),
|
'CM': (13.26022, 5.94519),
|
||||||
'CN': (96.44285, 38.04260),
|
'CN': (96.44285, 38.04260),
|
||||||
'CO': (-72.52951, 2.45174),
|
'CO': (-72.52951, 2.45174),
|
||||||
'CR': (-83.83314, 9.93514),
|
'CR': (-83.83314, 9.93514),
|
||||||
'CU': (-80.81673, 21.88852),
|
'CU': (-80.81673, 21.88852),
|
||||||
'CV': (-24.50810, 14.929),
|
'CV': (-24.50810, 14.929),
|
||||||
'CW': (-68.96409, 12.1845),
|
'CW': (-68.96409, 12.1845),
|
||||||
'CX': (105.62411, -10.48417),
|
'CX': (105.62411, -10.48417),
|
||||||
'CY': (32.95922, 35.37010),
|
'CY': (32.95922, 35.37010),
|
||||||
'CZ': (16.32098, 49.50692),
|
'CZ': (16.32098, 49.50692),
|
||||||
'DE': (9.30716, 50.21289),
|
'DE': (9.30716, 50.21289),
|
||||||
'DJ': (42.96904, 11.41542),
|
'DJ': (42.96904, 11.41542),
|
||||||
'DK': (9.18490, 55.98916),
|
'DK': (9.18490, 55.98916),
|
||||||
'DM': (-61.00358, 15.65470),
|
'DM': (-61.00358, 15.65470),
|
||||||
'DO': (-69.62855, 18.58841),
|
'DO': (-69.62855, 18.58841),
|
||||||
'DZ': (4.24749, 25.79721),
|
'DZ': (4.24749, 25.79721),
|
||||||
'EC': (-77.45831, -0.98284),
|
'EC': (-77.45831, -0.98284),
|
||||||
'EE': (23.94288, 58.43952),
|
'EE': (23.94288, 58.43952),
|
||||||
'EG': (28.95293, 28.17718),
|
'EG': (28.95293, 28.17718),
|
||||||
'EH': (-13.69031, 25.01241),
|
'EH': (-13.69031, 25.01241),
|
||||||
'ER': (39.01223, 14.96033),
|
'ER': (39.01223, 14.96033),
|
||||||
'ES': (-2.59110, 38.79354),
|
'ES': (-2.59110, 38.79354),
|
||||||
'ET': (38.61697, 7.71399),
|
'ET': (38.61697, 7.71399),
|
||||||
'FI': (26.89798, 63.56194),
|
'FI': (26.89798, 63.56194),
|
||||||
'FJ': (177.91853, -17.74237),
|
'FJ': (177.91853, -17.74237),
|
||||||
'FK': (-58.99044, -51.34509),
|
'FK': (-58.99044, -51.34509),
|
||||||
'FM': (151.95358, 8.5045),
|
'FM': (151.95358, 8.5045),
|
||||||
'FO': (-6.60483, 62.10000),
|
'FO': (-6.60483, 62.10000),
|
||||||
'FR': (0.28410, 47.51045),
|
'FR': (0.28410, 47.51045),
|
||||||
'GA': (10.81070, -0.07429),
|
'GA': (10.81070, -0.07429),
|
||||||
'GB': (-0.92823, 52.01618),
|
'GB': (-0.92823, 52.01618),
|
||||||
'GD': (-61.64524, 12.191),
|
'GD': (-61.64524, 12.191),
|
||||||
'GE': (44.16664, 42.00385),
|
'GE': (44.16664, 42.00385),
|
||||||
'GF': (-53.46524, 3.56188),
|
'GF': (-53.46524, 3.56188),
|
||||||
'GG': (-2.50580, 49.58543),
|
'GG': (-2.50580, 49.58543),
|
||||||
'GH': (-0.46348, 7.16051),
|
'GH': (-0.46348, 7.16051),
|
||||||
'GI': (-5.32053, 36.11066),
|
'GI': (-5.32053, 36.11066),
|
||||||
'GL': (-33.85511, 74.66355),
|
'GL': (-33.85511, 74.66355),
|
||||||
'GM': (-16.40960, 13.25),
|
'GM': (-16.40960, 13.25),
|
||||||
'GN': (-13.83940, 10.96291),
|
'GN': (-13.83940, 10.96291),
|
||||||
'GP': (-61.68712, 16.23049),
|
'GP': (-61.68712, 16.23049),
|
||||||
'GQ': (10.23973, 1.43119),
|
'GQ': (10.23973, 1.43119),
|
||||||
'GR': (23.17850, 39.06206),
|
'GR': (23.17850, 39.06206),
|
||||||
'GS': (-36.49430, -54.43067),
|
'GS': (-36.49430, -54.43067),
|
||||||
'GT': (-90.74368, 15.20428),
|
'GT': (-90.74368, 15.20428),
|
||||||
'GU': (144.73362, 13.44413),
|
'GU': (144.73362, 13.44413),
|
||||||
'GW': (-14.83525, 11.92486),
|
'GW': (-14.83525, 11.92486),
|
||||||
'GY': (-58.45167, 5.73698),
|
'GY': (-58.45167, 5.73698),
|
||||||
'HK': (114.18577, 22.34923),
|
'HK': (114.18577, 22.34923),
|
||||||
'HM': (73.68230, -53.22105),
|
'HM': (73.68230, -53.22105),
|
||||||
'HN': (-86.95414, 15.23820),
|
'HN': (-86.95414, 15.23820),
|
||||||
'HR': (17.49966, 45.52689),
|
'HR': (17.49966, 45.52689),
|
||||||
'HT': (-73.51925, 18.32492),
|
'HT': (-73.51925, 18.32492),
|
||||||
'HU': (20.35362, 47.51721),
|
'HU': (20.35362, 47.51721),
|
||||||
'ID': (123.34505, -0.83791),
|
'ID': (123.34505, -0.83791),
|
||||||
'IE': (-9.00520, 52.87725),
|
'IE': (-9.00520, 52.87725),
|
||||||
'IL': (35.46314, 32.86165),
|
'IL': (35.46314, 32.86165),
|
||||||
'IM': (-4.86740, 54.023),
|
'IM': (-4.86740, 54.023),
|
||||||
'IN': (88.67620, 27.86155),
|
'IN': (88.67620, 27.86155),
|
||||||
'IO': (71.42743, -6.14349),
|
'IO': (71.42743, -6.14349),
|
||||||
'IQ': (42.58109, 34.26103),
|
'IQ': (42.58109, 34.26103),
|
||||||
'IR': (56.09355, 30.46751),
|
'IR': (56.09355, 30.46751),
|
||||||
'IS': (-17.51785, 64.71687),
|
'IS': (-17.51785, 64.71687),
|
||||||
'IT': (10.42639, 44.87904),
|
'IT': (10.42639, 44.87904),
|
||||||
'JE': (-2.19261, 49.12458),
|
'JE': (-2.19261, 49.12458),
|
||||||
'JM': (-76.84020, 18.3935),
|
'JM': (-76.84020, 18.3935),
|
||||||
'JO': (36.55552, 30.75741),
|
'JO': (36.55552, 30.75741),
|
||||||
'JP': (138.72531, 35.92099),
|
'JP': (138.72531, 35.92099),
|
||||||
'KE': (36.90602, 1.08512),
|
'KE': (36.90602, 1.08512),
|
||||||
'KG': (76.15571, 41.66497),
|
'KG': (76.15571, 41.66497),
|
||||||
'KH': (104.31901, 12.95555),
|
'KH': (104.31901, 12.95555),
|
||||||
'KI': (173.63353, 0.139),
|
'KI': (173.63353, 0.139),
|
||||||
'KM': (44.31474, -12.241),
|
'KM': (44.31474, -12.241),
|
||||||
'KN': (-62.69379, 17.2555),
|
'KN': (-62.69379, 17.2555),
|
||||||
'KP': (126.65575, 39.64575),
|
'KP': (126.65575, 39.64575),
|
||||||
'KR': (127.27740, 36.41388),
|
'KR': (127.27740, 36.41388),
|
||||||
'KW': (47.30684, 29.69180),
|
'KW': (47.30684, 29.69180),
|
||||||
'KY': (-81.07455, 19.29949),
|
'KY': (-81.07455, 19.29949),
|
||||||
'KZ': (72.00811, 49.88855),
|
'KZ': (72.00811, 49.88855),
|
||||||
'LA': (102.44391, 19.81609),
|
'LA': (102.44391, 19.81609),
|
||||||
'LB': (35.48464, 33.41766),
|
'LB': (35.48464, 33.41766),
|
||||||
'LC': (-60.97894, 13.891),
|
'LC': (-60.97894, 13.891),
|
||||||
'LI': (9.54693, 47.15934),
|
'LI': (9.54693, 47.15934),
|
||||||
'LK': (80.38520, 8.41649),
|
'LK': (80.38520, 8.41649),
|
||||||
'LR': (-11.16960, 4.04122),
|
'LR': (-11.16960, 4.04122),
|
||||||
'LS': (28.66984, -29.94538),
|
'LS': (28.66984, -29.94538),
|
||||||
'LT': (24.51735, 55.49293),
|
'LT': (24.51735, 55.49293),
|
||||||
'LU': (6.08649, 49.81533),
|
'LU': (6.08649, 49.81533),
|
||||||
'LV': (23.51033, 56.67144),
|
'LV': (23.51033, 56.67144),
|
||||||
'LY': (15.36841, 28.12177),
|
'LY': (15.36841, 28.12177),
|
||||||
'MA': (-4.03061, 33.21696),
|
'MA': (-4.03061, 33.21696),
|
||||||
'MC': (7.47743, 43.62917),
|
'MC': (7.47743, 43.62917),
|
||||||
'MD': (29.61725, 46.66517),
|
'MD': (29.61725, 46.66517),
|
||||||
'ME': (19.72291, 43.02441),
|
'ME': (19.72291, 43.02441),
|
||||||
'MF': (-63.06666, 18.08102),
|
'MF': (-63.06666, 18.08102),
|
||||||
'MG': (45.86378, -20.50245),
|
'MG': (45.86378, -20.50245),
|
||||||
'MH': (171.94982, 5.983),
|
'MH': (171.94982, 5.983),
|
||||||
'MK': (21.42108, 41.08980),
|
'MK': (21.42108, 41.08980),
|
||||||
'ML': (-1.93310, 16.46993),
|
     'ML': (-1.93310, 16.46993),
     'MM': (95.54624, 21.09620),
     'MN': (99.81138, 48.18615),
     'MO': (113.56441, 22.16209),
     'MP': (145.21345, 14.14902),
     'MQ': (-60.81128, 14.43706),
     'MR': (-9.42324, 22.59251),
     'MS': (-62.19455, 16.745),
     'MT': (14.38363, 35.94467),
     'MU': (57.55121, -20.41),
     'MV': (73.39292, 4.19375),
     'MW': (33.95722, -12.28218),
     'MX': (-105.89221, 25.86826),
     'MY': (112.71154, 2.10098),
     'MZ': (37.58689, -13.72682),
     'NA': (16.68569, -21.46572),
     'NC': (164.95322, -20.38889),
     'NE': (10.06041, 19.08273),
     'NF': (167.95718, -29.0645),
     'NG': (10.17781, 10.17804),
     'NI': (-85.87974, 13.21715),
     'NL': (-68.57062, 12.041),
     'NO': (23.11556, 70.09934),
     'NP': (83.36259, 28.13107),
     'NR': (166.93479, -0.5275),
     'NU': (-169.84873, -19.05305),
     'NZ': (167.97209, -45.13056),
     'OM': (56.86055, 20.47413),
     'PA': (-79.40160, 8.80656),
     'PE': (-78.66540, -7.54711),
     'PF': (-145.05719, -16.70862),
     'PG': (146.64600, -7.37427),
     'PH': (121.48359, 15.09965),
     'PK': (72.11347, 31.14629),
     'PL': (17.88136, 52.77182),
     'PM': (-56.19515, 46.78324),
     'PN': (-130.10642, -25.06955),
     'PR': (-65.88755, 18.37169),
     'PS': (35.39801, 32.24773),
     'PT': (-8.45743, 40.11154),
     'PW': (134.49645, 7.3245),
     'PY': (-59.51787, -22.41281),
     'QA': (51.49903, 24.99816),
     'RE': (55.77345, -21.36388),
     'RO': (26.37632, 45.36120),
     'RS': (20.40371, 44.56413),
     'RU': (116.44060, 59.06780),
     'RW': (29.57882, -1.62404),
     'SA': (47.73169, 22.43790),
     'SB': (164.63894, -10.23606),
     'SC': (46.36566, -9.454),
     'SD': (28.14720, 14.56423),
     'SE': (15.68667, 60.35568),
     'SG': (103.84187, 1.304),
     'SH': (-12.28155, -37.11546),
     'SI': (14.04738, 46.39085),
     'SJ': (15.27552, 79.23365),
     'SK': (20.41603, 48.86970),
     'SL': (-11.47773, 8.78156),
     'SM': (12.46062, 43.94279),
     'SN': (-15.37111, 14.99477),
     'SO': (46.93383, 9.34094),
     'SR': (-55.42864, 4.56985),
     'SS': (28.13573, 8.50933),
     'ST': (6.61025, 0.2215),
     'SV': (-89.36665, 13.43072),
     'SX': (-63.15393, 17.9345),
     'SY': (38.15513, 35.34221),
     'SZ': (31.78263, -26.14244),
     'TC': (-71.32554, 21.35),
     'TD': (17.42092, 13.46223),
     'TF': (137.5, -67.5),
     'TG': (1.06983, 7.87677),
     'TH': (102.00877, 16.42310),
     'TJ': (71.91349, 39.01527),
     'TK': (-171.82603, -9.20990),
     'TL': (126.22520, -8.72636),
     'TM': (57.71603, 39.92534),
     'TN': (9.04958, 34.84199),
     'TO': (-176.99320, -23.11104),
     'TR': (32.82002, 39.86350),
     'TT': (-60.70793, 11.1385),
     'TV': (178.77499, -9.41685),
     'TW': (120.30074, 23.17002),
     'TZ': (33.53892, -5.01840),
     'UA': (33.44335, 49.30619),
     'UG': (32.96523, 2.08584),
     'UM': (-169.50993, 16.74605),
     'US': (-116.39535, 40.71379),
     'UY': (-56.46505, -33.62658),
     'UZ': (61.35529, 42.96107),
     'VA': (12.33197, 42.04931),
     'VC': (-61.09905, 13.316),
     'VE': (-64.88323, 7.69849),
     'VG': (-64.62479, 18.419),
     'VI': (-64.88950, 18.32263),
     'VN': (104.20179, 10.27644),
     'VU': (167.31919, -15.88687),
     'WF': (-176.20781, -13.28535),
     'WS': (-172.10966, -13.85093),
     'YE': (45.94562, 16.16338),
     'YT': (44.93774, -12.60882),
     'ZA': (23.19488, -30.43276),
     'ZM': (26.38618, -14.39966),
     'ZW': (30.12419, -19.86907)
 }
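
The table above keeps one (longitude, latitude) centroid per ISO 3166-1 alpha-2 country code; note the x-before-y ordering, which matches the WKT convention used by the BDD geometry helpers below. A minimal sketch of how such a table can be turned into a WKT point (the `COUNTRY_CENTROIDS` name and helper are illustrative assumptions, not the project's API):

```python
# Illustrative only: a dict shaped like the table above, trimmed to two entries.
COUNTRY_CENTROIDS = {
    'ML': (-1.93310, 16.46993),
    'ZW': (30.12419, -19.86907),
}


def centroid_wkt(country_code):
    """Return a WKT point for the rough centre of the given country."""
    lon, lat = COUNTRY_CENTROIDS[country_code.upper()]  # (x, y) = (lon, lat)
    return f"POINT({lon} {lat})"


assert centroid_wkt('zw') == "POINT(30.12419 -19.86907)"
```
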
@@ -2,13 +2,11 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2022 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
-from pathlib import Path
-import os
 
 from steps.geometry_alias import ALIASES


 class GeometryFactory:
     """ Provides functions to create geometries from coordinates and data grids.
     """
@@ -47,7 +45,6 @@ class GeometryFactory:

         return "ST_SetSRID('{}'::geometry, 4326)".format(out)

-
     def mk_wkt_point(self, point):
         """ Parse a point description.
             The point may either consist of 'x y' coordinates or a number
@@ -65,7 +62,6 @@ class GeometryFactory:
         assert pt is not None, "Scenario error: Point '{}' not found in grid".format(geom)
         return "{} {}".format(*pt)

-
     def mk_wkt_points(self, geom):
         """ Parse a list of points.
             The list must be a comma-separated list of points. Points
@@ -73,7 +69,6 @@ class GeometryFactory:
         """
         return ','.join([self.mk_wkt_point(x) for x in geom.split(',')])

-
     def set_grid(self, lines, grid_step, origin=(0.0, 0.0)):
         """ Replace the grid with one from the given lines.
         """
@@ -87,7 +82,6 @@ class GeometryFactory:
                 x += grid_step
             y += grid_step

-
     def grid_node(self, nodeid):
         """ Get the coordinates for the given grid node.
         """
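
For context on the methods touched above: `set_grid()` maps small integer node ids onto a regular coordinate grid so that scenarios can sketch geometries symbolically, and `mk_wkt_point()` resolves either literal 'x y' pairs or such grid numbers. A minimal sketch of the grid construction, assuming the same line/step semantics (a simplification, not the class itself):

```python
def build_grid(lines, grid_step, origin=(0.0, 0.0)):
    """Place numbered nodes on a regular grid, one text line per grid row."""
    grid = {}
    y = origin[1]
    for line in lines:
        x = origin[0]
        for pt_id in line.split():
            if pt_id.isdigit():
                grid[int(pt_id)] = (x, y)
            x += grid_step
        y += grid_step
    return grid


# Two rows of two nodes each; node 4 ends up one step right and one step down.
grid = build_grid(["1 2", "3 4"], 0.1)
assert grid[4] == (0.1, 0.1)
```
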
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2023 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Classes wrapping HTTP responses from the Nominatim API.
@@ -11,7 +11,7 @@ import re
 import json
 import xml.etree.ElementTree as ET

-from check_functions import Almost, OsmType, Field, check_for_attributes
+from check_functions import OsmType, Field, check_for_attributes


 class GenericResponse:
@@ -45,7 +45,6 @@ class GenericResponse:
         else:
             self.result = [self.result]

-
     def _parse_geojson(self):
         self._parse_json()
         if self.result:
@@ -76,7 +75,6 @@ class GenericResponse:
                 new['__' + k] = v
             self.result.append(new)

-
     def _parse_geocodejson(self):
         self._parse_geojson()
         if self.result:
@@ -87,7 +85,6 @@ class GenericResponse:
                 inner = r.pop('geocoding')
                 r.update(inner)

-
     def assert_address_field(self, idx, field, value):
         """ Check that result rows`idx` has a field `field` with value `value`
             in its address. If idx is None, then all results are checked.
@@ -103,7 +100,6 @@ class GenericResponse:
             address = self.result[idx]['address']
             self.check_row_field(idx, field, value, base=address)

-
     def match_row(self, row, context=None, field=None):
         """ Match the result fields against the given behave table row.
         """
@@ -139,7 +135,6 @@ class GenericResponse:
                 else:
                     self.check_row_field(i, name, Field(value), base=subdict)

-
     def check_row(self, idx, check, msg):
         """ Assert for the condition 'check' and print 'msg' on fail together
             with the contents of the failing result.
@@ -154,7 +149,6 @@ class GenericResponse:

         assert check, _RowError(self.result[idx])

-
     def check_row_field(self, idx, field, expected, base=None):
         """ Check field 'field' of result 'idx' for the expected value
             and print a meaningful error if the condition fails.
@@ -172,7 +166,6 @@ class GenericResponse:
             f"\nBad value for field '{field}'. Expected: {expected}, got: {value}")


-
 class SearchResponse(GenericResponse):
     """ Specialised class for search and lookup responses.
         Transforms the xml response in a format similar to json.
@@ -240,7 +233,8 @@ class ReverseResponse(GenericResponse):
                 assert 'namedetails' not in self.result[0], "More than one namedetails in result"
                 self.result[0]['namedetails'] = {}
                 for tag in child:
-                    assert len(tag) == 0, f"Namedetails element '{tag.attrib['desc']}' has subelements"
+                    assert len(tag) == 0, \
+                        f"Namedetails element '{tag.attrib['desc']}' has subelements"
                     self.result[0]['namedetails'][tag.attrib['desc']] = tag.text
             elif child.tag == 'geokml':
                 assert 'geokml' not in self.result[0], "More than one geokml in result"
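
The reflowed assert above belongs to the XML branch of the reverse-response parser: every child of a `namedetails` element must be a leaf, and its `desc` attribute becomes the dictionary key. A self-contained sketch of that parsing step (the input string is invented for illustration):

```python
import xml.etree.ElementTree as ET

# A reverse response fragment as the parser would see it.
child = ET.fromstring('<namedetails><name desc="name">Berlin</name></namedetails>')

namedetails = {}
for tag in child:
    assert len(tag) == 0, \
        f"Namedetails element '{tag.attrib['desc']}' has subelements"
    namedetails[tag.attrib['desc']] = tag.text

assert namedetails == {'name': 'Berlin'}
```
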
@@ -2,10 +2,9 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 from pathlib import Path
-import importlib
 import tempfile

 import psycopg
@@ -13,10 +12,9 @@ from psycopg import sql as pysql

 from nominatim_db import cli
 from nominatim_db.config import Configuration
-from nominatim_db.db.connection import Connection, register_hstore, execute_scalar
-from nominatim_db.tools import refresh
+from nominatim_db.db.connection import register_hstore, execute_scalar
 from nominatim_db.tokenizer import factory as tokenizer_factory
-from steps.utils import run_script

 class NominatimEnvironment:
     """ Collects all functions for the execution of Nominatim functions.
@@ -62,7 +60,6 @@ class NominatimEnvironment:
             dbargs['password'] = self.db_pass
         return psycopg.connect(**dbargs)

-
     def write_nominatim_config(self, dbname):
         """ Set up a custom test configuration that connects to the given
             database. This sets up the environment variables so that they can
@@ -101,7 +98,6 @@ class NominatimEnvironment:

         self.website_dir = tempfile.TemporaryDirectory()

-
     def get_test_config(self):
         cfg = Configuration(Path(self.website_dir.name), environ=self.test_env)
         return cfg
@@ -122,14 +118,13 @@ class NominatimEnvironment:

         return dsn

-
     def db_drop_database(self, name):
         """ Drop the database with the given name.
         """
         with self.connect_database('postgres') as conn:
             conn.autocommit = True
             conn.execute(pysql.SQL('DROP DATABASE IF EXISTS')
                          + pysql.Identifier(name))

     def setup_template_db(self):
         """ Setup a template database that already contains common test data.
@@ -153,13 +148,12 @@ class NominatimEnvironment:
                                '--osm2pgsql-cache', '1',
                                '--ignore-errors',
                                '--offline', '--index-noanalyse')
-        except:
+        except:  # noqa: E722
            self.db_drop_database(self.template_db)
            raise

        self.run_nominatim('refresh', '--functions')

-
     def setup_api_db(self):
         """ Setup a test against the API test database.
         """
@@ -184,13 +178,12 @@ class NominatimEnvironment:

                 csv_path = str(testdata / 'full_en_phrases_test.csv')
                 self.run_nominatim('special-phrases', '--import-from-csv', csv_path)
-            except:
+            except:  # noqa: E722
                 self.db_drop_database(self.api_test_db)
                 raise

             tokenizer_factory.get_tokenizer_for_db(self.get_test_config())

-
     def setup_unknown_db(self):
         """ Setup a test against a non-existing database.
         """
@@ -213,7 +206,7 @@ class NominatimEnvironment:
         with self.connect_database(self.template_db) as conn:
             conn.autocommit = True
             conn.execute(pysql.SQL('DROP DATABASE IF EXISTS')
                          + pysql.Identifier(self.test_db))
             conn.execute(pysql.SQL('CREATE DATABASE {} TEMPLATE = {}').format(
                 pysql.Identifier(self.test_db),
                 pysql.Identifier(self.template_db)))
@@ -250,7 +243,6 @@ class NominatimEnvironment:

         return False

-
     def reindex_placex(self, db):
         """ Run the indexing step until all data in the placex has
             been processed. Indexing during updates can produce more data
@@ -259,18 +251,15 @@ class NominatimEnvironment:
         """
         self.run_nominatim('index')

-
     def run_nominatim(self, *cmdline):
         """ Run the nominatim command-line tool via the library.
         """
         if self.website_dir is not None:
             cmdline = list(cmdline) + ['--project-dir', self.website_dir.name]

-        cli.nominatim(osm2pgsql_path=None,
-                      cli_args=cmdline,
+        cli.nominatim(cli_args=cmdline,
                       environ=self.test_env)

-
     def copy_from_place(self, db):
         """ Copy data from place to the placex and location_property_osmline
             tables invoking the appropriate triggers.
@@ -293,7 +282,6 @@ class NominatimEnvironment:
                               and osm_type='W'
                               and ST_GeometryType(geometry) = 'ST_LineString'""")

-
     def create_api_request_func_starlette(self):
         import nominatim_api.server.starlette.server
         from asgi_lifespan import LifespanManager
@@ -311,7 +299,6 @@ class NominatimEnvironment:

         return _request

-
     def create_api_request_func_falcon(self):
         import nominatim_api.server.falcon.server
         import falcon.testing
@@ -326,6 +313,3 @@ class NominatimEnvironment:
             return response.text, response.status_code

         return _request
-
-
-
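
The `# noqa: E722` markers above silence flake8's bare-except warning rather than narrowing the clause: during template/API database setup the half-built database must be dropped whatever interrupts the import (including `KeyboardInterrupt`, which `except Exception` would miss), and the exception is re-raised unchanged. A minimal sketch of the pattern, with hypothetical callables standing in for the Nominatim calls:

```python
def setup_db(create, populate, drop, name):
    """Create and populate a database, dropping it again on any failure."""
    create(name)
    try:
        populate(name)
    except:  # noqa: E722 -- deliberately bare: clean up even on KeyboardInterrupt
        drop(name)
        raise


made, dropped = [], []
try:
    setup_db(made.append, lambda n: 1 / 0, dropped.append, 'test_template')
except ZeroDivisionError:
    pass  # the error still propagates after cleanup
assert made == dropped == ['test_template']
```
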
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2022 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Helper classes for filling the place table.
@@ -10,12 +10,13 @@ Helper classes for filling the place table.
 import random
 import string

+
 class PlaceColumn:
     """ Helper class to collect contents from a behave table row and
         insert it into the place table.
     """
     def __init__(self, context):
-        self.columns = {'admin_level' : 15}
+        self.columns = {'admin_level': 15}
         self.context = context
         self.geometry = None

@@ -28,9 +29,11 @@ class PlaceColumn:
         assert 'osm_type' in self.columns, "osm column missing"

         if force_name and 'name' not in self.columns:
-            self._add_hstore('name', 'name',
-                             ''.join(random.choice(string.printable)
-                                     for _ in range(int(random.random()*30))))
+            self._add_hstore(
+                'name',
+                'name',
+                ''.join(random.choices(string.printable, k=random.randrange(30))),
+            )

         return self

@@ -96,7 +99,7 @@ class PlaceColumn:
         """ Issue a delete for the given OSM object.
         """
         cursor.execute('DELETE FROM place WHERE osm_type = %s and osm_id = %s',
-                       (self.columns['osm_type'] , self.columns['osm_id']))
+                       (self.columns['osm_type'], self.columns['osm_id']))

     def db_insert(self, cursor):
         """ Insert the collected data into the database.
@@ -104,7 +107,7 @@ class PlaceColumn:
         if self.columns['osm_type'] == 'N' and self.geometry is None:
             pt = self.context.osm.grid_node(self.columns['osm_id'])
             if pt is None:
-                pt = (random.random()*360 - 180, random.random()*180 - 90)
+                pt = (random.uniform(-180, 180), random.uniform(-90, 90))

             self.geometry = "ST_SetSRID(ST_Point(%f, %f), 4326)" % pt
         else:
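
Two of the changes above are behavior-preserving modernisations: `random.choices(string.printable, k=random.randrange(30))` draws the same kind of random name as the old `random.choice` loop (`randrange(30)` and `int(random.random()*30)` both yield 0-29), and `random.uniform(-180, 180)` spells out the bounds that `random.random()*360 - 180` computed by hand. A quick check of the latter equivalence (CPython implements `uniform(a, b)` as `a + (b-a)*random()`, so the two forms even agree bit-for-bit for a fixed seed):

```python
import random

random.seed(1)
old_style = (random.random() * 360 - 180, random.random() * 180 - 90)
random.seed(1)
new_style = (random.uniform(-180, 180), random.uniform(-90, 90))

assert old_style == new_style  # same distribution, clearer intent
```
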
@@ -2,20 +2,16 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """ Steps that run queries against the API.
 """
 from pathlib import Path
-import json
-import os
 import re
 import logging
 import asyncio
 import xml.etree.ElementTree as ET
-from urllib.parse import urlencode

-from utils import run_script
 from http_responses import GenericResponse, SearchResponse, ReverseResponse, StatusResponse
 from check_functions import Bbox, check_for_attributes
 from table_compare import NominatimID
@@ -68,7 +64,7 @@ def send_api_query(endpoint, params, fmt, context):
                                    getattr(context, 'http_headers', {})))


-@given(u'the HTTP header')
+@given('the HTTP header')
 def add_http_header(context):
     if not hasattr(context, 'http_headers'):
         context.http_headers = {}
@@ -77,7 +73,7 @@ def add_http_header(context):
         context.http_headers[h] = context.table[0][h]


-@when(u'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"(?P<addr> with address)?')
+@when(r'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"(?P<addr> with address)?')
 def website_search_request(context, fmt, query, addr):
     params = {}
     if query:
@@ -90,7 +86,7 @@ def website_search_request(context, fmt, query, addr):
     context.response = SearchResponse(outp, fmt or 'json', status)


-@when('sending v1/reverse at (?P<lat>[\d.-]*),(?P<lon>[\d.-]*)(?: with format (?P<fmt>.+))?')
+@when(r'sending v1/reverse at (?P<lat>[\d.-]*),(?P<lon>[\d.-]*)(?: with format (?P<fmt>.+))?')
 def api_endpoint_v1_reverse(context, lat, lon, fmt):
     params = {}
     if lat is not None:
@@ -106,7 +102,7 @@ def api_endpoint_v1_reverse(context, lat, lon, fmt):
     context.response = ReverseResponse(outp, fmt or 'xml', status)


-@when('sending v1/reverse N(?P<nodeid>\d+)(?: with format (?P<fmt>.+))?')
+@when(r'sending v1/reverse N(?P<nodeid>\d+)(?: with format (?P<fmt>.+))?')
 def api_endpoint_v1_reverse_from_node(context, nodeid, fmt):
     params = {}
     params['lon'], params['lat'] = (f'{c:f}' for c in context.osm.grid_node(int(nodeid)))
@@ -115,7 +111,7 @@ def api_endpoint_v1_reverse_from_node(context, nodeid, fmt):
     context.response = ReverseResponse(outp, fmt or 'xml', status)


-@when(u'sending (?P<fmt>\S+ )?details query for (?P<query>.*)')
+@when(r'sending (?P<fmt>\S+ )?details query for (?P<query>.*)')
 def website_details_request(context, fmt, query):
     params = {}
     if query[0] in 'NWR':
@@ -130,38 +126,45 @@ def website_details_request(context, fmt, query):

     context.response = GenericResponse(outp, fmt or 'json', status)

-@when(u'sending (?P<fmt>\S+ )?lookup query for (?P<query>.*)')
+
+@when(r'sending (?P<fmt>\S+ )?lookup query for (?P<query>.*)')
 def website_lookup_request(context, fmt, query):
-    params = { 'osm_ids' : query }
+    params = {'osm_ids': query}
     outp, status = send_api_query('lookup', params, fmt, context)

     context.response = SearchResponse(outp, fmt or 'xml', status)

-@when(u'sending (?P<fmt>\S+ )?status query')
+
+@when(r'sending (?P<fmt>\S+ )?status query')
 def website_status_request(context, fmt):
     params = {}
     outp, status = send_api_query('status', params, fmt, context)

     context.response = StatusResponse(outp, fmt or 'text', status)

-@step(u'(?P<operator>less than|more than|exactly|at least|at most) (?P<number>\d+) results? (?:is|are) returned')
+
+@step(r'(?P<operator>less than|more than|exactly|at least|at most) '
+      r'(?P<number>\d+) results? (?:is|are) returned')
 def validate_result_number(context, operator, number):
     context.execute_steps("Then a HTTP 200 is returned")
     numres = len(context.response.result)
     assert compare(operator, numres, int(number)), \
         f"Bad number of results: expected {operator} {number}, got {numres}."

-@then(u'a HTTP (?P<status>\d+) is returned')
+
+@then(r'a HTTP (?P<status>\d+) is returned')
 def check_http_return_status(context, status):
     assert context.response.errorcode == int(status), \
         f"Return HTTP status is {context.response.errorcode}."\
         f" Full response:\n{context.response.page}"

-@then(u'the page contents equals "(?P<text>.+)"')
+
+@then(r'the page contents equals "(?P<text>.+)"')
 def check_page_content_equals(context, text):
     assert context.response.page == text

-@then(u'the result is valid (?P<fmt>\w+)')
+
+@then(r'the result is valid (?P<fmt>\w+)')
 def step_impl(context, fmt):
     context.execute_steps("Then a HTTP 200 is returned")
     if fmt.strip() == 'html':
@@ -178,7 +181,7 @@ def step_impl(context, fmt):
     assert context.response.format == fmt


-@then(u'a (?P<fmt>\w+) user error is returned')
+@then(r'a (?P<fmt>\w+) user error is returned')
 def check_page_error(context, fmt):
     context.execute_steps("Then a HTTP 400 is returned")
     assert context.response.format == fmt
@@ -188,32 +191,34 @@ def check_page_error(context, fmt):
     else:
         assert re.search(r'({"error":)', context.response.page, re.DOTALL) is not None

-@then(u'result header contains')
+
+@then('result header contains')
 def check_header_attr(context):
     context.execute_steps("Then a HTTP 200 is returned")
     for line in context.table:
         assert line['attr'] in context.response.header, \
-            f"Field '{line['attr']}' missing in header. Full header:\n{context.response.header}"
+            f"Field '{line['attr']}' missing in header. " \
+            f"Full header:\n{context.response.header}"
         value = context.response.header[line['attr']]
         assert re.fullmatch(line['value'], value) is not None, \
             f"Attribute '{line['attr']}': expected: '{line['value']}', got '{value}'"


-@then(u'result header has (?P<neg>not )?attributes (?P<attrs>.*)')
+@then('result header has (?P<neg>not )?attributes (?P<attrs>.*)')
 def check_header_no_attr(context, neg, attrs):
     check_for_attributes(context.response.header, attrs,
                          'absent' if neg else 'present')


-@then(u'results contain(?: in field (?P<field>.*))?')
-def step_impl(context, field):
+@then(r'results contain(?: in field (?P<field>.*))?')
+def results_contain_in_field(context, field):
     context.execute_steps("then at least 1 result is returned")

     for line in context.table:
         context.response.match_row(line, context=context, field=field)


-@then(u'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)')
+@then(r'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)')
 def validate_attributes(context, lid, neg, attrs):
     for i in make_todo_list(context, lid):
         check_for_attributes(context.response.result[i], attrs,
@@ -221,7 +226,7 @@ def validate_attributes(context, lid, neg, attrs):


 @then(u'result addresses contain')
-def step_impl(context):
+def result_addresses_contain(context):
     context.execute_steps("then at least 1 result is returned")

     for line in context.table:
@@ -231,8 +236,9 @@ def step_impl(context):
         if name != 'ID':
             context.response.assert_address_field(idx, name, value)

-@then(u'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
-def check_address(context, lid, neg, attrs):
+
+@then(r'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
+def check_address_has_types(context, lid, neg, attrs):
     context.execute_steps(f"then more than {lid} results are returned")

     addr_parts = context.response.result[int(lid)]['address']
@@ -243,7 +249,8 @@ def check_address(context, lid, neg, attrs):
         else:
             assert attr in addr_parts

-@then(u'address of result (?P<lid>\d+) (?P<complete>is|contains)')
+
+@then(r'address of result (?P<lid>\d+) (?P<complete>is|contains)')
 def check_address(context, lid, complete):
     context.execute_steps(f"then more than {lid} results are returned")

@@ -258,7 +265,7 @@ def check_address(context, lid, complete):
     assert len(addr_parts) == 0, f"Additional address parts found: {addr_parts!s}"


-@then(u'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)')
+@then(r'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)')
 def check_bounding_box_in_area(context, lid, coords):
     expected = Bbox(coords)

@@ -269,7 +276,7 @@ def check_bounding_box_in_area(context, lid, coords):
                       f"Bbox is not contained in {expected}")


-@then(u'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)')
+@then(r'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)')
 def check_centroid_in_area(context, lid, coords):
     expected = Bbox(coords)

@@ -280,7 +287,7 @@ def check_centroid_in_area(context, lid, coords):
                       f"Centroid is not inside {expected}")


-@then(u'there are(?P<neg> no)? duplicates')
+@then('there are(?P<neg> no)? duplicates')
 def check_for_duplicates(context, neg):
     context.execute_steps("then at least 1 result is returned")

@@ -298,4 +305,3 @@ def check_for_duplicates(context, neg):
             assert not has_dupe, f"Found duplicate for {dup}"
         else:
             assert has_dupe, "No duplicates found"
-
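
The systematic `u'...'` to `r'...'` swap in the step decorators fixes flake8's W605: sequences like `\d` and `\S` are invalid escapes in an ordinary string literal (a `SyntaxWarning` on recent Pythons), while a raw string hands the backslashes to the regex engine untouched. The behave matching itself is unchanged, as a quick standalone check of one of the converted patterns shows:

```python
import re

pattern = re.compile(r'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"')
m = pattern.match('sending json search query "Berlin"')

assert m.group('fmt') == 'json '   # optional format group, trailing space included
assert m.group('query') == 'Berlin'
```
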
@@ -2,9 +2,8 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
-import logging
 from itertools import chain

 import psycopg
@@ -13,9 +12,9 @@ from psycopg import sql as pysql
 from place_inserter import PlaceColumn
 from table_compare import NominatimID, DBRow

-from nominatim_db.indexer import indexer
 from nominatim_db.tokenizer import factory as tokenizer_factory


 def check_database_integrity(context):
     """ Check some generic constraints on the tables.
     """
@@ -31,10 +30,9 @@ def check_database_integrity(context):
         cur.execute("SELECT count(*) FROM word WHERE word_token = ''")
         assert cur.fetchone()[0] == 0, "Empty word tokens found in word table"

+# GIVEN ##################################


-################################ GIVEN ##################################

 @given("the (?P<named>named )?places")
 def add_data_to_place_table(context, named):
     """ Add entries into the place table. 'named places' makes sure that
@@ -46,6 +44,7 @@ def add_data_to_place_table(context, named):
             PlaceColumn(context).add_row(row, named is not None).db_insert(cur)
         cur.execute('ALTER TABLE place ENABLE TRIGGER place_before_insert')

+
 @given("the relations")
 def add_data_to_planet_relations(context):
     """ Add entries into the osm2pgsql relation middle table. This is needed
@@ -77,9 +76,11 @@ def add_data_to_planet_relations(context):
             else:
                 members = None

-            tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings if h.startswith("tags+")])
+            tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings
+                                        if h.startswith("tags+")])

-            cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off, parts, members, tags)
+            cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off,
+                                                        parts, members, tags)
                            VALUES (%s, %s, %s, %s, %s, %s)""",
                         (r['id'], last_node, last_way, parts, members, list(tags)))
         else:
@@ -99,6 +100,7 @@ def add_data_to_planet_relations(context):
                         (r['id'], psycopg.types.json.Json(tags),
                          psycopg.types.json.Json(members)))

+
 @given("the ways")
 def add_data_to_planet_ways(context):
     """ Add entries into the osm2pgsql way middle table. This is necessary for
@@ -110,16 +112,18 @@ def add_data_to_planet_ways(context):
         json_tags = row is not None and row['value'] != '1'
         for r in context.table:
             if json_tags:
-                tags = psycopg.types.json.Json({h[5:]: r[h] for h in r.headings if h.startswith("tags+")})
+                tags = psycopg.types.json.Json({h[5:]: r[h] for h in r.headings
+                                                if h.startswith("tags+")})
             else:
                 tags = list(chain.from_iterable([(h[5:], r[h])
                                                  for h in r.headings if h.startswith("tags+")]))
-            nodes = [ int(x.strip()) for x in r['nodes'].split(',') ]
+            nodes = [int(x.strip()) for x in r['nodes'].split(',')]

             cur.execute("INSERT INTO planet_osm_ways (id, nodes, tags) VALUES (%s, %s, %s)",
                         (r['id'], nodes, tags))

-################################ WHEN ##################################
+# WHEN ##################################


 @when("importing")
 def import_and_index_data_from_place_table(context):
@@ -136,6 +140,7 @@ def import_and_index_data_from_place_table(context):
     # itself.
     context.log_capture.buffer.clear()

+
 @when("updating places")
 def update_place_table(context):
     """ Update the place table with the given data. Also runs all triggers
@@ -164,6 +169,7 @@ def update_postcodes(context):
     """
     context.nominatim.run_nominatim('refresh', '--postcodes')

+
 @when("marking for delete (?P<oids>.*)")
 def delete_places(context, oids):
     """ Remove entries from the place table. Multiple ids may be given
@@ -184,7 +190,8 @@ def delete_places(context, oids):
     # itself.
     context.log_capture.buffer.clear()

-################################ THEN ##################################
+# THEN ##################################


 @then("(?P<table>placex|place) contains(?P<exact> exactly)?")
 def check_place_contents(context, table, exact):
@@ -201,7 +208,8 @@ def check_place_contents(context, table, exact):
         expected_content = set()
         for row in context.table:
             nid = NominatimID(row['object'])
-            query = 'SELECT *, ST_AsText(geometry) as geomtxt, ST_GeometryType(geometry) as geometrytype'
+            query = """SELECT *, ST_AsText(geometry) as geomtxt,
+                              ST_GeometryType(geometry) as geometrytype """
             if table == 'placex':
                 query += ' ,ST_X(centroid) as cx, ST_Y(centroid) as cy'
             query += " FROM %s WHERE {}" % (table, )
@@ -261,17 +269,18 @@ def check_search_name_contents(context, exclude):

         if not exclude:
             assert len(tokens) >= len(items), \
-                "No word entry found for {}. Entries found: {!s}".format(value, len(tokens))
+                f"No word entry found for {value}. Entries found: {len(tokens)}"
         for word, token, wid in tokens:
             if exclude:
                 assert wid not in res[name], \
                     "Found term for {}/{}: {}".format(nid, name, wid)
             else:
                 assert wid in res[name], \
                     "Missing term for {}/{}: {}".format(nid, name, wid)
     elif name != 'object':
         assert db_row.contains(name, value), db_row.assert_msg(name, value)


 @then("search_name has no entry for (?P<oid>.*)")
 def check_search_name_has_entry(context, oid):
     """ Check that there is noentry in the search_name table for the given
@@ -283,6 +292,7 @@ def check_search_name_has_entry(context, oid):
         assert cur.rowcount == 0, \
             "Found {} entries for ID {}".format(cur.rowcount, oid)

+
 @then("location_postcode contains exactly")
 def check_location_postcode(context):
     """ Check full contents for location_postcode table. Each row represents a table row
@@ -294,21 +304,22 @@ def check_location_postcode(context):
     with context.db.cursor() as cur:
         cur.execute("SELECT *, ST_AsText(geometry) as geomtxt FROM location_postcode")
         assert cur.rowcount == len(list(context.table)), \
-            "Postcode table has {} rows, expected {}.".format(cur.rowcount, len(list(context.table)))
+            f"Postcode table has {cur.rowcount} rows, expected {len(list(context.table))}."

         results = {}
         for row in cur:
             key = (row['country_code'], row['postcode'])
             assert key not in results, "Postcode table has duplicate entry: {}".format(row)
-            results[key] = DBRow((row['country_code'],row['postcode']), row, context)
+            results[key] = DBRow((row['country_code'], row['postcode']), row, context)

     for row in context.table:
-        db_row = results.get((row['country'],row['postcode']))
+        db_row = results.get((row['country'], row['postcode']))
         assert db_row is not None, \
             f"Missing row for country '{row['country']}' postcode '{row['postcode']}'."

         db_row.assert_row(row, ('country', 'postcode'))


 @then("there are(?P<exclude> no)? word tokens for postcodes (?P<postcodes>.*)")
 def check_word_table_for_postcodes(context, exclude, postcodes):
     """ Check that the tokenizer produces postcode tokens for the given
@@ -333,7 +344,8 @@ def check_word_table_for_postcodes(context, exclude, postcodes):
         assert len(found) == 0, f"Unexpected postcodes: {found}"
     else:
         assert set(found) == set(plist), \
             f"Missing postcodes {set(plist) - set(found)}. Found: {found}"

+
 @then("place_addressline contains")
 def check_place_addressline(context):
@@ -352,11 +364,12 @@ def check_place_addressline(context):
                            WHERE place_id = %s AND address_place_id = %s""",
                         (pid, apid))
             assert cur.rowcount > 0, \
-                "No rows found for place %s and address %s" % (row['object'], row['address'])
+                f"No rows found for place {row['object']} and address {row['address']}."

             for res in cur:
                 DBRow(nid, res, context).assert_row(row, ('address', 'object'))

+
 @then("place_addressline doesn't contain")
 def check_place_addressline_exclude(context):
     """ Check that the place_addressline doesn't contain any entries for the
@@ -371,9 +384,10 @@ def check_place_addressline_exclude(context):
                            WHERE place_id = %s AND address_place_id = %s""",
                         (pid, apid))
             assert cur.rowcount == 0, \
-                "Row found for place %s and address %s" % (row['object'], row['address'])
+                f"Row found for place {row['object']} and address {row['address']}."

-@then("W(?P<oid>\d+) expands to(?P<neg> no)? interpolation")
+
+@then(r"W(?P<oid>\d+) expands to(?P<neg> no)? interpolation")
 def check_location_property_osmline(context, oid, neg):
     """ Check that the given way is present in the interpolation table.
     """
@@ -392,7 +406,7 @@ def check_location_property_osmline(context, oid, neg):
         for i in todo:
             row = context.table[i]
             if (int(row['start']) == res['startnumber']
                     and int(row['end']) == res['endnumber']):
                 todo.remove(i)
                 break
         else:
@@ -402,8 +416,9 @@ def check_location_property_osmline(context, oid, neg):

     assert not todo, f"Unmatched lines in table: {list(context.table[i] for i in todo)}"


 @then("location_property_osmline contains(?P<exact> exactly)?")
-def check_place_contents(context, exact):
+def check_osmline_contents(context, exact):
     """ Check contents of the interpolation table. Each row represents a table row
         and all data must match. Data not present in the expected table, may
         be arbitrary. The rows are identified via the 'object' column which must
@@ -447,4 +462,3 @@ def check_place_contents(context, exact):
     assert expected_content == actual, \
         f"Missing entries: {expected_content - actual}\n" \
         f"Not expected in table: {actual - expected_content}"
-
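
The wrapped `chain.from_iterable` comprehensions above are pure line-length fixes; the flattening they perform is how table headings prefixed with `tags+` become the alternating key/value list that the legacy osm2pgsql middle-table format expects. A sketch with an invented behave row:

```python
from itertools import chain

row = {'id': '1', 'tags+type': 'multipolygon', 'tags+name': 'Foo'}
headings = list(row)  # behave exposes these via r.headings

tags = list(chain.from_iterable([(h[5:], row[h]) for h in headings
                                 if h.startswith("tags+")]))

assert tags == ['type', 'multipolygon', 'name', 'Foo']  # flat key/value pairs
```
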
@@ -14,6 +14,7 @@ from nominatim_db.tools.replication import run_osm2pgsql_updates

 from geometry_alias import ALIASES

+
 def get_osm2pgsql_options(nominatim_env, fname, append):
     return dict(import_file=fname,
                 osm2pgsql='osm2pgsql',
@@ -25,8 +26,7 @@ def get_osm2pgsql_options(nominatim_env, fname, append):
                 flatnode_file='',
                 tablespaces=dict(slim_data='', slim_index='',
                                  main_data='', main_index=''),
-                append=append
-                )
+                append=append)


 def write_opl_file(opl, grid):
@@ -41,14 +41,14 @@ def write_opl_file(opl, grid):
             if line.startswith('n') and line.find(' x') < 0:
                 coord = grid.grid_node(int(line[1:].split(' ')[0]))
                 if coord is None:
-                    coord = (random.random() * 360 - 180,
-                             random.random() * 180 - 90)
+                    coord = (random.uniform(-180, 180), random.uniform(-90, 90))
                 line += " x%f y%f" % coord
             fd.write(line.encode('utf-8'))
             fd.write(b'\n')

     return fd.name


 @given('the lua style file')
 def lua_style_file(context):
     """ Define a custom style file to use for the import.
@@ -91,7 +91,7 @@ def define_node_grid(context, grid_step, origin):
 @when(u'loading osm data')
 def load_osm_file(context):
     """
-    Load the given data into a freshly created test data using osm2pgsql.
+    Load the given data into a freshly created test database using osm2pgsql.
     No further indexing is done.

     The data is expected as attached text in OPL format.
@@ -103,13 +103,14 @@ def load_osm_file(context):
     finally:
         os.remove(fname)

-    ### reintroduce the triggers/indexes we've lost by having osm2pgsql set up place again
+    # reintroduce the triggers/indexes we've lost by having osm2pgsql set up place again
     cur = context.db.cursor()
     cur.execute("""CREATE TRIGGER place_before_delete BEFORE DELETE ON place
                    FOR EACH ROW EXECUTE PROCEDURE place_delete()""")
     cur.execute("""CREATE TRIGGER place_before_insert BEFORE INSERT ON place
                    FOR EACH ROW EXECUTE PROCEDURE place_insert()""")
-    cur.execute("""CREATE UNIQUE INDEX idx_place_osm_unique on place using btree(osm_id,osm_type,class,type)""")
+    cur.execute("""CREATE UNIQUE INDEX idx_place_osm_unique ON place
+                   USING btree(osm_id,osm_type,class,type)""")
     context.db.commit()


@@ -133,6 +134,7 @@ def update_from_osm_file(context):
     finally:
         os.remove(fname)

+
 @when('indexing')
 def index_database(context):
     """
@@ -2,21 +2,21 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2022 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Functions to facilitate accessing and comparing the content of DB tables.
 """
+import math
 import re
 import json
 
 import psycopg
 from psycopg import sql as pysql
 
-from steps.check_functions import Almost
 
 ID_REGEX = re.compile(r"(?P<typ>[NRW])(?P<oid>\d+)(:(?P<cls>\w+))?")
 
 
 class NominatimID:
     """ Splits a unique identifier for places into its components.
         As place_ids cannot be used for testing, we use a unique
@@ -147,10 +147,10 @@ class DBRow:
         return str(actual) == expected
 
     def _compare_place_id(self, actual, expected):
         if expected == '0':
             return actual == 0
 
         with self.context.db.cursor() as cur:
             return NominatimID(expected).get_place_id(cur) == actual
 
     def _has_centroid(self, expected):
@@ -166,13 +166,15 @@ class DBRow:
         else:
             x, y = self.context.osm.grid_node(int(expected))
 
-        return Almost(float(x)) == self.db_row['cx'] and Almost(float(y)) == self.db_row['cy']
+        return math.isclose(float(x), self.db_row['cx']) \
+            and math.isclose(float(y), self.db_row['cy'])
 
     def _has_geometry(self, expected):
         geom = self.context.osm.parse_geometry(expected)
         with self.context.db.cursor(row_factory=psycopg.rows.tuple_row) as cur:
-            cur.execute(pysql.SQL("""SELECT ST_Equals(ST_SnapToGrid({}, 0.00001, 0.00001),
-                                     ST_SnapToGrid(ST_SetSRID({}::geometry, 4326), 0.00001, 0.00001))""")
+            cur.execute(pysql.SQL("""
+                SELECT ST_Equals(ST_SnapToGrid({}, 0.00001, 0.00001),
+                                 ST_SnapToGrid(ST_SetSRID({}::geometry, 4326), 0.00001, 0.00001))""")
                         .format(pysql.SQL(geom),
                                 pysql.Literal(self.db_row['geomtxt'])))
             return cur.fetchone()[0]
@@ -187,7 +189,8 @@ class DBRow:
         else:
             msg += " No such column."
 
-        return msg + "\nFull DB row: {}".format(json.dumps(dict(self.db_row), indent=4, default=str))
+        return msg + "\nFull DB row: {}".format(json.dumps(dict(self.db_row),
+                                                           indent=4, default=str))
 
     def _get_actual(self, name):
         if '+' in name:
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# This file is part of Nominatim. (https://nominatim.org)
-#
-# Copyright (C) 2022 by the Nominatim developer community.
-# For a full list of authors see the git log.
-"""
-Various smaller helps for step execution.
-"""
-import logging
-import subprocess
-
-LOG = logging.getLogger(__name__)
-
-def run_script(cmd, **kwargs):
-    """ Run the given command, check that it is successful and output
-        when necessary.
-    """
-    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                            **kwargs)
-    (outp, outerr) = proc.communicate()
-    outp = outp.decode('utf-8')
-    outerr = outerr.decode('utf-8').replace('\\n', '\n')
-    LOG.debug("Run command: %s\n%s\n%s", cmd, outp, outerr)
-
-    assert proc.returncode == 0, "Script '{}' failed:\n{}\n{}\n".format(cmd[0], outp, outerr)
-
-    return outp, outerr
@@ -2,14 +2,13 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Helper fixtures for API call tests.
 """
 import pytest
 import pytest_asyncio
-import time
 import datetime as dt
 
 import sqlalchemy as sa
@@ -20,27 +19,25 @@ from nominatim_api.search.query_analyzer_factory import make_query_analyzer
 from nominatim_db.tools import convert_sqlite
 import nominatim_api.logging as loglib
 
 
 class APITester:
 
     def __init__(self):
         self.api = napi.NominatimAPI()
         self.async_to_sync(self.api._async_api.setup_database())
 
     def async_to_sync(self, func):
         """ Run an asynchronous function until completion using the
             internal loop of the API.
         """
         return self.api._loop.run_until_complete(func)
 
     def add_data(self, table, data):
         """ Insert data into the given table.
         """
         sql = getattr(self.api._async_api._tables, table).insert()
         self.async_to_sync(self.exec_async(sql, data))
 
     def add_placex(self, **kw):
         name = kw.get('name')
         if isinstance(name, str):
@@ -50,30 +47,29 @@ class APITester:
         geometry = kw.get('geometry', 'POINT(%f %f)' % centroid)
 
         self.add_data('placex',
                       {'place_id': kw.get('place_id', 1000),
                        'osm_type': kw.get('osm_type', 'W'),
                        'osm_id': kw.get('osm_id', 4),
                        'class_': kw.get('class_', 'highway'),
                        'type': kw.get('type', 'residential'),
                        'name': name,
                        'address': kw.get('address'),
                        'extratags': kw.get('extratags'),
                        'parent_place_id': kw.get('parent_place_id'),
                        'linked_place_id': kw.get('linked_place_id'),
                        'admin_level': kw.get('admin_level', 15),
                        'country_code': kw.get('country_code'),
                        'housenumber': kw.get('housenumber'),
                        'postcode': kw.get('postcode'),
                        'wikipedia': kw.get('wikipedia'),
                        'rank_search': kw.get('rank_search', 30),
                        'rank_address': kw.get('rank_address', 30),
                        'importance': kw.get('importance'),
                        'centroid': 'POINT(%f %f)' % centroid,
                        'indexed_status': kw.get('indexed_status', 0),
                        'indexed_date': kw.get('indexed_date',
                                                dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
                        'geometry': geometry})
 
     def add_address_placex(self, object_id, **kw):
         self.add_placex(**kw)
@@ -85,46 +81,42 @@ class APITester:
                        'fromarea': kw.get('fromarea', False),
                        'isaddress': kw.get('isaddress', True)})
 
     def add_osmline(self, **kw):
         self.add_data('osmline',
                       {'place_id': kw.get('place_id', 10000),
                        'osm_id': kw.get('osm_id', 4004),
                        'parent_place_id': kw.get('parent_place_id'),
                        'indexed_date': kw.get('indexed_date',
                                               dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
                        'startnumber': kw.get('startnumber', 2),
                        'endnumber': kw.get('endnumber', 6),
                        'step': kw.get('step', 2),
                        'address': kw.get('address'),
                        'postcode': kw.get('postcode'),
                        'country_code': kw.get('country_code'),
                        'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})
 
     def add_tiger(self, **kw):
         self.add_data('tiger',
                       {'place_id': kw.get('place_id', 30000),
                        'parent_place_id': kw.get('parent_place_id'),
                        'startnumber': kw.get('startnumber', 2),
                        'endnumber': kw.get('endnumber', 6),
                        'step': kw.get('step', 2),
                        'postcode': kw.get('postcode'),
                        'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})
 
     def add_postcode(self, **kw):
         self.add_data('postcode',
                       {'place_id': kw.get('place_id', 1000),
                        'parent_place_id': kw.get('parent_place_id'),
                        'country_code': kw.get('country_code'),
                        'postcode': kw.get('postcode'),
                        'rank_search': kw.get('rank_search', 20),
                        'rank_address': kw.get('rank_address', 22),
                        'indexed_date': kw.get('indexed_date',
                                               dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
                        'geometry': kw.get('geometry', 'POINT(23 34)')})
 
     def add_country(self, country_code, geometry):
         self.add_data('country_grid',
@@ -132,14 +124,12 @@ class APITester:
                        'area': 0.1,
                        'geometry': geometry})
 
     def add_country_name(self, country_code, names, partition=0):
         self.add_data('country_name',
                       {'country_code': country_code,
                        'name': names,
                        'partition': partition})
 
     def add_search_name(self, place_id, **kw):
         centroid = kw.get('centroid', (23.0, 34.0))
         self.add_data('search_name',
@@ -152,7 +142,6 @@ class APITester:
                        'country_code': kw.get('country_code', 'xx'),
                        'centroid': 'POINT(%f %f)' % centroid})
 
     def add_class_type_table(self, cls, typ):
         self.async_to_sync(
             self.exec_async(sa.text(f"""CREATE TABLE place_classtype_{cls}_{typ}
@@ -160,7 +149,6 @@ class APITester:
                                          WHERE class = '{cls}' AND type = '{typ}')
                              """)))
 
     def add_word_table(self, content):
         data = [dict(zip(['word_id', 'word_token', 'type', 'word', 'info'], c))
                 for c in content]
@@ -176,12 +164,10 @@ class APITester:
 
         self.async_to_sync(_do_sql())
 
     async def exec_async(self, sql, *args, **kwargs):
         async with self.api._async_api.begin() as conn:
             return await conn.execute(sql, *args, **kwargs)
 
     async def create_tables(self):
         async with self.api._async_api._engine.begin() as conn:
             await conn.run_sync(self.api._async_api._tables.meta.create_all)
@@ -212,11 +198,12 @@ def frontend(request, event_loop, tmp_path):
     db = str(tmp_path / 'test_nominatim_python_unittest.sqlite')
 
     def mkapi(apiobj, options={'reverse'}):
-        apiobj.add_data('properties',
-                        [{'property': 'tokenizer', 'value': 'icu'},
-                         {'property': 'tokenizer_import_normalisation', 'value': ':: lower();'},
-                         {'property': 'tokenizer_import_transliteration', 'value': "'1' > '/1/'; 'ä' > 'ä '"},
-                        ])
+        apiobj.add_data(
+            'properties',
+            [{'property': 'tokenizer', 'value': 'icu'},
+             {'property': 'tokenizer_import_normalisation', 'value': ':: lower();'},
+             {'property': 'tokenizer_import_transliteration',
+              'value': "'1' > '/1/'; 'ä' > 'ä '"}])
 
         async def _do_sql():
             async with apiobj.api._async_api.begin() as conn:
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Provides dummy implementations of ASGIAdaptor for testing.
@@ -13,6 +13,7 @@ import nominatim_api.v1.server_glue as glue
 from nominatim_api.v1.format import dispatch as formatting
 from nominatim_api.config import Configuration
 
+
 class FakeError(BaseException):
 
     def __init__(self, msg, status):
@@ -22,8 +23,10 @@ class FakeError(BaseException):
     def __str__(self):
         return f'{self.status} -- {self.msg}'
 
+
 FakeResponse = namedtuple('FakeResponse', ['status', 'output', 'content_type'])
 
+
 class FakeAdaptor(glue.ASGIAdaptor):
 
     def __init__(self, params=None, headers=None, config=None):
@@ -31,23 +34,18 @@ class FakeAdaptor(glue.ASGIAdaptor):
         self.headers = headers or {}
         self._config = config or Configuration(None)
 
     def get(self, name, default=None):
         return self.params.get(name, default)
 
     def get_header(self, name, default=None):
         return self.headers.get(name, default)
 
     def error(self, msg, status=400):
         return FakeError(msg, status)
 
     def create_response(self, status, output, num_results):
         return FakeResponse(status, output, self.content_type)
 
     def base_uri(self):
         return 'http://test'
 
@@ -56,5 +54,3 @@ class FakeAdaptor(glue.ASGIAdaptor):
 
     def formatting(self):
         return formatting
-
-
@@ -2,21 +2,18 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Tests for normalizing search queries.
 """
-from pathlib import Path
-
-import pytest
 
 from icu import Transliterator
 
 import nominatim_api.search.query as qmod
 from nominatim_api.query_preprocessing.config import QueryConfig
 from nominatim_api.query_preprocessing import normalize
 
 
 def run_preprocessor_on(query, norm):
     normalizer = Transliterator.createFromRules("normalization", norm)
     proc = normalize.create(QueryConfig().set_normalizer(normalizer))
@@ -26,9 +23,9 @@ def run_preprocessor_on(query, norm):
 
 def test_normalize_simple():
     norm = ':: lower();'
-    query = [qmod.Phrase(qmod.PhraseType.NONE, 'Hallo')]
+    query = [qmod.Phrase(qmod.PHRASE_ANY, 'Hallo')]
 
     out = run_preprocessor_on(query, norm)
 
     assert len(out) == 1
-    assert out == [qmod.Phrase(qmod.PhraseType.NONE, 'hallo')]
+    assert out == [qmod.Phrase(qmod.PHRASE_ANY, 'hallo')]
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2025 by the Nominatim developer community.
+# For a full list of authors see the git log.
+"""
+Tests for japanese phrase splitting.
+"""
+import pytest
+
+import nominatim_api.search.query as qmod
+from nominatim_api.query_preprocessing.config import QueryConfig
+from nominatim_api.query_preprocessing import split_japanese_phrases
+
+
+def run_preprocessor_on(query):
+    proc = split_japanese_phrases.create(QueryConfig().set_normalizer(None))
+
+    return proc(query)
+
+
+@pytest.mark.parametrize('inp,outp', [('大阪府大阪市大阪', '大阪府:大阪市:大阪'),
+                                      ('大阪府大阪', '大阪府:大阪'),
+                                      ('大阪市大阪', '大阪市:大阪')])
+def test_split_phrases(inp, outp):
+    query = [qmod.Phrase(qmod.PHRASE_ANY, inp)]
+
+    out = run_preprocessor_on(query)
+
+    assert out == [qmod.Phrase(qmod.PHRASE_ANY, outp)]
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Tests for tokenized query data structures.
@@ -11,6 +11,7 @@ import pytest
 
 from nominatim_api.search import query
 
+
 class MyToken(query.Token):
 
     def get_category(self):
@@ -22,42 +23,44 @@ def mktoken(tid: int):
                    lookup_word='foo')
 
 
-@pytest.mark.parametrize('ptype,ttype', [('NONE', 'WORD'),
-                                         ('AMENITY', 'QUALIFIER'),
-                                         ('STREET', 'PARTIAL'),
-                                         ('CITY', 'WORD'),
-                                         ('COUNTRY', 'COUNTRY'),
-                                         ('POSTCODE', 'POSTCODE')])
+@pytest.fixture
+def qnode():
+    return query.QueryNode(query.BREAK_PHRASE, query.PHRASE_ANY, 0.0, '', '')
+
+
+@pytest.mark.parametrize('ptype,ttype', [(query.PHRASE_ANY, 'W'),
+                                         (query.PHRASE_AMENITY, 'Q'),
+                                         (query.PHRASE_STREET, 'w'),
+                                         (query.PHRASE_CITY, 'W'),
+                                         (query.PHRASE_COUNTRY, 'C'),
+                                         (query.PHRASE_POSTCODE, 'P')])
 def test_phrase_compatible(ptype, ttype):
-    assert query.PhraseType[ptype].compatible_with(query.TokenType[ttype], False)
+    assert query._phrase_compatible_with(ptype, ttype, False)
 
 
-@pytest.mark.parametrize('ptype', ['COUNTRY', 'POSTCODE'])
+@pytest.mark.parametrize('ptype', [query.PHRASE_COUNTRY, query.PHRASE_POSTCODE])
 def test_phrase_incompatible(ptype):
-    assert not query.PhraseType[ptype].compatible_with(query.TokenType.PARTIAL, True)
+    assert not query._phrase_compatible_with(ptype, query.TOKEN_PARTIAL, True)
 
 
-def test_query_node_empty():
-    qn = query.QueryNode(query.BreakType.PHRASE, query.PhraseType.NONE)
-
-    assert not qn.has_tokens(3, query.TokenType.PARTIAL)
-    assert qn.get_tokens(3, query.TokenType.WORD) is None
+def test_query_node_empty(qnode):
+    assert not qnode.has_tokens(3, query.TOKEN_PARTIAL)
+    assert qnode.get_tokens(3, query.TOKEN_WORD) is None
 
 
-def test_query_node_with_content():
-    qn = query.QueryNode(query.BreakType.PHRASE, query.PhraseType.NONE)
-    qn.starting.append(query.TokenList(2, query.TokenType.PARTIAL, [mktoken(100), mktoken(101)]))
-    qn.starting.append(query.TokenList(2, query.TokenType.WORD, [mktoken(1000)]))
+def test_query_node_with_content(qnode):
+    qnode.starting.append(query.TokenList(2, query.TOKEN_PARTIAL, [mktoken(100), mktoken(101)]))
+    qnode.starting.append(query.TokenList(2, query.TOKEN_WORD, [mktoken(1000)]))
 
-    assert not qn.has_tokens(3, query.TokenType.PARTIAL)
-    assert not qn.has_tokens(2, query.TokenType.COUNTRY)
-    assert qn.has_tokens(2, query.TokenType.PARTIAL)
-    assert qn.has_tokens(2, query.TokenType.WORD)
+    assert not qnode.has_tokens(3, query.TOKEN_PARTIAL)
+    assert not qnode.has_tokens(2, query.TOKEN_COUNTRY)
+    assert qnode.has_tokens(2, query.TOKEN_PARTIAL)
+    assert qnode.has_tokens(2, query.TOKEN_WORD)
 
-    assert qn.get_tokens(3, query.TokenType.PARTIAL) is None
-    assert qn.get_tokens(2, query.TokenType.COUNTRY) is None
-    assert len(qn.get_tokens(2, query.TokenType.PARTIAL)) == 2
-    assert len(qn.get_tokens(2, query.TokenType.WORD)) == 1
+    assert qnode.get_tokens(3, query.TOKEN_PARTIAL) is None
+    assert qnode.get_tokens(2, query.TOKEN_COUNTRY) is None
+    assert len(qnode.get_tokens(2, query.TOKEN_PARTIAL)) == 2
+    assert len(qnode.get_tokens(2, query.TOKEN_WORD)) == 1
 
 
 def test_query_struct_empty():
@@ -67,19 +70,19 @@ def test_query_struct_empty():
 
 
 def test_query_struct_with_tokens():
-    q = query.QueryStruct([query.Phrase(query.PhraseType.NONE, 'foo bar')])
-    q.add_node(query.BreakType.WORD, query.PhraseType.NONE)
-    q.add_node(query.BreakType.END, query.PhraseType.NONE)
+    q = query.QueryStruct([query.Phrase(query.PHRASE_ANY, 'foo bar')])
+    q.add_node(query.BREAK_WORD, query.PHRASE_ANY)
+    q.add_node(query.BREAK_END, query.PHRASE_ANY)
 
     assert q.num_token_slots() == 2
 
-    q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
-    q.add_token(query.TokenRange(1, 2), query.TokenType.PARTIAL, mktoken(2))
-    q.add_token(query.TokenRange(1, 2), query.TokenType.WORD, mktoken(99))
-    q.add_token(query.TokenRange(1, 2), query.TokenType.WORD, mktoken(98))
+    q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
+    q.add_token(query.TokenRange(1, 2), query.TOKEN_PARTIAL, mktoken(2))
+    q.add_token(query.TokenRange(1, 2), query.TOKEN_WORD, mktoken(99))
+    q.add_token(query.TokenRange(1, 2), query.TOKEN_WORD, mktoken(98))
 
-    assert q.get_tokens(query.TokenRange(0, 2), query.TokenType.WORD) == []
-    assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.WORD)) == 2
+    assert q.get_tokens(query.TokenRange(0, 2), query.TOKEN_WORD) == []
+    assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_WORD)) == 2
 
     partials = q.get_partials_list(query.TokenRange(0, 2))
 
@@ -91,45 +94,44 @@ def test_query_struct_with_tokens():
 
 
 def test_query_struct_incompatible_token():
-    q = query.QueryStruct([query.Phrase(query.PhraseType.COUNTRY, 'foo bar')])
-    q.add_node(query.BreakType.WORD, query.PhraseType.COUNTRY)
-    q.add_node(query.BreakType.END, query.PhraseType.NONE)
+    q = query.QueryStruct([query.Phrase(query.PHRASE_COUNTRY, 'foo bar')])
+    q.add_node(query.BREAK_WORD, query.PHRASE_COUNTRY)
+    q.add_node(query.BREAK_END, query.PHRASE_ANY)
 
-    q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
-    q.add_token(query.TokenRange(1, 2), query.TokenType.COUNTRY, mktoken(100))
+    q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
+    q.add_token(query.TokenRange(1, 2), query.TOKEN_COUNTRY, mktoken(100))
 
-    assert q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL) == []
-    assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.COUNTRY)) == 1
+    assert q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL) == []
+    assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_COUNTRY)) == 1
 
 
 def test_query_struct_amenity_single_word():
-    q = query.QueryStruct([query.Phrase(query.PhraseType.AMENITY, 'bar')])
-    q.add_node(query.BreakType.END, query.PhraseType.NONE)
+    q = query.QueryStruct([query.Phrase(query.PHRASE_AMENITY, 'bar')])
+    q.add_node(query.BREAK_END, query.PHRASE_ANY)
 
-    q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
-    q.add_token(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM, mktoken(2))
-    q.add_token(query.TokenRange(0, 1), query.TokenType.QUALIFIER, mktoken(3))
+    q.add_token(query.TokenRange(0, 1), query.TOKEN_PARTIAL, mktoken(1))
+    q.add_token(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM, mktoken(2))
+    q.add_token(query.TokenRange(0, 1), query.TOKEN_QUALIFIER, mktoken(3))
 
-    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1
-    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 1
-    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 0
+    assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL)) == 1
+    assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM)) == 1
+    assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_QUALIFIER)) == 0
 
 
 def test_query_struct_amenity_two_words():
-    q = query.QueryStruct([query.Phrase(query.PhraseType.AMENITY, 'foo bar')])
-    q.add_node(query.BreakType.WORD, query.PhraseType.AMENITY)
-    q.add_node(query.BreakType.END, query.PhraseType.NONE)
+    q = query.QueryStruct([query.Phrase(query.PHRASE_AMENITY, 'foo bar')])
+    q.add_node(query.BREAK_WORD, query.PHRASE_AMENITY)
+    q.add_node(query.BREAK_END, query.PHRASE_ANY)
 
     for trange in [(0, 1), (1, 2)]:
-        q.add_token(query.TokenRange(*trange), query.TokenType.PARTIAL, mktoken(1))
-        q.add_token(query.TokenRange(*trange), query.TokenType.NEAR_ITEM, mktoken(2))
-        q.add_token(query.TokenRange(*trange), query.TokenType.QUALIFIER, mktoken(3))
+        q.add_token(query.TokenRange(*trange), query.TOKEN_PARTIAL, mktoken(1))
+        q.add_token(query.TokenRange(*trange), query.TOKEN_NEAR_ITEM, mktoken(2))
+        q.add_token(query.TokenRange(*trange), query.TOKEN_QUALIFIER, mktoken(3))
 
-    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1
-    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 0
-    assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 1
+    assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_PARTIAL)) == 1
+    assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_NEAR_ITEM)) == 0
+    assert len(q.get_tokens(query.TokenRange(0, 1), query.TOKEN_QUALIFIER)) == 1
 
-    assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.PARTIAL)) == 1
-    assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.NEAR_ITEM)) == 0
-    assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.QUALIFIER)) == 1
 
+    assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_PARTIAL)) == 1
+    assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_NEAR_ITEM)) == 0
+    assert len(q.get_tokens(query.TokenRange(1, 2), query.TOKEN_QUALIFIER)) == 1
@@ -9,38 +9,39 @@ Tests for creating abstract searches from token assignments.
|
|||||||
"""
|
"""
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from nominatim_api.search.query import Token, TokenRange, BreakType, PhraseType, TokenType, QueryStruct, Phrase
|
from nominatim_api.search.query import Token, TokenRange, QueryStruct, Phrase
|
||||||
|
import nominatim_api.search.query as qmod
|
||||||
from nominatim_api.search.db_search_builder import SearchBuilder
|
from nominatim_api.search.db_search_builder import SearchBuilder
|
||||||
from nominatim_api.search.token_assignment import TokenAssignment
|
from nominatim_api.search.token_assignment import TokenAssignment
|
||||||
from nominatim_api.types import SearchDetails
|
from nominatim_api.types import SearchDetails
|
||||||
import nominatim_api.search.db_searches as dbs
|
import nominatim_api.search.db_searches as dbs
|
||||||
|
|
||||||
|
|
||||||
class MyToken(Token):
|
class MyToken(Token):
|
||||||
def get_category(self):
|
def get_category(self):
|
||||||
return 'this', 'that'
|
return 'this', 'that'
|
||||||
|
|
||||||
|
|
||||||
def make_query(*args):
|
def make_query(*args):
|
||||||
q = QueryStruct([Phrase(PhraseType.NONE, '')])
|
q = QueryStruct([Phrase(qmod.PHRASE_ANY, '')])
|
||||||
|
|
||||||
for _ in range(max(inner[0] for tlist in args for inner in tlist)):
|
for _ in range(max(inner[0] for tlist in args for inner in tlist)):
|
||||||
q.add_node(BreakType.WORD, PhraseType.NONE)
|
q.add_node(qmod.BREAK_WORD, qmod.PHRASE_ANY)
|
||||||
q.add_node(BreakType.END, PhraseType.NONE)
|
q.add_node(qmod.BREAK_END, qmod.PHRASE_ANY)
|
||||||
|
|
||||||
for start, tlist in enumerate(args):
|
for start, tlist in enumerate(args):
|
||||||
for end, ttype, tinfo in tlist:
|
for end, ttype, tinfo in tlist:
|
||||||
for tid, word in tinfo:
|
for tid, word in tinfo:
|
||||||
q.add_token(TokenRange(start, end), ttype,
|
q.add_token(TokenRange(start, end), ttype,
|
||||||
MyToken(penalty=0.5 if ttype == TokenType.PARTIAL else 0.0,
|
MyToken(penalty=0.5 if ttype == qmod.TOKEN_PARTIAL else 0.0,
|
||||||
token=tid, count=1, addr_count=1,
|
token=tid, count=1, addr_count=1,
|
||||||
lookup_word=word))
|
lookup_word=word))
|
||||||
|
|
||||||
|
|
||||||
return q
|
return q
|
||||||
|
|
||||||
|
|
||||||
def test_country_search():
|
def test_country_search():
|
||||||
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
|
q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
|
||||||
@@ -54,7 +55,7 @@ def test_country_search():
|
|||||||
|
|
||||||
|
|
||||||
def test_country_search_with_country_restriction():
|
def test_country_search_with_country_restriction():
|
||||||
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
|
q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'en,fr'}))
|
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'en,fr'}))
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
|
||||||
@@ -68,7 +69,7 @@ def test_country_search_with_country_restriction():
|
|||||||
|
|
||||||
|
|
||||||
def test_country_search_with_conflicting_country_restriction():
|
def test_country_search_with_conflicting_country_restriction():
|
||||||
q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
|
q = make_query([(1, qmod.TOKEN_COUNTRY, [(2, 'de'), (3, 'en')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'fr'}))
|
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'fr'}))
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
|
||||||
@@ -77,7 +78,7 @@ def test_country_search_with_conflicting_country_restriction():
|
|||||||
|
|
||||||
|
|
||||||
def test_postcode_search_simple():
|
def test_postcode_search_simple():
|
||||||
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])])
|
q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1))))
|
||||||
@@ -93,8 +94,8 @@ def test_postcode_search_simple():
|
|||||||
|
|
||||||
|
|
||||||
def test_postcode_with_country():
|
def test_postcode_with_country():
|
||||||
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
|
q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
|
||||||
[(2, TokenType.COUNTRY, [(1, 'xx')])])
|
[(2, qmod.TOKEN_COUNTRY, [(1, 'xx')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
|
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
|
||||||
@@ -111,8 +112,8 @@ def test_postcode_with_country():
|
|||||||
|
|
||||||
|
|
||||||
def test_postcode_with_address():
|
def test_postcode_with_address():
|
||||||
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
|
q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
|
||||||
[(2, TokenType.PARTIAL, [(100, 'word')])])
|
[(2, qmod.TOKEN_PARTIAL, [(100, 'word')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
|
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
|
||||||
@@ -129,9 +130,9 @@ def test_postcode_with_address():
|
|||||||
|
|
||||||
|
|
||||||
def test_postcode_with_address_with_full_word():
|
def test_postcode_with_address_with_full_word():
|
||||||
q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
|
q = make_query([(1, qmod.TOKEN_POSTCODE, [(34, '2367')])],
|
||||||
[(2, TokenType.PARTIAL, [(100, 'word')]),
|
[(2, qmod.TOKEN_PARTIAL, [(100, 'word')]),
|
||||||
(2, TokenType.WORD, [(1, 'full')])])
|
(2, qmod.TOKEN_WORD, [(1, 'full')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
|
searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
|
||||||
@@ -150,7 +151,7 @@ def test_postcode_with_address_with_full_word():
|
|||||||
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1', 'bounded_viewbox': True},
|
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1', 'bounded_viewbox': True},
|
||||||
{'near': '10,10'}])
|
{'near': '10,10'}])
|
||||||
def test_near_item_only(kwargs):
|
def test_near_item_only(kwargs):
|
||||||
q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])])
|
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(2, 'foo')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
|
builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
|
||||||
@@ -166,7 +167,7 @@ def test_near_item_only(kwargs):
|
|||||||
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1'},
|
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1'},
|
||||||
{}])
|
{}])
|
||||||
def test_near_item_skipped(kwargs):
|
def test_near_item_skipped(kwargs):
|
||||||
q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])])
|
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(2, 'foo')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
|
builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs))
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1))))
|
||||||
@@ -175,8 +176,8 @@ def test_near_item_skipped(kwargs):
|
|||||||
|
|
||||||
|
|
||||||
def test_name_only_search():
|
def test_name_only_search():
|
||||||
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
|
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(1, TokenType.WORD, [(100, 'a')])])
|
(1, qmod.TOKEN_WORD, [(100, 'a')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
|
||||||
@@ -194,9 +195,9 @@ def test_name_only_search():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_with_qualifier():
|
def test_name_with_qualifier():
|
||||||
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
|
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(1, TokenType.WORD, [(100, 'a')])],
|
(1, qmod.TOKEN_WORD, [(100, 'a')])],
|
||||||
[(2, TokenType.QUALIFIER, [(55, 'hotel')])])
|
[(2, qmod.TOKEN_QUALIFIER, [(55, 'hotel')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
|
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
|
||||||
@@ -215,9 +216,9 @@ def test_name_with_qualifier():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_with_housenumber_search():
|
def test_name_with_housenumber_search():
|
||||||
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
|
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(1, TokenType.WORD, [(100, 'a')])],
|
(1, qmod.TOKEN_WORD, [(100, 'a')])],
|
||||||
[(2, TokenType.HOUSENUMBER, [(66, '66')])])
|
[(2, qmod.TOKEN_HOUSENUMBER, [(66, '66')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
|
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
|
||||||
@@ -235,13 +236,12 @@ def test_name_with_housenumber_search():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_and_address():
|
def test_name_and_address():
|
||||||
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
|
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(1, TokenType.WORD, [(100, 'a')])],
|
(1, qmod.TOKEN_WORD, [(100, 'a')])],
|
||||||
[(2, TokenType.PARTIAL, [(2, 'b')]),
|
[(2, qmod.TOKEN_PARTIAL, [(2, 'b')]),
|
||||||
(2, TokenType.WORD, [(101, 'b')])],
|
(2, qmod.TOKEN_WORD, [(101, 'b')])],
|
||||||
[(3, TokenType.PARTIAL, [(3, 'c')]),
|
[(3, qmod.TOKEN_PARTIAL, [(3, 'c')]),
|
||||||
(3, TokenType.WORD, [(102, 'c')])]
|
(3, qmod.TOKEN_WORD, [(102, 'c')])])
|
||||||
)
|
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
|
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
|
||||||
@@ -260,14 +260,13 @@ def test_name_and_address():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_and_complex_address():
|
def test_name_and_complex_address():
|
||||||
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
|
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(1, TokenType.WORD, [(100, 'a')])],
|
(1, qmod.TOKEN_WORD, [(100, 'a')])],
|
||||||
[(2, TokenType.PARTIAL, [(2, 'b')]),
|
[(2, qmod.TOKEN_PARTIAL, [(2, 'b')]),
|
||||||
(3, TokenType.WORD, [(101, 'bc')])],
|
(3, qmod.TOKEN_WORD, [(101, 'bc')])],
|
||||||
[(3, TokenType.PARTIAL, [(3, 'c')])],
|
[(3, qmod.TOKEN_PARTIAL, [(3, 'c')])],
|
||||||
[(4, TokenType.PARTIAL, [(4, 'd')]),
|
[(4, qmod.TOKEN_PARTIAL, [(4, 'd')]),
|
||||||
(4, TokenType.WORD, [(103, 'd')])]
|
(4, qmod.TOKEN_WORD, [(103, 'd')])])
|
||||||
)
|
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
|
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
|
||||||
@@ -286,9 +285,9 @@ def test_name_and_complex_address():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_only_near_search():
|
def test_name_only_near_search():
|
||||||
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])],
|
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
|
||||||
[(2, TokenType.PARTIAL, [(1, 'a')]),
|
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(2, TokenType.WORD, [(100, 'a')])])
|
(2, qmod.TOKEN_WORD, [(100, 'a')])])
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
|
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
|
||||||
@@ -302,8 +301,8 @@ def test_name_only_near_search():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_only_search_with_category():
|
def test_name_only_search_with_category():
|
||||||
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
|
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(1, TokenType.WORD, [(100, 'a')])])
|
(1, qmod.TOKEN_WORD, [(100, 'a')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
|
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
|
||||||
@@ -316,9 +315,9 @@ def test_name_only_search_with_category():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_with_near_item_search_with_category_mismatch():
|
def test_name_with_near_item_search_with_category_mismatch():
|
||||||
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])],
|
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
|
||||||
[(2, TokenType.PARTIAL, [(1, 'a')]),
|
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(2, TokenType.WORD, [(100, 'a')])])
|
(2, qmod.TOKEN_WORD, [(100, 'a')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
|
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
|
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
|
||||||
@@ -328,9 +327,9 @@ def test_name_with_near_item_search_with_category_mismatch():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_with_near_item_search_with_category_match():
|
def test_name_with_near_item_search_with_category_match():
|
||||||
q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])],
|
q = make_query([(1, qmod.TOKEN_NEAR_ITEM, [(88, 'g')])],
|
||||||
[(2, TokenType.PARTIAL, [(1, 'a')]),
|
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(2, TokenType.WORD, [(100, 'a')])])
|
(2, qmod.TOKEN_WORD, [(100, 'a')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'),
|
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'),
|
||||||
('this', 'that')]}))
|
('this', 'that')]}))
|
||||||
|
|
||||||
@@ -345,9 +344,9 @@ def test_name_with_near_item_search_with_category_match():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_with_qualifier_search_with_category_mismatch():
|
def test_name_with_qualifier_search_with_category_mismatch():
|
||||||
q = make_query([(1, TokenType.QUALIFIER, [(88, 'g')])],
|
q = make_query([(1, qmod.TOKEN_QUALIFIER, [(88, 'g')])],
|
||||||
[(2, TokenType.PARTIAL, [(1, 'a')]),
|
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(2, TokenType.WORD, [(100, 'a')])])
|
(2, qmod.TOKEN_WORD, [(100, 'a')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
|
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
|
searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
|
||||||
@@ -357,9 +356,9 @@ def test_name_with_qualifier_search_with_category_mismatch():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_with_qualifier_search_with_category_match():
|
def test_name_with_qualifier_search_with_category_match():
|
||||||
q = make_query([(1, TokenType.QUALIFIER, [(88, 'g')])],
|
q = make_query([(1, qmod.TOKEN_QUALIFIER, [(88, 'g')])],
|
||||||
[(2, TokenType.PARTIAL, [(1, 'a')]),
|
[(2, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(2, TokenType.WORD, [(100, 'a')])])
|
(2, qmod.TOKEN_WORD, [(100, 'a')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'),
|
builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'),
|
||||||
('this', 'that')]}))
|
('this', 'that')]}))
|
||||||
|
|
||||||
@@ -374,8 +373,8 @@ def test_name_with_qualifier_search_with_category_match():
|
|||||||
|
|
||||||
|
|
||||||
def test_name_only_search_with_countries():
|
def test_name_only_search_with_countries():
|
||||||
q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
|
q = make_query([(1, qmod.TOKEN_PARTIAL, [(1, 'a')]),
|
||||||
(1, TokenType.WORD, [(100, 'a')])])
|
(1, qmod.TOKEN_WORD, [(100, 'a')])])
|
||||||
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'de,en'}))
|
builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'de,en'}))
|
||||||
|
|
||||||
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
|
searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
|
||||||
@@ -391,19 +390,19 @@ def test_name_only_search_with_countries():
|
|||||||
|
|
||||||
def make_counted_searches(name_part, name_full, address_part, address_full,
|
def make_counted_searches(name_part, name_full, address_part, address_full,
|
||||||
num_address_parts=1):
|
num_address_parts=1):
|
||||||
q = QueryStruct([Phrase(PhraseType.NONE, '')])
|
q = QueryStruct([Phrase(qmod.PHRASE_ANY, '')])
|
||||||
for i in range(1 + num_address_parts):
|
for i in range(1 + num_address_parts):
|
||||||
q.add_node(BreakType.WORD, PhraseType.NONE)
|
q.add_node(qmod.BREAK_WORD, qmod.PHRASE_ANY)
|
||||||
q.add_node(BreakType.END, PhraseType.NONE)
|
q.add_node(qmod.BREAK_END, qmod.PHRASE_ANY)
|
||||||
|
|
||||||
q.add_token(TokenRange(0, 1), TokenType.PARTIAL,
|
q.add_token(TokenRange(0, 1), qmod.TOKEN_PARTIAL,
|
||||||
MyToken(0.5, 1, name_part, 1, 'name_part'))
|
MyToken(0.5, 1, name_part, 1, 'name_part'))
|
||||||
q.add_token(TokenRange(0, 1), TokenType.WORD,
|
q.add_token(TokenRange(0, 1), qmod.TOKEN_WORD,
|
||||||
MyToken(0, 101, name_full, 1, 'name_full'))
|
MyToken(0, 101, name_full, 1, 'name_full'))
|
||||||
for i in range(num_address_parts):
|
for i in range(num_address_parts):
|
||||||
q.add_token(TokenRange(i + 1, i + 2), TokenType.PARTIAL,
|
q.add_token(TokenRange(i + 1, i + 2), qmod.TOKEN_PARTIAL,
|
||||||
MyToken(0.5, 2, address_part, 1, 'address_part'))
|
MyToken(0.5, 2, address_part, 1, 'address_part'))
|
||||||
q.add_token(TokenRange(i + 1, i + 2), TokenType.WORD,
|
q.add_token(TokenRange(i + 1, i + 2), qmod.TOKEN_WORD,
|
||||||
MyToken(0, 102, address_full, 1, 'address_full'))
|
MyToken(0, 102, address_full, 1, 'address_full'))
|
||||||
|
|
||||||
builder = SearchBuilder(q, SearchDetails())
|
builder = SearchBuilder(q, SearchDetails())
|
||||||
@@ -422,8 +421,8 @@ def test_infrequent_partials_in_name():
     assert len(search.lookups) == 2
     assert len(search.rankings) == 2
 
-    assert set((l.column, l.lookup_type.__name__) for l in search.lookups) == \
+    assert set((s.column, s.lookup_type.__name__) for s in search.lookups) == \
         {('name_vector', 'LookupAll'), ('nameaddress_vector', 'Restrict')}
 
 
 def test_frequent_partials_in_name_and_address():
@@ -434,10 +433,10 @@ def test_frequent_partials_in_name_and_address():
     assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
     searches.sort(key=lambda s: s.penalty)
 
-    assert set((l.column, l.lookup_type.__name__) for l in searches[0].lookups) == \
+    assert set((s.column, s.lookup_type.__name__) for s in searches[0].lookups) == \
         {('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')}
-    assert set((l.column, l.lookup_type.__name__) for l in searches[1].lookups) == \
+    assert set((s.column, s.lookup_type.__name__) for s in searches[1].lookups) == \
         {('nameaddress_vector', 'LookupAll'), ('name_vector', 'LookupAll')}
 
 
 def test_too_frequent_partials_in_name_and_address():
@@ -448,5 +447,5 @@ def test_too_frequent_partials_in_name_and_address():
     assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
     searches.sort(key=lambda s: s.penalty)
 
-    assert set((l.column, l.lookup_type.__name__) for l in searches[0].lookups) == \
+    assert set((s.column, s.lookup_type.__name__) for s in searches[0].lookups) == \
         {('name_vector', 'LookupAny'), ('nameaddress_vector', 'Restrict')}
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Tests for query analyzer for ICU tokenizer.
@@ -11,11 +11,13 @@ import pytest
 import pytest_asyncio
 
 from nominatim_api import NominatimAPIAsync
-from nominatim_api.search.query import Phrase, PhraseType, TokenType, BreakType
+from nominatim_api.search.query import Phrase
+import nominatim_api.search.query as qmod
 import nominatim_api.search.icu_tokenizer as tok
 from nominatim_api.logging import set_log_output, get_and_disable
 
-async def add_word(conn, word_id, word_token, wtype, word, info = None):
+
+async def add_word(conn, word_id, word_token, wtype, word, info=None):
     t = conn.t.meta.tables['word']
     await conn.execute(t.insert(), {'word_id': word_id,
                                     'word_token': word_token,
@@ -25,7 +27,8 @@ async def add_word(conn, word_id, word_token, wtype, word, info = None):
 
 
 def make_phrase(query):
-    return [Phrase(PhraseType.NONE, s) for s in query.split(',')]
+    return [Phrase(qmod.PHRASE_ANY, s) for s in query.split(',')]
 
+
 @pytest_asyncio.fixture
 async def conn(table_factory):
@@ -62,7 +65,7 @@ async def test_single_phrase_with_unknown_terms(conn):
     query = await ana.analyze_query(make_phrase('foo BAR'))
 
     assert len(query.source) == 1
-    assert query.source[0].ptype == PhraseType.NONE
+    assert query.source[0].ptype == qmod.PHRASE_ANY
     assert query.source[0].text == 'foo bar'
 
     assert query.num_token_slots() == 2
@@ -96,17 +99,15 @@ async def test_splitting_in_transliteration(conn):
     assert query.num_token_slots() == 2
     assert query.nodes[0].starting
     assert query.nodes[1].starting
-    assert query.nodes[1].btype == BreakType.TOKEN
+    assert query.nodes[1].btype == qmod.BREAK_TOKEN
 
 
 @pytest.mark.asyncio
-@pytest.mark.parametrize('term,order', [('23456', ['POSTCODE', 'HOUSENUMBER', 'WORD', 'PARTIAL']),
-                                        ('3', ['HOUSENUMBER', 'POSTCODE', 'WORD', 'PARTIAL'])
-                                        ])
+@pytest.mark.parametrize('term,order', [('23456', ['P', 'H', 'W', 'w']),
+                                        ('3', ['H', 'W', 'w'])])
 async def test_penalty_postcodes_and_housenumbers(conn, term, order):
     ana = await tok.create_query_analyzer(conn)
 
-    await add_word(conn, 1, term, 'P', None)
     await add_word(conn, 2, term, 'H', term)
     await add_word(conn, 3, term, 'w', term)
     await add_word(conn, 4, term, 'W', term)
@@ -115,11 +116,12 @@ async def test_penalty_postcodes_and_housenumbers(conn, term, order):
 
     assert query.num_token_slots() == 1
 
-    torder = [(tl.tokens[0].penalty, tl.ttype.name) for tl in query.nodes[0].starting]
+    torder = [(tl.tokens[0].penalty, tl.ttype) for tl in query.nodes[0].starting]
     torder.sort()
 
     assert [t[1] for t in torder] == order
 
+
 @pytest.mark.asyncio
 async def test_category_words_only_at_beginning(conn):
     ana = await tok.create_query_analyzer(conn)
@@ -131,7 +133,7 @@ async def test_category_words_only_at_beginning(conn):
 
     assert query.num_token_slots() == 3
     assert len(query.nodes[0].starting) == 1
-    assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM
+    assert query.nodes[0].starting[0].ttype == qmod.TOKEN_NEAR_ITEM
     assert not query.nodes[2].starting
 
 
@@ -145,7 +147,7 @@ async def test_freestanding_qualifier_words_become_category(conn):
 
     assert query.num_token_slots() == 1
     assert len(query.nodes[0].starting) == 1
-    assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM
+    assert query.nodes[0].starting[0].ttype == qmod.TOKEN_NEAR_ITEM
 
 
 @pytest.mark.asyncio
@@ -158,9 +160,9 @@ async def test_qualifier_words(conn):
     query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo'))
 
     assert query.num_token_slots() == 5
-    assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.QUALIFIER}
-    assert set(t.ttype for t in query.nodes[2].starting) == {TokenType.QUALIFIER}
-    assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.QUALIFIER}
+    assert set(t.ttype for t in query.nodes[0].starting) == {qmod.TOKEN_QUALIFIER}
+    assert set(t.ttype for t in query.nodes[2].starting) == {qmod.TOKEN_QUALIFIER}
+    assert set(t.ttype for t in query.nodes[4].starting) == {qmod.TOKEN_QUALIFIER}
 
 
 @pytest.mark.asyncio
@@ -172,14 +174,16 @@ async def test_add_unknown_housenumbers(conn):
     query = await ana.analyze_query(make_phrase('466 23 99834 34a'))
 
     assert query.num_token_slots() == 4
-    assert query.nodes[0].starting[0].ttype == TokenType.HOUSENUMBER
+    assert query.nodes[0].starting[0].ttype == qmod.TOKEN_HOUSENUMBER
     assert len(query.nodes[0].starting[0].tokens) == 1
     assert query.nodes[0].starting[0].tokens[0].token == 0
-    assert query.nodes[1].starting[0].ttype == TokenType.HOUSENUMBER
+    assert query.nodes[1].starting[0].ttype == qmod.TOKEN_HOUSENUMBER
     assert len(query.nodes[1].starting[0].tokens) == 1
     assert query.nodes[1].starting[0].tokens[0].token == 1
-    assert not query.nodes[2].starting
-    assert not query.nodes[3].starting
+    assert query.nodes[2].has_tokens(3, qmod.TOKEN_POSTCODE)
+    assert not query.nodes[2].has_tokens(3, qmod.TOKEN_HOUSENUMBER)
+    assert not query.nodes[2].has_tokens(4, qmod.TOKEN_HOUSENUMBER)
+    assert not query.nodes[3].has_tokens(4, qmod.TOKEN_HOUSENUMBER)
 
 
 @pytest.mark.asyncio
171 test/python/api/search/test_postcode_parser.py Normal file
@@ -0,0 +1,171 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Nominatim. (https://nominatim.org)
+#
+# Copyright (C) 2025 by the Nominatim developer community.
+# For a full list of authors see the git log.
+"""
+Test for parsing of postcodes in queries.
+"""
+import re
+from itertools import zip_longest
+
+import pytest
+
+from nominatim_api.search.postcode_parser import PostcodeParser
+from nominatim_api.search.query import QueryStruct, PHRASE_ANY, PHRASE_POSTCODE, PHRASE_STREET
+
+
+@pytest.fixture
+def pc_config(project_env):
+    country_file = project_env.project_dir / 'country_settings.yaml'
+    country_file.write_text(r"""
+ab:
+    postcode:
+      pattern: "ddddd ll"
+ba:
+    postcode:
+      pattern: "ddddd"
+de:
+    postcode:
+      pattern: "ddddd"
+gr:
+    postcode:
+      pattern: "(ddd) ?(dd)"
+      output: \1 \2
+in:
+    postcode:
+      pattern: "(ddd) ?(ddd)"
+      output: \1\2
+mc:
+    postcode:
+      pattern: "980dd"
+mz:
+    postcode:
+      pattern: "(dddd)(?:-dd)?"
+bn:
+    postcode:
+      pattern: "(ll) ?(dddd)"
+      output: \1\2
+ky:
+    postcode:
+      pattern: "(d)-(dddd)"
+      output: KY\1-\2
+
+gb:
+    postcode:
+      pattern: "(l?ld[A-Z0-9]?) ?(dll)"
+      output: \1 \2
+
+""")
+
+    return project_env
+
+
+def mk_query(inp):
+    query = QueryStruct([])
+    phrase_split = re.split(r"([ ,:'-])", inp)
+
+    for word, breakchar in zip_longest(*[iter(phrase_split)]*2, fillvalue='>'):
+        query.add_node(breakchar, PHRASE_ANY, 0.1, word, word)
+
+    return query
+
+
+@pytest.mark.parametrize('query,pos', [('45325 Berlin', 0),
+                                       ('45325:Berlin', 0),
+                                       ('45325,Berlin', 0),
+                                       ('Berlin 45325', 1),
+                                       ('Berlin,45325', 1),
+                                       ('Berlin:45325', 1),
+                                       ('Hansastr,45325 Berlin', 1),
+                                       ('Hansastr 45325 Berlin', 1)])
+def test_simple_postcode(pc_config, query, pos):
+    parser = PostcodeParser(pc_config)
+
+    result = parser.parse(mk_query(query))
+
+    assert result == {(pos, pos + 1, '45325'), (pos, pos + 1, '453 25')}
+
+
+@pytest.mark.parametrize('query', ['EC1R 3HF', 'ec1r 3hf'])
+def test_postcode_matching_case_insensitive(pc_config, query):
+    parser = PostcodeParser(pc_config)
+
+    assert parser.parse(mk_query(query)) == {(0, 2, 'EC1R 3HF')}
+
+
+def test_contained_postcode(pc_config):
+    parser = PostcodeParser(pc_config)
+
+    assert parser.parse(mk_query('12345 dx')) == {(0, 1, '12345'), (0, 1, '123 45'),
+                                                  (0, 2, '12345 DX')}
+
+
+@pytest.mark.parametrize('query,frm,to', [('345987', 0, 1), ('345 987', 0, 2),
+                                          ('Aina 345 987', 1, 3),
+                                          ('Aina 23 345 987 ff', 2, 4)])
+def test_postcode_with_space(pc_config, query, frm, to):
+    parser = PostcodeParser(pc_config)
+
+    result = parser.parse(mk_query(query))
+
+    assert result == {(frm, to, '345987')}
+
+
+def test_overlapping_postcode(pc_config):
+    parser = PostcodeParser(pc_config)
+
+    assert parser.parse(mk_query('123 456 78')) == {(0, 2, '123456'), (1, 3, '456 78')}
+
+
+@pytest.mark.parametrize('query', ['45325-Berlin', "45325'Berlin",
+                                   'Berlin-45325', "Berlin'45325", '45325Berlin'
+                                   '345-987', "345'987", '345,987', '345:987'])
+def test_not_a_postcode(pc_config, query):
+    parser = PostcodeParser(pc_config)
+
+    assert not parser.parse(mk_query(query))
+
+
+@pytest.mark.parametrize('query', ['ba 12233', 'ba-12233'])
+def test_postcode_with_country_prefix(pc_config, query):
+    parser = PostcodeParser(pc_config)
+
+    assert (0, 2, '12233') in parser.parse(mk_query(query))
+
+
+def test_postcode_with_joined_country_prefix(pc_config):
+    parser = PostcodeParser(pc_config)
+
+    assert parser.parse(mk_query('ba12233')) == {(0, 1, '12233')}
+
+
+def test_postcode_with_non_matching_country_prefix(pc_config):
+    parser = PostcodeParser(pc_config)
+
+    assert not parser.parse(mk_query('ky12233'))
+
+
+def test_postcode_inside_postcode_phrase(pc_config):
+    parser = PostcodeParser(pc_config)
+
+    query = QueryStruct([])
+    query.nodes[-1].ptype = PHRASE_STREET
+    query.add_node(',', PHRASE_STREET, 0.1, '12345', '12345')
+    query.add_node(',', PHRASE_POSTCODE, 0.1, 'xz', 'xz')
+    query.add_node('>', PHRASE_POSTCODE, 0.1, '4444', '4444')
+
+    assert parser.parse(query) == {(2, 3, '4444')}
+
+
+def test_partial_postcode_in_postcode_phrase(pc_config):
+    parser = PostcodeParser(pc_config)
+
+    query = QueryStruct([])
+    query.nodes[-1].ptype = PHRASE_POSTCODE
+    query.add_node(' ', PHRASE_POSTCODE, 0.1, '2224', '2224')
+    query.add_node('>', PHRASE_POSTCODE, 0.1, '12345', '12345')
+
+    assert not parser.parse(query)
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Test data types for search queries.
@@ -11,14 +11,15 @@ import pytest
 
 import nominatim_api.search.query as nq
 
+
 def test_token_range_equal():
     assert nq.TokenRange(2, 3) == nq.TokenRange(2, 3)
     assert not (nq.TokenRange(2, 3) != nq.TokenRange(2, 3))
 
 
 @pytest.mark.parametrize('lop,rop', [((1, 2), (3, 4)),
                                      ((3, 4), (3, 5)),
                                      ((10, 12), (11, 12))])
 def test_token_range_unequal(lop, rop):
     assert not (nq.TokenRange(*lop) == nq.TokenRange(*rop))
     assert nq.TokenRange(*lop) != nq.TokenRange(*rop)
@@ -28,17 +29,17 @@ def test_token_range_lt():
     assert nq.TokenRange(1, 3) < nq.TokenRange(10, 12)
     assert nq.TokenRange(5, 6) < nq.TokenRange(7, 8)
     assert nq.TokenRange(1, 4) < nq.TokenRange(4, 5)
-    assert not(nq.TokenRange(5, 6) < nq.TokenRange(5, 6))
-    assert not(nq.TokenRange(10, 11) < nq.TokenRange(4, 5))
+    assert not (nq.TokenRange(5, 6) < nq.TokenRange(5, 6))
+    assert not (nq.TokenRange(10, 11) < nq.TokenRange(4, 5))
 
 
 def test_token_rankge_gt():
     assert nq.TokenRange(3, 4) > nq.TokenRange(1, 2)
     assert nq.TokenRange(100, 200) > nq.TokenRange(10, 11)
     assert nq.TokenRange(10, 11) > nq.TokenRange(4, 10)
-    assert not(nq.TokenRange(5, 6) > nq.TokenRange(5, 6))
-    assert not(nq.TokenRange(1, 2) > nq.TokenRange(3, 4))
-    assert not(nq.TokenRange(4, 10) > nq.TokenRange(3, 5))
+    assert not (nq.TokenRange(5, 6) > nq.TokenRange(5, 6))
+    assert not (nq.TokenRange(1, 2) > nq.TokenRange(3, 4))
+    assert not (nq.TokenRange(4, 10) > nq.TokenRange(3, 5))
 
 
 def test_token_range_unimplemented_ops():
@@ -46,3 +47,19 @@ def test_token_range_unimplemented_ops():
         nq.TokenRange(1, 3) <= nq.TokenRange(10, 12)
     with pytest.raises(TypeError):
         nq.TokenRange(1, 3) >= nq.TokenRange(10, 12)
+
+
+def test_query_extract_words():
+    q = nq.QueryStruct([])
+    q.add_node(nq.BREAK_WORD, nq.PHRASE_ANY, 0.1, '12', '')
+    q.add_node(nq.BREAK_TOKEN, nq.PHRASE_ANY, 0.0, 'ab', '')
+    q.add_node(nq.BREAK_PHRASE, nq.PHRASE_ANY, 0.0, '12', '')
+    q.add_node(nq.BREAK_END, nq.PHRASE_ANY, 0.5, 'hallo', '')
+
+    words = q.extract_words(base_penalty=1.0)
+
+    assert set(words.keys()) \
+        == {'12', 'ab', 'hallo', '12 ab', 'ab 12', '12 ab 12'}
+    assert sorted(words['12']) == [nq.TokenRange(0, 1, 1.0), nq.TokenRange(2, 3, 1.0)]
+    assert words['12 ab'] == [nq.TokenRange(0, 2, 1.1)]
+    assert words['hallo'] == [nq.TokenRange(3, 4, 1.0)]
@@ -2,18 +2,17 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Tests for query analyzer creation.
 """
-from pathlib import Path
-
 import pytest
 
 from nominatim_api.search.query_analyzer_factory import make_query_analyzer
 from nominatim_api.search.icu_tokenizer import ICUQueryAnalyzer
 
+
 @pytest.mark.asyncio
 async def test_import_icu_tokenizer(table_factory, api):
     table_factory('nominatim_properties',
@@ -2,7 +2,7 @@
 #
 # This file is part of Nominatim. (https://nominatim.org)
 #
-# Copyright (C) 2024 by the Nominatim developer community.
+# Copyright (C) 2025 by the Nominatim developer community.
 # For a full list of authors see the git log.
 """
 Tests for running the country searcher.
@@ -48,6 +48,7 @@ def test_find_from_placex(apiobj, frontend):
     assert results[0].place_id == 55
     assert results[0].accuracy == 0.8
 
+
 def test_find_from_fallback_countries(apiobj, frontend):
     apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
     apiobj.add_country_name('ro', {'name': 'România'})
@@ -87,7 +88,6 @@ class TestCountryParameters:
         apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
         apiobj.add_country_name('ro', {'name': 'România'})
 
-
     @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
                                       napi.GeometryFormat.KML,
                                       napi.GeometryFormat.SVG,
@@ -100,7 +100,6 @@ class TestCountryParameters:
         assert len(results) == 1
         assert geom.name.lower() in results[0].geometry
 
-
     @pytest.mark.parametrize('pid,rids', [(76, [55]), (55, [])])
     def test_exclude_place_id(self, apiobj, frontend, pid, rids):
         results = run_search(apiobj, frontend, 0.5, ['yw', 'ro'],
@@ -108,7 +107,6 @@ class TestCountryParameters:
 
         assert [r.place_id for r in results] == rids
 
-
    @pytest.mark.parametrize('viewbox,rids', [((9, 9, 11, 11), [55]),
                                              ((-10, -10, -3, -3), [])])
    def test_bounded_viewbox_in_placex(self, apiobj, frontend, viewbox, rids):
@@ -118,9 +116,8 @@ class TestCountryParameters:
 
         assert [r.place_id for r in results] == rids
 
-
    @pytest.mark.parametrize('viewbox,numres', [((0, 0, 1, 1), 1),
                                                ((-10, -10, -3, -3), 0)])
    def test_bounded_viewbox_in_fallback(self, apiobj, frontend, viewbox, numres):
        results = run_search(apiobj, frontend, 0.5, ['ro'],
                             details=SearchDetails.from_kwargs({'viewbox': viewbox,
Some files were not shown because too many files have changed in this diff.