mirror of
https://github.com/osm-search/Nominatim.git
synced 2026-02-26 11:08:13 +00:00
Merge pull request #1901 from lonvia/speed-up-indexing
Batch-index places at rank 30
This commit is contained in:
0
nominatim/indexer/__init__.py
Normal file
0
nominatim/indexer/__init__.py
Normal file
52
nominatim/indexer/progress.py
Normal file
52
nominatim/indexer/progress.py
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# SPDX-License-Identifier: GPL-2.0-only
|
||||||
|
#
|
||||||
|
# This file is part of Nominatim.
|
||||||
|
# Copyright (C) 2020 Sarah Hoffmann
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
# Module-wide logger. Uses the root logger so progress output follows
# whatever logging configuration the embedding application installed.
log = logging.getLogger()
|
||||||
|
|
||||||
|
class ProgressLogger(object):
    """ Tracks and prints progress for the indexing process.

        `name` is the name of the indexing step being tracked.
        `total` sets up the total number of items that need processing.
        `log_interval` denotes the interval in seconds at which progress
        should be reported.
    """

    def __init__(self, name, total, log_interval=1):
        self.name = name
        self.total_places = total
        self.done_places = 0
        self.rank_start_time = datetime.now()
        # Target interval (in seconds) between progress reports.
        self.log_interval = log_interval
        # Threshold of processed places at which the next report is due.
        # When INFO logging is disabled, push it past `total` so add()
        # never does the reporting work at all.
        self.next_info = 100 if log.isEnabledFor(logging.INFO) else total + 1

    def add(self, num=1):
        """ Mark `num` places as processed. Print a log message if
            logging is at least at info level and the log interval has
            passed.
        """
        self.done_places += num

        if self.done_places >= self.next_info:
            now = datetime.now()
            done_time = (now - self.rank_start_time).total_seconds()
            # Guard against division by zero when the threshold is
            # reached within the clock's resolution; just defer the
            # report to the next batch.
            if done_time <= 0:
                self.next_info += num
                return
            places_per_sec = self.done_places / done_time
            eta = (self.total_places - self.done_places)/places_per_sec

            log.info("Done {} in {} @ {:.3f} per second - {} ETA (seconds): {:.2f}"
                     .format(self.done_places, int(done_time),
                             places_per_sec, self.name, eta))

            # Schedule the next report roughly `log_interval` seconds
            # of throughput ahead.
            self.next_info += int(places_per_sec * self.log_interval)

    def done(self):
        """ Print final statistics about the progress.
        """
        rank_end_time = datetime.now()
        diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
        # Avoid division by zero when the whole step finished within the
        # clock's resolution (e.g. nothing to index).
        places_per_sec = (self.done_places / diff_seconds
                          if diff_seconds > 0 else self.done_places)

        log.warning("Done {}/{} in {} @ {:.3f} per second - FINISHED {}\n".format(
            self.done_places, self.total_places, int(diff_seconds),
            places_per_sec, self.name))
|
||||||
@@ -32,6 +32,8 @@ import psycopg2
|
|||||||
from psycopg2.extras import wait_select
|
from psycopg2.extras import wait_select
|
||||||
import select
|
import select
|
||||||
|
|
||||||
|
from indexer.progress import ProgressLogger
|
||||||
|
|
||||||
log = logging.getLogger()
|
log = logging.getLogger()
|
||||||
|
|
||||||
def make_connection(options, asynchronous=False):
|
def make_connection(options, asynchronous=False):
|
||||||
@@ -55,24 +57,19 @@ class RankRunner(object):
|
|||||||
def name(self):
|
def name(self):
|
||||||
return "rank {}".format(self.rank)
|
return "rank {}".format(self.rank)
|
||||||
|
|
||||||
def sql_index_sectors(self):
|
def sql_count_objects(self):
|
||||||
return """SELECT geometry_sector, count(*) FROM placex
|
return """SELECT count(*) FROM placex
|
||||||
WHERE rank_search = {} and indexed_status > 0
|
WHERE rank_search = {} and indexed_status > 0
|
||||||
GROUP BY geometry_sector
|
""".format(self.rank)
|
||||||
ORDER BY geometry_sector""".format(self.rank)
|
|
||||||
|
|
||||||
def sql_nosector_places(self):
|
def sql_get_objects(self):
|
||||||
return """SELECT place_id FROM placex
|
return """SELECT place_id FROM placex
|
||||||
WHERE indexed_status > 0 and rank_search = {}
|
WHERE indexed_status > 0 and rank_search = {}
|
||||||
ORDER BY geometry_sector""".format(self.rank)
|
ORDER BY geometry_sector""".format(self.rank)
|
||||||
|
|
||||||
def sql_sector_places(self):
|
def sql_index_place(self, ids):
|
||||||
return """SELECT place_id FROM placex
|
return "UPDATE placex SET indexed_status = 0 WHERE place_id IN ({})"\
|
||||||
WHERE indexed_status > 0 and rank_search = {}
|
.format(','.join((str(i) for i in ids)))
|
||||||
and geometry_sector = %s""".format(self.rank)
|
|
||||||
|
|
||||||
def sql_index_place(self):
|
|
||||||
return "UPDATE placex SET indexed_status = 0 WHERE place_id = %s"
|
|
||||||
|
|
||||||
|
|
||||||
class InterpolationRunner(object):
|
class InterpolationRunner(object):
|
||||||
@@ -83,25 +80,19 @@ class InterpolationRunner(object):
|
|||||||
def name(self):
|
def name(self):
|
||||||
return "interpolation lines (location_property_osmline)"
|
return "interpolation lines (location_property_osmline)"
|
||||||
|
|
||||||
def sql_index_sectors(self):
|
def sql_count_objects(self):
|
||||||
return """SELECT geometry_sector, count(*) FROM location_property_osmline
|
return """SELECT count(*) FROM location_property_osmline
|
||||||
WHERE indexed_status > 0
|
WHERE indexed_status > 0"""
|
||||||
GROUP BY geometry_sector
|
|
||||||
ORDER BY geometry_sector"""
|
|
||||||
|
|
||||||
def sql_nosector_places(self):
|
def sql_get_objects(self):
|
||||||
return """SELECT place_id FROM location_property_osmline
|
return """SELECT place_id FROM location_property_osmline
|
||||||
WHERE indexed_status > 0
|
WHERE indexed_status > 0
|
||||||
ORDER BY geometry_sector"""
|
ORDER BY geometry_sector"""
|
||||||
|
|
||||||
def sql_sector_places(self):
|
def sql_index_place(self, ids):
|
||||||
return """SELECT place_id FROM location_property_osmline
|
|
||||||
WHERE indexed_status > 0 and geometry_sector = %s
|
|
||||||
ORDER BY geometry_sector"""
|
|
||||||
|
|
||||||
def sql_index_place(self):
|
|
||||||
return """UPDATE location_property_osmline
|
return """UPDATE location_property_osmline
|
||||||
SET indexed_status = 0 WHERE place_id = %s"""
|
SET indexed_status = 0 WHERE place_id IN ({})"""\
|
||||||
|
.format(','.join((str(i) for i in ids)))
|
||||||
|
|
||||||
|
|
||||||
class DBConnection(object):
|
class DBConnection(object):
|
||||||
@@ -210,83 +201,48 @@ class Indexer(object):
|
|||||||
self.index(RankRunner(rank))
|
self.index(RankRunner(rank))
|
||||||
|
|
||||||
if self.maxrank == 30:
|
if self.maxrank == 30:
|
||||||
self.index(InterpolationRunner())
|
self.index(InterpolationRunner(), 20)
|
||||||
|
|
||||||
self.index(RankRunner(self.maxrank))
|
self.index(RankRunner(self.maxrank), 20)
|
||||||
|
|
||||||
def index(self, obj):
|
def index(self, obj, batch=1):
|
||||||
""" Index a single rank or table. `obj` describes the SQL to use
|
""" Index a single rank or table. `obj` describes the SQL to use
|
||||||
for indexing.
|
for indexing. `batch` describes the number of objects that
|
||||||
|
should be processed with a single SQL statement
|
||||||
"""
|
"""
|
||||||
log.warning("Starting {}".format(obj.name()))
|
log.warning("Starting {}".format(obj.name()))
|
||||||
|
|
||||||
cur = self.conn.cursor(name='main')
|
cur = self.conn.cursor()
|
||||||
cur.execute(obj.sql_index_sectors())
|
cur.execute(obj.sql_count_objects())
|
||||||
|
|
||||||
total_tuples = 0
|
total_tuples = cur.fetchone()[0]
|
||||||
for r in cur:
|
log.debug("Total number of rows: {}".format(total_tuples))
|
||||||
total_tuples += r[1]
|
|
||||||
log.debug("Total number of rows; {}".format(total_tuples))
|
|
||||||
|
|
||||||
cur.scroll(0, mode='absolute')
|
cur.close()
|
||||||
|
|
||||||
next_thread = self.find_free_thread()
|
next_thread = self.find_free_thread()
|
||||||
done_tuples = 0
|
progress = ProgressLogger(obj.name(), total_tuples)
|
||||||
rank_start_time = datetime.now()
|
|
||||||
|
|
||||||
sector_sql = obj.sql_sector_places()
|
cur = self.conn.cursor(name='places')
|
||||||
index_sql = obj.sql_index_place()
|
cur.execute(obj.sql_get_objects())
|
||||||
min_grouped_tuples = total_tuples - len(self.threads) * 1000
|
|
||||||
|
|
||||||
next_info = 100 if log.isEnabledFor(logging.INFO) else total_tuples + 1
|
while True:
|
||||||
|
places = [p[0] for p in cur.fetchmany(batch)]
|
||||||
for r in cur:
|
if len(places) == 0:
|
||||||
sector = r[0]
|
|
||||||
|
|
||||||
# Should we do the remaining ones together?
|
|
||||||
do_all = done_tuples > min_grouped_tuples
|
|
||||||
|
|
||||||
pcur = self.conn.cursor(name='places')
|
|
||||||
|
|
||||||
if do_all:
|
|
||||||
pcur.execute(obj.sql_nosector_places())
|
|
||||||
else:
|
|
||||||
pcur.execute(sector_sql, (sector, ))
|
|
||||||
|
|
||||||
for place in pcur:
|
|
||||||
place_id = place[0]
|
|
||||||
log.debug("Processing place {}".format(place_id))
|
|
||||||
thread = next(next_thread)
|
|
||||||
|
|
||||||
thread.perform(index_sql, (place_id,))
|
|
||||||
done_tuples += 1
|
|
||||||
|
|
||||||
if done_tuples >= next_info:
|
|
||||||
now = datetime.now()
|
|
||||||
done_time = (now - rank_start_time).total_seconds()
|
|
||||||
tuples_per_sec = done_tuples / done_time
|
|
||||||
log.info("Done {} in {} @ {:.3f} per second - {} ETA (seconds): {:.2f}"
|
|
||||||
.format(done_tuples, int(done_time),
|
|
||||||
tuples_per_sec, obj.name(),
|
|
||||||
(total_tuples - done_tuples)/tuples_per_sec))
|
|
||||||
next_info += int(tuples_per_sec)
|
|
||||||
|
|
||||||
pcur.close()
|
|
||||||
|
|
||||||
if do_all:
|
|
||||||
break
|
break
|
||||||
|
|
||||||
|
log.debug("Processing places: {}".format(places))
|
||||||
|
thread = next(next_thread)
|
||||||
|
|
||||||
|
thread.perform(obj.sql_index_place(places))
|
||||||
|
progress.add(len(places))
|
||||||
|
|
||||||
cur.close()
|
cur.close()
|
||||||
|
|
||||||
for t in self.threads:
|
for t in self.threads:
|
||||||
t.wait()
|
t.wait()
|
||||||
|
|
||||||
rank_end_time = datetime.now()
|
progress.done()
|
||||||
diff_seconds = (rank_end_time-rank_start_time).total_seconds()
|
|
||||||
|
|
||||||
log.warning("Done {}/{} in {} @ {:.3f} per second - FINISHED {}\n".format(
|
|
||||||
done_tuples, total_tuples, int(diff_seconds),
|
|
||||||
done_tuples/diff_seconds, obj.name()))
|
|
||||||
|
|
||||||
def find_free_thread(self):
|
def find_free_thread(self):
|
||||||
""" Generator that returns the next connection that is free for
|
""" Generator that returns the next connection that is free for
|
||||||
|
|||||||
Reference in New Issue
Block a user