search.py 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710
  1. # -*- coding: utf-8 -*-
  2. # Copyright 2015, 2016 OpenMarket Ltd
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. import logging
  16. import re
  17. from collections import namedtuple
  18. from canonicaljson import json
  19. from twisted.internet import defer
  20. from synapse.api.errors import SynapseError
  21. from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
  22. from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
  23. from synapse.storage.database import Database
  24. from synapse.storage.engines import PostgresEngine, Sqlite3Engine
  25. logger = logging.getLogger(__name__)
# One row destined for the event_search table: `value` is the searchable
# text extracted from the event field named by `key` (e.g. "content.body"),
# plus the ids and ordering metadata needed to store/rank it.
SearchEntry = namedtuple(
    "SearchEntry",
    ["key", "value", "event_id", "room_id", "stream_ordering", "origin_server_ts"],
)
  30. class SearchWorkerStore(SQLBaseStore):
  31. def store_search_entries_txn(self, txn, entries):
  32. """Add entries to the search table
  33. Args:
  34. txn (cursor):
  35. entries (iterable[SearchEntry]):
  36. entries to be added to the table
  37. """
  38. if not self.hs.config.enable_search:
  39. return
  40. if isinstance(self.database_engine, PostgresEngine):
  41. sql = (
  42. "INSERT INTO event_search"
  43. " (event_id, room_id, key, vector, stream_ordering, origin_server_ts)"
  44. " VALUES (?,?,?,to_tsvector('english', ?),?,?)"
  45. )
  46. args = (
  47. (
  48. entry.event_id,
  49. entry.room_id,
  50. entry.key,
  51. entry.value,
  52. entry.stream_ordering,
  53. entry.origin_server_ts,
  54. )
  55. for entry in entries
  56. )
  57. txn.executemany(sql, args)
  58. elif isinstance(self.database_engine, Sqlite3Engine):
  59. sql = (
  60. "INSERT INTO event_search (event_id, room_id, key, value)"
  61. " VALUES (?,?,?,?)"
  62. )
  63. args = (
  64. (entry.event_id, entry.room_id, entry.key, entry.value)
  65. for entry in entries
  66. )
  67. txn.executemany(sql, args)
  68. else:
  69. # This should be unreachable.
  70. raise Exception("Unrecognized database engine")
class SearchBackgroundUpdateStore(SearchWorkerStore):
    # Names under which the background updates below are registered/persisted.
    EVENT_SEARCH_UPDATE_NAME = "event_search"
    EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order"
    EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist"
    EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin"

    def __init__(self, database: Database, db_conn, hs):
        super(SearchBackgroundUpdateStore, self).__init__(database, db_conn, hs)

        # If search is disabled we register no handlers at all, so any queued
        # search background updates will simply never run on this instance.
        if not hs.config.enable_search:
            return

        self.db.updates.register_background_update_handler(
            self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
        )
        self.db.updates.register_background_update_handler(
            self.EVENT_SEARCH_ORDER_UPDATE_NAME, self._background_reindex_search_order
        )

        # we used to have a background update to turn the GIN index into a
        # GIST one; we no longer do that (obviously) because we actually want
        # a GIN index. However, it's possible that some people might still have
        # the background update queued, so we register a handler to clear the
        # background update.
        self.db.updates.register_noop_background_update(
            self.EVENT_SEARCH_USE_GIST_POSTGRES_NAME
        )

        self.db.updates.register_background_update_handler(
            self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search
        )

    @defer.inlineCallbacks
    def _background_reindex_search(self, progress, batch_size):
        """Populate the event_search table from historical events, one batch
        per invocation.

        Args:
            progress (dict): persisted progress, with
                "target_min_stream_id_inclusive", "max_stream_id_exclusive"
                and optionally "rows_inserted".
            batch_size (int): maximum number of events to process this run.

        Returns:
            Deferred[int]: number of rows handled this batch; 0 ends the
                background update.
        """
        # we work through the events table from highest stream id to lowest
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)

        # Only these event types carry user-visible text worth indexing.
        TYPES = ["m.room.name", "m.room.message", "m.room.topic"]

        def reindex_search_txn(txn):
            # NOTE: the type values are interpolated straight into the SQL;
            # this is only safe because TYPES is the fixed literal list above,
            # never user input.
            sql = (
                "SELECT stream_ordering, event_id, room_id, type, json, "
                " origin_server_ts FROM events"
                " JOIN event_json USING (room_id, event_id)"
                " WHERE ? <= stream_ordering AND stream_ordering < ?"
                " AND (%s)"
                " ORDER BY stream_ordering DESC"
                " LIMIT ?"
            ) % (" OR ".join("type = '%s'" % (t,) for t in TYPES),)

            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))

            # we could stream straight from the results into
            # store_search_entries_txn with a generator function, but that
            # would mean having two cursors open on the database at once.
            # Instead we just build a list of results.
            rows = self.db.cursor_to_dict(txn)
            if not rows:
                return 0

            # Rows are ordered DESC, so the last row is the new upper bound
            # for the next batch.
            min_stream_id = rows[-1]["stream_ordering"]

            event_search_rows = []
            for row in rows:
                try:
                    event_id = row["event_id"]
                    room_id = row["room_id"]
                    etype = row["type"]
                    stream_ordering = row["stream_ordering"]
                    origin_server_ts = row["origin_server_ts"]
                    try:
                        event_json = json.loads(row["json"])
                        content = event_json["content"]
                    except Exception:
                        # Unparseable / contentless event: nothing to index.
                        continue

                    # Map each event type to the content field we index.
                    if etype == "m.room.message":
                        key = "content.body"
                        value = content["body"]
                    elif etype == "m.room.topic":
                        key = "content.topic"
                        value = content["topic"]
                    elif etype == "m.room.name":
                        key = "content.name"
                        value = content["name"]
                    else:
                        raise Exception("unexpected event type %s" % etype)
                except (KeyError, AttributeError):
                    # If the event is missing a necessary field then
                    # skip over it.
                    continue

                if not isinstance(value, str):
                    # If the event body, name or topic isn't a string
                    # then skip over it
                    continue

                event_search_rows.append(
                    SearchEntry(
                        key=key,
                        value=value,
                        event_id=event_id,
                        room_id=room_id,
                        stream_ordering=stream_ordering,
                        origin_server_ts=origin_server_ts,
                    )
                )

            self.store_search_entries_txn(txn, event_search_rows)

            # Persist progress within the same transaction so a crash cannot
            # leave the batch half-applied.
            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(event_search_rows),
            }

            self.db.updates._background_update_progress_txn(
                txn, self.EVENT_SEARCH_UPDATE_NAME, progress
            )

            return len(event_search_rows)

        result = yield self.db.runInteraction(
            self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn
        )

        if not result:
            yield self.db.updates._end_background_update(self.EVENT_SEARCH_UPDATE_NAME)

        return result

    @defer.inlineCallbacks
    def _background_reindex_gin_search(self, progress, batch_size):
        """This handles old synapses which used GIST indexes, if any;
        converting them back to be GIN as per the actual schema.
        """

        def create_index(conn):
            # Abort any implicit transaction before switching to autocommit.
            conn.rollback()

            # we have to set autocommit, because postgres refuses to
            # CREATE INDEX CONCURRENTLY without it.
            conn.set_session(autocommit=True)

            try:
                c = conn.cursor()

                # if we skipped the conversion to GIST, we may already/still
                # have an event_search_fts_idx; unfortunately postgres 9.4
                # doesn't support CREATE INDEX IF EXISTS so we just catch the
                # exception and ignore it.
                import psycopg2

                try:
                    c.execute(
                        "CREATE INDEX CONCURRENTLY event_search_fts_idx"
                        " ON event_search USING GIN (vector)"
                    )
                except psycopg2.ProgrammingError as e:
                    logger.warning(
                        "Ignoring error %r when trying to switch from GIST to GIN", e
                    )

                # we should now be able to delete the GIST index.
                c.execute("DROP INDEX IF EXISTS event_search_fts_idx_gist")
            finally:
                # Restore normal transactional behaviour for the pooled conn.
                conn.set_session(autocommit=False)

        if isinstance(self.database_engine, PostgresEngine):
            yield self.db.runWithConnection(create_index)

        # Mark the update done even on SQLite, where there is nothing to do.
        yield self.db.updates._end_background_update(
            self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME
        )
        return 1

    @defer.inlineCallbacks
    def _background_reindex_search_order(self, progress, batch_size):
        """Backfill the stream_ordering/origin_server_ts columns on
        event_search from the events table, creating the ordering indexes
        first if this is the initial run.

        Returns:
            Deferred[int]: number of rows updated this batch.
        """
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)
        have_added_index = progress["have_added_indexes"]

        if not have_added_index:

            def create_index(conn):
                conn.rollback()
                # CREATE INDEX CONCURRENTLY requires autocommit (see above).
                conn.set_session(autocommit=True)
                c = conn.cursor()

                # We create with NULLS FIRST so that when we search *backwards*
                # we get the ones with non null origin_server_ts *first*
                c.execute(
                    "CREATE INDEX CONCURRENTLY event_search_room_order ON event_search("
                    "room_id, origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)"
                )
                c.execute(
                    "CREATE INDEX CONCURRENTLY event_search_order ON event_search("
                    "origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)"
                )
                conn.set_session(autocommit=False)

            yield self.db.runWithConnection(create_index)

            # Record that the indexes exist so we skip this on later batches.
            pg = dict(progress)
            pg["have_added_indexes"] = True

            yield self.db.runInteraction(
                self.EVENT_SEARCH_ORDER_UPDATE_NAME,
                self.db.updates._background_update_progress_txn,
                self.EVENT_SEARCH_ORDER_UPDATE_NAME,
                pg,
            )

        def reindex_search_txn(txn):
            sql = (
                "UPDATE event_search AS es SET stream_ordering = e.stream_ordering,"
                " origin_server_ts = e.origin_server_ts"
                " FROM events AS e"
                " WHERE e.event_id = es.event_id"
                " AND ? <= e.stream_ordering AND e.stream_ordering < ?"
                " RETURNING es.stream_ordering"
            )

            # Work downwards one batch_size-wide window at a time.
            min_stream_id = max_stream_id - batch_size
            txn.execute(sql, (min_stream_id, max_stream_id))
            rows = txn.fetchall()

            if min_stream_id < target_min_stream_id:
                # We've reached the end.
                return len(rows), False

            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(rows),
                "have_added_indexes": True,
            }

            self.db.updates._background_update_progress_txn(
                txn, self.EVENT_SEARCH_ORDER_UPDATE_NAME, progress
            )

            return len(rows), True

        num_rows, finished = yield self.db.runInteraction(
            self.EVENT_SEARCH_ORDER_UPDATE_NAME, reindex_search_txn
        )

        if not finished:
            yield self.db.updates._end_background_update(
                self.EVENT_SEARCH_ORDER_UPDATE_NAME
            )

        return num_rows
class SearchStore(SearchBackgroundUpdateStore):
    def __init__(self, database: Database, db_conn, hs):
        super(SearchStore, self).__init__(database, db_conn, hs)

    @defer.inlineCallbacks
    def search_msgs(self, room_ids, search_term, keys):
        """Performs a full text search over events with given keys.

        Args:
            room_ids (list): List of room ids to search in
            search_term (str): Search term to search for
            keys (list): List of keys to search in, currently supports
                "content.body", "content.name", "content.topic"

        Returns:
            list of dicts
        """
        clauses = []

        search_query = _parse_query(self.database_engine, search_term)

        args = []

        # Make sure we don't explode because the person is in too many rooms.
        # We filter the results below regardless.
        if len(room_ids) < 500:
            clause, args = make_in_list_sql_clause(
                self.database_engine, "room_id", room_ids
            )
            clauses = [clause]

        # Restrict to the requested content keys.
        local_clauses = []
        for key in keys:
            local_clauses.append("key = ?")
            args.append(key)

        clauses.append("(%s)" % (" OR ".join(local_clauses),))

        count_args = args
        count_clauses = clauses

        if isinstance(self.database_engine, PostgresEngine):
            sql = (
                "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) AS rank,"
                " room_id, event_id"
                " FROM event_search"
                " WHERE vector @@ to_tsquery('english', ?)"
            )
            args = [search_query, search_query] + args

            count_sql = (
                "SELECT room_id, count(*) as count FROM event_search"
                " WHERE vector @@ to_tsquery('english', ?)"
            )
            count_args = [search_query] + count_args
        elif isinstance(self.database_engine, Sqlite3Engine):
            sql = (
                "SELECT rank(matchinfo(event_search)) as rank, room_id, event_id"
                " FROM event_search"
                " WHERE value MATCH ?"
            )
            args = [search_query] + args

            count_sql = (
                "SELECT room_id, count(*) as count FROM event_search"
                " WHERE value MATCH ?"
            )
            # NOTE(review): the count query is fed the raw search_term while
            # the main query uses the parsed search_query — confirm this
            # asymmetry is intentional.
            count_args = [search_term] + count_args
        else:
            # This should be unreachable.
            raise Exception("Unrecognized database engine")

        for clause in clauses:
            sql += " AND " + clause

        for clause in count_clauses:
            count_sql += " AND " + clause

        # We add an arbitrary limit here to ensure we don't try to pull the
        # entire table from the database.
        sql += " ORDER BY rank DESC LIMIT 500"

        results = yield self.db.execute(
            "search_msgs", self.db.cursor_to_dict, sql, *args
        )

        # Re-filter in Python in case the room list was too long to put in SQL.
        results = list(filter(lambda row: row["room_id"] in room_ids, results))

        # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
        # search results (which is a data leak)
        events = yield self.get_events_as_list(
            [r["event_id"] for r in results],
            redact_behaviour=EventRedactBehaviour.BLOCK,
        )

        event_map = {ev.event_id: ev for ev in events}

        highlights = None
        if isinstance(self.database_engine, PostgresEngine):
            highlights = yield self._find_highlights_in_postgres(search_query, events)

        count_sql += " GROUP BY room_id"

        count_results = yield self.db.execute(
            "search_rooms_count", self.db.cursor_to_dict, count_sql, *count_args
        )

        count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)

        return {
            "results": [
                {"event": event_map[r["event_id"]], "rank": r["rank"]}
                for r in results
                if r["event_id"] in event_map
            ],
            "highlights": highlights,
            "count": count,
        }

    @defer.inlineCallbacks
    def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None):
        """Performs a full text search over events with given keys.

        Args:
            room_id (list): The room_ids to search in
            search_term (str): Search term to search for
            keys (list): List of keys to search in, currently supports
                "content.body", "content.name", "content.topic"
            pagination_token (str): A pagination token previously returned

        Returns:
            list of dicts
        """
        clauses = []

        search_query = _parse_query(self.database_engine, search_term)

        args = []

        # Make sure we don't explode because the person is in too many rooms.
        # We filter the results below regardless.
        if len(room_ids) < 500:
            clause, args = make_in_list_sql_clause(
                self.database_engine, "room_id", room_ids
            )
            clauses = [clause]

        # Restrict to the requested content keys.
        local_clauses = []
        for key in keys:
            local_clauses.append("key = ?")
            args.append(key)

        clauses.append("(%s)" % (" OR ".join(local_clauses),))

        # take copies of the current args and clauses lists, before adding
        # pagination clauses to main query.
        count_args = list(args)
        count_clauses = list(clauses)

        if pagination_token:
            try:
                # Token format: "<origin_server_ts>,<stream_ordering>".
                origin_server_ts, stream = pagination_token.split(",")
                origin_server_ts = int(origin_server_ts)
                stream = int(stream)
            except Exception:
                raise SynapseError(400, "Invalid pagination token")

            clauses.append(
                "(origin_server_ts < ?"
                " OR (origin_server_ts = ? AND stream_ordering < ?))"
            )
            args.extend([origin_server_ts, origin_server_ts, stream])

        if isinstance(self.database_engine, PostgresEngine):
            sql = (
                "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) as rank,"
                " origin_server_ts, stream_ordering, room_id, event_id"
                " FROM event_search"
                " WHERE vector @@ to_tsquery('english', ?) AND "
            )
            args = [search_query, search_query] + args

            count_sql = (
                "SELECT room_id, count(*) as count FROM event_search"
                " WHERE vector @@ to_tsquery('english', ?) AND "
            )
            count_args = [search_query] + count_args
        elif isinstance(self.database_engine, Sqlite3Engine):
            # We use CROSS JOIN here to ensure we use the right indexes.
            # https://sqlite.org/optoverview.html#crossjoin
            #
            # We want to use the full text search index on event_search to
            # extract all possible matches first, then lookup those matches
            # in the events table to get the topological ordering. We need
            # to use the indexes in this order because sqlite refuses to
            # MATCH unless it uses the full text search index
            sql = (
                "SELECT rank(matchinfo) as rank, room_id, event_id,"
                " origin_server_ts, stream_ordering"
                " FROM (SELECT key, event_id, matchinfo(event_search) as matchinfo"
                " FROM event_search"
                " WHERE value MATCH ?"
                " )"
                " CROSS JOIN events USING (event_id)"
                " WHERE "
            )
            args = [search_query] + args

            count_sql = (
                "SELECT room_id, count(*) as count FROM event_search"
                " WHERE value MATCH ? AND "
            )
            # NOTE(review): raw search_term here vs parsed search_query in the
            # main query — confirm this asymmetry is intentional.
            count_args = [search_term] + count_args
        else:
            # This should be unreachable.
            raise Exception("Unrecognized database engine")

        sql += " AND ".join(clauses)
        count_sql += " AND ".join(count_clauses)

        # We add an arbitrary limit here to ensure we don't try to pull the
        # entire table from the database.
        if isinstance(self.database_engine, PostgresEngine):
            sql += (
                " ORDER BY origin_server_ts DESC NULLS LAST,"
                " stream_ordering DESC NULLS LAST LIMIT ?"
            )
        elif isinstance(self.database_engine, Sqlite3Engine):
            sql += " ORDER BY origin_server_ts DESC, stream_ordering DESC LIMIT ?"
        else:
            raise Exception("Unrecognized database engine")

        args.append(limit)

        results = yield self.db.execute(
            "search_rooms", self.db.cursor_to_dict, sql, *args
        )

        # Re-filter in Python in case the room list was too long to put in SQL.
        results = list(filter(lambda row: row["room_id"] in room_ids, results))

        # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
        # search results (which is a data leak)
        events = yield self.get_events_as_list(
            [r["event_id"] for r in results],
            redact_behaviour=EventRedactBehaviour.BLOCK,
        )

        event_map = {ev.event_id: ev for ev in events}

        highlights = None
        if isinstance(self.database_engine, PostgresEngine):
            highlights = yield self._find_highlights_in_postgres(search_query, events)

        count_sql += " GROUP BY room_id"

        count_results = yield self.db.execute(
            "search_rooms_count", self.db.cursor_to_dict, count_sql, *count_args
        )

        count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)

        return {
            "results": [
                {
                    "event": event_map[r["event_id"]],
                    "rank": r["rank"],
                    "pagination_token": "%s,%s"
                    % (r["origin_server_ts"], r["stream_ordering"]),
                }
                for r in results
                if r["event_id"] in event_map
            ],
            "highlights": highlights,
            "count": count,
        }

    def _find_highlights_in_postgres(self, search_query, events):
        """Given a list of events and a search term, return a list of words
        that match from the content of the event.

        This is used to give a list of words that clients can match against to
        highlight the matching parts.

        Args:
            search_query (str)
            events (list): A list of events

        Returns:
            deferred : A set of strings.
        """

        def f(txn):
            highlight_words = set()
            for event in events:
                # As a hack we simply join values of all possible keys. This is
                # fine since we're only using them to find possible highlights.
                values = []
                for key in ("body", "name", "topic"):
                    v = event.content.get(key, None)
                    if v:
                        values.append(v)

                if not values:
                    continue

                value = " ".join(values)

                # We need to find some values for StartSel and StopSel that
                # aren't in the value so that we can pick results out.
                start_sel = "<"
                stop_sel = ">"

                while start_sel in value:
                    start_sel += "<"
                while stop_sel in value:
                    stop_sel += ">"

                query = "SELECT ts_headline(?, to_tsquery('english', ?), %s)" % (
                    _to_postgres_options(
                        {
                            "StartSel": start_sel,
                            "StopSel": stop_sel,
                            "MaxFragments": "50",
                        }
                    )
                )
                txn.execute(query, (value, search_query))
                (headline,) = txn.fetchall()[0]

                # Now we need to pick the possible highlights out of the headline
                # result.
                matcher_regex = "%s(.*?)%s" % (
                    re.escape(start_sel),
                    re.escape(stop_sel),
                )

                res = re.findall(matcher_regex, headline)
                highlight_words.update([r.lower() for r in res])

            return highlight_words

        return self.db.runInteraction("_find_highlights", f)
  559. def _to_postgres_options(options_dict):
  560. return "'%s'" % (",".join("%s=%s" % (k, v) for k, v in options_dict.items()),)
  561. def _parse_query(database_engine, search_term):
  562. """Takes a plain unicode string from the user and converts it into a form
  563. that can be passed to database.
  564. We use this so that we can add prefix matching, which isn't something
  565. that is supported by default.
  566. """
  567. # Pull out the individual words, discarding any non-word characters.
  568. results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
  569. if isinstance(database_engine, PostgresEngine):
  570. return " & ".join(result + ":*" for result in results)
  571. elif isinstance(database_engine, Sqlite3Engine):
  572. return " & ".join(result + "*" for result in results)
  573. else:
  574. # This should be unreachable.
  575. raise Exception("Unrecognized database engine")