Autopsy 4.14.0
Graphical digital forensics platform for The Sleuth Kit and other tools.
LuceneQuery.java
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011-2017 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.keywordsearch;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrRequest.METHOD;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.params.CursorMarkParams;
import org.sleuthkit.autopsy.coreutils.EscapeUtil;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.Version;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskException;
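/**
 * Performs a Lucene-syntax query for a single keyword against the Solr index
 * and turns the results into keyword hits and blackboard artifacts.
 *
 * A minimal usage sketch (assuming the caller already has a KeywordList and a
 * Keyword to search for):
 *
 *   LuceneQuery query = new LuceneQuery(keywordList, keyword);
 *   query.escape();
 *   if (query.validate()) {
 *       QueryResults results = query.performQuery();
 *   }
 */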
class LuceneQuery implements KeywordSearchQuery {

    private static final Logger logger = Logger.getLogger(LuceneQuery.class.getName());
    private String keywordStringEscaped;
    private boolean isEscaped;
    private final Keyword originalKeyword;
    private final KeywordList keywordList;
    private final List<KeywordQueryFilter> filters = new ArrayList<>();
    private String field = null;
    private static final int MAX_RESULTS_PER_CURSOR_MARK = 512;
    static final int SNIPPET_LENGTH = 50;
    static final String HIGHLIGHT_FIELD = Server.Schema.TEXT.toString();

    private static final boolean DEBUG = (Version.getBuildType() == Version.Type.DEVELOPMENT);
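    /**
     * Constructs a Lucene query for the given keyword.
     *
     * @param keywordList The keyword list that contains the keyword.
     * @param keyword     The keyword (search term) to query for.
     */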
    LuceneQuery(KeywordList keywordList, Keyword keyword) {
        this.keywordList = keywordList;
        this.originalKeyword = keyword;
        this.keywordStringEscaped = this.originalKeyword.getSearchTerm();
    }

    @Override
    public void addFilter(KeywordQueryFilter filter) {
        this.filters.add(filter);
    }

    @Override
    public void setField(String field) {
        this.field = field;
    }

    @Override
    public void setSubstringQuery() {
        // Note that this is not a full substring search. Normally substring
        // searches will be done with TermComponentQuery objects instead.
        keywordStringEscaped += "*";
    }

    @Override
    public void escape() {
        keywordStringEscaped = KeywordSearchUtil.escapeLuceneQuery(originalKeyword.getSearchTerm());
        isEscaped = true;
    }

    @Override
    public boolean isEscaped() {
        return isEscaped;
    }

    @Override
    public boolean isLiteral() {
        return originalKeyword.searchTermIsLiteral();
    }

    @Override
    public String getEscapedQueryString() {
        return this.keywordStringEscaped;
    }

    @Override
    public String getQueryString() {
        return this.originalKeyword.getSearchTerm();
    }

    @Override
    public KeywordList getKeywordList() {
        return keywordList;
    }
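    /**
     * Runs the keyword query against the Solr index.
     *
     * Results are paged with Solr cursor marks, MAX_RESULTS_PER_CURSOR_MARK
     * documents at a time, until the cursor mark stops advancing (i.e., all
     * results have been processed).
     *
     * @return The keyword hits for this query, grouped under the original
     *         keyword.
     *
     * @throws KeywordSearchModuleException
     * @throws NoOpenCoreException
     */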
    @Override
    public QueryResults performQuery() throws KeywordSearchModuleException, NoOpenCoreException {

        final Server solrServer = KeywordSearch.getServer();
        double indexSchemaVersion = NumberUtils.toDouble(solrServer.getIndexInfo().getSchemaVersion());

        SolrQuery solrQuery = createAndConfigureSolrQuery(KeywordSearchSettings.getShowSnippets());

        final String strippedQueryString = StringUtils.strip(getQueryString(), "\"");

        String cursorMark = CursorMarkParams.CURSOR_MARK_START;
        boolean allResultsProcessed = false;
        List<KeywordHit> matches = new ArrayList<>();
        LanguageSpecificContentQueryHelper.QueryResults languageSpecificQueryResults = new LanguageSpecificContentQueryHelper.QueryResults();
        while (!allResultsProcessed) {
            solrQuery.set(CursorMarkParams.CURSOR_MARK_PARAM, cursorMark);
            QueryResponse response = solrServer.query(solrQuery, SolrRequest.METHOD.POST);
            SolrDocumentList resultList = response.getResults();
            // objectId_chunk -> "text" -> list of previews
            Map<String, Map<String, List<String>>> highlightResponse = response.getHighlighting();

            if (2.2 <= indexSchemaVersion) {
                languageSpecificQueryResults.highlighting.putAll(response.getHighlighting());
            }

            for (SolrDocument resultDoc : resultList) {
                if (2.2 <= indexSchemaVersion) {
                    Object language = resultDoc.getFieldValue(Server.Schema.LANGUAGE.toString());
                    if (language != null) {
                        LanguageSpecificContentQueryHelper.updateQueryResults(languageSpecificQueryResults, resultDoc);
                    }
                }

                try {
                    /*
                     * For each result doc, check that the first occurrence of
                     * the term is before the window. If all the occurrences
                     * start within the window, don't record them for this
                     * chunk; they will get picked up in the next one.
                     */
                    final String docId = resultDoc.getFieldValue(Server.Schema.ID.toString()).toString();
                    final Integer chunkSize = (Integer) resultDoc.getFieldValue(Server.Schema.CHUNK_SIZE.toString());
                    final Collection<Object> content = resultDoc.getFieldValues(Server.Schema.CONTENT_STR.toString());

                    // If the document has a language, it is hit via the language-specific content fields, so skip it here.
                    if (resultDoc.containsKey(Server.Schema.LANGUAGE.toString())) {
                        continue;
                    }

                    if (indexSchemaVersion < 2.0) {
                        // Old schema versions don't support the chunk_size or content_str fields, so just accept hits.
                        matches.add(createKeywordHit(highlightResponse, docId));
                    } else {
                        // Check against the file name and the actual content separately.
                        for (Object content_obj : content) {
                            String content_str = (String) content_obj;
                            // For new schemas, check that the hit is before the chunk/window boundary.
                            int firstOccurrence = StringUtils.indexOfIgnoreCase(content_str, strippedQueryString);
                            // There is no chunk_size field for "parent" entries in the index.
                            if (chunkSize == null || chunkSize == 0 || (firstOccurrence > -1 && firstOccurrence < chunkSize)) {
                                matches.add(createKeywordHit(highlightResponse, docId));
                            }
                        }
                    }
                } catch (TskException ex) {
                    throw new KeywordSearchModuleException(ex);
                }
            }
            String nextCursorMark = response.getNextCursorMark();
            if (cursorMark.equals(nextCursorMark)) {
                allResultsProcessed = true;
            }
            cursorMark = nextCursorMark;
        }

        List<KeywordHit> mergedMatches;
        if (2.2 <= indexSchemaVersion) {
            mergedMatches = LanguageSpecificContentQueryHelper.mergeKeywordHits(matches, originalKeyword, languageSpecificQueryResults);
        } else {
            mergedMatches = matches;
        }

        QueryResults results = new QueryResults(this);
        // In the case of a single-term literal query there is only one term.
        results.addResult(new Keyword(originalKeyword.getSearchTerm(), true, true, originalKeyword.getListName(), originalKeyword.getOriginalTerm()), mergedMatches);

        return results;
    }

    @Override
    public boolean validate() {
        return StringUtils.isNotBlank(originalKeyword.getSearchTerm());
    }
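    /**
     * Creates a TSK_KEYWORD_HIT blackboard artifact for a keyword hit.
     *
     * @param content      The content that the hit was found in.
     * @param foundKeyword The keyword that was actually found by the search.
     * @param hit          The keyword hit.
     * @param snippet      A snippet (preview) of the hit, may be null.
     * @param listName     The name of the keyword list, may be blank.
     *
     * @return The new artifact, or null if it could not be created.
     */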
    @Override
    public BlackboardArtifact createKeywordHitArtifact(Content content, Keyword foundKeyword, KeywordHit hit, String snippet, String listName) {
        final String MODULE_NAME = KeywordSearchModuleFactory.getModuleName();

        Collection<BlackboardAttribute> attributes = new ArrayList<>();
        BlackboardArtifact bba;
        try {
            bba = content.newArtifact(ARTIFACT_TYPE.TSK_KEYWORD_HIT);
        } catch (TskCoreException e) {
            logger.log(Level.WARNING, "Error adding bb artifact for keyword hit", e); //NON-NLS
            return null;
        }

        if (snippet != null) {
            attributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_KEYWORD_PREVIEW, MODULE_NAME, snippet));
        }
        attributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_KEYWORD, MODULE_NAME, foundKeyword.getSearchTerm()));
        if (StringUtils.isNotBlank(listName)) {
            attributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_SET_NAME, MODULE_NAME, listName));
        }

        if (originalKeyword != null) {
            BlackboardAttribute.ATTRIBUTE_TYPE selType = originalKeyword.getArtifactAttributeType();
            if (selType != null) {
                attributes.add(new BlackboardAttribute(selType, MODULE_NAME, foundKeyword.getSearchTerm()));
            }

            if (originalKeyword.searchTermIsWholeWord()) {
                attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_SEARCH_TYPE, MODULE_NAME, KeywordSearch.QueryType.LITERAL.ordinal()));
            } else {
                attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_SEARCH_TYPE, MODULE_NAME, KeywordSearch.QueryType.SUBSTRING.ordinal()));
            }
        }

        hit.getArtifactID().ifPresent(artifactID
                -> attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT, MODULE_NAME, artifactID))
        );

        try {
            bba.addAttributes(attributes); //write out to bb
            return bba;
        } catch (TskCoreException e) {
            logger.log(Level.WARNING, "Error adding bb attributes to artifact", e); //NON-NLS
            return null;
        }
    }
    /*
     * Create the Solr query object for the stored keyword.
     *
     * @param snippets True if the query should request snippets.
     *
     * @return The configured SolrQuery object.
     */
    private SolrQuery createAndConfigureSolrQuery(boolean snippets) throws NoOpenCoreException, KeywordSearchModuleException {
        double indexSchemaVersion = NumberUtils.toDouble(KeywordSearch.getServer().getIndexInfo().getSchemaVersion());

        SolrQuery q = new SolrQuery();
        q.setShowDebugInfo(DEBUG); //debug
        // Wrap the query string in quotes if this is a literal search term.
        String queryStr = originalKeyword.searchTermIsLiteral()
                ? KeywordSearchUtil.quoteQuery(keywordStringEscaped) : keywordStringEscaped;

        // Run the query against an optional alternative field.
        if (field != null) {
            //use the optional field
            queryStr = field + ":" + queryStr;
            q.setQuery(queryStr);
        } else if (2.2 <= indexSchemaVersion && originalKeyword.searchTermIsLiteral()) {
            q.setQuery(LanguageSpecificContentQueryHelper.expandQueryString(queryStr));
        } else {
            q.setQuery(queryStr);
        }
        q.setRows(MAX_RESULTS_PER_CURSOR_MARK);
        // Setting the sort order is necessary for cursor-based paging to work.
        q.setSort(SolrQuery.SortClause.asc(Server.Schema.ID.toString()));

        q.setFields(Server.Schema.ID.toString(),
                Server.Schema.CHUNK_SIZE.toString(),
                Server.Schema.CONTENT_STR.toString());

        if (2.2 <= indexSchemaVersion && originalKeyword.searchTermIsLiteral()) {
            q.addField(Server.Schema.LANGUAGE.toString());
            LanguageSpecificContentQueryHelper.configureTermfreqQuery(q, keywordStringEscaped);
        }

        for (KeywordQueryFilter filter : filters) {
            q.addFilterQuery(filter.toString());
        }

        if (snippets) {
            configureQueryForHighlighting(q);
        }

        return q;
    }
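    /**
     * Configures the given query for highlighting so that snippets can be
     * extracted from the response.
     *
     * @param q The Solr query to configure.
     *
     * @throws NoOpenCoreException
     */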
    private static void configureQueryForHighlighting(SolrQuery q) throws NoOpenCoreException {
        double indexSchemaVersion = NumberUtils.toDouble(KeywordSearch.getServer().getIndexInfo().getSchemaVersion());
        if (2.2 <= indexSchemaVersion) {
            for (Server.Schema field : LanguageSpecificContentQueryHelper.getQueryFields()) {
                q.addHighlightField(field.toString());
            }
        } else {
            q.addHighlightField(HIGHLIGHT_FIELD);
        }

        q.setHighlightSnippets(1);
        q.setHighlightFragsize(SNIPPET_LENGTH);

        // Tune the highlighter.
        q.setParam("hl.useFastVectorHighlighter", "on"); //fast highlighter scales better than the standard one NON-NLS
        q.setParam("hl.tag.pre", "&laquo;"); //makes sense for FastVectorHighlighter only NON-NLS
        q.setParam("hl.tag.post", "&laquo;"); //makes sense for FastVectorHighlighter only NON-NLS
        q.setParam("hl.fragListBuilder", "simple"); //makes sense for FastVectorHighlighter only NON-NLS

        // Solr bug: if fragCharSize is smaller than the query string, a StringIndexOutOfBoundsException is thrown.
        q.setParam("hl.fragCharSize", Integer.toString(q.getQuery().length())); //makes sense for FastVectorHighlighter only NON-NLS

        // The docs say this makes sense for the original Highlighter only, but not really.
        // Analyze all content. SLOW! Consider lowering.
        q.setParam("hl.maxAnalyzedChars", Server.HL_ANALYZE_CHARS_UNLIMITED); //NON-NLS
    }
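    /**
     * Creates a keyword hit for a document, using the first highlighting
     * preview as the snippet if snippets are enabled.
     *
     * @param highlightResponse The highlighting section of the Solr response.
     * @param docId             The Solr document (or chunk) id of the hit.
     *
     * @return The keyword hit.
     */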
    private KeywordHit createKeywordHit(Map<String, Map<String, List<String>>> highlightResponse, String docId) throws TskException {
        String snippet = "";
        if (KeywordSearchSettings.getShowSnippets()) {
            List<String> snippetList = highlightResponse.get(docId).get(Server.Schema.TEXT.toString());
            // The list is null if there wasn't a snippet.
            if (snippetList != null) {
                snippet = EscapeUtil.unEscapeHtml(snippetList.get(0)).trim();
            }
        }

        return new KeywordHit(docId, snippet, originalKeyword.getSearchTerm());
    }
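    /**
     * Gets a snippet (preview) for a keyword hit in the given Solr document.
     * Convenience overload for hits that are not in a specific chunk.
     *
     * @param query        The keyword query string.
     * @param solrObjectId The Solr object id of the document with the hit.
     * @param isRegex      Whether the query is a regular expression query.
     * @param group        Whether to force grouping (quotes) around the query.
     *
     * @return The snippet, or an empty string if none could be retrieved.
     *
     * @throws NoOpenCoreException
     */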
    static String querySnippet(String query, long solrObjectId, boolean isRegex, boolean group) throws NoOpenCoreException {
        return querySnippet(query, solrObjectId, 0, isRegex, group);
    }
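    /**
     * Gets a snippet (preview) for a keyword hit in the given Solr document
     * chunk by running a highlighting query against just that document.
     *
     * @param query        The keyword query string.
     * @param solrObjectId The Solr object id of the document with the hit.
     * @param chunkID      The chunk id, or 0 for the parent document.
     * @param isRegex      Whether the query is a regular expression query.
     * @param group        Whether to force grouping (quotes) around the query.
     *
     * @return The snippet, or an empty string if none could be retrieved.
     *
     * @throws NoOpenCoreException
     */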
    static String querySnippet(String query, long solrObjectId, int chunkID, boolean isRegex, boolean group) throws NoOpenCoreException {
        SolrQuery q = new SolrQuery();
        q.setShowDebugInfo(DEBUG); //debug

        String queryStr;
        if (isRegex) {
            queryStr = HIGHLIGHT_FIELD + ":"
                    + (group ? KeywordSearchUtil.quoteQuery(query)
                            : query);
        } else {
            /*
             * Simplify query/escaping and use the default field; always force
             * grouping/quotes.
             */
            queryStr = KeywordSearchUtil.quoteQuery(query);
        }
        q.setQuery(queryStr);

        String contentIDStr = (chunkID == 0)
                ? Long.toString(solrObjectId)
                : Server.getChunkIdString(solrObjectId, chunkID);
        String idQuery = Server.Schema.ID.toString() + ":" + KeywordSearchUtil.escapeLuceneQuery(contentIDStr);
        q.addFilterQuery(idQuery);

        configureQueryForHighlighting(q);

        Server solrServer = KeywordSearch.getServer();

        try {
            QueryResponse response = solrServer.query(q, METHOD.POST);
            Map<String, Map<String, List<String>>> responseHighlight = response.getHighlighting();
            Map<String, List<String>> responseHighlightID = responseHighlight.get(contentIDStr);
            if (responseHighlightID == null) {
                return "";
            }
            double indexSchemaVersion = NumberUtils.toDouble(solrServer.getIndexInfo().getSchemaVersion());
            List<String> contentHighlights;
            if (2.2 <= indexSchemaVersion) {
                contentHighlights = LanguageSpecificContentQueryHelper.getHighlights(responseHighlightID).orElse(null);
            } else {
                contentHighlights = responseHighlightID.get(LuceneQuery.HIGHLIGHT_FIELD);
            }
            if (contentHighlights == null) {
                return "";
            } else {
                // Extracted content is HTML-escaped, but the snippet goes into a plain text field.
                return EscapeUtil.unEscapeHtml(contentHighlights.get(0)).trim();
            }
        } catch (NoOpenCoreException ex) {
            logger.log(Level.SEVERE, "Error executing Lucene Solr Query: " + query + ". Solr doc id " + solrObjectId + ", chunkID " + chunkID, ex); //NON-NLS
            throw ex;
        } catch (KeywordSearchModuleException ex) {
            logger.log(Level.SEVERE, "Error executing Lucene Solr Query: " + query + ". Solr doc id " + solrObjectId + ", chunkID " + chunkID, ex); //NON-NLS
            return "";
        }
    }
}
