Autopsy 4.8.0
Graphical digital forensics platform for The Sleuth Kit and other tools.
KeywordSearchIngestModule.java
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011-2018 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.keywordsearch;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import org.openide.util.NbBundle;
import org.openide.util.NbBundle.Messages;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestMessage.MessageType;
import org.sleuthkit.autopsy.ingest.IngestModuleReferenceCounter;
import org.sleuthkit.autopsy.ingest.IngestServices;
import org.sleuthkit.autopsy.keywordsearch.Ingester.IngesterException;
import org.sleuthkit.autopsy.keywordsearchservice.KeywordSearchService;
import org.sleuthkit.autopsy.keywordsearchservice.KeywordSearchServiceException;
import org.sleuthkit.autopsy.modules.filetypeid.FileTypeDetector;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.TskData;
import org.sleuthkit.datamodel.TskData.FileKnown;

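/**
 * A file-level ingest module that indexes allocated and Solr-supported files,
 * extracts and indexes raw strings from unallocated space and unsupported file
 * types, commits the index periodically (at a user-configurable interval), and
 * runs periodic keyword / regular expression searches on the files processed
 * so far, making their content searchable.
 */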
@NbBundle.Messages({
    "# {0} - Reason for not starting Solr", "KeywordSearchIngestModule.init.tryStopSolrMsg={0}<br />Please try stopping Java Solr processes if any exist and restart the application.",
    "KeywordSearchIngestModule.init.badInitMsg=Keyword search server was not properly initialized, cannot run keyword search ingest.",
    "SolrConnectionCheck.Port=Invalid port number.",
    "# {0} - Reason for not connecting to Solr", "KeywordSearchIngestModule.init.exception.errConnToSolr.msg=Error connecting to SOLR server: {0}.",
    "KeywordSearchIngestModule.startUp.noOpenCore.msg=The index could not be opened or does not exist.",
    "CannotRunFileTypeDetection=Unable to run file type detection."
})
public final class KeywordSearchIngestModule implements FileIngestModule {

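    /**
     * Options for the periodic searches: each value is the interval, in
     * minutes, between index commits and keyword searches during ingest.
     */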
    enum UpdateFrequency {

        FAST(20),
        AVG(10),
        SLOW(5),
        SLOWEST(1),
        NONE(Integer.MAX_VALUE),
        DEFAULT(5);
        private final int time;

        UpdateFrequency(int time) {
            this.time = time;
        }

        int getTime() {
            return time;
        }
    };
    private static final Logger logger = Logger.getLogger(KeywordSearchIngestModule.class.getName());
    private final IngestServices services = IngestServices.getInstance();
    private Ingester ingester = null;
    private Indexer indexer;
    private FileTypeDetector fileTypeDetector;
    //only search images from current ingest, not images previously ingested/indexed
    //accessed read-only by searcher thread

    private boolean startedSearching = false;
    private List<ContentTextExtractor> textExtractors;
    private StringsTextExtractor stringExtractor;
    private TextFileExtractor txtFileExtractor;
    private final KeywordSearchJobSettings settings;
    private boolean initialized = false;
    private long jobId;
    private long dataSourceId;
    private static final AtomicInteger instanceCount = new AtomicInteger(0); //just used for logging
    private int instanceNum = 0;
    private static final IngestModuleReferenceCounter refCounter = new IngestModuleReferenceCounter();
    private IngestJobContext context;

    private enum IngestStatus {

        TEXT_INGESTED, ///< Text was extracted by knowing file type and text_ingested
        STRINGS_INGESTED, ///< Strings were extracted from file and text_ingested
        METADATA_INGESTED, ///< No content, so we just text_ingested metadata
        SKIPPED_ERROR_INDEXING, ///< File was skipped because index engine had problems
        SKIPPED_ERROR_TEXTEXTRACT, ///< File was skipped because of text extraction issues
        SKIPPED_ERROR_IO ///< File was skipped because of IO issues reading it
    };
    private static final Map<Long, Map<Long, IngestStatus>> ingestStatus = new HashMap<>(); //guarded by itself

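    /**
     * Records the ingest status for a given file within a given ingest job.
     *
     * @param ingestJobId id of the ingest job
     * @param fileId      id of the file
     * @param status      ingest status of the file
     */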
    private static void putIngestStatus(long ingestJobId, long fileId, IngestStatus status) {
        synchronized (ingestStatus) {
            Map<Long, IngestStatus> ingestStatusForJob = ingestStatus.get(ingestJobId);
            if (ingestStatusForJob == null) {
                ingestStatusForJob = new HashMap<>();
                ingestStatus.put(ingestJobId, ingestStatusForJob);
            }
            ingestStatusForJob.put(fileId, status);
            ingestStatus.put(ingestJobId, ingestStatusForJob);
        }
    }

    KeywordSearchIngestModule(KeywordSearchJobSettings settings) {
        this.settings = settings;
        instanceNum = instanceCount.getAndIncrement();
    }

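    /**
     * Initializes the module for a new ingest job: checks that the Solr server
     * and text index are usable, verifies the connection, and warns if no
     * keyword lists are enabled for searching.
     */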
    @Messages({
        "KeywordSearchIngestModule.startupMessage.failedToGetIndexSchema=Failed to get schema version for text index.",
        "# {0} - Solr version number", "KeywordSearchIngestModule.startupException.indexSolrVersionNotSupported=Adding text no longer supported for Solr version {0} of the text index.",
        "# {0} - schema version number", "KeywordSearchIngestModule.startupException.indexSchemaNotSupported=Adding text no longer supported for schema version {0} of the text index.",
        "KeywordSearchIngestModule.noOpenCase.errMsg=No open case available."
    })
    @Override
    public void startUp(IngestJobContext context) throws IngestModuleException {
        initialized = false;
        jobId = context.getJobId();
        dataSourceId = context.getDataSource().getId();

        Server server = KeywordSearch.getServer();
        if (server.coreIsOpen() == false) {
            throw new IngestModuleException(Bundle.KeywordSearchIngestModule_startUp_noOpenCore_msg());
        }

        try {
            Index indexInfo = server.getIndexInfo();
            if (!IndexFinder.getCurrentSolrVersion().equals(indexInfo.getSolrVersion())) {
                throw new IngestModuleException(Bundle.KeywordSearchIngestModule_startupException_indexSolrVersionNotSupported(indexInfo.getSolrVersion()));
            }
            if (!indexInfo.isCompatible(IndexFinder.getCurrentSchemaVersion())) {
                throw new IngestModuleException(Bundle.KeywordSearchIngestModule_startupException_indexSchemaNotSupported(indexInfo.getSchemaVersion()));
            }
        } catch (NoOpenCoreException ex) {
            throw new IngestModuleException(Bundle.KeywordSearchIngestModule_startupMessage_failedToGetIndexSchema(), ex);
        }

        try {
            fileTypeDetector = new FileTypeDetector();
        } catch (FileTypeDetector.FileTypeDetectorInitException ex) {
            throw new IngestModuleException(Bundle.CannotRunFileTypeDetection(), ex);
        }

        ingester = Ingester.getDefault();
        this.context = context;

        // increment the module reference count
        // if first instance of this module for this job then check the server and existence of keywords
        Case openCase;
        try {
            openCase = Case.getCurrentCaseThrows();
        } catch (NoCurrentCaseException ex) {
            throw new IngestModuleException(Bundle.KeywordSearchIngestModule_noOpenCase_errMsg(), ex);
        }
        if (refCounter.incrementAndGet(jobId) == 1) {
            if (openCase.getCaseType() == Case.CaseType.MULTI_USER_CASE) {
                // for multi-user cases we need to verify the connection to the remote Solr server
                KeywordSearchService kwsService = new SolrSearchService();
                Server.IndexingServerProperties properties = Server.getMultiUserServerProperties(openCase.getCaseDirectory());
                int port;
                try {
                    port = Integer.parseInt(properties.getPort());
                } catch (NumberFormatException ex) {
                    // if there is an error parsing the port number
                    throw new IngestModuleException(Bundle.KeywordSearchIngestModule_init_badInitMsg() + " " + Bundle.SolrConnectionCheck_Port(), ex);
                }
                try {
                    kwsService.tryConnect(properties.getHost(), port);
                } catch (KeywordSearchServiceException ex) {
                    throw new IngestModuleException(Bundle.KeywordSearchIngestModule_init_badInitMsg(), ex);
                }
            } else {
                // for single-user cases we need to verify the connection to the local Solr service
                try {
                    if (!server.isRunning()) {
                        throw new IngestModuleException(Bundle.KeywordSearchIngestModule_init_tryStopSolrMsg(Bundle.KeywordSearchIngestModule_init_badInitMsg()));
                    }
                } catch (KeywordSearchModuleException ex) {
                    //this means Solr is not properly initialized
                    throw new IngestModuleException(Bundle.KeywordSearchIngestModule_init_tryStopSolrMsg(Bundle.KeywordSearchIngestModule_init_badInitMsg()), ex);
                }
                try {
                    // make an actual query to verify that server is responding
                    // we had cases where getStatus was OK, but the connection resulted in a 404
                    server.queryNumIndexedDocuments();
                } catch (KeywordSearchModuleException | NoOpenCoreException ex) {
                    throw new IngestModuleException(Bundle.KeywordSearchIngestModule_init_exception_errConnToSolr_msg(ex.getMessage()), ex);
                }

                // check if this job has any searchable keywords
                List<KeywordList> keywordLists = XmlKeywordSearchList.getCurrent().getListsL();
                boolean hasKeywordsForSearch = false;
                for (KeywordList keywordList : keywordLists) {
                    if (settings.keywordListIsEnabled(keywordList.getName()) && !keywordList.getKeywords().isEmpty()) {
                        hasKeywordsForSearch = true;
                        break;
                    }
                }
                if (!hasKeywordsForSearch) {
                    services.postMessage(IngestMessage.createWarningMessage(KeywordSearchModuleFactory.getModuleName(), NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.init.noKwInLstMsg"),
                            NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.init.onlyIdxKwSkipMsg")));
                }
            }
        }

        //initialize extractors
        stringExtractor = new StringsTextExtractor();
        stringExtractor.setScripts(KeywordSearchSettings.getStringExtractScripts());
        stringExtractor.setOptions(KeywordSearchSettings.getStringExtractOptions());

        txtFileExtractor = new TextFileExtractor();

        textExtractors = new ArrayList<>();
        //order matters, more specific extractors first
        textExtractors.add(new HtmlTextExtractor());
        //Use the SQLite text extractor as the default for SQLite files, since
        //Tika struggles with them. See the SqliteTextExtractor class for specifics.
        textExtractors.add(new SqliteTextExtractor());
        textExtractors.add(new TikaTextExtractor());

        indexer = new Indexer();
        initialized = true;
    }

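    /**
     * Indexes the given file and, on the first file processed, starts the
     * periodic keyword search job for this ingest job.
     */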
    @Override
    public ProcessResult process(AbstractFile abstractFile) {
        if (initialized == false) //error initializing indexing/Solr
        {
            logger.log(Level.SEVERE, "Skipping processing, module not initialized, file: {0}", abstractFile.getName()); //NON-NLS
            putIngestStatus(jobId, abstractFile.getId(), IngestStatus.SKIPPED_ERROR_INDEXING);
            return ProcessResult.OK;
        }

        if (abstractFile.getType().equals(TskData.TSK_DB_FILES_TYPE_ENUM.VIRTUAL_DIR)) {
            //skip indexing of virtual dirs (no content, no real name) - will index children files
            return ProcessResult.OK;
        }

        if (KeywordSearchSettings.getSkipKnown() && abstractFile.getKnown().equals(FileKnown.KNOWN)) {
            //index meta-data only
            if (context.fileIngestIsCancelled()) {
                return ProcessResult.OK;
            }
            indexer.indexFile(abstractFile, false);
            return ProcessResult.OK;
        }

        //index the file and content (if the content is supported)
        if (context.fileIngestIsCancelled()) {
            return ProcessResult.OK;
        }
        indexer.indexFile(abstractFile, true);

        // Start searching if it hasn't started already
        if (!startedSearching) {
            if (context.fileIngestIsCancelled()) {
                return ProcessResult.OK;
            }
            List<String> keywordListNames = settings.getNamesOfEnabledKeyWordLists();
            IngestSearchRunner.getInstance().startJob(context, keywordListNames);
            startedSearching = true;
        }

        return ProcessResult.OK;
    }

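    /**
     * After all files have been processed, ends the search job: triggers a
     * final index commit and final search, and posts a summary message from
     * the last module instance for the job.
     */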
    @Override
    public void shutDown() {
        logger.log(Level.INFO, "Keyword search ingest module instance {0} shutting down", instanceNum); //NON-NLS

        if ((initialized == false) || (context == null)) {
            return;
        }

        if (context.fileIngestIsCancelled()) {
            logger.log(Level.INFO, "Keyword search ingest module instance {0} stopping search job due to ingest cancellation", instanceNum); //NON-NLS
            IngestSearchRunner.getInstance().stopJob(jobId);
            cleanup();
            return;
        }

        // Remove from the search list and trigger final commit and final search
        IngestSearchRunner.getInstance().endJob(jobId);

        // We only need to post the summary msg from the last module per job
        if (refCounter.decrementAndGet(jobId) == 0) {
            try {
                final int numIndexedFiles = KeywordSearch.getServer().queryNumIndexedFiles();
                logger.log(Level.INFO, "Indexed files count: {0}", numIndexedFiles); //NON-NLS
                final int numIndexedChunks = KeywordSearch.getServer().queryNumIndexedChunks();
                logger.log(Level.INFO, "Indexed file chunks count: {0}", numIndexedChunks); //NON-NLS
            } catch (KeywordSearchModuleException | NoOpenCoreException ex) {
                logger.log(Level.SEVERE, "Error executing Solr queries to check number of indexed files and file chunks", ex); //NON-NLS
            }
            postIndexSummary();
            synchronized (ingestStatus) {
                ingestStatus.remove(jobId);
            }
        }

        cleanup();
    }

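    /**
     * Common cleanup code when module stops or final searcher completes.
     */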
    private void cleanup() {
        textExtractors.clear();
        textExtractors = null;
        stringExtractor = null;
        txtFileExtractor = null;
        initialized = false;
    }

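    /**
     * Posts an inbox message summarizing how many files were indexed by text,
     * by strings, or by metadata only, and how many were skipped due to
     * indexing, text extraction, or I/O errors.
     */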
    private void postIndexSummary() {
        int text_ingested = 0;
        int metadata_ingested = 0;
        int strings_ingested = 0;
        int error_text = 0;
        int error_index = 0;
        int error_io = 0;

        synchronized (ingestStatus) {
            Map<Long, IngestStatus> ingestStatusForJob = ingestStatus.get(jobId);
            if (ingestStatusForJob == null) {
                return;
            }
            for (IngestStatus s : ingestStatusForJob.values()) {
                switch (s) {
                    case TEXT_INGESTED:
                        text_ingested++;
                        break;
                    case METADATA_INGESTED:
                        metadata_ingested++;
                        break;
                    case STRINGS_INGESTED:
                        strings_ingested++;
                        break;
                    case SKIPPED_ERROR_TEXTEXTRACT:
                        error_text++;
                        break;
                    case SKIPPED_ERROR_INDEXING:
                        error_index++;
                        break;
                    case SKIPPED_ERROR_IO:
                        error_io++;
                        break;
                    default:
                        ;
                }
            }
        }

        StringBuilder msg = new StringBuilder();
        msg.append("<table border=0><tr><td>").append(NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.knowFileHeaderLbl")).append("</td><td>").append(text_ingested).append("</td></tr>"); //NON-NLS
        msg.append("<tr><td>").append(NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.fileGenStringsHead")).append("</td><td>").append(strings_ingested).append("</td></tr>"); //NON-NLS
        msg.append("<tr><td>").append(NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.mdOnlyLbl")).append("</td><td>").append(metadata_ingested).append("</td></tr>"); //NON-NLS
        msg.append("<tr><td>").append(NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.idxErrLbl")).append("</td><td>").append(error_index).append("</td></tr>"); //NON-NLS
        msg.append("<tr><td>").append(NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.errTxtLbl")).append("</td><td>").append(error_text).append("</td></tr>"); //NON-NLS
        msg.append("<tr><td>").append(NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.errIoLbl")).append("</td><td>").append(error_io).append("</td></tr>"); //NON-NLS
        msg.append("</table>"); //NON-NLS
        String indexStats = msg.toString();
        logger.log(Level.INFO, "Keyword Indexing Completed: {0}", indexStats); //NON-NLS
        services.postMessage(IngestMessage.createMessage(MessageType.INFO, KeywordSearchModuleFactory.getModuleName(), NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.kwIdxResultsLbl"), indexStats));
        if (error_index > 0) {
            MessageNotifyUtil.Notify.error(NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.kwIdxErrsTitle"),
                    NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.kwIdxErrMsgFiles", error_index));
        } else if (error_io + error_text > 0) {
            MessageNotifyUtil.Notify.warn(NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.kwIdxWarnMsgTitle"),
                    NbBundle.getMessage(this.getClass(), "KeywordSearchIngestModule.postIndexSummary.idxErrReadFilesMsg"));
        }
    }

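    /**
     * Handles indexing files on a Solr core.
     */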
    private class Indexer {

        private final Logger logger = Logger.getLogger(Indexer.class.getName());

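        /**
         * Extracts text from the given file using the first (most specific)
         * text extractor that supports its detected format, then chunks and
         * indexes that text.
         *
         * @param aFile          file to extract text from and index
         * @param detectedFormat MIME type of the file
         *
         * @return true if the file was text_ingested, false otherwise
         *
         * @throws IngesterException exception thrown if indexing failed
         */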
        private boolean extractTextAndIndex(AbstractFile aFile, String detectedFormat) throws IngesterException {
            ContentTextExtractor extractor = null;

            //go over available text extractors in order, and pick the first one (most specific one)
            for (ContentTextExtractor fe : textExtractors) {
                if (fe.isSupported(aFile, detectedFormat)) {
                    extractor = fe;
                    break;
                }
            }

            if (extractor == null) {
                // No text extractor found.
                return false;
            }

            //logger.log(Level.INFO, "Extractor: " + fileExtract + ", file: " + aFile.getName());
            //divide into chunks and index
            return Ingester.getDefault().indexText(extractor, aFile, context);
        }

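        /**
         * Extracts raw strings from the given file and indexes them, as a
         * fallback when no text extractor supports the file.
         *
         * @param aFile file to extract strings from and index
         *
         * @return true if indexing succeeded (or ingest was cancelled), false
         *         otherwise
         */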
        private boolean extractStringsAndIndex(AbstractFile aFile) {
            try {
                if (context.fileIngestIsCancelled()) {
                    return true;
                }
                if (Ingester.getDefault().indexText(stringExtractor, aFile, KeywordSearchIngestModule.this.context)) {
                    putIngestStatus(jobId, aFile.getId(), IngestStatus.STRINGS_INGESTED);
                    return true;
                } else {
                    logger.log(Level.WARNING, "Failed to extract strings and ingest, file ''{0}'' (id: {1}).", new Object[]{aFile.getName(), aFile.getId()}); //NON-NLS
                    putIngestStatus(jobId, aFile.getId(), IngestStatus.SKIPPED_ERROR_TEXTEXTRACT);
                    return false;
                }
            } catch (IngesterException ex) {
                logger.log(Level.WARNING, "Failed to extract strings and ingest, file '" + aFile.getName() + "' (id: " + aFile.getId() + ").", ex); //NON-NLS
                putIngestStatus(jobId, aFile.getId(), IngestStatus.SKIPPED_ERROR_INDEXING);
                return false;
            }
        }

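        /**
         * Adds the file to the index: metadata only for directories, archives,
         * zero-length files, and known files (when configured to skip them);
         * extracted text, with raw strings as a fallback, for everything else.
         *
         * @param aFile        file to index
         * @param indexContent true if the file content should be indexed,
         *                     false for metadata only
         */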
        private void indexFile(AbstractFile aFile, boolean indexContent) {
            //logger.log(Level.INFO, "Processing AbstractFile: " + abstractFile.getName());

            TskData.TSK_DB_FILES_TYPE_ENUM aType = aFile.getType();

            // unallocated and unused blocks can only have strings extracted from them.
            if ((aType.equals(TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS) || aType.equals(TskData.TSK_DB_FILES_TYPE_ENUM.UNUSED_BLOCKS))) {
                if (context.fileIngestIsCancelled()) {
                    return;
                }
                extractStringsAndIndex(aFile);
                return;
            }

            final long size = aFile.getSize();
            //if not indexing content, or for a dir, or for zero-length content, index metadata only

            if ((indexContent == false || aFile.isDir() || size == 0)) {
                try {
                    if (context.fileIngestIsCancelled()) {
                        return;
                    }
                    ingester.indexMetaDataOnly(aFile);
                    putIngestStatus(jobId, aFile.getId(), IngestStatus.METADATA_INGESTED);
                } catch (IngesterException ex) {
                    putIngestStatus(jobId, aFile.getId(), IngestStatus.SKIPPED_ERROR_INDEXING);
                    logger.log(Level.WARNING, "Unable to index meta-data for file: " + aFile.getId(), ex); //NON-NLS
                }
                return;
            }

            if (context.fileIngestIsCancelled()) {
                return;
            }
            String fileType = fileTypeDetector.getMIMEType(aFile);

            // we skip archive formats that are opened by the archive module.
            // @@@ We could have a check here to see if the archive module was enabled though...
            if (ContentTextExtractor.ARCHIVE_MIME_TYPES.contains(fileType)) {
                try {
                    if (context.fileIngestIsCancelled()) {
                        return;
                    }
                    ingester.indexMetaDataOnly(aFile);
                    putIngestStatus(jobId, aFile.getId(), IngestStatus.METADATA_INGESTED);
                } catch (IngesterException ex) {
                    putIngestStatus(jobId, aFile.getId(), IngestStatus.SKIPPED_ERROR_INDEXING);
                    logger.log(Level.WARNING, "Unable to index meta-data for file: " + aFile.getId(), ex); //NON-NLS
                }
                return;
            }

            boolean wasTextAdded = false;

            //extract text with one of the extractors, divide into chunks and index with Solr
            try {
                //logger.log(Level.INFO, "indexing: " + aFile.getName());
                if (context.fileIngestIsCancelled()) {
                    return;
                }
                if (fileType.equals("application/octet-stream")) {
                    extractStringsAndIndex(aFile);
                    return;
                }
                if (!extractTextAndIndex(aFile, fileType)) {
                    // Text extractor not found for file. Extract strings only.
                    putIngestStatus(jobId, aFile.getId(), IngestStatus.SKIPPED_ERROR_TEXTEXTRACT);
                } else {
                    putIngestStatus(jobId, aFile.getId(), IngestStatus.TEXT_INGESTED);
                    wasTextAdded = true;
                }

            } catch (IngesterException e) {
                logger.log(Level.INFO, "Could not extract text with Tika, " + aFile.getId() + ", " //NON-NLS
                        + aFile.getName(), e);
                putIngestStatus(jobId, aFile.getId(), IngestStatus.SKIPPED_ERROR_INDEXING);
            } catch (Exception e) {
                logger.log(Level.WARNING, "Error extracting text with Tika, " + aFile.getId() + ", " //NON-NLS
                        + aFile.getName(), e);
                putIngestStatus(jobId, aFile.getId(), IngestStatus.SKIPPED_ERROR_TEXTEXTRACT);
            }

            if ((wasTextAdded == false) && (aFile.getNameExtension().equalsIgnoreCase("txt") && !(aFile.getType().equals(TskData.TSK_DB_FILES_TYPE_ENUM.CARVED)))) {
                //Carved files should be the only type of unallocated files capable of having a txt extension,
                //and they should be skipped by the TextFileExtractor because they may contain more than one text encoding
                try {
                    if (Ingester.getDefault().indexText(txtFileExtractor, aFile, context)) {
                        putIngestStatus(jobId, aFile.getId(), IngestStatus.TEXT_INGESTED);
                        wasTextAdded = true;
                    }
                } catch (IngesterException ex) {
                    logger.log(Level.WARNING, "Unable to index as unicode", ex);
                }
            }

            // if it wasn't supported or had an error, default to strings
            if (wasTextAdded == false) {
                extractStringsAndIndex(aFile);
            }
        }
    }
}