Autopsy 4.11.0
Graphical digital forensics platform for The Sleuth Kit and other tools.
EventsRepository.java
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011-2018 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.timeline.db;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import static java.util.Objects.isNull;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.logging.Level;
import java.util.stream.Collectors;
import javafx.application.Platform;
import javafx.beans.property.ReadOnlyBooleanProperty;
import javafx.beans.property.ReadOnlyBooleanWrapper;
import javafx.beans.property.ReadOnlyObjectProperty;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.collections.ObservableMap;
import javafx.concurrent.Worker;
import javax.swing.JOptionPane;
import org.apache.commons.lang3.StringUtils;
import org.joda.time.Interval;
import org.netbeans.api.progress.ProgressHandle;
import org.openide.util.NbBundle;
import org.openide.windows.WindowManager;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.services.TagsManager;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.timeline.CancellationProgressTask;
import org.sleuthkit.autopsy.timeline.datamodel.CombinedEvent;
import org.sleuthkit.autopsy.timeline.datamodel.EventStripe;
import org.sleuthkit.autopsy.timeline.datamodel.FilteredEventsModel;
import org.sleuthkit.autopsy.timeline.datamodel.SingleEvent;
import org.sleuthkit.autopsy.timeline.datamodel.eventtype.ArtifactEventType;
import org.sleuthkit.autopsy.timeline.datamodel.eventtype.EventType;
import org.sleuthkit.autopsy.timeline.datamodel.eventtype.FileSystemTypes;
import org.sleuthkit.autopsy.timeline.datamodel.eventtype.RootEventType;
import org.sleuthkit.autopsy.timeline.filters.RootFilter;
import org.sleuthkit.autopsy.timeline.filters.TagNameFilter;
import org.sleuthkit.autopsy.timeline.filters.TagsFilter;
import org.sleuthkit.autopsy.timeline.zooming.ZoomParams;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardArtifactTag;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.ContentTag;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.Tag;
import org.sleuthkit.datamodel.TagName;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData;

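/**
 * Provides access to the timeline events database for a case: maintains
 * caches of event data, keeps the available filter data (data sources, hash
 * sets, tag names) in sync with the case, and rebuilds the events database on
 * demand via a background worker task.
 */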
public class EventsRepository {

    private final static Logger logger = Logger.getLogger(EventsRepository.class.getName());

    private final Executor workerExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("eventrepository-worker-%d").build()); //NON-NLS
    private DBPopulationWorker dbWorker; // the currently running db population task
    private final EventDB eventDB;
    private final Case autoCase;
    private final FilteredEventsModel modelInstance;

    private final LoadingCache<Object, Long> maxCache;
    private final LoadingCache<Object, Long> minCache;
    private final LoadingCache<Long, SingleEvent> idToEventCache;
    private final LoadingCache<ZoomParams, Map<EventType, Long>> eventCountsCache;
    private final LoadingCache<ZoomParams, List<EventStripe>> eventStripeCache;

    private final ObservableMap<Long, String> datasourcesMap = FXCollections.observableHashMap();
    private final ObservableMap<Long, String> hashSetMap = FXCollections.observableHashMap();
    private final ObservableList<TagName> tagNames = FXCollections.observableArrayList();

    public Case getAutoCase() {
        return autoCase;
    }

    public ObservableList<TagName> getTagNames() {
        return tagNames;
    }

    synchronized public ObservableMap<Long, String> getDatasourcesMap() {
        return datasourcesMap;
    }

    synchronized public ObservableMap<Long, String> getHashSetMap() {
        return hashSetMap;
    }

    public Interval getBoundingEventsInterval(Interval timeRange, RootFilter filter) {
        return eventDB.getBoundingEventsInterval(timeRange, filter);
    }

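    /**
     * @return the FilteredEventsModel backed by this repository
     */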
    public FilteredEventsModel getEventsModel() {
        return modelInstance;
    }

    public EventsRepository(Case autoCase, ReadOnlyObjectProperty<ZoomParams> currentStateProperty) {
        this.autoCase = autoCase;
        //TODO: we should check that case is open, or get passed a case object/directory -jm
        this.eventDB = EventDB.getEventDB(autoCase);
        idToEventCache = CacheBuilder.newBuilder()
                .maximumSize(5000L)
                .expireAfterAccess(10, TimeUnit.MINUTES)
                .build(CacheLoader.from(eventDB::getEventById));
        eventCountsCache = CacheBuilder.newBuilder()
                .maximumSize(1000L)
                .expireAfterAccess(10, TimeUnit.MINUTES)
                .build(CacheLoader.from(eventDB::countEventsByType));
        eventStripeCache = CacheBuilder.newBuilder()
                .maximumSize(1000L)
                .expireAfterAccess(10, TimeUnit.MINUTES)
                .build(CacheLoader.from(eventDB::getEventStripes));
        maxCache = CacheBuilder.newBuilder().build(CacheLoader.from(eventDB::getMaxTime));
        minCache = CacheBuilder.newBuilder().build(CacheLoader.from(eventDB::getMinTime));
        this.modelInstance = new FilteredEventsModel(this, currentStateProperty);
    }

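    /**
     * @return the time (in seconds from the unix epoch) of the last event in
     *         the repository, ignoring any filters or requested ranges
     */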
    public Long getMaxTime() {
        return maxCache.getUnchecked("max"); // NON-NLS
    }

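    /**
     * @return the time (in seconds from the unix epoch) of the first event in
     *         the repository, ignoring any filters or requested ranges
     */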
    public Long getMinTime() {
        return minCache.getUnchecked("min"); // NON-NLS
    }

    public SingleEvent getEventById(Long eventID) {
        return idToEventCache.getUnchecked(eventID);
    }

    synchronized public Set<SingleEvent> getEventsById(Collection<Long> eventIDs) {
        return eventIDs.stream()
                .map(idToEventCache::getUnchecked)
                .collect(Collectors.toSet());
    }

    synchronized public List<EventStripe> getEventStripes(ZoomParams params) {
        try {
            return eventStripeCache.get(params);
        } catch (ExecutionException ex) {
            logger.log(Level.SEVERE, "Failed to load Event Stripes from cache for " + params.toString(), ex); //NON-NLS
            return Collections.emptyList();
        }
    }

    synchronized public Map<EventType, Long> countEvents(ZoomParams params) {
        return eventCountsCache.getUnchecked(params);
    }

    synchronized public int countAllEvents() {
        return eventDB.countAllEvents();
    }

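    /**
     * Get the IDs of the events derived from the given file.
     *
     * @param file                    the file to get event IDs for
     * @param includeDerivedArtifacts if true, also include events derived
     *                                from artifacts associated with the file;
     *                                if false, only include events derived
     *                                directly from the file itself
     *
     * @return a List of event IDs for the events derived from the given file
     */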
    public List<Long> getEventIDsForFile(AbstractFile file, boolean includeDerivedArtifacts) {
        return eventDB.getEventIDsForFile(file, includeDerivedArtifacts);
    }

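    /**
     * Get the IDs of the events derived from the given artifact.
     *
     * @param artifact the artifact to get event IDs for
     *
     * @return a List of event IDs for the events derived from the given
     *         artifact
     */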
    public List<Long> getEventIDsForArtifact(BlackboardArtifact artifact) {
        return eventDB.getEventIDsForArtifact(artifact);
    }

    private void invalidateCaches() {
        minCache.invalidateAll();
        maxCache.invalidateAll();
        eventCountsCache.invalidateAll();
        eventStripeCache.invalidateAll();
        idToEventCache.invalidateAll();
    }

    public List<Long> getEventIDs(Interval timeRange, RootFilter filter) {
        return eventDB.getEventIDs(timeRange, filter);
    }

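    /**
     * Get all the events within the given time range that pass the given
     * filter, grouped by time and description so that file system events for
     * the same file with the same timestamp are combined.
     *
     * @param timeRange the time range to get events for
     * @param filter    the filter the events must pass
     *
     * @return a List of CombinedEvents
     */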
    public List<CombinedEvent> getCombinedEvents(Interval timeRange, RootFilter filter) {
        return eventDB.getCombinedEvents(timeRange, filter);
    }

    public Interval getSpanningInterval(Collection<Long> eventIDs) {
        return eventDB.getSpanningInterval(eventIDs);
    }

    public boolean hasNewColumns() {
        return eventDB.hasNewColumns();
    }

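    /**
     * Get a count of tags applied to the given event IDs.
     *
     * @param eventIDsWithTags the event IDs to get tag counts for
     *
     * @return a map from tag name display name to count of tag applications
     */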
    public Map<String, Long> getTagCountsByTagName(Set<Long> eventIDsWithTags) {
        return eventDB.getTagCountsByTagName(eventIDsWithTags);
    }

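    /**
     * Use the given SleuthkitCase to update the data used to determine the
     * available filters.
     *
     * @param skCase the SleuthkitCase to use to update the filter data
     */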
    synchronized private void populateFilterData(SleuthkitCase skCase) {

        for (Map.Entry<Long, String> hashSet : eventDB.getHashSetNames().entrySet()) {
            hashSetMap.putIfAbsent(hashSet.getKey(), hashSet.getValue());
        }
        //because there is no way to remove a datasource we only add to this map.
        for (Long id : eventDB.getDataSourceIDs()) {
            try {
                datasourcesMap.putIfAbsent(id, skCase.getContentById(id).getDataSource().getName());
            } catch (TskCoreException ex) {
                logger.log(Level.SEVERE, "Failed to get datasource by ID.", ex); //NON-NLS
            }
        }

        try {
            //should this only be tags applied to files or event bearing artifacts?
            tagNames.setAll(skCase.getTagNamesInUse());
        } catch (TskCoreException ex) {
            logger.log(Level.SEVERE, "Failed to get tag names in use.", ex); //NON-NLS
        }
    }

    synchronized public Set<Long> addTag(long objID, Long artifactID, Tag tag, EventDB.EventTransaction trans) {
        Set<Long> updatedEventIDs = eventDB.addTag(objID, artifactID, tag, trans);
        if (!updatedEventIDs.isEmpty()) {
            invalidateCaches(updatedEventIDs);
        }
        return updatedEventIDs;
    }

    synchronized public Set<Long> deleteTag(long objID, Long artifactID, long tagID, boolean tagged) {
        Set<Long> updatedEventIDs = eventDB.deleteTag(objID, artifactID, tagID, tagged);
        if (!updatedEventIDs.isEmpty()) {
            invalidateCaches(updatedEventIDs);
        }
        return updatedEventIDs;
    }

    synchronized private void invalidateCaches(Set<Long> updatedEventIDs) {
        eventCountsCache.invalidateAll();
        eventStripeCache.invalidateAll();
        idToEventCache.invalidateAll(updatedEventIDs);
        try {
            tagNames.setAll(autoCase.getSleuthkitCase().getTagNamesInUse());
        } catch (TskCoreException ex) {
            logger.log(Level.SEVERE, "Failed to get tag names in use.", ex); //NON-NLS
        }
    }

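    /**
     * Bring the given TagsFilter in sync with the tag names in use in the
     * case: add filters for new tag names, and disable filters for tag names
     * that are no longer in use.
     *
     * @param tagsFilter the TagsFilter to modify
     */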
    public void syncTagsFilter(TagsFilter tagsFilter) {
        for (TagName t : tagNames) {
            tagsFilter.addSubFilter(new TagNameFilter(t, autoCase));
        }
        for (TagNameFilter t : tagsFilter.getSubFilters()) {
            t.setDisabled(tagNames.contains(t.getTagName()) == false);
        }
    }

    public boolean areFiltersEquivalent(RootFilter f1, RootFilter f2) {
        return SQLHelper.getSQLWhere(f1).equals(SQLHelper.getSQLWhere(f2));
    }

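    /**
     * Drop and rebuild the entire events database.
     *
     * @param onStateChange callback invoked when the worker's state changes
     *
     * @return the task performing the rebuild, which can be used to monitor
     *         or cancel it
     */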
    public CancellationProgressTask<Void> rebuildRepository(Consumer<Worker.State> onStateChange) {
        return rebuildRepository(DBPopulationMode.FULL, onStateChange);
    }

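    /**
     * Drop and rebuild only the tags data in the events database.
     *
     * @param onStateChange callback invoked when the worker's state changes
     *
     * @return the task performing the rebuild, which can be used to monitor
     *         or cancel it
     */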
    public CancellationProgressTask<Void> rebuildTags(Consumer<Worker.State> onStateChange) {
        return rebuildRepository(DBPopulationMode.TAGS_ONLY, onStateChange);
    }

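    /**
     * Rebuild the events database to the extent given by the population mode,
     * cancelling any population task that is already in progress.
     *
     * @param mode          the population mode: full rebuild or tags only
     * @param onStateChange callback invoked when the worker's state changes
     *
     * @return the task performing the rebuild
     */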
    private CancellationProgressTask<Void> rebuildRepository(final DBPopulationMode mode, Consumer<Worker.State> onStateChange) {
        logger.log(Level.INFO, "(re)starting {0} db population task", mode); //NON-NLS
        if (dbWorker != null) {
            dbWorker.cancel();
        }
        dbWorker = new DBPopulationWorker(mode, onStateChange);
        workerExecutor.execute(dbWorker);
        return dbWorker;
    }

    private enum DBPopulationMode {

        FULL,
        TAGS_ONLY;
    }

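    /**
     * Task that rebuilds the events database to the extent given by a
     * DBPopulationMode, reporting progress through a ProgressHandle.
     */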
    private class DBPopulationWorker extends CancellationProgressTask<Void> {

        private final ReadOnlyBooleanWrapper cancellable = new ReadOnlyBooleanWrapper(true);

        private final DBPopulationMode dbPopulationMode;
        private final SleuthkitCase skCase;
        private final TagsManager tagsManager;

        private ProgressHandle progressHandle;

        @Override
        public ReadOnlyBooleanProperty cancellableProperty() {
            return cancellable.getReadOnlyProperty();
        }

        @Override
        public boolean requestCancel() {
            Platform.runLater(() -> cancellable.set(false));
            return super.requestCancel();
        }

        @Override
        protected void updateTitle(String title) {
            super.updateTitle(title);
            progressHandle.setDisplayName(title);
        }

        @Override
        protected void updateMessage(String message) {
            super.updateMessage(message);
            progressHandle.progress(message);
        }

        @Override
        protected void updateProgress(double workDone, double max) {
            super.updateProgress(workDone, max);
            if (workDone >= 0) {
                progressHandle.progress((int) workDone);
            }
        }

        @Override
        protected void updateProgress(long workDone, long max) {
            super.updateProgress(workDone, max);
            if (workDone >= 0) {
                progressHandle.progress((int) workDone);
            }
        }

        DBPopulationWorker(DBPopulationMode mode, Consumer<Worker.State> onStateChange) {
            skCase = autoCase.getSleuthkitCase();
            tagsManager = autoCase.getServices().getTagsManager();
            this.dbPopulationMode = mode;
            this.stateProperty().addListener(stateObservable -> onStateChange.accept(getState()));
        }

        void restartProgressHandle(String title, String message, Double workDone, double total, Boolean cancellable) {
            if (progressHandle != null) {
                progressHandle.finish();
            }
            progressHandle = cancellable
                    ? ProgressHandle.createHandle(title, this::requestCancel)
                    : ProgressHandle.createHandle(title);

            if (workDone < 0) {
                progressHandle.start();
            } else {
                progressHandle.start((int) total);
            }
            updateTitle(title);
            updateMessage(message);
            updateProgress(workDone, total);
        }

        @SuppressWarnings("deprecation") // TODO (EUR-733): Do not use SleuthkitCase.getLastObjectId
        @Override
        @NbBundle.Messages({"progressWindow.msg.refreshingFileTags=Refreshing file tags",
            "progressWindow.msg.refreshingResultTags=Refreshing result tags",
            "progressWindow.msg.gatheringData=Gathering event data",
            "progressWindow.msg.commitingDb=Committing events database"})
        protected Void call() throws Exception {
            EventDB.EventTransaction trans = null;

            if (dbPopulationMode == DBPopulationMode.FULL) {
                //drop old db, and add back MAC and artifact events
                logger.log(Level.INFO, "Beginning population of timeline db."); // NON-NLS
                restartProgressHandle(Bundle.progressWindow_msg_gatheringData(), "", -1D, 1, true);
                //reset database //TODO: can we do more incremental updates? -jm
                eventDB.reInitializeDB();
                //grab ids of all files
                List<Long> fileIDs = skCase.findAllFileIdsWhere("name != '.' AND name != '..'"
                        + " AND type != " + TskData.TSK_DB_FILES_TYPE_ENUM.SLACK.ordinal()); //NON-NLS
                final int numFiles = fileIDs.size();

                trans = eventDB.beginTransaction();
                insertMACTimeEvents(numFiles, fileIDs, trans);
                insertArtifactDerivedEvents(trans);
            }

            //tags
            if (dbPopulationMode == DBPopulationMode.TAGS_ONLY) {
                trans = eventDB.beginTransaction();
                logger.log(Level.INFO, "dropping old tags"); // NON-NLS
                eventDB.reInitializeTags();
            }

            logger.log(Level.INFO, "updating content tags"); // NON-NLS
            List<ContentTag> contentTags = tagsManager.getAllContentTags();
            int currentWorkTotal = contentTags.size();
            restartProgressHandle(Bundle.progressWindow_msg_refreshingFileTags(), "", 0D, currentWorkTotal, true);
            insertContentTags(currentWorkTotal, contentTags, trans);

            logger.log(Level.INFO, "updating artifact tags"); // NON-NLS
            List<BlackboardArtifactTag> artifactTags = tagsManager.getAllBlackboardArtifactTags();
            currentWorkTotal = artifactTags.size();
            restartProgressHandle(Bundle.progressWindow_msg_refreshingResultTags(), "", 0D, currentWorkTotal, true);
            insertArtifactTags(currentWorkTotal, artifactTags, trans);

            logger.log(Level.INFO, "committing db"); // NON-NLS
            Platform.runLater(() -> cancellable.set(false));
            restartProgressHandle(Bundle.progressWindow_msg_commitingDb(), "", -1D, 1, false);
            eventDB.commitTransaction(trans);

            eventDB.analyze();
            populateFilterData(skCase);
            invalidateCaches();

            progressHandle.finish();
            if (isCancelRequested()) {
                cancel();
            }
            return null;
        }

        private void insertArtifactTags(int currentWorkTotal, List<BlackboardArtifactTag> artifactTags, EventDB.EventTransaction trans) {
            for (int i = 0; i < currentWorkTotal; i++) {
                if (isCancelRequested()) {
                    break;
                }
                updateProgress(i, currentWorkTotal);
                BlackboardArtifactTag artifactTag = artifactTags.get(i);
                eventDB.addTag(artifactTag.getContent().getId(), artifactTag.getArtifact().getArtifactID(), artifactTag, trans);
            }
        }

        private void insertContentTags(int currentWorkTotal, List<ContentTag> contentTags, EventDB.EventTransaction trans) {
            for (int i = 0; i < currentWorkTotal; i++) {
                if (isCancelRequested()) {
                    break;
                }
                updateProgress(i, currentWorkTotal);
                ContentTag contentTag = contentTags.get(i);
                eventDB.addTag(contentTag.getContent().getId(), null, contentTag, trans);
            }
        }

        private void insertArtifactDerivedEvents(EventDB.EventTransaction trans) {
            //insert artifact based events
            //TODO: use (not-yet existing api) to grab all artifacts with timestamps, rather than the hardcoded lists in EventType -jm
            for (EventType type : RootEventType.allTypes) {
                if (isCancelRequested()) {
                    break;
                }
                //skip file_system events, they are already handled above.
                if (type instanceof ArtifactEventType) {
                    populateEventType((ArtifactEventType) type, trans);
                }
            }
        }

        @NbBundle.Messages("progressWindow.msg.populateMacEventsFiles=Populating MAC time events for files")
        private void insertMACTimeEvents(final int numFiles, List<Long> fileIDs, EventDB.EventTransaction trans) {
            restartProgressHandle(Bundle.progressWindow_msg_populateMacEventsFiles(), "", 0D, numFiles, true);
            for (int i = 0; i < numFiles; i++) {
                if (isCancelRequested()) {
                    break;
                }
                long fID = fileIDs.get(i);
                try {
                    AbstractFile f = skCase.getAbstractFileById(fID);

                    if (isNull(f)) {
                        logger.log(Level.WARNING, "Failed to get data for file : {0}", fID); // NON-NLS
                    } else {
                        insertEventsForFile(f, trans);
                        updateProgress(i, numFiles);
                        updateMessage(f.getName());
                    }
                } catch (TskCoreException tskCoreException) {
                    logger.log(Level.SEVERE, "Failed to insert MAC time events for file : " + fID, tskCoreException); // NON-NLS
                }
            }
        }

        private void insertEventsForFile(AbstractFile f, EventDB.EventTransaction trans) throws TskCoreException {
            //gather time stamps into map
            EnumMap<FileSystemTypes, Long> timeMap = new EnumMap<>(FileSystemTypes.class);
            timeMap.put(FileSystemTypes.FILE_CREATED, f.getCrtime());
            timeMap.put(FileSystemTypes.FILE_ACCESSED, f.getAtime());
            timeMap.put(FileSystemTypes.FILE_CHANGED, f.getCtime());
            timeMap.put(FileSystemTypes.FILE_MODIFIED, f.getMtime());

            /*
             * If there are no legitimate (greater than zero) time stamps (eg,
             * logical/local files), skip the rest of the event generation:
             * this should result in dropping logical files, since they do not
             * have legitimate time stamps.
             */
            if (Collections.max(timeMap.values()) > 0) {
                final String uniquePath = f.getUniquePath();
                final String parentPath = f.getParentPath();
                long datasourceID = f.getDataSource().getId();
                String datasourceName = StringUtils.substringBeforeLast(uniquePath, parentPath);

                String rootFolder = StringUtils.substringBefore(StringUtils.substringAfter(parentPath, "/"), "/");
                String shortDesc = datasourceName + "/" + StringUtils.defaultString(rootFolder);
                shortDesc = shortDesc.endsWith("/") ? shortDesc : shortDesc + "/";
                String medDesc = datasourceName + parentPath;

                final TskData.FileKnown known = f.getKnown();
                Set<String> hashSets = f.getHashSetNames();
                List<ContentTag> tags = tagsManager.getContentTagsByContent(f);

                for (Map.Entry<FileSystemTypes, Long> timeEntry : timeMap.entrySet()) {
                    if (timeEntry.getValue() > 0) {
                        //if the time is legitimate (greater than zero), insert it
                        eventDB.insertEvent(timeEntry.getValue(), timeEntry.getKey(),
                                datasourceID, f.getId(), null, uniquePath, medDesc,
                                shortDesc, known, hashSets, tags, trans);
                    }
                }
            }
        }

        @Override
        @NbBundle.Messages("msgdlg.problem.text=There was a problem populating the timeline."
                + " Not all events may be present or accurate.")
        protected void done() {
            super.done();
            try {
                get();
            } catch (CancellationException ex) {
                logger.log(Level.WARNING, "Timeline database population was cancelled by the user. " //NON-NLS
                        + " Not all events may be present or accurate."); // NON-NLS
            } catch (Exception ex) {
                logger.log(Level.WARNING, "Unexpected exception while populating database.", ex); // NON-NLS
                JOptionPane.showMessageDialog(WindowManager.getDefault().getMainWindow(), Bundle.msgdlg_problem_text());
            }
        }

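        /**
         * Populate all the events of one subtype by querying the blackboard
         * for artifacts of the given type and inserting an event for each.
         *
         * @param type  the subtype to populate
         * @param trans the db transaction to use
         */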
        @NbBundle.Messages({"# {0} - event type ", "progressWindow.populatingXevents=Populating {0} events"})
        private void populateEventType(final ArtifactEventType type, EventDB.EventTransaction trans) {
            try {
                //get all the blackboard artifacts corresponding to the given event sub_type
                final ArrayList<BlackboardArtifact> blackboardArtifacts = skCase.getBlackboardArtifacts(type.getArtifactTypeID());
                final int numArtifacts = blackboardArtifacts.size();
                restartProgressHandle(Bundle.progressWindow_populatingXevents(type.getDisplayName()), "", 0D, numArtifacts, true);
                for (int i = 0; i < numArtifacts; i++) {
                    try {
                        //for each artifact, extract the relevant information for the descriptions
                        insertEventForArtifact(type, blackboardArtifacts.get(i), trans);
                        updateProgress(i, numArtifacts);
                    } catch (TskCoreException ex) {
                        logger.log(Level.SEVERE, "There was a problem inserting event for artifact: " + blackboardArtifacts.get(i).getArtifactID(), ex); // NON-NLS
                    }
                }
            } catch (TskCoreException ex) {
                logger.log(Level.SEVERE, "There was a problem getting events with sub type " + type.toString() + ".", ex); // NON-NLS
            }
        }

        private void insertEventForArtifact(final ArtifactEventType type, BlackboardArtifact bbart, EventDB.EventTransaction trans) throws TskCoreException {
            ArtifactEventType.AttributeEventDescription eventDescription = ArtifactEventType.buildEventDescription(type, bbart);

            //if the time is legitimate (greater than zero), insert it into the db
            if (eventDescription != null && eventDescription.getTime() > 0) {
                long objectID = bbart.getObjectID();
                Content content = skCase.getContentById(objectID);
                long datasourceID = content.getDataSource().getId();
                long artifactID = bbart.getArtifactID();
                Set<String> hashSets = content.getHashSetNames();
                List<BlackboardArtifactTag> tags = tagsManager.getBlackboardArtifactTagsByArtifact(bbart);
                String fullDescription = eventDescription.getFullDescription();
                String medDescription = eventDescription.getMedDescription();
                String shortDescription = eventDescription.getShortDescription();
                eventDB.insertEvent(eventDescription.getTime(), type, datasourceID, objectID, artifactID, fullDescription, medDescription, shortDescription, null, hashSets, tags, trans);
            }
        }
    }
}
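For orientation, here is a minimal usage sketch (illustrative only, not part of this file). It assumes an open Case named myCase and a ReadOnlyObjectProperty<ZoomParams> named zoomProperty obtained from the timeline controller, and uses only methods defined above.

    // Illustrative sketch: construct a repository and trigger a full rebuild.
    // `myCase` and `zoomProperty` are assumed to come from the running application.
    EventsRepository repo = new EventsRepository(myCase, zoomProperty);
    repo.rebuildRepository(state -> {
        if (state == Worker.State.SUCCEEDED) {
            System.out.println("Rebuilt timeline db with " + repo.countAllEvents() + " events.");
        }
    });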