19 package org.sleuthkit.autopsy.timeline.db;
21 import com.google.common.cache.CacheBuilder;
22 import com.google.common.cache.CacheLoader;
23 import com.google.common.cache.LoadingCache;
24 import com.google.common.util.concurrent.ThreadFactoryBuilder;
25 import java.util.ArrayList;
26 import java.util.Collection;
27 import java.util.Collections;
28 import java.util.EnumMap;
29 import java.util.List;
31 import static java.util.Objects.isNull;
33 import java.util.concurrent.CancellationException;
34 import java.util.concurrent.ExecutionException;
35 import java.util.concurrent.Executor;
36 import java.util.concurrent.Executors;
37 import java.util.concurrent.TimeUnit;
38 import java.util.function.Consumer;
39 import java.util.logging.Level;
40 import java.util.stream.Collectors;
41 import javafx.application.Platform;
42 import javafx.beans.property.ReadOnlyBooleanProperty;
43 import javafx.beans.property.ReadOnlyBooleanWrapper;
44 import javafx.beans.property.ReadOnlyObjectProperty;
45 import javafx.collections.FXCollections;
46 import javafx.collections.ObservableList;
47 import javafx.collections.ObservableMap;
48 import javafx.concurrent.Worker;
49 import javax.swing.JOptionPane;
50 import org.apache.commons.lang3.StringUtils;
51 import org.joda.time.Interval;
52 import org.netbeans.api.progress.ProgressHandle;
53 import org.openide.util.NbBundle;
// Single-threaded executor that runs DB population tasks sequentially;
// the worker thread is named "eventrepository-worker-N" for diagnostics.
100 private final Executor
workerExecutor = Executors.newSingleThreadExecutor(
new ThreadFactoryBuilder().setNameFormat(
"eventrepository-worker-%d").build());
// Observable map keyed by datasource ID (populated from eventDB.getDataSourceIDs());
// presumably the value is the datasource's display name — confirm against populateFilterData.
112 private final ObservableMap<Long, String>
datasourcesMap = FXCollections.observableHashMap();
// Observable map of hash set ID -> hash set name, filled from eventDB.getHashSetNames().
113 private final ObservableMap<Long, String>
hashSetMap = FXCollections.observableHashMap();
// Observable list of TagNames; used to enable/disable tag filters
// (see syncTagsFilter: filters whose TagName is absent here are disabled).
114 private final ObservableList<TagName>
tagNames = FXCollections.observableArrayList();
133 return eventDB.getBoundingEventsInterval(timeRange, filter);
149 idToEventCache = CacheBuilder.newBuilder()
151 .expireAfterAccess(10, TimeUnit.MINUTES)
152 .build(CacheLoader.from(eventDB::getEventById));
153 eventCountsCache = CacheBuilder.newBuilder()
155 .expireAfterAccess(10, TimeUnit.MINUTES)
156 .build(CacheLoader.from(eventDB::countEventsByType));
157 eventStripeCache = CacheBuilder.newBuilder()
159 .expireAfterAccess(10, TimeUnit.MINUTES
160 ).build(CacheLoader.from(eventDB::getEventStripes));
161 maxCache = CacheBuilder.newBuilder().build(CacheLoader.from(eventDB::getMaxTime));
162 minCache = CacheBuilder.newBuilder().build(CacheLoader.from(eventDB::getMinTime));
170 return maxCache.getUnchecked(
"max");
178 return minCache.getUnchecked(
"min");
183 return idToEventCache.getUnchecked(eventID);
186 synchronized public Set<SingleEvent>
getEventsById(Collection<Long> eventIDs) {
187 return eventIDs.stream()
188 .map(idToEventCache::getUnchecked)
189 .collect(Collectors.toSet());
195 return eventStripeCache.get(params);
196 }
catch (ExecutionException ex) {
197 LOGGER.log(Level.SEVERE,
"Failed to load Event Stripes from cache for " + params.
toString(), ex);
198 return Collections.emptyList();
203 return eventCountsCache.getUnchecked(params);
207 return eventDB.countAllEvents();
226 return eventDB.getEventIDsForFile(file, includeDerivedArtifacts);
239 return eventDB.getEventIDsForArtifact(artifact);
243 minCache.invalidateAll();
244 maxCache.invalidateAll();
245 eventCountsCache.invalidateAll();
246 eventStripeCache.invalidateAll();
247 idToEventCache.invalidateAll();
251 return eventDB.getEventIDs(timeRange, filter);
266 return eventDB.getCombinedEvents(timeRange, filter);
274 return eventDB.hasNewColumns();
286 return eventDB.getTagCountsByTagName(eventIDsWithTags);
297 for (Map.Entry<Long, String> hashSet : eventDB.getHashSetNames().entrySet()) {
298 hashSetMap.putIfAbsent(hashSet.getKey(), hashSet.getValue());
301 for (Long
id : eventDB.getDataSourceIDs()) {
305 LOGGER.log(Level.SEVERE,
"Failed to get datasource by ID.", ex);
313 LOGGER.log(Level.SEVERE,
"Failed to get tag names in use.", ex);
318 Set<Long> updatedEventIDs = eventDB.addTag(objID, artifactID, tag, trans);
319 if (!updatedEventIDs.isEmpty()) {
322 return updatedEventIDs;
325 synchronized public Set<Long>
deleteTag(
long objID, Long artifactID,
long tagID,
boolean tagged) {
326 Set<Long> updatedEventIDs = eventDB.deleteTag(objID, artifactID, tagID, tagged);
327 if (!updatedEventIDs.isEmpty()) {
330 return updatedEventIDs;
334 eventCountsCache.invalidateAll();
335 eventStripeCache.invalidateAll();
336 idToEventCache.invalidateAll(updatedEventIDs);
340 LOGGER.log(Level.SEVERE,
"Failed to get tag names in use.", ex);
357 t.setDisabled(tagNames.contains(t.getTagName()) ==
false);
362 return SQLHelper.getSQLWhere(f1).equals(SQLHelper.getSQLWhere(f2));
410 LOGGER.log(Level.INFO,
"(re)starting {0} db population task", mode);
411 if (dbWorker != null) {
415 workerExecutor.execute(dbWorker);
// Whether the DB population task can still be cancelled. Starts true; flipped to
// false (on the JavaFX thread via Platform.runLater) once cancellation is requested
// or the final DB commit begins. Exposed read-only via cancellableProperty().
431 private final ReadOnlyBooleanWrapper
cancellable =
new ReadOnlyBooleanWrapper(
true);
441 return cancellable.getReadOnlyProperty();
446 Platform.runLater(() -> cancellable.set(
false));
447 return super.requestCancel();
452 super.updateTitle(title);
453 progressHandle.setDisplayName(title);
458 super.updateMessage(message);
459 progressHandle.progress(message);
464 super.updateProgress(workDone, max);
466 progressHandle.progress((
int) workDone);
472 super.updateProgress(workDone, max);
473 super.updateProgress(workDone, max);
475 progressHandle.progress((
int) workDone);
482 this.dbPopulationMode = mode;
483 this.stateProperty().addListener(stateObservable -> onStateChange.accept(getState()));
486 void restartProgressHandle(String title, String message, Double workDone,
double total, Boolean cancellable) {
487 if (progressHandle != null) {
488 progressHandle.finish();
490 progressHandle = cancellable
491 ? ProgressHandle.createHandle(title, this::requestCancel)
492 : ProgressHandle.createHandle(title);
495 progressHandle.start();
497 progressHandle.start((
int) total);
504 @SuppressWarnings(
"deprecation")
506 @NbBundle.Messages({
"progressWindow.msg.refreshingFileTags=Refreshing file tags",
507 "progressWindow.msg.refreshingResultTags=Refreshing result tags",
508 "progressWindow.msg.gatheringData=Gathering event data",
509 "progressWindow.msg.commitingDb=Committing events database"})
510 protected Void
call()
throws Exception {
515 LOGGER.log(Level.INFO,
"Beginning population of timeline db.");
516 restartProgressHandle(Bundle.progressWindow_msg_gatheringData(),
"", -1D, 1,
true);
518 eventDB.reInitializeDB();
522 final int numFiles = fileIDs.size();
524 trans = eventDB.beginTransaction();
531 trans = eventDB.beginTransaction();
532 LOGGER.log(Level.INFO,
"dropping old tags");
533 eventDB.reInitializeTags();
536 LOGGER.log(Level.INFO,
"updating content tags");
538 int currentWorkTotal = contentTags.size();
539 restartProgressHandle(Bundle.progressWindow_msg_refreshingFileTags(),
"", 0D, currentWorkTotal,
true);
542 LOGGER.log(Level.INFO,
"updating artifact tags");
544 currentWorkTotal = artifactTags.size();
545 restartProgressHandle(Bundle.progressWindow_msg_refreshingResultTags(),
"", 0D, currentWorkTotal,
true);
548 LOGGER.log(Level.INFO,
"committing db");
549 Platform.runLater(() -> cancellable.set(
false));
550 restartProgressHandle(Bundle.progressWindow_msg_commitingDb(),
"", -1D, 1,
false);
551 eventDB.commitTransaction(trans);
557 progressHandle.finish();
565 for (
int i = 0; i < currentWorkTotal; i++) {
576 for (
int i = 0; i < currentWorkTotal; i++) {
582 eventDB.addTag(contentTag.
getContent().
getId(), null, contentTag, trans);
600 @NbBundle.Messages(
"progressWindow.msg.populateMacEventsFiles=Populating MAC time events for files")
602 restartProgressHandle(Bundle.progressWindow_msg_populateMacEventsFiles(),
"", 0D, numFiles,
true);
603 for (
int i = 0; i < numFiles; i++) {
607 long fID = fileIDs.get(i);
612 LOGGER.log(Level.WARNING,
"Failed to get data for file : {0}", fID);
619 LOGGER.log(Level.SEVERE,
"Failed to insert MAC time events for file : " + fID, tskCoreException);
626 EnumMap<FileSystemTypes, Long> timeMap =
new EnumMap<>(
FileSystemTypes.class);
638 if (Collections.max(timeMap.values()) > 0) {
642 String datasourceName = StringUtils.substringBeforeLast(uniquePath, parentPath);
644 String rootFolder = StringUtils.substringBefore(StringUtils.substringAfter(parentPath,
"/"),
"/");
645 String shortDesc = datasourceName +
"/" + StringUtils.defaultString(rootFolder);
646 shortDesc = shortDesc.endsWith(
"/") ? shortDesc : shortDesc +
"/";
647 String medDesc = datasourceName + parentPath;
653 for (Map.Entry<
FileSystemTypes, Long> timeEntry : timeMap.entrySet()) {
654 if (timeEntry.getValue() > 0) {
656 eventDB.insertEvent(timeEntry.getValue(), timeEntry.getKey(),
657 datasourceID, f.
getId(), null, uniquePath, medDesc,
658 shortDesc, known, hashSets, tags, trans);
665 @NbBundle.Messages(
"msgdlg.problem.text=There was a problem populating the timeline."
666 +
" Not all events may be present or accurate.")
671 }
catch (CancellationException ex) {
672 LOGGER.log(Level.WARNING,
"Timeline database population was cancelled by the user. "
673 +
" Not all events may be present or accurate.");
674 }
catch (Exception ex) {
675 LOGGER.log(Level.WARNING,
"Unexpected exception while populating database.", ex);
676 JOptionPane.showMessageDialog(null, Bundle.msgdlg_problem_text());
686 @NbBundle.Messages({
"# {0} - event type ",
"progressWindow.populatingXevents=Populating {0} events"})
691 final int numArtifacts = blackboardArtifacts.size();
692 restartProgressHandle(Bundle.progressWindow_populatingXevents(type.
getDisplayName()),
"", 0D, numArtifacts,
true);
693 for (
int i = 0; i < numArtifacts; i++) {
699 LOGGER.log(Level.SEVERE,
"There was a problem inserting event for artifact: " + blackboardArtifacts.get(i).getArtifactID(), ex);
703 LOGGER.log(Level.SEVERE,
"There was a problem getting events with sub type " + type.toString() +
".", ex);
711 if (eventDescription != null && eventDescription.getTime() > 0) {
718 String fullDescription = eventDescription.getFullDescription();
719 String medDescription = eventDescription.getMedDescription();
720 String shortDescription = eventDescription.getShortDescription();
721 eventDB.insertEvent(eventDescription.getTime(), type, datasourceID, objectID, artifactID, fullDescription, medDescription, shortDescription, null, hashSets, tags, trans);
void insertArtifactDerivedEvents(EventDB.EventTransaction trans)
static EventDB getEventDB(Case autoCase)
List< Long > getEventIDsForArtifact(BlackboardArtifact artifact)
final TagsManager tagsManager
List< Long > getEventIDsForFile(AbstractFile file, boolean includeDerivedArtifacts)
final FilteredEventsModel modelInstance
boolean areFiltersEquivalent(RootFilter f1, RootFilter f2)
Interval getSpanningInterval(Collection< Long > eventIDs)
ArrayList< BlackboardArtifact > getBlackboardArtifacts(int artifactTypeID)
FilteredEventsModel getEventsModel()
void addSubFilter(SubFilterType subfilter)
void insertMACTimeEvents(final int numFiles, List< Long > fileIDs, EventDB.EventTransaction trans)
void updateProgress(double workDone, double max)
Map< String, Long > getTagCountsByTagName(Set< Long > eventIDsWithTags)
BlackboardArtifact getArtifact()
synchronized ObservableMap< Long, String > getHashSetMap()
synchronized Map< EventType, Long > countEvents(ZoomParams params)
synchronized void populateFilterData(SleuthkitCase skCase)
synchronized List< EventStripe > getEventStripes(ZoomParams params)
synchronized ObservableMap< Long, String > getDatasourcesMap()
Interval getSpanningInterval(Collection< Long > eventIDs)
Content getContentById(long id)
List< TagName > getTagNamesInUse()
void insertEventForArtifact(final ArtifactEventType type, BlackboardArtifact bbart, EventDB.EventTransaction trans)
synchronized String getUniquePath()
synchronized Set< Long > deleteTag(long objID, Long artifactID, long tagID, boolean tagged)
void updateMessage(String message)
ReadOnlyBooleanProperty cancellableProperty()
synchronized Set< Long > addTag(long objID, Long artifactID, Tag tag, EventDB.EventTransaction trans)
List< Long > findAllFileIdsWhere(String sqlWhereClause)
AbstractFile getAbstractFileById(long id)
CancellationProgressTask< Void > rebuildRepository(Consumer< Worker.State > onStateChange)
final ReadOnlyBooleanWrapper cancellable
ObservableList< TagName > getTagNames()
TskData.FileKnown getKnown()
void updateTitle(String title)
static final List<? extends EventType > allTypes
synchronized int countAllEvents()
TagsManager getTagsManager()
void syncTagsFilter(TagsFilter tagsFilter)
final ObservableMap< Long, String > hashSetMap
SingleEvent getEventById(Long eventID)
EventsRepository(Case autoCase, ReadOnlyObjectProperty< ZoomParams > currentStateProperty)
void insertArtifactTags(int currentWorkTotal, List< BlackboardArtifactTag > artifactTags, EventDB.EventTransaction trans)
synchronized Set< SingleEvent > getEventsById(Collection< Long > eventIDs)
List< CombinedEvent > getCombinedEvents(Interval timeRange, RootFilter filter)
final LoadingCache< ZoomParams, Map< EventType, Long > > eventCountsCache
void populateEventType(final ArtifactEventType type, EventDB.EventTransaction trans)
void updateProgress(long workDone, long max)
final LoadingCache< Long, SingleEvent > idToEventCache
default int getArtifactTypeID()
SleuthkitCase getSleuthkitCase()
static final Logger LOGGER
void insertEventsForFile(AbstractFile f, EventDB.EventTransaction trans)
synchronized void invalidateCaches(Set< Long > updatedEventIDs)
final Executor workerExecutor
synchronized boolean isCancelRequested()
final ObservableList< TagName > tagNames
synchronized static Logger getLogger(String name)
final ObservableMap< Long, String > datasourcesMap
CancellationProgressTask< Void > rebuildTags(Consumer< Worker.State > onStateChange)
Set< String > getHashSetNames()
Interval getBoundingEventsInterval(Interval timeRange, RootFilter filter)
final SleuthkitCase skCase
final LoadingCache< ZoomParams, List< EventStripe > > eventStripeCache
final LoadingCache< Object, Long > minCache
ProgressHandle progressHandle
final DBPopulationMode dbPopulationMode
static AttributeEventDescription buildEventDescription(ArtifactEventType type, BlackboardArtifact artf)
void insertContentTags(int currentWorkTotal, List< ContentTag > contentTags, EventDB.EventTransaction trans)
List< Long > getEventIDs(Interval timeRange, RootFilter filter)
DBPopulationWorker dbWorker
final LoadingCache< Object, Long > maxCache