Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 2 additions & 3 deletions app/build.gradle.kts
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,8 @@ android {
applicationId = "net.opendasharchive.openarchive"
minSdk = 29
targetSdk = 36
versionCode = 30033
versionName = "4.0.12"
versionCode = 30035
versionName = "4.0.15"
multiDexEnabled = true
vectorDrawables.useSupportLibrary = true
testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner"
Expand Down Expand Up @@ -281,7 +281,6 @@ dependencies {
implementation(libs.okhttp.logging)
implementation(libs.retrofit)
implementation(libs.retrofit.kotlinx.serialization)
implementation(libs.guardianproject.sardine)

// Images & Media
implementation(libs.coil)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,19 +25,19 @@ import javax.crypto.spec.GCMParameterSpec
// Replaces TinkVaultCredentialStore — same AES-256-GCM + Android Keystore, no Tink dependency.
// Migration: if decryption fails (pre-existing Tink-encrypted data), the credential is cleared
// and the user will be prompted to re-enter their server password on next connection.
//
// DataStore must be a process-wide singleton for a given file (Android requirement). The
// companion object holds the single instance so that the migration-time store (created before
// Koin) and the Koin-injected store share the same underlying DataStore and never conflict.
class TinkVaultCredentialStore(
context: Context,
private val io: CoroutineDispatcher = Dispatchers.IO
) : VaultCredentialStore {

private val appContext = context.applicationContext

private val dataStore: DataStore<Preferences> by lazy {
PreferenceDataStoreFactory.create(
scope = CoroutineScope(SupervisorJob() + io),
produceFile = { appContext.preferencesDataStoreFile(DATASTORE_FILE_NAME) }
)
}
private val dataStore: DataStore<Preferences>
get() = getOrCreateDataStore(appContext)

private fun getOrCreateKey(): SecretKey {
val ks = KeyStore.getInstance(ANDROID_KEYSTORE).apply { load(null) }
Expand Down Expand Up @@ -94,12 +94,22 @@ class TinkVaultCredentialStore(

private fun secretKey(vaultId: Long) = stringPreferencesKey("vault_secret_$vaultId")

private companion object {
companion object {
const val DATASTORE_FILE_NAME = "vault_secure_credentials"
const val KEY_ALIAS = "openarchive_vault_master_key"
const val ANDROID_KEYSTORE = "AndroidKeyStore"
const val TRANSFORMATION = "AES/GCM/NoPadding"
const val GCM_IV_LENGTH = 12
const val GCM_TAG_BITS = 128
private const val ANDROID_KEYSTORE = "AndroidKeyStore"
private const val TRANSFORMATION = "AES/GCM/NoPadding"
private const val GCM_IV_LENGTH = 12
private const val GCM_TAG_BITS = 128

@Volatile private var sharedDataStore: DataStore<Preferences>? = null

private fun getOrCreateDataStore(context: Context): DataStore<Preferences> =
sharedDataStore ?: synchronized(this) {
sharedDataStore ?: PreferenceDataStoreFactory.create(
scope = CoroutineScope(SupervisorJob() + Dispatchers.IO),
produceFile = { context.applicationContext.preferencesDataStoreFile(DATASTORE_FILE_NAME) }
).also { sharedDataStore = it }
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,180 @@
package net.opendasharchive.openarchive.db

import androidx.room3.RoomDatabase

Check warning

Code scanning / detekt

Detects unused imports Warning

Unused import
import com.orm.SugarRecord
import net.opendasharchive.openarchive.core.domain.EvidenceStatus
import net.opendasharchive.openarchive.core.domain.VaultType
import net.opendasharchive.openarchive.core.logger.AppLogger
import net.opendasharchive.openarchive.core.security.VaultCredentialStore
import net.opendasharchive.openarchive.util.DateUtils
import net.opendasharchive.openarchive.util.toLocalDateTime
import net.opendasharchive.openarchive.db.sugar.Collection as SugarCollection
import net.opendasharchive.openarchive.db.sugar.Media as SugarMedia
import net.opendasharchive.openarchive.db.sugar.Project as SugarProject
import net.opendasharchive.openarchive.db.sugar.Space as SugarSpace

/**
* Standalone synchronous migrator — no Koin dependency.
*
* Call this with a temporary [AppDatabase] instance opened directly via
* [androidx.room3.Room.databaseBuilder] BEFORE [startKoin] so that the Koin
* bindings can safely target Room from the very first launch.
*/
object SugarToRoomMigrator {

    // Stage markers persisted after each phase so an interrupted migration is observable
    // and the caller can tell how far a previous run got. Centralised here so a typo in
    // one call site cannot silently break stage tracking.
    private const val STAGE_PROJECTS = "PROJECTS"
    private const val STAGE_COLLECTIONS = "COLLECTIONS"
    private const val STAGE_MEDIA = "MEDIA"
    private const val STAGE_DONE = "DONE"

    /**
     * Reads every Sugar ORM record and upserts it into the corresponding Room DAO.
     *
     * Phases run in dependency order (spaces -> projects -> collections -> media); after
     * each phase the next stage marker is written via [migrationDao]. Each phase is
     * best-effort: a failure to READ the legacy Sugar tables logs, advances the stage and
     * moves on rather than aborting the whole migration. Throws on unrecoverable failure
     * during the Room writes; callers should catch and log.
     */
    @Suppress("LongParameterList") // one DAO per migrated table; a parameter object would add no clarity
    suspend fun migrate(
        vaultDao: VaultDao,
        archiveDao: ArchiveDao,
        submissionDao: SubmissionDao,
        evidenceDao: EvidenceDao,
        migrationDao: MigrationDao,
        credentialStore: VaultCredentialStore
    ) {
        migrateSpaces(vaultDao, migrationDao, credentialStore)
        migrateProjects(archiveDao, migrationDao)
        migrateCollections(submissionDao, migrationDao)
        migrateMedia(evidenceDao)
        migrationDao.upsert(
            MigrationStateEntity(stage = STAGE_DONE, processedCount = 0, totalCount = 0, completedAt = DateUtils.now)
        )
    }

    /**
     * Migrates Sugar [SugarSpace] rows to [VaultEntity] rows, moving each space's
     * password into the encrypted [credentialStore] (secrets never land in Room).
     * Advances the stage marker to [STAGE_PROJECTS] whether or not the read succeeds.
     */
    private suspend fun migrateSpaces(
        vaultDao: VaultDao,
        migrationDao: MigrationDao,
        credentialStore: VaultCredentialStore
    ) {
        val spaces = try {
            SugarSpace.getAll().asSequence().toList()
        } catch (e: Exception) {
            // Deliberately broad: Sugar can throw assorted runtime exceptions when its
            // tables are missing or corrupt. Skip this phase instead of aborting.
            AppLogger.e("SugarToRoomMigrator: migrateSpaces — failed to read Sugar spaces, skipping", e)
            migrationDao.upsert(MigrationStateEntity(stage = STAGE_PROJECTS, processedCount = 0, totalCount = 0))
            return
        }
        AppLogger.i("SugarToRoomMigrator: Migrating ${spaces.size} spaces")

        spaces.forEach { space ->
            val vaultId = vaultDao.upsert(
                VaultEntity(
                    id = space.id,
                    type = when (space.tType) {
                        SugarSpace.Type.WEBDAV -> VaultType.PRIVATE_SERVER
                        SugarSpace.Type.INTERNET_ARCHIVE -> VaultType.INTERNET_ARCHIVE
                        SugarSpace.Type.RAVEN -> VaultType.DWEB_STORAGE
                    },
                    name = space.name,
                    username = space.username,
                    displayName = space.displayname,
                    host = space.host,
                    metaData = space.metaData,
                    licenseUrl = space.license,
                    createdAt = DateUtils.now.toLocalDateTime()
                )
            )
            // Store the secret keyed by the NEW Room vault id, so lookups after
            // migration resolve against the migrated row.
            if (space.password.isNotBlank()) {
                credentialStore.putSecret(vaultId, space.password)
            }
        }
        migrationDao.upsert(MigrationStateEntity(stage = STAGE_PROJECTS, processedCount = 0, totalCount = 0))
    }

    /**
     * Migrates Sugar [SugarProject] rows to [ArchiveEntity] rows.
     * Advances the stage marker to [STAGE_COLLECTIONS] whether or not the read succeeds.
     */
    private suspend fun migrateProjects(archiveDao: ArchiveDao, migrationDao: MigrationDao) {
        val projects = try {
            SugarRecord.findAll(SugarProject::class.java).asSequence().toList()
        } catch (e: Exception) {
            // Deliberately broad — see migrateSpaces: a broken legacy DB must not abort migration.
            AppLogger.e("SugarToRoomMigrator: migrateProjects — failed to read Sugar projects, skipping", e)
            migrationDao.upsert(MigrationStateEntity(stage = STAGE_COLLECTIONS, processedCount = 0, totalCount = 0))
            return
        }
        AppLogger.i("SugarToRoomMigrator: Migrating ${projects.size} projects")

        projects.forEach { project ->
            archiveDao.upsert(
                ArchiveEntity(
                    id = project.id,
                    description = project.description,
                    createdAt = project.created?.time?.toLocalDateTime(),
                    // -1 sentinel preserves rows whose parent space id was null in Sugar.
                    vaultId = project.spaceId ?: -1,
                    archived = project.isArchived,
                    openSubmissionId = project.openCollectionId,
                    licenseUrl = project.licenseUrl,
                    isRemote = false
                )
            )
        }
        migrationDao.upsert(MigrationStateEntity(stage = STAGE_COLLECTIONS, processedCount = 0, totalCount = 0))
    }

    /**
     * Migrates Sugar [SugarCollection] rows to [SubmissionEntity] rows.
     * Advances the stage marker to [STAGE_MEDIA] whether or not the read succeeds.
     */
    private suspend fun migrateCollections(submissionDao: SubmissionDao, migrationDao: MigrationDao) {
        val collections = try {
            SugarRecord.findAll(SugarCollection::class.java).asSequence().toList()
        } catch (e: Exception) {
            // Deliberately broad — see migrateSpaces: a broken legacy DB must not abort migration.
            AppLogger.e("SugarToRoomMigrator: migrateCollections — failed to read Sugar collections, skipping", e)
            migrationDao.upsert(MigrationStateEntity(stage = STAGE_MEDIA, processedCount = 0, totalCount = 0))
            return
        }
        AppLogger.i("SugarToRoomMigrator: Migrating ${collections.size} collections")

        collections.forEach { collection ->
            submissionDao.upsert(
                SubmissionEntity(
                    id = collection.id,
                    // -1 sentinel preserves rows whose parent project id was null in Sugar.
                    archiveId = collection.projectId ?: -1,
                    uploadedAt = collection.uploadDate?.time?.toLocalDateTime(),
                    serverUrl = collection.serverUrl
                )
            )
        }
        migrationDao.upsert(MigrationStateEntity(stage = STAGE_MEDIA, processedCount = 0, totalCount = 0))
    }

    /**
     * Migrates Sugar [SugarMedia] rows to [EvidenceEntity] rows.
     *
     * No stage parameter: this is the final phase, and [migrate] writes the terminal
     * [STAGE_DONE] marker itself after this returns (the previously-unused
     * MigrationDao parameter has been removed).
     */
    private suspend fun migrateMedia(evidenceDao: EvidenceDao) {
        val mediaList = try {
            SugarRecord.findAll(SugarMedia::class.java).asSequence().toList()
        } catch (e: Exception) {
            // Deliberately broad — see migrateSpaces: a broken legacy DB must not abort migration.
            AppLogger.e("SugarToRoomMigrator: migrateMedia — failed to read Sugar media, skipping", e)
            return
        }
        AppLogger.i("SugarToRoomMigrator: Migrating ${mediaList.size} media items")

        mediaList.forEach { media ->
            evidenceDao.upsert(
                EvidenceEntity(
                    id = media.id,
                    originalFilePath = media.originalFilePath,
                    mimeType = media.mimeType,
                    createdAt = media.createDate?.time?.toLocalDateTime(),
                    updatedAt = media.updateDate?.time?.toLocalDateTime(),
                    uploadedAt = media.uploadDate?.time?.toLocalDateTime(),
                    serverUrl = media.serverUrl,
                    title = media.title,
                    description = media.description,
                    author = media.author,
                    location = media.location,
                    tags = media.tags,
                    licenseUrl = media.licenseUrl,
                    mediaHashString = media.mediaHashString,
                    status = when (media.sStatus) {
                        SugarMedia.Status.Local -> EvidenceStatus.LOCAL
                        SugarMedia.Status.Queued -> EvidenceStatus.QUEUED
                        // In-flight uploads are re-queued: the transfer itself does not survive migration.
                        SugarMedia.Status.Uploading -> EvidenceStatus.QUEUED
                        SugarMedia.Status.Uploaded -> EvidenceStatus.UPLOADED
                        SugarMedia.Status.Error -> EvidenceStatus.ERROR
                        else -> EvidenceStatus.NEW
                    },
                    statusMessage = media.statusMessage,
                    archiveId = media.projectId,
                    submissionId = media.collectionId,
                    contentLength = media.contentLength,
                    progress = media.progress,
                    flag = media.flag,
                    priority = media.priority
                )
            )
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,11 @@
import android.net.Uri
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.NonCancellable
import kotlinx.coroutines.SupervisorJob
import kotlinx.coroutines.cancel
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import net.opendasharchive.openarchive.R
import net.opendasharchive.openarchive.analytics.api.AnalyticsEvent
import net.opendasharchive.openarchive.analytics.api.AnalyticsManager
Expand Down Expand Up @@ -199,7 +201,13 @@
scope.cancel()
}

suspend fun jobFailed(exception: Throwable) {
suspend fun jobFailed(exception: Throwable) = withContext(NonCancellable) {

Check warning

Code scanning / detekt

One method should have one responsibility. Long methods tend to handle many things at once. Prefer smaller methods to make them easier to understand. Warning

The function jobFailed is too long (133). The maximum length is 60.
// NonCancellable ensures DB writes and bus events always complete even if the
// parent serviceScope is being cancelled (e.g. onStopJob). Without it,
// suspension points inside jobFailed throw CancellationException, which propagates
// to the outer catch in upload() and calls jobFailed a second time — producing
// duplicate "Upload cancelled" log entries and leaving evidence in a bad state.

// TorNotReadyException is transient — re-queue silently so the item retries when Tor connects.
if (exception is TorNotReadyException) {
AppLogger.i("Tor not ready during upload, re-queuing item ${mEvidence.id}")
Expand All @@ -213,7 +221,7 @@
isUploaded = false
)
scope.cancel()
return
return@withContext
}

// If an upload was cancelled, reset to QUEUED so it's retried on next session,
Expand Down Expand Up @@ -247,7 +255,7 @@
isUploaded = false
)
scope.cancel()
return
return@withContext
}

mEvidence = mEvidence.copy(
Expand All @@ -258,49 +266,49 @@

AppLogger.e(exception)

// Track failed upload analytics (GDPR-compliant - no PII)
val vault = spaceRepository.getSpaceById(mEvidence.vaultId)
val backendType = vault?.type?.friendlyName ?: "Unknown"
val fileType = getFileType(mEvidence.mimeType)
val fileSizeKB = mEvidence.contentLength / 1024

// Categorize error
val errorCategory = when (exception) {
is IOException -> "network"
is FileNotFoundException -> "file_not_found"
is SecurityException -> "permission"
else -> "unknown"
}
// Track failed upload analytics (GDPR-compliant - no PII)
val vault = spaceRepository.getSpaceById(mEvidence.vaultId)
val backendType = vault?.type?.friendlyName ?: "Unknown"
val fileType = getFileType(mEvidence.mimeType)
val fileSizeKB = mEvidence.contentLength / 1024

// Categorize error
val errorCategory = when (exception) {
is IOException -> "network"
is FileNotFoundException -> "file_not_found"
is SecurityException -> "permission"
else -> "unknown"
}

analyticsManager.trackUploadFailed(
backendType = backendType,
fileType = fileType,
errorCategory = errorCategory,
fileSizeKB = fileSizeKB
)
analyticsManager.trackUploadFailed(
backendType = backendType,
fileType = fileType,
errorCategory = errorCategory,
fileSizeKB = fileSizeKB
)

// Track in session
sessionTracker.trackUploadFailed()
// Track in session
sessionTracker.trackUploadFailed()

// Track error for drop-off analysis
analyticsManager.trackError(
errorCategory = errorCategory,
screenName = "Upload",
backendType = backendType
)
// Track error for drop-off analysis
analyticsManager.trackError(
errorCategory = errorCategory,
screenName = "Upload",
backendType = backendType
)

BroadcastManager.postChange(
context = mContext,
collectionId = mEvidence.submissionId,
mediaId = mEvidence.id
)
UploadEventBus.emitChanged(
projectId = mEvidence.archiveId,
collectionId = mEvidence.submissionId,
mediaId = mEvidence.id,
progress = -1,
isUploaded = false
)
BroadcastManager.postChange(
context = mContext,
collectionId = mEvidence.submissionId,
mediaId = mEvidence.id
)
UploadEventBus.emitChanged(
projectId = mEvidence.archiveId,
collectionId = mEvidence.submissionId,
mediaId = mEvidence.id,
progress = -1,
isUploaded = false
)
scope.cancel()
}

Expand Down
Loading
Loading