diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java index 0cddc3a9d53..adc72629c2c 100644 --- a/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java +++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java @@ -33,7 +33,6 @@ import org.apache.jackrabbit.oak.composite.CompositeSegmentStoreFixture; import org.apache.jackrabbit.oak.segment.aws.fixture.SegmentAwsFixture; import org.apache.jackrabbit.oak.segment.azure.fixture.SegmentAzureFixture; -import org.apache.jackrabbit.oak.segment.azure.fixture.SegmentAzureFixtureV8; import org.apache.jackrabbit.oak.segment.fixture.SegmentTarFixture; public class NodeStoreFixtures { @@ -44,8 +43,6 @@ public class NodeStoreFixtures { public static final NodeStoreFixture SEGMENT_AWS = new SegmentAwsFixture(); - public static final NodeStoreFixture SEGMENT_AZURE_V8 = new SegmentAzureFixtureV8(); - public static final NodeStoreFixture SEGMENT_AZURE = new SegmentAzureFixture(); public static final NodeStoreFixture DOCUMENT_NS = new DocumentMongoFixture(); @@ -83,9 +80,6 @@ public static Collection asJunitParameters(Set if (fixtures.contains(FixturesHelper.Fixture.SEGMENT_AZURE)) { configuredFixtures.add(SEGMENT_AZURE); } - if (fixtures.contains(FixturesHelper.Fixture.SEGMENT_AZURE_V8)) { - configuredFixtures.add(SEGMENT_AZURE_V8); - } if (fixtures.contains(FixturesHelper.Fixture.COMPOSITE_SEGMENT)) { configuredFixtures.add(COMPOSITE_SEGMENT); } diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java index 916ecd7d090..bbb677bca87 100644 --- a/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java +++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java @@ -459,7 +459,7 @@ public void moveToDescendant() { if (fixture == NodeStoreFixtures.SEGMENT_TAR || fixture == NodeStoreFixtures.MEMORY_NS || fixture == NodeStoreFixtures.COMPOSITE_MEM || fixture == NodeStoreFixtures.COMPOSITE_SEGMENT || fixture == NodeStoreFixtures.COW_DOCUMENT || fixture == NodeStoreFixtures.SEGMENT_AWS - || fixture == NodeStoreFixtures.SEGMENT_AZURE_V8 || fixture == NodeStoreFixtures.SEGMENT_AZURE) { + || fixture == NodeStoreFixtures.SEGMENT_AZURE) { assertTrue(x.moveTo(x, "xx")); assertFalse(x.exists()); assertFalse(test.hasChildNode("x")); diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java index 9a539cd3cc0..64dd0fcf87e 100644 --- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java @@ -28,10 +28,8 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.commons.io.FileUtils; import org.apache.jackrabbit.core.data.FileDataStore; import org.apache.jackrabbit.oak.Oak; @@ -44,7 +42,8 @@ import org.apache.jackrabbit.oak.segment.aws.AwsContext; 
import org.apache.jackrabbit.oak.segment.aws.AwsPersistence; import org.apache.jackrabbit.oak.segment.aws.Configuration; -import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzurePersistenceManager; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; @@ -183,7 +182,7 @@ public SegmentTarFixture build() { private StandbyClientSync[] clientSyncs; private ScheduledExecutorService[] executors; - private CloudBlobContainer[] containers; + private BlobContainerClient[] containers; public SegmentTarFixture(SegmentTarFixtureBuilder builder) { this(builder, false, -1); @@ -285,11 +284,9 @@ public Oak getOak(int clusterId) throws Exception { } if (azureConnectionString != null) { - CloudStorageAccount cloud = CloudStorageAccount.parse(azureConnectionString); - CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(azureContainerName); - container.createIfNotExists(); - CloudBlobDirectory directory = container.getDirectoryReference(azureRootPath); - fileStoreBuilder.withCustomPersistence(new AzurePersistenceV8(directory)); + String azureAccountName = getAzureAccountName(azureConnectionString); + AzurePersistence azurePersistence = AzurePersistenceManager.createAzurePersistence(azureConnectionString, null, azureAccountName, azureContainerName, azureRootPath, false, true); + fileStoreBuilder.withCustomPersistence(azurePersistence); } BlobStore blobStore = null; @@ -336,12 +333,10 @@ public Oak[] setUpCluster(int n, StatisticsProvider statsProvider) throws Except } if (azureConnectionString != null) { - CloudStorageAccount cloud = CloudStorageAccount.parse(azureConnectionString); - CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(azureContainerName); - container.createIfNotExists(); - containers[i] = container; - CloudBlobDirectory directory = container.getDirectoryReference(azureRootPath + "/primary-" + i); - builder.withCustomPersistence(new AzurePersistenceV8(directory)); + String azureAccountName = getAzureAccountName(azureConnectionString); + AzurePersistence azurePersistence = AzurePersistenceManager.createAzurePersistence(azureConnectionString, null, azureAccountName, azureContainerName, azureRootPath + "/primary-" + i, false, true); + containers[i] = azurePersistence.getReadBlobContainerClient(); + builder.withCustomPersistence(azurePersistence); } if (blobStore != null) { @@ -481,7 +476,7 @@ private void init(int n) { blobStoreFixtures = new BlobStoreFixture[blobStoresLength]; if (azureConnectionString != null) { - containers = new CloudBlobContainer[n]; + containers = new BlobContainerClient[n]; } } @@ -514,11 +509,11 @@ public void tearDownCluster() { } if (containers != null) { - for (CloudBlobContainer container : containers) { + for (BlobContainerClient container : containers) { if (container != null) { try { container.deleteIfExists(); - } catch (StorageException e) { + } catch (BlobStorageException e) { log.error("Can't remove container", e); } } @@ -528,6 +523,27 @@ public void tearDownCluster() { FileUtils.deleteQuietly(parentPath); } + /** + * Extracts the Azure Storage Account Name from a connection string. + * @param azureConnectionString The full Azure Storage connection string. + * @return The account name, or null if not found. 
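+ * For example, a connection string of the form {@code DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=...} (placeholder values) yields {@code myaccount}.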
+ */ + private String getAzureAccountName(String azureConnectionString) { + if (azureConnectionString == null || azureConnectionString.isEmpty()) { + return null; + } + + String[] parts = azureConnectionString.split(";"); + for (String part : parts) { + String[] keyValue = part.split("=", 2); + if (keyValue.length == 2 && keyValue[0].trim().equalsIgnoreCase("AccountName")) { + return keyValue[1].trim(); + } + } + + return null; + } + public BlobStoreFixture[] getBlobStoreFixtures() { return blobStoreFixtures; } diff --git a/oak-segment-azure/pom.xml b/oak-segment-azure/pom.xml index 272b5e28054..2f7c3a409ee 100644 --- a/oak-segment-azure/pom.xml +++ b/oak-segment-azure/pom.xml @@ -170,10 +170,6 @@ - - com.microsoft.azure - azure-storage - com.microsoft.azure azure-keyvault-core diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java index ceca1f62d13..8cc7ce390a5 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java @@ -18,8 +18,6 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; -import org.apache.jackrabbit.oak.segment.azure.v8.AzureSegmentStoreV8; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.osgi.framework.ServiceRegistration; import org.osgi.service.component.ComponentContext; @@ -51,34 +49,18 @@ public class AzureSegmentStoreService { private ServiceRegistration registration; - public static final String SEGMENT_AZURE_V_12_ENABLED = "segment.azure.v12.enabled"; - - private final boolean useAzureSdkV12 = Boolean.getBoolean(SEGMENT_AZURE_V_12_ENABLED); - - @Activate public void activate(ComponentContext context, Configuration config) throws IOException { - if (useAzureSdkV12) { - log.info("Starting node store using Azure SDK 12"); - AzurePersistence persistence = AzurePersistenceManager.createAzurePersistenceFrom(config); - registration = context.getBundleContext() - .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ - put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistence.class.getName(), config.accountName(), config.rootPath())); - if (!Objects.equals(config.role(), "")) { - put("role", config.role()); - } - }}); - } else { - log.info("Starting node store using Azure SDK 8"); - AzurePersistenceV8 persistence = AzureSegmentStoreV8.createAzurePersistenceFrom(config); - registration = context.getBundleContext() - .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ - put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistenceV8.class.getName(), config.accountName(), config.rootPath())); - if (!Objects.equals(config.role(), "")) { - put("role", config.role()); - } - }}); - } + log.info("Starting node store using Azure SDK 12"); + AzurePersistence persistence = AzurePersistenceManager.createAzurePersistenceFrom(config); + registration = context.getBundleContext() + .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ + put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistence.class.getName(), config.accountName(), config.rootPath())); + if (!Objects.equals(config.role(), "")) { + put("role", config.role()); + } + }}); + } 
@Deactivate diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureConfigurationParserUtils.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureConfigurationParserUtils.java index 9ee8f1098fe..6d3562eef6b 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureConfigurationParserUtils.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureConfigurationParserUtils.java @@ -123,6 +123,7 @@ public static Map parseAzureConfigurationFromCustomConnection(St config.put(KEY_CONTAINER_NAME, tempConfig.get(CONTAINER_NAME)); config.put(KEY_DIR, tempConfig.get(DIRECTORY)); config.put(KEY_SHARED_ACCESS_SIGNATURE, tempConfig.get(SHARED_ACCESS_SIGNATURE)); + config.put(KEY_ACCOUNT_NAME, tempConfig.get(ACCOUNT_NAME)); return config; } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java deleted file mode 100644 index 59700d656f9..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.segment.azure.util; - -import com.microsoft.azure.storage.RetryLinearRetry; -import com.microsoft.azure.storage.blob.BlobRequestOptions; - -import java.util.concurrent.TimeUnit; - -public class AzureRequestOptionsV8 { - - static final String RETRY_ATTEMPTS_PROP = "segment.azure.retry.attempts"; - static final int DEFAULT_RETRY_ATTEMPTS = 5; - - static final String RETRY_BACKOFF_PROP = "segment.azure.retry.backoff"; - static final int DEFAULT_RETRY_BACKOFF_SECONDS = 5; - - static final String TIMEOUT_EXECUTION_PROP = "segment.timeout.execution"; - static final int DEFAULT_TIMEOUT_EXECUTION = 30; - - static final String TIMEOUT_INTERVAL_PROP = "segment.timeout.interval"; - static final int DEFAULT_TIMEOUT_INTERVAL = 1; - - static final String WRITE_TIMEOUT_EXECUTION_PROP = "segment.write.timeout.execution"; - - static final String WRITE_TIMEOUT_INTERVAL_PROP = "segment.write.timeout.interval"; - - private AzureRequestOptionsV8() { - } - - /** - * Apply default request options to the blobRequestOptions if they are not already set. 
- * @param blobRequestOptions - */ - public static void applyDefaultRequestOptions(BlobRequestOptions blobRequestOptions) { - if (blobRequestOptions.getRetryPolicyFactory() == null) { - int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); - if (retryAttempts > 0) { - Integer retryBackoffSeconds = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); - blobRequestOptions.setRetryPolicyFactory(new RetryLinearRetry((int) TimeUnit.SECONDS.toMillis(retryBackoffSeconds), retryAttempts)); - } - } - if (blobRequestOptions.getMaximumExecutionTimeInMs() == null) { - int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); - if (timeoutExecution > 0) { - blobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(timeoutExecution)); - } - } - if (blobRequestOptions.getTimeoutIntervalInMs() == null) { - int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); - if (timeoutInterval > 0) { - blobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(timeoutInterval)); - } - } - } - - /** - * Optimise the blob request options for write operations. This method does not change the original blobRequestOptions. - * This method also applies the default request options if they are not already set, by calling {@link #applyDefaultRequestOptions(BlobRequestOptions)} - * @param blobRequestOptions - * @return write optimised blobRequestOptions - */ - public static BlobRequestOptions optimiseForWriteOperations(BlobRequestOptions blobRequestOptions) { - BlobRequestOptions writeOptimisedBlobRequestOptions = new BlobRequestOptions(blobRequestOptions); - applyDefaultRequestOptions(writeOptimisedBlobRequestOptions); - - Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP); - if (writeTimeoutExecution != null) { - writeOptimisedBlobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutExecution)); - } - - Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP); - if (writeTimeoutInterval != null) { - writeOptimisedBlobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutInterval)); - } - - return writeOptimisedBlobRequestOptions; - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java deleted file mode 100644 index d63f6144a35..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.CopyStatus; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; -import org.apache.jackrabbit.oak.segment.remote.RemoteUtilities; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; -import org.jetbrains.annotations.NotNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.UUID; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; - -import static org.apache.jackrabbit.oak.commons.conditions.Validate.checkArgument; -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.getName; - -public class AzureArchiveManagerV8 implements SegmentArchiveManager { - - private static final Logger log = LoggerFactory.getLogger(AzureSegmentArchiveReaderV8.class); - - private static final String DELETED_ARCHIVE_MARKER = "deleted"; - private static final String CLOSED_ARCHIVE_MARKER = "closed"; - - protected final CloudBlobDirectory cloudBlobDirectory; - - protected final IOMonitor ioMonitor; - - protected final FileStoreMonitor monitor; - private WriteAccessController writeAccessController; - - public AzureArchiveManagerV8(CloudBlobDirectory segmentstoreDirectory, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, WriteAccessController writeAccessController) { - this.cloudBlobDirectory = segmentstoreDirectory; - this.ioMonitor = ioMonitor; - this.monitor = fileStoreMonitor; - this.writeAccessController = writeAccessController; - } - - @Override - public List<String> listArchives() throws IOException { - try { - List<String> archiveNames = StreamSupport.stream(cloudBlobDirectory - .listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null) - .spliterator(), false) - .filter(i -> i instanceof CloudBlobDirectory) - .map(i -> (CloudBlobDirectory) i) - .map(AzureUtilitiesV8::getName) - .filter(name -> name.endsWith(".tar")) - .collect(Collectors.toList()); - - Iterator<String> it = archiveNames.iterator(); - while (it.hasNext()) { - String archiveName = it.next(); - if (deleteInProgress(archiveName)) { - if (writeAccessController.isWritingAllowed()) { - delete(archiveName); - } - it.remove(); - } - } - return archiveNames; - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } - } - - /** - * Check if the archive is being deleted.
- * - * @param archiveName - * @return true if the "deleted" marker exists - */ - private boolean deleteInProgress(String archiveName) throws IOException, URISyntaxException, StorageException { - return getDirectory(archiveName).getBlockBlobReference(DELETED_ARCHIVE_MARKER).exists(); - } - - @Override - public SegmentArchiveReader open(String archiveName) throws IOException { - try { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - if (!archiveDirectory.getBlockBlobReference(CLOSED_ARCHIVE_MARKER).exists()) { - return null; - } - return new AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor); - } catch (StorageException | URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - public SegmentArchiveReader forceOpen(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - return new AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor); - } - - @Override - public SegmentArchiveWriter create(String archiveName) throws IOException { - return new AzureSegmentArchiveWriterV8(getDirectory(archiveName), ioMonitor, monitor, writeAccessController); - } - - @Override - public boolean delete(String archiveName) { - try { - uploadDeletedMarker(archiveName); - getBlobs(archiveName) - .forEach(cloudBlob -> { - try { - String blobName = getName(cloudBlob); - if (!blobName.equals(DELETED_ARCHIVE_MARKER) && !blobName.equals(CLOSED_ARCHIVE_MARKER)) { - writeAccessController.checkWritingAllowed(); - cloudBlob.delete(); - } - } catch (StorageException e) { - log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); - } - }); - deleteClosedMarker(archiveName); - deleteDeletedMarker(archiveName); - return true; - } catch (IOException | URISyntaxException | StorageException e) { - log.error("Can't delete archive {}", archiveName, e); - return false; - } - } - - private void deleteDeletedMarker(String archiveName) throws IOException, URISyntaxException, StorageException { - writeAccessController.checkWritingAllowed(); - getDirectory(archiveName).getBlockBlobReference(DELETED_ARCHIVE_MARKER).deleteIfExists(); - } - - private void deleteClosedMarker(String archiveName) throws IOException, URISyntaxException, StorageException { - writeAccessController.checkWritingAllowed(); - getDirectory(archiveName).getBlockBlobReference(CLOSED_ARCHIVE_MARKER).deleteIfExists(); - } - - private void uploadDeletedMarker(String archiveName) throws IOException, URISyntaxException, StorageException { - writeAccessController.checkWritingAllowed(); - getDirectory(archiveName).getBlockBlobReference(DELETED_ARCHIVE_MARKER).openOutputStream().close(); - } - - @Override - public boolean renameTo(String from, String to) { - try { - CloudBlobDirectory targetDirectory = getDirectory(to); - getBlobs(from) - .forEach(cloudBlob -> { - try { - writeAccessController.checkWritingAllowed(); - renameBlob(cloudBlob, targetDirectory); - } catch (IOException e) { - log.error("Can't rename segment {}", cloudBlob.getUri().getPath(), e); - } - }); - return true; - } catch (IOException e) { - log.error("Can't rename archive {} to {}", from, to, e); - return false; - } - } - - @Override - public void copyFile(String from, String to) throws IOException { - CloudBlobDirectory targetDirectory = getDirectory(to); - getBlobs(from) - .forEach(cloudBlob -> { - try { - copyBlob(cloudBlob, targetDirectory); - } catch (IOException e) { - log.error("Can't copy segment {}", cloudBlob.getUri().getPath(), e); - } - }); - } - - @Override - public boolean 
exists(String archiveName) { - try { - return getDirectory(archiveName).listBlobsSegmented(null, false, null, 1, null, null, null).getLength() > 0; - } catch (IOException | StorageException | URISyntaxException e) { - log.error("Can't check the existence of {}", archiveName, e); - return false; - } - } - - @Override - public void recoverEntries(String archiveName, LinkedHashMap<UUID, byte[]> entries) throws IOException { - Pattern pattern = Pattern.compile(RemoteUtilities.SEGMENT_FILE_NAME_PATTERN); - List<RecoveredEntry> entryList = new ArrayList<>(); - - for (CloudBlob b : getBlobs(archiveName)) { - String name = getName(b); - Matcher m = pattern.matcher(name); - if (!m.matches()) { - continue; - } - int position = Integer.parseInt(m.group(1), 16); - UUID uuid = UUID.fromString(m.group(2)); - long length = b.getProperties().getLength(); - if (length > 0) { - byte[] data = new byte[(int) length]; - try { - b.downloadToByteArray(data, 0); - } catch (StorageException e) { - throw new IOException(e); - } - entryList.add(new RecoveredEntry(position, uuid, data, name)); - } - } - Collections.sort(entryList); - - int i = 0; - for (RecoveredEntry e : entryList) { - if (e.position != i) { - log.warn("Missing entry {}.??? when recovering {}. No more segments will be read.", String.format("%04X", i), archiveName); - break; - } - log.info("Recovering segment {}/{}", archiveName, e.fileName); - entries.put(e.uuid, e.data); - i++; - } - } - - private void delete(String archiveName, Set<UUID> recoveredEntries) throws IOException { - getBlobs(archiveName) - .forEach(cloudBlob -> { - String name = getName(cloudBlob); - if (RemoteUtilities.isSegmentName(name) && !recoveredEntries.contains(RemoteUtilities.getSegmentUUID(name))) { - try { - cloudBlob.delete(); - } catch (StorageException e) { - log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); - } - } - }); - } - - /** - * Method is not deleting segments from the directory given with {@code archiveName}, if they are in the set of recovered segments. - * Reason for that is because during execution of this method, remote repository can be accessed by another application, and deleting a valid segment can - * cause consistency issues there. - */ - @Override - public void backup(@NotNull String archiveName, @NotNull String backupArchiveName, @NotNull Set<UUID> recoveredEntries) throws IOException { - copyFile(archiveName, backupArchiveName); - delete(archiveName, recoveredEntries); - } - - protected CloudBlobDirectory getDirectory(String archiveName) throws IOException { - try { - return cloudBlobDirectory.getDirectoryReference(archiveName); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } - - private List<CloudBlob> getBlobs(String archiveName) throws IOException { - return AzureUtilitiesV8.getBlobs(getDirectory(archiveName)); - } - - private void renameBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException { - copyBlob(blob, newParent); - try { - blob.delete(); - } catch (StorageException e) { - throw new IOException(e); - } - } - - private void copyBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException { - checkArgument(blob instanceof CloudBlockBlob, "Only page blobs are supported for the rename"); - try { - String blobName = getName(blob); - CloudBlockBlob newBlob = newParent.getBlockBlobReference(blobName); - newBlob.startCopy(blob.getUri()); - - boolean isStatusPending = true; - while (isStatusPending) { - newBlob.downloadAttributes(); - if (newBlob.getCopyState().getStatus() == CopyStatus.PENDING) { - Thread.sleep(100); - } else { - isStatusPending = false; - } - } - - CopyStatus finalStatus = newBlob.getCopyState().getStatus(); - if (newBlob.getCopyState().getStatus() != CopyStatus.SUCCESS) { - throw new IOException("Invalid copy status for " + blob.getUri().getPath() + ": " + finalStatus); - } - } catch (StorageException | InterruptedException | URISyntaxException e) { - throw new IOException(e); - } - } - - private static class RecoveredEntry implements Comparable<RecoveredEntry> { - - private final byte[] data; - - private final UUID uuid; - - private final int position; - - private final String fileName; - - public RecoveredEntry(int position, UUID uuid, byte[] data, String fileName) { - this.data = data; - this.uuid = uuid; - this.position = position; - this.fileName = fileName; - } - - @Override - public int compareTo(RecoveredEntry o) { - return Integer.compare(this.position, o.position); - } - } - -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java deleted file mode 100644 index 02d9c9f63ea..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.List; - -public class AzureGCJournalFileV8 implements GCJournalFile { - - private final CloudAppendBlob gcJournal; - - public AzureGCJournalFileV8(CloudAppendBlob gcJournal) { - this.gcJournal = gcJournal; - } - - @Override - public void writeLine(String line) throws IOException { - try { - if (!gcJournal.exists()) { - gcJournal.createOrReplace(); - } - gcJournal.appendText(line + "\n", StandardCharsets.UTF_8.name(), null, null, null); - } catch (StorageException e) { - throw new IOException(e); - } - } - - @Override - public List<String> readLines() throws IOException { - try { - if (!gcJournal.exists()) { - return Collections.emptyList(); - } - byte[] data = new byte[(int) gcJournal.getProperties().getLength()]; - gcJournal.downloadToByteArray(data, 0); - return IOUtils.readLines(new ByteArrayInputStream(data), Charset.defaultCharset()); - } catch (StorageException e) { - throw new IOException(e); - } - } - - @Override - public void truncate() throws IOException { - try { - if (gcJournal.exists()) { - gcJournal.delete(); - } - } catch (StorageException e) { - throw new IOException(e); - } - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java deleted file mode 100644 index ceda9b4a66f..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import org.apache.jackrabbit.oak.commons.collections.ListUtils; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.ListBlobItem; -import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8; -import org.apache.jackrabbit.oak.segment.azure.util.CaseInsensitiveKeysMapAccess; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public class AzureJournalFileV8 implements JournalFile { - - private static final Logger log = LoggerFactory.getLogger(AzureJournalFileV8.class); - - private static final int JOURNAL_LINE_LIMIT = Integer.getInteger("org.apache.jackrabbit.oak.segment.azure.journal.lines", 40_000); - - private final CloudBlobDirectory directory; - - private final String journalNamePrefix; - - private final int lineLimit; - - private final WriteAccessController writeAccessController; - - AzureJournalFileV8(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController, int lineLimit) { - this.directory = directory; - this.journalNamePrefix = journalNamePrefix; - this.lineLimit = lineLimit; - this.writeAccessController = writeAccessController; - } - - public AzureJournalFileV8(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController) { - this(directory, journalNamePrefix, writeAccessController, JOURNAL_LINE_LIMIT); - } - - @Override - public JournalFileReader openJournalReader() throws IOException { - return new CombinedReader(getJournalBlobs()); - } - - @Override - public JournalFileWriter openJournalWriter() throws IOException { - return new AzureJournalWriter(); - } - - @Override - public String getName() { - return journalNamePrefix; - } - - @Override - public boolean exists() { - try { - return !getJournalBlobs().isEmpty(); - } catch (IOException e) { - log.error("Can't check if the file exists", e); - return false; - } - } - - private String getJournalFileName(int index) { - return String.format("%s.%03d", journalNamePrefix, index); - } - - private List<CloudAppendBlob> getJournalBlobs() throws IOException { - try { - List<CloudAppendBlob> result = new ArrayList<>(); - for (ListBlobItem b : directory.listBlobs(journalNamePrefix)) { - if (b instanceof CloudAppendBlob) { - result.add((CloudAppendBlob) b); - } else { - log.warn("Invalid blob type: {} {}", b.getUri(), b.getClass()); - } - } - result.sort(Comparator.comparing(AzureUtilitiesV8::getName).reversed()); - return result; - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } - } - - private static class AzureJournalReader implements JournalFileReader { - - private final CloudBlob blob; - - private ReverseFileReaderV8 reader; - - private boolean metadataFetched; - - private boolean firstLineReturned; - - private AzureJournalReader(CloudBlob blob) { - this.blob = blob; - } - - @Override - public String readLine() throws IOException { - if (reader == null) { - try { - if (!metadataFetched) { - blob.downloadAttributes(); - metadataFetched = true; - Map<String, String> metadata = CaseInsensitiveKeysMapAccess.convert(blob.getMetadata()); - if (metadata.containsKey("lastEntry")) { - firstLineReturned = true; - return metadata.get("lastEntry"); - } - } - reader = new ReverseFileReaderV8(blob); - if (firstLineReturned) { - while("".equals(reader.readLine())); // the first line was already returned, let's fast-forward it - } - } catch (StorageException e) { - throw new IOException(e); - } - } - return reader.readLine(); - } - - @Override - public void close() throws IOException { - } - } - - private class AzureJournalWriter implements JournalFileWriter { - - private CloudAppendBlob currentBlob; - - private int lineCount; - - private final BlobRequestOptions writeOptimisedBlobRequestOptions; - - public AzureJournalWriter() throws IOException { - writeOptimisedBlobRequestOptions = AzureRequestOptionsV8.optimiseForWriteOperations(directory.getServiceClient().getDefaultRequestOptions()); - - List<CloudAppendBlob> blobs = getJournalBlobs(); - if (blobs.isEmpty()) { - try { - currentBlob = directory.getAppendBlobReference(getJournalFileName(1)); - currentBlob.createOrReplace(); - currentBlob.downloadAttributes(); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } - } else { - currentBlob = blobs.get(0); - } - try { - currentBlob.downloadAttributes(); - } catch (StorageException e) { - throw new IOException(e); - } - String lc = currentBlob.getMetadata().get("lineCount"); - lineCount = lc == null ? 0 : Integer.parseInt(lc); - } - - @Override - public void truncate() throws IOException { - try { - writeAccessController.checkWritingAllowed(); - - for (CloudAppendBlob cloudAppendBlob : getJournalBlobs()) { - cloudAppendBlob.delete(DeleteSnapshotsOption.NONE, null, writeOptimisedBlobRequestOptions, null); - } - - createNextFile(0); - } catch (StorageException e) { - throw new IOException(e); - } - } - - @Override - public void writeLine(String line) throws IOException { - batchWriteLines(List.of(line)); - } - - @Override - public void batchWriteLines(List<String> lines) throws IOException { - writeAccessController.checkWritingAllowed(); - - if (lines.isEmpty()) { - return; - } - int firstBlockSize = Math.min(lineLimit - lineCount, lines.size()); - List<String> firstBlock = lines.subList(0, firstBlockSize); - List<List<String>> remainingBlocks = ListUtils.partitionList(lines.subList(firstBlockSize, lines.size()), lineLimit); - List<List<String>> allBlocks = new ArrayList<>(); - allBlocks.addAll(firstBlock.isEmpty() ? List.of() : List.of(firstBlock)); - allBlocks.addAll(remainingBlocks); - - for (List<String> entries : allBlocks) { - if (lineCount >= lineLimit) { - int parsedSuffix = parseCurrentSuffix(); - createNextFile(parsedSuffix); - } - StringBuilder text = new StringBuilder(); - for (String line : entries) { - text.append(line).append("\n"); - } - try { - currentBlob.appendText(text.toString(), null, null, writeOptimisedBlobRequestOptions, null); - currentBlob.getMetadata().put("lastEntry", entries.get(entries.size() - 1)); - lineCount += entries.size(); - currentBlob.getMetadata().put("lineCount", Integer.toString(lineCount)); - currentBlob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { - throw new IOException(e); - } - } - } - - private void createNextFile(int suffix) throws IOException { - try { - currentBlob = directory.getAppendBlobReference(getJournalFileName(suffix + 1)); - currentBlob.createOrReplace(null, writeOptimisedBlobRequestOptions, null); - lineCount = 0; - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } - } - - private int parseCurrentSuffix() { - String name = AzureUtilitiesV8.getName(currentBlob); - Pattern pattern = Pattern.compile(Pattern.quote(journalNamePrefix) + "\\.(\\d+)" ); - Matcher matcher = pattern.matcher(name); - int parsedSuffix; - if (matcher.find()) { - String suffix = matcher.group(1); - try { - parsedSuffix = Integer.parseInt(suffix); - } catch (NumberFormatException e) { - log.warn("Can't parse suffix for journal file {}", name); - parsedSuffix = 0; - } - } else { - log.warn("Can't parse journal file name {}", name); - parsedSuffix = 0; - } - return parsedSuffix; - } - - @Override - public void close() throws IOException { - // do nothing - } - } - - private static class CombinedReader implements JournalFileReader { - - private final Iterator<AzureJournalReader> readers; - - private JournalFileReader currentReader; - - private CombinedReader(List<CloudAppendBlob> blobs) { - readers = blobs.stream().map(AzureJournalReader::new).iterator(); - } - - @Override - public String readLine() throws IOException { - String line; - do { - if (currentReader == null) { - if (!readers.hasNext()) { - return null; - } - currentReader = readers.next(); - } - do { - line = currentReader.readLine(); - } while ("".equals(line)); - if (line == null) { - currentReader.close(); - currentReader = null; - } - } while (line == null); - return line; - } - - @Override - public void close() throws IOException { - while (readers.hasNext()) { - readers.next().close(); - } - if (currentReader != null) { - currentReader.close(); - currentReader = null; - } - } - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java deleted file mode 100644 index 28568c780f6..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.Properties; - -public class AzureManifestFileV8 implements ManifestFile { - - private static final Logger log = LoggerFactory.getLogger(AzureManifestFileV8.class); - - private final CloudBlockBlob manifestBlob; - - public AzureManifestFileV8(CloudBlockBlob manifestBlob) { - this.manifestBlob = manifestBlob; - } - - @Override - public boolean exists() { - try { - return manifestBlob.exists(); - } catch (StorageException e) { - log.error("Can't check if the manifest exists", e); - return false; - } - } - - @Override - public Properties load() throws IOException { - Properties properties = new Properties(); - if (exists()) { - long length = manifestBlob.getProperties().getLength(); - byte[] data = new byte[(int) length]; - try { - manifestBlob.downloadToByteArray(data, 0); - } catch (StorageException e) { - throw new IOException(e); - } - properties.load(new ByteArrayInputStream(data)); - } - return properties; - } - - @Override - public void save(Properties properties) throws IOException { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - properties.store(bos, null); - - byte[] data = bos.toByteArray(); - try { - manifestBlob.uploadFromByteArray(data, 0, data.length); - } catch (StorageException e) { - throw new IOException(e); - } - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzurePersistenceV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzurePersistenceV8.java deleted file mode 100644 index 0b056f768fa..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzurePersistenceV8.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.Date; -import java.util.EnumSet; -import java.util.concurrent.TimeUnit; - -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.RequestCompletedEvent; -import com.microsoft.azure.storage.StorageEvent; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.ListBlobItem; -import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; -import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; -import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; -import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; -import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class AzurePersistenceV8 implements SegmentNodeStorePersistence { - private static final Logger log = LoggerFactory.getLogger(AzurePersistenceV8.class); - - protected final CloudBlobDirectory segmentstoreDirectory; - - protected WriteAccessController writeAccessController = new WriteAccessController(); - - public AzurePersistenceV8(CloudBlobDirectory segmentStoreDirectory) { - this.segmentstoreDirectory = segmentStoreDirectory; - - AzureRequestOptionsV8.applyDefaultRequestOptions(segmentStoreDirectory.getServiceClient().getDefaultRequestOptions()); - } - - @Override - public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { - attachRemoteStoreMonitor(remoteStoreMonitor); - return new AzureArchiveManagerV8(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController); - } - - @Override - public boolean segmentFilesExist() { - try { - for (ListBlobItem i : segmentstoreDirectory.listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null)) { - if (i instanceof CloudBlobDirectory) { - CloudBlobDirectory dir = (CloudBlobDirectory) i; - String name = AzureUtilitiesV8.getName(dir); - if (name.endsWith(".tar")) { - return true; - } - } - } - return false; - } catch (StorageException | URISyntaxException e) { - log.error("Can't check if the segment archives exists", e); - return false; - } - } - - @Override - public JournalFile getJournalFile() { - return new AzureJournalFileV8(segmentstoreDirectory, "journal.log", writeAccessController); - } - - @Override - public GCJournalFile getGCJournalFile() throws IOException { - return new AzureGCJournalFileV8(getAppendBlob("gc.log")); - } - - @Override - public ManifestFile getManifestFile() throws IOException { - return new AzureManifestFileV8(getBlockBlob("manifest")); - } - - @Override - public RepositoryLock 
lockRepository() throws IOException { - return new AzureRepositoryLockV8(getBlockBlob("repo.lock"), () -> { - log.warn("Lost connection to the Azure. The client will be closed."); - // TODO close the connection - }, writeAccessController).lock(); - } - - private CloudBlockBlob getBlockBlob(String path) throws IOException { - try { - return segmentstoreDirectory.getBlockBlobReference(path); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } - } - - private CloudAppendBlob getAppendBlob(String path) throws IOException { - try { - return segmentstoreDirectory.getAppendBlobReference(path); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } - } - - private static void attachRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) { - OperationContext.getGlobalRequestCompletedEventHandler().addListener(new StorageEvent<RequestCompletedEvent>() { - - @Override - public void eventOccurred(RequestCompletedEvent e) { - Date startDate = e.getRequestResult().getStartDate(); - Date stopDate = e.getRequestResult().getStopDate(); - - if (startDate != null && stopDate != null) { - long requestDuration = stopDate.getTime() - startDate.getTime(); - remoteStoreMonitor.requestDuration(requestDuration, TimeUnit.MILLISECONDS); - } - - Exception exception = e.getRequestResult().getException(); - - if (exception == null) { - remoteStoreMonitor.requestCount(); - } else { - remoteStoreMonitor.requestError(); - } - } - - }); - } - - public CloudBlobDirectory getSegmentstoreDirectory() { - return segmentstoreDirectory; - } - - public void setWriteAccessController(WriteAccessController writeAccessController) { - this.writeAccessController = writeAccessController; - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java deleted file mode 100644 index 9f58115125a..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.AccessCondition; -import com.microsoft.azure.storage.Constants; -import com.microsoft.azure.storage.RetryNoRetry; -import com.microsoft.azure.storage.StorageErrorCode; -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Set; - -public class AzureRepositoryLockV8 implements RepositoryLock { - - private static final Logger log = LoggerFactory.getLogger(AzureRepositoryLockV8.class); - - private static final int TIMEOUT_SEC = Integer.getInteger("oak.segment.azure.lock.timeout", 0); - private static final Integer LEASE_RENEWAL_TIMEOUT_MS = 5000; - - public static final String LEASE_DURATION_PROP = "oak.segment.azure.lock.leaseDurationInSec"; - private final int leaseDuration = Integer.getInteger(LEASE_DURATION_PROP, 60); - - public static final String RENEWAL_INTERVAL_PROP = "oak.segment.azure.lock.leaseRenewalIntervalInSec"; - private final int renewalInterval = Integer.getInteger(RENEWAL_INTERVAL_PROP, 5); - - public static final String TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP = "oak.segment.azure.lock.blockWritesAfterInSec"; - private final int timeToWaitBeforeWriteBlock = Integer.getInteger(TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, 20); - - private final Runnable shutdownHook; - - private final CloudBlockBlob blob; - - private final Thread refresherThread; - - private static final String REFRESHER_THREAD_NAME = "AzureRepositoryLock-Refresher"; - - private boolean inError; - - private final int timeoutSec; - - private WriteAccessController writeAccessController; - - private String leaseId; - - private volatile boolean doUpdate; - - public AzureRepositoryLockV8(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController) { - this(blob, shutdownHook, writeAccessController, TIMEOUT_SEC); - } - - public AzureRepositoryLockV8(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController, int timeoutSec) { - this.shutdownHook = shutdownHook; - this.blob = blob; - this.refresherThread = new Thread(this::refreshLease, REFRESHER_THREAD_NAME); - this.refresherThread.setDaemon(true); - this.timeoutSec = timeoutSec; - this.writeAccessController = writeAccessController; - - if (leaseDuration < timeToWaitBeforeWriteBlock || timeToWaitBeforeWriteBlock < renewalInterval) { - throw new IllegalStateException(String.format("The value of %s must be greater than %s and the value of %s must be greater than %s", - LEASE_DURATION_PROP, TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, RENEWAL_INTERVAL_PROP)); - } - } - - public AzureRepositoryLockV8 lock() throws IOException { - long start = System.currentTimeMillis(); - Exception ex = null; - do { - try { - blob.openOutputStream().close(); - - log.info("{} = {}", LEASE_DURATION_PROP, leaseDuration); - log.info("{} = {}", RENEWAL_INTERVAL_PROP, renewalInterval); - log.info("{} = {}", TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, timeToWaitBeforeWriteBlock); - - leaseId = blob.acquireLease(leaseDuration, null); - writeAccessController.enableWriting(); - log.info("Acquired lease {}", 
leaseId); - } catch (Exception e) { - if (ex == null) { - log.info("Can't acquire the lease. Retrying every 1s. Timeout is set to {}s.", timeoutSec); - } - ex = e; - if ((System.currentTimeMillis() - start) / 1000 < timeoutSec) { - try { - Thread.sleep(1000); - } catch (InterruptedException e1) { - throw new IOException(e1); - } - } else { - break; - } - } - } while (leaseId == null); - if (leaseId == null) { - log.error("Can't acquire the lease in {}s.", timeoutSec); - throw new IOException(ex); - } else { - refresherThread.start(); - return this; - } - } - - private void refreshLease() { - log.info("Starting the lease renewal loop"); - doUpdate = true; - long lastUpdate = 0; - setInError(false); - while (doUpdate) { - try { - long timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; - try { - if (timeSinceLastUpdate > renewalInterval) { - - BlobRequestOptions requestOptions = new BlobRequestOptions(); - requestOptions.setMaximumExecutionTimeInMs(LEASE_RENEWAL_TIMEOUT_MS); - requestOptions.setRetryPolicyFactory(new RetryNoRetry()); - blob.renewLease(AccessCondition.generateLeaseCondition(leaseId), requestOptions, null); - - writeAccessController.enableWriting(); - if (isInError()) { - log.info("Lease renewal successful again."); - setInError(false); - } - lastUpdate = System.currentTimeMillis(); - } - } catch (Exception e) { - timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; - - if (timeSinceLastUpdate > timeToWaitBeforeWriteBlock) { - writeAccessController.disableWriting(); - } - - if (e instanceof StorageException) { - StorageException storageException = (StorageException) e; - String errorCode = storageException.getErrorCode(); - if (errorCode != null && - Set.of(StorageErrorCodeStrings.OPERATION_TIMED_OUT - , StorageErrorCode.SERVICE_INTERNAL_ERROR - , StorageErrorCodeStrings.SERVER_BUSY - , StorageErrorCodeStrings.INTERNAL_ERROR).contains(errorCode)) { - log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e); - } else if (storageException.getHttpStatusCode() == Constants.HeaderConstants.HTTP_UNUSED_306) { - log.warn("Client side error. Retry in progress ...", e); - } else { - log.warn("Could not renew lease due to storage exception. Retry in progress ... ", e); - } - } else { - log.error("Can't renew the lease", e); - shutdownHook.run(); - doUpdate = false; - return; - } - } - waitABit(100); - } catch (Throwable t) { - if (!isInError()) { - log.error("Unexpected error in the lease renewal loop, trying to recover", t); - setInError(true); - } - waitABit(100); - } - } - log.info("Lease renewal loop exiting."); - } - - @Override - public void unlock() throws IOException { - doUpdate = false; - try { - refresherThread.join(60000); - } catch (InterruptedException e) { - throw new IOException(e); - } finally { - releaseLease(); - } - } - - private void releaseLease() throws IOException { - try { - blob.releaseLease(AccessCondition.generateLeaseCondition(leaseId)); - blob.delete(); - log.info("Released lease {}", leaseId); - leaseId = null; - } catch (StorageException e) { - throw new IOException(e); - } - } - - private void setInError(boolean inError) { - this.inError = inError; - refresherThread.setName(REFRESHER_THREAD_NAME + (inError ? 
"-InError" : "")); - } - - private boolean isInError() { - return inError; - } - - private void waitABit(long millis) { - try { - Thread.sleep(millis); - } catch (InterruptedException e) { - // ignore - } - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveReaderV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveReaderV8.java deleted file mode 100644 index de69711b6e0..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveReaderV8.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.readBufferFully; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.Map; -import java.util.UUID; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - -import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.AzureBlobMetadata; -import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveReader; -import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; - -public class AzureSegmentArchiveReaderV8 extends AbstractRemoteSegmentArchiveReader { - - private final CloudBlobDirectory archiveDirectory; - - private final long length; - - protected AzureSegmentArchiveReaderV8(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor) throws IOException { - super(ioMonitor); - this.archiveDirectory = archiveDirectory; - this.length = computeArchiveIndexAndLength(); - } - - @Override - public long length() { - return length; - } - - @Override - public String getName() { - return AzureUtilitiesV8.getName(archiveDirectory); - } - - @Override - protected long computeArchiveIndexAndLength() throws IOException { - long length = 0; - for (CloudBlob blob : AzureUtilitiesV8.getBlobs(archiveDirectory)) { - Map metadata = blob.getMetadata(); - if (AzureBlobMetadata.isSegment(metadata)) { - RemoteSegmentArchiveEntry indexEntry = AzureBlobMetadata.toIndexEntry(metadata, (int) blob.getProperties().getLength()); - index.put(new UUID(indexEntry.getMsb(), indexEntry.getLsb()), indexEntry); - } - length += blob.getProperties().getLength(); - } - - return length; - } - - @Override - protected void doReadSegmentToBuffer(String segmentFileName, Buffer buffer) throws IOException { - 
readBufferFully(getBlob(segmentFileName), buffer); - } - - @Override - protected Buffer doReadDataFile(String extension) throws IOException { - return readBlob(getName() + extension); - } - - @Override - protected File archivePathAsFile() { - return new File(archiveDirectory.getUri().getPath()); - } - - private CloudBlockBlob getBlob(String name) throws IOException { - try { - return archiveDirectory.getBlockBlobReference(name); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } - } - - private Buffer readBlob(String name) throws IOException { - try { - CloudBlockBlob blob = getBlob(name); - if (!blob.exists()) { - return null; - } - long length = blob.getProperties().getLength(); - Buffer buffer = Buffer.allocate((int) length); - AzureUtilitiesV8.readBufferFully(blob, buffer); - return buffer; - } catch (StorageException e) { - throw new IOException(e); - } - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java deleted file mode 100644 index cd3e15ca8ee..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.readBufferFully; -import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.getSegmentFileName; -import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.OFF_HEAP; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.NoSuchElementException; -import java.util.concurrent.TimeUnit; - -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - -import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.commons.time.Stopwatch; -import org.apache.jackrabbit.oak.segment.azure.AzureBlobMetadata; -import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.azure.util.Retrier; -import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveWriter; -import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; - -public class AzureSegmentArchiveWriterV8 extends AbstractRemoteSegmentArchiveWriter { - - private final CloudBlobDirectory archiveDirectory; - - private final Retrier retrier = Retrier.withParams( - Integer.getInteger("azure.segment.archive.writer.retries.max", 16), - Integer.getInteger("azure.segment.archive.writer.retries.intervalMs", 5000) - ); - - private final BlobRequestOptions writeOptimisedBlobRequestOptions; - - public AzureSegmentArchiveWriterV8(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) throws IOException { - super(ioMonitor, monitor); - this.archiveDirectory = archiveDirectory; - this.writeAccessController = writeAccessController; - this.writeOptimisedBlobRequestOptions = AzureRequestOptionsV8.optimiseForWriteOperations(archiveDirectory.getServiceClient().getDefaultRequestOptions()); - this.created = hasBlobs(); - } - - private boolean hasBlobs() throws IOException { - try { - return this.archiveDirectory.listBlobs().iterator().hasNext(); - } catch (StorageException | URISyntaxException | NoSuchElementException e) { - throw new IOException(e); - } - } - - @Override - public String getName() { - return AzureUtilitiesV8.getName(archiveDirectory); - } - - @Override - protected void doWriteArchiveEntry(RemoteSegmentArchiveEntry indexEntry, byte[] data, int offset, int size) throws IOException { - - writeAccessController.checkWritingAllowed(); - - long msb = indexEntry.getMsb(); - long lsb = indexEntry.getLsb(); - String segmentName = getSegmentFileName(indexEntry); - CloudBlockBlob blob = getBlob(segmentName); - ioMonitor.beforeSegmentWrite(new File(blob.getName()), msb, lsb, size); - Stopwatch stopwatch = Stopwatch.createStarted(); - try { - blob.setMetadata(AzureBlobMetadata.toSegmentMetadata(indexEntry)); - blob.uploadFromByteArray(data, offset, size, null, writeOptimisedBlobRequestOptions, null); - blob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { - throw new IOException(e); - } - ioMonitor.afterSegmentWrite(new File(blob.getName()), msb, lsb, size, 
stopwatch.elapsed(TimeUnit.NANOSECONDS)); - } - - @Override - protected Buffer doReadArchiveEntry(RemoteSegmentArchiveEntry indexEntry) throws IOException { - Buffer buffer; - if (OFF_HEAP) { - buffer = Buffer.allocateDirect(indexEntry.getLength()); - } else { - buffer = Buffer.allocate(indexEntry.getLength()); - } - readBufferFully(getBlob(getSegmentFileName(indexEntry)), buffer); - return buffer; - } - - @Override - protected void doWriteDataFile(byte[] data, String extension) throws IOException { - retrier.execute(() -> { - try { - writeAccessController.checkWritingAllowed(); - - getBlob(getName() + extension).uploadFromByteArray(data, 0, data.length, null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { - throw new IOException(e); - } - }); - } - - @Override - protected void afterQueueClosed() throws IOException { - retrier.execute(() -> { - try { - writeAccessController.checkWritingAllowed(); - - getBlob("closed").uploadFromByteArray(new byte[0], 0, 0, null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { - throw new IOException(e); - } - }); - } - - @Override - protected void afterQueueFlushed() { - // do nothing - } - - private CloudBlockBlob getBlob(String name) throws IOException { - try { - return archiveDirectory.getBlockBlobReference(name); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8.java deleted file mode 100644 index 1f110f9db8b..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.StorageCredentials; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.oak.segment.azure.Configuration; -import org.jetbrains.annotations.NotNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; - -public class AzureSegmentStoreV8 { - - private static final Logger log = LoggerFactory.getLogger(AzureSegmentStoreV8.class); - - public static final String DEFAULT_ENDPOINT_SUFFIX = "core.windows.net"; - - private static AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; - - public static AzurePersistenceV8 createAzurePersistenceFrom(Configuration configuration) throws IOException { - if (!StringUtils.isBlank(configuration.connectionURL())) { - return createPersistenceFromConnectionURL(configuration); - } - if (!StringUtils.isAnyBlank(configuration.clientId(), configuration.clientSecret(), configuration.tenantId())) { - return createPersistenceFromServicePrincipalCredentials(configuration); - } - if (!StringUtils.isBlank(configuration.sharedAccessSignature())) { - return createPersistenceFromSasUri(configuration); - } - return createPersistenceFromAccessKey(configuration); - } - - private static AzurePersistenceV8 createPersistenceFromAccessKey(Configuration configuration) throws IOException { - StringBuilder connectionString = new StringBuilder(); - connectionString.append("DefaultEndpointsProtocol=https;"); - connectionString.append("AccountName=").append(configuration.accountName()).append(';'); - connectionString.append("AccountKey=").append(configuration.accessKey()).append(';'); - if (!StringUtils.isBlank(configuration.blobEndpoint())) { - connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); - } - - return createAzurePersistence(connectionString.toString(), configuration, true); - } - - private static AzurePersistenceV8 createPersistenceFromSasUri(Configuration configuration) throws IOException { - StringBuilder connectionString = new StringBuilder(); - connectionString.append("DefaultEndpointsProtocol=https;"); - connectionString.append("AccountName=").append(configuration.accountName()).append(';'); - connectionString.append("SharedAccessSignature=").append(configuration.sharedAccessSignature()).append(';'); - if (!StringUtils.isBlank(configuration.blobEndpoint())) { - connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); - } - return createAzurePersistence(connectionString.toString(), configuration, false); - } - - @NotNull - private static AzurePersistenceV8 createPersistenceFromConnectionURL(Configuration configuration) throws IOException { - return createAzurePersistence(configuration.connectionURL(), configuration, true); - } - - @NotNull - private static AzurePersistenceV8 createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException { - azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); - StorageCredentials storageCredentialsToken = 
azureStorageCredentialManagerV8.getStorageCredentialAccessTokenFromServicePrincipals(configuration.accountName(), configuration.clientId(), configuration.clientSecret(), configuration.tenantId()); - - try { - CloudStorageAccount cloud = new CloudStorageAccount(storageCredentialsToken, true, DEFAULT_ENDPOINT_SUFFIX, configuration.accountName()); - return createAzurePersistence(cloud, configuration, true); - } catch (StorageException | URISyntaxException e) { - throw new IOException(e); - } - } - - @NotNull - private static AzurePersistenceV8 createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException { - try { - CloudStorageAccount cloud = CloudStorageAccount.parse(connectionString); - log.info("Connection string: '{}'", cloud); - return createAzurePersistence(cloud, configuration, createContainer); - } catch (StorageException | URISyntaxException | InvalidKeyException e) { - throw new IOException(e); - } - } - - @NotNull - private static AzurePersistenceV8 createAzurePersistence(CloudStorageAccount cloud, Configuration configuration, boolean createContainer) throws URISyntaxException, StorageException { - CloudBlobClient cloudBlobClient = cloud.createCloudBlobClient(); - BlobRequestOptions blobRequestOptions = new BlobRequestOptions(); - - if (configuration.enableSecondaryLocation()) { - blobRequestOptions.setLocationMode(LocationMode.PRIMARY_THEN_SECONDARY); - } - cloudBlobClient.setDefaultRequestOptions(blobRequestOptions); - - CloudBlobContainer container = cloudBlobClient.getContainerReference(configuration.containerName()); - if (createContainer && !container.exists()) { - container.create(); - } - String path = normalizePath(configuration.rootPath()); - return new AzurePersistenceV8(container.getDirectoryReference(path)); - } - - @NotNull - private static String normalizePath(@NotNull String rootPath) { - if (rootPath.length() > 0 && rootPath.charAt(0) == '/') { - return rootPath.substring(1); - } - return rootPath; - } - -} \ No newline at end of file diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureStorageCredentialManagerV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureStorageCredentialManagerV8.java deleted file mode 100644 index 6c92481979c..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureStorageCredentialManagerV8.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.azure.core.credential.AccessToken; -import com.azure.core.credential.TokenRequestContext; -import com.azure.identity.ClientSecretCredential; -import com.azure.identity.ClientSecretCredentialBuilder; -import com.microsoft.azure.storage.StorageCredentials; -import com.microsoft.azure.storage.StorageCredentialsAccountAndKey; -import com.microsoft.azure.storage.StorageCredentialsToken; -import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; -import org.apache.jackrabbit.oak.segment.azure.util.Environment; -import org.jetbrains.annotations.NotNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.time.LocalDateTime; -import java.time.OffsetDateTime; -import java.time.format.DateTimeFormatter; -import java.util.Objects; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_SECRET_KEY; -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; - -public class AzureStorageCredentialManagerV8 implements Closeable { - private static final Logger log = LoggerFactory.getLogger(AzureStorageCredentialManagerV8.class); - private static final String AZURE_DEFAULT_SCOPE = "https://storage.azure.com/.default"; - private static final long TOKEN_REFRESHER_INITIAL_DELAY = 45L; - private static final long TOKEN_REFRESHER_DELAY = 1L; - private ClientSecretCredential clientSecretCredential; - private AccessToken accessToken; - private StorageCredentialsToken storageCredentialsToken; - private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(); - - public StorageCredentials getStorageCredentialsFromEnvironment(@NotNull String accountName, @NotNull Environment environment) { - final String clientId = environment.getVariable(AZURE_CLIENT_ID); - final String clientSecret = environment.getVariable(AZURE_CLIENT_SECRET); - final String tenantId = environment.getVariable(AZURE_TENANT_ID); - - if (StringUtils.isNoneBlank(clientId, clientSecret, tenantId)) { - try { - return getStorageCredentialAccessTokenFromServicePrincipals(accountName, clientId, clientSecret, tenantId); - } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { - log.error("Error occurred while connecting to Azure Storage using service principals: ", e); - throw new IllegalArgumentException( - "Could not connect to the Azure Storage. Please verify if AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID environment variables are correctly set!"); - } - } - - log.warn("AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID environment variables empty or missing. Switching to authentication with AZURE_SECRET_KEY."); - - String key = environment.getVariable(AZURE_SECRET_KEY); - try { - return new StorageCredentialsAccountAndKey(accountName, key); - } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { - log.error("Error occurred while connecting to Azure Storage using secret key: ", e); - throw new IllegalArgumentException( - "Could not connect to the Azure Storage. 
Please verify if AZURE_SECRET_KEY environment variable is correctly set!"); - } - } - - public StorageCredentials getStorageCredentialAccessTokenFromServicePrincipals(String accountName, String clientId, String clientSecret, String tenantId) { - boolean isAccessTokenGenerated = false; - if (accessToken == null) { - clientSecretCredential = new ClientSecretCredentialBuilder() - .clientId(clientId) - .clientSecret(clientSecret) - .tenantId(tenantId) - .build(); - accessToken = clientSecretCredential.getTokenSync(new TokenRequestContext().addScopes(AZURE_DEFAULT_SCOPE)); - if (accessToken == null || StringUtils.isBlank(accessToken.getToken())) { - throw new IllegalArgumentException("Could not connect to azure storage, access token is null or empty"); - } - storageCredentialsToken = new StorageCredentialsToken(accountName, accessToken.getToken()); - isAccessTokenGenerated = true; - } - Objects.requireNonNull(storageCredentialsToken, "storageCredentialsToken cannot be null"); - - // start refresh token executor only when the access token is first generated - if (isAccessTokenGenerated) { - log.info("starting refresh token task at: {}", OffsetDateTime.now()); - TokenRefresher tokenRefresher = new TokenRefresher(); - executorService.scheduleWithFixedDelay(tokenRefresher, TOKEN_REFRESHER_INITIAL_DELAY, TOKEN_REFRESHER_DELAY, TimeUnit.MINUTES); - } - return storageCredentialsToken; - } - - /** - * This class represents a token refresher responsible for ensuring the validity of the access token used for azure AD authentication. - * The access token generated by the Azure client is valid for 1 hour only. Therefore, this class periodically checks the validity - * of the access token and refreshes it if necessary. The refresh is triggered when the current access token is about to expire, - * defined by a threshold of 5 minutes from the current time. This threshold is similar to what is being used in azure identity library to - * generate a new token - */ - private class TokenRefresher implements Runnable { - @Override - public void run() { - try { - log.debug("Checking for azure access token expiry at: {}", LocalDateTime.now()); - OffsetDateTime tokenExpiryThreshold = OffsetDateTime.now().plusMinutes(5); - if (accessToken.getExpiresAt() != null && accessToken.getExpiresAt().isBefore(tokenExpiryThreshold)) { - log.info("Access token is about to expire (5 minutes or less) at: {}. 
New access token will be generated", - accessToken.getExpiresAt().format(DateTimeFormatter.ISO_LOCAL_DATE_TIME)); - AccessToken newToken = clientSecretCredential.getTokenSync(new TokenRequestContext().addScopes(AZURE_DEFAULT_SCOPE)); - log.info("New azure access token generated at: {}", LocalDateTime.now()); - if (newToken == null || StringUtils.isBlank(newToken.getToken())) { - log.error("New access token is null or empty"); - return; - } - // update access token with newly generated token - accessToken = newToken; - storageCredentialsToken.updateToken(accessToken.getToken()); - } - } catch (Exception e) { - log.error("Error while acquiring new access token: ", e); - } - } - } - - @Override - public void close() { - new ExecutorCloser(executorService).close(); - log.info("Access token refresh executor shutdown completed"); - } -} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java deleted file mode 100644 index 2a95c41d0da..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.ResultContinuation; -import com.microsoft.azure.storage.ResultSegment; -import com.microsoft.azure.storage.StorageCredentials; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.StorageUri; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.LeaseStatus; -import com.microsoft.azure.storage.blob.ListBlobItem; -import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; -import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException; -import org.jetbrains.annotations.NotNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.OutputStream; -import java.net.URI; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.ArrayList; -import java.util.EnumSet; -import java.util.List; - -public final class AzureUtilitiesV8 { - - public static final String AZURE_ACCOUNT_NAME = "AZURE_ACCOUNT_NAME"; - public static final String AZURE_SECRET_KEY = "AZURE_SECRET_KEY"; - public static final String AZURE_TENANT_ID = "AZURE_TENANT_ID"; - public static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID"; - public static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET"; - - private static final Logger log = LoggerFactory.getLogger(AzureUtilitiesV8.class); - - private AzureUtilitiesV8() { - } - - public static String getName(CloudBlob blob) { - return AzureUtilities.getName(blob.getName()); - } - - public static String getName(CloudBlobDirectory directory) { - return AzureUtilities.getName(directory.getPrefix()); - } - - public static List<CloudBlob> getBlobs(CloudBlobDirectory directory) throws IOException { - List<CloudBlob> blobList = new ArrayList<>(); - ResultContinuation token = null; - do { - ResultSegment<ListBlobItem> result = listBlobsInSegments(directory, token); //get the blobs in pages of 5000 - for (ListBlobItem b : result.getResults()) { //add resultant blobs to list - if (b instanceof CloudBlob) { - CloudBlob cloudBlob = (CloudBlob) b; - blobList.add(cloudBlob); - } - } - token = result.getContinuationToken(); - } while (token != null); - return blobList; - } - - public static void readBufferFully(CloudBlob blob, Buffer buffer) throws IOException { - try { - blob.download(new ByteBufferOutputStream(buffer)); - buffer.flip(); - } catch (StorageException e) { - if (e.getHttpStatusCode() == 404) { - log.error("Blob not found in the remote repository: {}", blob.getName()); - throw new FileNotFoundException("Blob not found in the remote repository: " + blob.getName()); - } - throw new RepositoryNotReachableException(e); - } - } - - public static void deleteAllEntries(CloudBlobDirectory directory) throws IOException { - getBlobs(directory).forEach(b -> { - try { - b.deleteIfExists(); - } catch (StorageException e) { - log.error("Can't delete blob {}", b.getUri().getPath(), e); - } - }); - } - - public static CloudBlobDirectory cloudBlobDirectoryFrom(StorageCredentials credentials, - String uri, String dir) throws URISyntaxException, StorageException { - StorageUri storageUri = new StorageUri(new URI(uri)); -
CloudBlobContainer container = new CloudBlobContainer(storageUri, credentials); - - container.createIfNotExists(); - - return container.getDirectoryReference(dir); - } - - public static CloudBlobDirectory cloudBlobDirectoryFrom(String connection, String containerName, - String dir) throws InvalidKeyException, URISyntaxException, StorageException { - CloudStorageAccount cloud = CloudStorageAccount.parse(connection); - CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(containerName); - container.createIfNotExists(); - - return container.getDirectoryReference(dir); - } - - private static ResultSegment<ListBlobItem> listBlobsInSegments(CloudBlobDirectory directory, - ResultContinuation token) throws IOException { - ResultSegment<ListBlobItem> result = null; - IOException lastException = null; - for (int sleep = 10; sleep <= 10000; sleep *= 10) { //increment the sleep time in steps. - try { - result = directory.listBlobsSegmented( - null, - false, - EnumSet.of(BlobListingDetails.METADATA), - 5000, - token, - null, - null); - break; //we have the results, no need to retry - } catch (StorageException | URISyntaxException e) { - lastException = new IOException(e); - try { - Thread.sleep(sleep); //Sleep and retry - } catch (InterruptedException ex) { - log.warn("Interrupted", e); - } - } - } - - if (result == null) { - throw lastException; - } else { - return result; - } - } - - public static void deleteAllBlobs(@NotNull CloudBlobDirectory directory) throws URISyntaxException, StorageException, InterruptedException { - for (ListBlobItem blobItem : directory.listBlobs()) { - if (blobItem instanceof CloudBlob) { - CloudBlob cloudBlob = (CloudBlob) blobItem; - if (cloudBlob.getProperties().getLeaseStatus() == LeaseStatus.LOCKED) { - cloudBlob.breakLease(0); - } - cloudBlob.deleteIfExists(); - } else if (blobItem instanceof CloudBlobDirectory) { - CloudBlobDirectory cloudBlobDirectory = (CloudBlobDirectory) blobItem; - deleteAllBlobs(cloudBlobDirectory); - } - } - } - - private static class ByteBufferOutputStream extends OutputStream { - - @NotNull - private final Buffer buffer; - - public ByteBufferOutputStream(@NotNull Buffer buffer) { - this.buffer = buffer; - } - - @Override - public void write(int b) { - buffer.put((byte) b); - } - - @Override - public void write(@NotNull byte[] bytes, int offset, int length) { - buffer.put(bytes, offset, length); - } - } - -} - - diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/ReverseFileReaderV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/ReverseFileReaderV8.java deleted file mode 100644 index b81e28588b6..00000000000 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/ReverseFileReaderV8.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlob; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; - -import static java.lang.Math.min; - -public class ReverseFileReaderV8 { - - private static final int BUFFER_SIZE = 16 * 1024; - - private int bufferSize; - - private final CloudBlob blob; - - private byte[] buffer; - - private int bufferOffset; - - private int fileOffset; - - public ReverseFileReaderV8(CloudBlob blob) throws StorageException { - this (blob, BUFFER_SIZE); - } - - public ReverseFileReaderV8(CloudBlob blob, int bufferSize) throws StorageException { - this.blob = blob; - if (blob.exists()) { - this.fileOffset = (int) blob.getProperties().getLength(); - } else { - this.fileOffset = 0; - } - this.bufferSize = bufferSize; - } - - private void readBlock() throws IOException { - if (buffer == null) { - buffer = new byte[min(fileOffset, bufferSize)]; - } else if (fileOffset < buffer.length) { - buffer = new byte[fileOffset]; - } - - if (buffer.length > 0) { - fileOffset -= buffer.length; - try { - OperationContext opContext = new OperationContext(); - HashMap<String, String> userHeaders = new HashMap<>(); - userHeaders.put("If-Match", "*"); - opContext.setUserHeaders(userHeaders); - blob.downloadRangeToByteArray(fileOffset, Long.valueOf(buffer.length), buffer, 0, null, null, opContext); - } catch (StorageException e) { - throw new IOException(e); - } - } - bufferOffset = buffer.length; - } - - private String readUntilNewLine() { - if (bufferOffset == -1) { - return ""; - } - int stop = bufferOffset; - while (--bufferOffset >= 0) { - if (buffer[bufferOffset] == '\n') { - break; - } - } - // bufferOffset points either the previous '\n' character or -1 - return new String(buffer, bufferOffset + 1, stop - bufferOffset - 1, Charset.defaultCharset()); - } - - public String readLine() throws IOException { - if (bufferOffset == -1 && fileOffset == 0) { - return null; - } - - if (buffer == null) { - readBlock(); - } - - List<String> result = new ArrayList<>(1); - while (true) { - result.add(readUntilNewLine()); - if (bufferOffset > -1) { // stopped on the '\n' - break; - } - if (fileOffset == 0) { // reached the beginning of the file - break; - } - readBlock(); - } - Collections.reverse(result); - return String.join("", result); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java index 78a05b494f7..9c1e1319535 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java @@ -55,7 +55,6 @@ public class AzureSegmentStoreServiceTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private static String oldAzureV12SysPropertyValue; @Rule public final OsgiContext context = new OsgiContext(); @@ -78,8 +77,6 @@ public static void setupTest(){ READ_WRITE.setCreatePermission(true); READ_WRITE.setWritePermission(true);
READ_WRITE.setAddPermission(true); - oldAzureV12SysPropertyValue = System.getProperty(AzureSegmentStoreService.SEGMENT_AZURE_V_12_ENABLED); - System.setProperty(AzureSegmentStoreService.SEGMENT_AZURE_V_12_ENABLED, "true"); } @Before @@ -90,15 +87,6 @@ public void setup() throws Exception { } } - @AfterClass - public static void tearDown() { - if (oldAzureV12SysPropertyValue != null) { - System.setProperty(AzureSegmentStoreService.SEGMENT_AZURE_V_12_ENABLED, oldAzureV12SysPropertyValue); - } else { - System.clearProperty(AzureSegmentStoreService.SEGMENT_AZURE_V_12_ENABLED); - } - } - @Test public void connectWithSharedAccessSignatureURL_readOnly() throws Exception { String sasToken = container.generateSas(policy(READ_ONLY), null); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java deleted file mode 100644 index 74b2bdc37a9..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.segment.azure.fixture; - -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; -import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; -import org.apache.jackrabbit.oak.segment.file.FileStore; -import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; -import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.nio.file.Files; -import java.security.InvalidKeyException; -import java.util.HashMap; -import java.util.Map; -import java.util.UUID; - -public class SegmentAzureFixtureV8 extends NodeStoreFixture { - - private static final String AZURE_CONNECTION_STRING = System.getProperty("oak.segment.azure.connection", "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"); - - private static final String AZURE_CONTAINER = System.getProperty("oak.segment.azure.container", "oak"); - - private static final String AZURE_ROOT_PATH = System.getProperty("oak.segment.azure.rootPath", "/oak"); - - private Map<NodeStore, FileStore> fileStoreMap = new HashMap<>(); - - private Map<NodeStore, CloudBlobContainer> containerMap = new HashMap<>(); - - @Override - public NodeStore createNodeStore() { - AzurePersistenceV8 persistence; - CloudBlobContainer container; - try { - CloudStorageAccount cloud = CloudStorageAccount.parse(AZURE_CONNECTION_STRING); - - while (true) { - String containerName = AZURE_CONTAINER + "-" + UUID.randomUUID().toString(); - container = cloud.createCloudBlobClient().getContainerReference(containerName); - if (!container.exists()) { - container.create(); - break; - } - } - CloudBlobDirectory directory = container.getDirectoryReference(AZURE_ROOT_PATH); - persistence = new AzurePersistenceV8(directory); - } catch (StorageException | URISyntaxException | InvalidKeyException e) { - throw new RuntimeException(e); - } - - try { - FileStore fileStore = FileStoreBuilder.fileStoreBuilder( - Files.createTempDirectory(getClass().getSimpleName() + "-").toFile()).
- withCustomPersistence(persistence).build(); - NodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build(); - fileStoreMap.put(nodeStore, fileStore); - containerMap.put(nodeStore, container); - return nodeStore; - } catch (IOException | InvalidFileStoreVersionException e) { - throw new RuntimeException(e); - } - } - - public void dispose(NodeStore nodeStore) { - FileStore fs = fileStoreMap.remove(nodeStore); - if (fs != null) { - fs.close(); - } - try { - CloudBlobContainer container = containerMap.remove(nodeStore); - if (container != null) { - container.deleteIfExists(); - } - } catch (StorageException e) { - throw new RuntimeException(e); - } - } - - @Override - public String toString() { - return "SegmentAzure"; - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java deleted file mode 100644 index e3ef6bb28bd..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.journal.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.file.JournalReader; -import org.apache.jackrabbit.oak.segment.file.JournalReaderTest; -import org.apache.jackrabbit.oak.segment.azure.v8.AzureJournalFileV8; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.junit.Before; -import org.junit.ClassRule; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; - -public class AzureJournalReaderV8Test extends JournalReaderTest { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - } - - protected JournalReader createJournalReader(String s) throws IOException { - try { - CloudAppendBlob blob = container.getAppendBlobReference("journal/journal.log.001"); - blob.createOrReplace(); - blob.appendText(s); - return new JournalReader(new AzureJournalFileV8(container.getDirectoryReference("journal"), "journal.log", new WriteAccessController())); - } catch (StorageException | URISyntaxException e) { - throw new IOException(e); - } - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java deleted file mode 100644 index f304cfb0dfa..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.journal.v8; - -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.apache.jackrabbit.oak.segment.file.TarRevisionsTest; -import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; -import org.junit.Before; -import org.junit.ClassRule; - -import java.io.IOException; -import java.net.URISyntaxException; - -public class AzureTarRevisionsV8Test extends TarRevisionsTest { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - public void setup() throws Exception { - container = azurite.getContainer("oak-test"); - super.setup(); - } - - @Override - protected SegmentNodeStorePersistence getPersistence() throws IOException { - try { - return new AzurePersistenceV8(container.getDirectoryReference("oak")); - } catch (URISyntaxException e) { - throw new IOException(e); - } - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java deleted file mode 100644 index 10dd3d05a72..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.journal.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.azure.v8.ReverseFileReaderV8; -import org.junit.Assert; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Random; - -public class ReverseFileReaderV8Test { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - getBlob().createOrReplace(); - } - - private CloudAppendBlob getBlob() throws URISyntaxException, StorageException { - return container.getAppendBlobReference("test-blob"); - } - - @Test - public void testReverseReader() throws IOException, URISyntaxException, StorageException { - List<String> entries = createFile( 1024, 80); - ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 256); - assertEquals(entries, reader); - } - - @Test - public void testEmptyFile() throws IOException, URISyntaxException, StorageException { - List<String> entries = createFile( 0, 80); - ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 256); - assertEquals(entries, reader); - } - - @Test - public void test1ByteBlock() throws IOException, URISyntaxException, StorageException { - List<String> entries = createFile( 10, 16); - ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 1); - assertEquals(entries, reader); - } - - - private List<String> createFile(int lines, int maxLineLength) throws IOException, URISyntaxException, StorageException { - Random random = new Random(); - List<String> entries = new ArrayList<>(); - CloudAppendBlob blob = getBlob(); - for (int i = 0; i < lines; i++) { - int entrySize = random.nextInt(maxLineLength) + 1; - String entry = randomString(entrySize); - try { - blob.appendText(entry + '\n'); - } catch (StorageException e) { - throw new IOException(e); - } - entries.add(entry); - } - - entries.add(""); - Collections.reverse(entries); - return entries; - } - - private static void assertEquals(List<String> entries, ReverseFileReaderV8 reader) throws IOException { - int i = entries.size(); - for (String e : entries) { - Assert.assertEquals("line " + (--i), e, reader.readLine()); - } - Assert.assertNull(reader.readLine()); - } - - private static String randomString(int entrySize) { - Random r = new Random(); - - StringBuilder result = new StringBuilder(); - for (int i = 0; i < entrySize; i++) { - result.append((char) ('a' + r.nextInt('z' - 'a'))); - } - - return result.toString(); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java index c7f7f076066..e4c771d255a 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java @@ -18,11 +18,6 @@ */
package org.apache.jackrabbit.oak.segment.azure.tool; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE; import static org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.newFileStore; import static org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.newSegmentNodeStorePersistence; import static org.junit.Assert.assertEquals; @@ -31,26 +26,20 @@ import java.io.File; import java.io.IOException; import java.io.PrintWriter; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.time.Duration; -import java.time.Instant; +import java.time.OffsetDateTime; import java.util.Collections; -import java.util.Date; -import java.util.EnumSet; import java.util.List; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; -import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.sas.BlobSasPermission; +import com.azure.storage.blob.sas.BlobServiceSasSignatureValues; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.commons.Buffer; import org.apache.jackrabbit.oak.segment.SegmentCache; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; -import org.apache.jackrabbit.oak.segment.azure.tool.SegmentCopy; +import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType; import org.apache.jackrabbit.oak.segment.file.FileStore; @@ -71,7 +60,6 @@ import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.jetbrains.annotations.NotNull; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -81,7 +69,7 @@ public abstract class SegmentCopyTestBase { private static final String AZURE_DIRECTORY = "repository"; private static final String AZURE_CONTAINER = "oak-test"; - private static final EnumSet<SharedAccessBlobPermissions> READ_WRITE = EnumSet.of(READ, LIST, CREATE, WRITE, ADD); + private static final BlobSasPermission READ_WRITE = new BlobSasPermission().setReadPermission(true).setListPermission(true).setCreatePermission(true).setWritePermission(true).setAddPermission(true); @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); @@ -234,7 +222,7 @@ protected SegmentNodeStorePersistence getTarPersistence() { } protected SegmentNodeStorePersistence getAzurePersistence() throws Exception { - return new AzurePersistenceV8(azurite.getContainer(AZURE_CONTAINER).getDirectoryReference(AZURE_DIRECTORY)); + return new AzurePersistence(azurite.getReadBlobContainerClient(AZURE_CONTAINER), AZURE_DIRECTORY); } protected String getTarPersistencePathOrUri() { @@ -256,9 +244,8 @@ protected String
getAzurePersistencePathOrUriSas() { String sasToken; try { - sasToken = azurite.getContainer(AZURE_CONTAINER) - .generateSharedAccessSignature(policy(READ_WRITE), null); - } catch (StorageException | InvalidKeyException | URISyntaxException e) { + sasToken = azurite.getReadBlobContainerClient(AZURE_CONTAINER).generateSas(new BlobServiceSasSignatureValues(OffsetDateTime.now().plusDays(1), READ_WRITE)); + } catch (BlobStorageException e) { throw new RuntimeException("Error while accessing container ", e); } @@ -270,12 +257,4 @@ protected String getAzurePersistencePathOrUriSas() { return uri.toString(); } - - @NotNull - private static SharedAccessBlobPolicy policy(EnumSet<SharedAccessBlobPermissions> permissions) { - SharedAccessBlobPolicy sharedAccessBlobPolicy = new SharedAccessBlobPolicy(); - sharedAccessBlobPolicy.setPermissions(permissions); - sharedAccessBlobPolicy.setSharedAccessExpiryTime(Date.from(Instant.now().plus(Duration.ofDays(7)))); - return sharedAccessBlobPolicy; - } } \ No newline at end of file diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java deleted file mode 100644 index 0f9a825da8a..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.apache.jackrabbit.oak.segment.azure.util; - -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import org.junit.Before; -import org.junit.Test; - -import java.util.concurrent.TimeUnit; - -import static org.junit.Assert.assertEquals; - -public class AzureRequestOptionsV8Test { - - private BlobRequestOptions blobRequestOptions; - - @Before - public void setUp() { - blobRequestOptions = new BlobRequestOptions(); - } - - @Test - public void testApplyDefaultRequestOptions() { - AzureRequestOptionsV8.applyDefaultRequestOptions(blobRequestOptions); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(blobRequestOptions.getMaximumExecutionTimeInMs())); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(blobRequestOptions.getTimeoutIntervalInMs())); - } - - @Test - public void testApplyDefaultRequestOptionsWithCustomTimeouts() { - System.setProperty(AzureRequestOptionsV8.TIMEOUT_EXECUTION_PROP, "10"); - System.setProperty(AzureRequestOptionsV8.TIMEOUT_INTERVAL_PROP, "5"); - - AzureRequestOptionsV8.applyDefaultRequestOptions(blobRequestOptions); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(10)), Long.valueOf(blobRequestOptions.getMaximumExecutionTimeInMs())); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(5)), Long.valueOf(blobRequestOptions.getTimeoutIntervalInMs())); - - System.clearProperty(AzureRequestOptionsV8.TIMEOUT_EXECUTION_PROP); - System.clearProperty(AzureRequestOptionsV8.TIMEOUT_INTERVAL_PROP); - } - - @Test - public void testOptimiseForWriteOperations() { - BlobRequestOptions writeBlobRequestoptions = AzureRequestOptionsV8.optimiseForWriteOperations(blobRequestOptions); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(writeBlobRequestoptions.getMaximumExecutionTimeInMs())); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(writeBlobRequestoptions.getTimeoutIntervalInMs())); - } - - @Test - public void testOptimiseForWriteOperationsWithCustomTimeouts() { - System.setProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_EXECUTION_PROP, "10"); - System.setProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_INTERVAL_PROP, "5"); - - BlobRequestOptions writeBlobRequestoptions = AzureRequestOptionsV8.optimiseForWriteOperations(blobRequestOptions); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(10)), Long.valueOf(writeBlobRequestoptions.getMaximumExecutionTimeInMs())); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(5)), Long.valueOf(writeBlobRequestoptions.getTimeoutIntervalInMs())); - - System.clearProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_EXECUTION_PROP); - System.clearProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_INTERVAL_PROP); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java deleted file mode 100644 index eba893de2d0..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java +++ /dev/null @@ -1,650 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.ListBlobItem; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.SegmentId; -import org.apache.jackrabbit.oak.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.SegmentNotFoundException; -import org.apache.jackrabbit.oak.segment.file.FileStore; -import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; -import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore; -import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.AbstractPersistentCache; -import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.CachingPersistence; -import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.PersistentCache; -import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.*; -import org.junit.contrib.java.lang.system.ProvideSystemProperty; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.ArrayList; -import 
java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeoutException; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.core.IsNot.not; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class AzureArchiveManagerV8Test { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private CloudBlobContainer container; - - private AzurePersistenceV8 azurePersistenceV8; - private WriteAccessController writeAccessController; - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - - writeAccessController = new WriteAccessController(); - writeAccessController.enableWriting(); - azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); - azurePersistenceV8.setWriteAccessController(writeAccessController); - } - - @Rule - public final ProvideSystemProperty systemPropertyRule = new ProvideSystemProperty(AzureRepositoryLockV8.LEASE_DURATION_PROP, "15") - .and(AzureRepositoryLockV8.RENEWAL_INTERVAL_PROP, "3") - .and(AzureRepositoryLockV8.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, "9"); - - @Test - public void testRecovery() throws StorageException, URISyntaxException, IOException { - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - SegmentArchiveWriter writer = manager.create("data00000a.tar"); - - List uuids = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - UUID u = UUID.randomUUID(); - writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); - uuids.add(u); - } - - writer.flush(); - writer.close(); - - container.getBlockBlobReference("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); - - LinkedHashMap recovered = new LinkedHashMap<>(); - manager.recoverEntries("data00000a.tar", recovered); - assertEquals(uuids.subList(0, 5), new ArrayList<>(recovered.keySet())); - } - - @Test - public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxException, IOException { - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - SegmentArchiveWriter writer = manager.create("data00000a.tar"); - - List uuids = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - UUID u = UUID.randomUUID(); - writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); - uuids.add(u); - } - - writer.flush(); - writer.close(); - - container.getBlockBlobReference("oak/data00000a.tar/0005." 
+ uuids.get(5).toString()).delete(); - - LinkedHashMap recovered = new LinkedHashMap<>(); - manager.recoverEntries("data00000a.tar", recovered); - - manager.backup("data00000a.tar", "data00000a.tar.bak", recovered.keySet()); - - for (int i = 0; i <= 4; i++) { - assertTrue(container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); - } - - for (int i = 5; i <= 9; i++) { - assertFalse(String.format("Segment %s.??? should have been deleted.", "oak/data00000a.tar/000"+ i), container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); - } - } - - @Test - public void testUncleanStop() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); - FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); - NodeBuilder builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo", "bar"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fs.close(); - - container.getBlockBlobReference("oak/data00000a.tar/closed").delete(); - container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.brf").delete(); - container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.gph").delete(); - - fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); - assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); - fs.close(); - } - - @Test - // see OAK-8566 - public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); - FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); - NodeBuilder builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo", "bar"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fs.close(); - - // make sure there are 2 archives - fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); - builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo2", "bar2"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fs.close(); - - // remove the segment 0000 from the second archive - ListBlobItem segment0000 = container.listBlobs("oak/data00001a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); - container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); - - fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); - assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); - fs.close(); - } - - @Test - public void testUncleanStopSegmentMissing() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); - FileStore fs = 
FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); - NodeBuilder builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo", "bar"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fs.close(); - - // make sure there are 2 archives - fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); - builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo0", "bar0"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fs.flush(); - //create segment 0001 - builder.setProperty("foo1", "bar1"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fs.flush(); - //create segment 0002 - builder.setProperty("foo2", "bar2"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fs.flush(); - //create segment 0003 - builder.setProperty("foo3", "bar3"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fs.flush(); - fs.close(); - - // remove the segment 0002 from the second archive - ListBlobItem segment0002 = container.listBlobs("oak/data00001a.tar/0002.").iterator().next(); - ((CloudBlob) segment0002).delete(); - container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); - - fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); - assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); - - //recovered archive data00001a.tar should not contain segments 0002 and 0003 - assertFalse(container.listBlobs("oak/data00001a.tar/0002.").iterator().hasNext()); - assertFalse(container.listBlobs("oak/data00001a.tar/0003.").iterator().hasNext()); - - assertTrue("Backup directory should have been created", container.listBlobs("oak/data00001a.tar.bak").iterator().hasNext()); - //backup has all segments but 0002 since it was deleted before recovery - assertTrue(container.listBlobs("oak/data00001a.tar.bak/0001.").iterator().hasNext()); - assertFalse(container.listBlobs("oak/data00001a.tar.bak/0002.").iterator().hasNext()); - assertTrue(container.listBlobs("oak/data00001a.tar.bak/0003.").iterator().hasNext()); - - //verify content from recovered segments preserved - assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1")); - //content from deleted segments not preserved - assertNull(segmentNodeStore.getRoot().getString("foo2")); - assertNull(segmentNodeStore.getRoot().getString("foo3")); - fs.close(); - } - - @Test - public void testExists() throws IOException, URISyntaxException { - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - SegmentArchiveWriter writer = manager.create("data00000a.tar"); - - List uuids = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - UUID u = UUID.randomUUID(); - writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); - uuids.add(u); - } - - writer.flush(); - writer.close(); - - Assert.assertTrue(manager.exists("data00000a.tar")); - Assert.assertFalse(manager.exists("data00001a.tar")); - } - - @Test - public void testArchiveExistsAfterFlush() throws URISyntaxException, IOException { - SegmentArchiveManager 
manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - SegmentArchiveWriter writer = manager.create("data00000a.tar"); - - Assert.assertFalse(manager.exists("data00000a.tar")); - UUID u = UUID.randomUUID(); - writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); - writer.flush(); - Assert.assertTrue(manager.exists("data00000a.tar")); - } - - @Test(expected = FileNotFoundException.class) - public void testSegmentDeletedAfterCreatingReader() throws IOException, URISyntaxException, StorageException { - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - SegmentArchiveWriter writer = manager.create("data00000a.tar"); - - Assert.assertFalse(manager.exists("data00000a.tar")); - UUID u = UUID.randomUUID(); - writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); - writer.flush(); - writer.close(); - - SegmentArchiveReader reader = manager.open("data00000a.tar"); - Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); - assertNotNull(segment); - - ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); - - try { - // FileNotFoundException should be thrown here - reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); - fail(); - } catch (RepositoryNotReachableException e) { - fail(); - } - } - - @Test(expected = SegmentNotFoundException.class) - public void testMissngSegmentDetectedInFileStore() throws IOException, StorageException, URISyntaxException, InvalidFileStoreVersionException { - - AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); - FileStore fileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(azurePersistenceV8).build(); - - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - SegmentArchiveWriter writer = manager.create("data00000a.tar"); - - //Assert.assertFalse(manager.exists("data00000a.tar")); - UUID u = UUID.randomUUID(); - writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); - writer.flush(); - writer.close(); - - SegmentArchiveReader reader = manager.open("data00000a.tar"); - Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); - assertNotNull(segment); - - ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); - - // SegmentNotFoundException should be thrown here - fileStore.readSegment(new SegmentId(fileStore, u.getMostSignificantBits(), u.getLeastSignificantBits())); - } - - @Test - public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); - FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build(); - SegmentNodeStore segmentNodeStore = 
SegmentNodeStoreBuilders.builder(rwFileStore).build(); - NodeBuilder builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo", "bar"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - rwFileStore.flush(); - - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); - - // create read-only FS - AzurePersistenceV8 roPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); - ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly(); - - PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() - .getRoot() - .getProperty("foo"); - assertThat(fooProperty, not(nullValue())); - assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); - - roFileStore.close(); - rwFileStore.close(); - - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - // after creating a read-only FS, the recovery procedure should not be started since there is another running Oak process - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); - } - - @Test - public void testCachingPersistenceTarRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); - FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(rwPersistence).build(); - SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); - NodeBuilder builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo", "bar"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - rwFileStore.flush(); - - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); - - // create files store with split persistence - AzurePersistenceV8 azureSharedPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); - - CachingPersistence cachingPersistence = new CachingPersistence(createPersistenceCache(), azureSharedPersistence); - File localFolder = folder.newFolder(); - SegmentNodeStorePersistence localPersistence = new TarPersistence(localFolder); - SegmentNodeStorePersistence splitPersistence = new SplitPersistence(cachingPersistence, localPersistence); - - // exception should not be thrown here - FileStore splitPersistenceFileStore = FileStoreBuilder.fileStoreBuilder(localFolder).withCustomPersistence(splitPersistence).build(); - - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - // after creating a read-only FS, the recovery procedure should not be started since there is another running Oak process - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); - } - - @Test - public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistenceV8 rwPersistence = new 
AzurePersistenceV8(container.getDirectoryReference("oak")); - try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { - SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); - NodeBuilder builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo", "bar"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - rwFileStore.flush(); - - // file with binary references is not created yet - assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); - - // create read-only FS, while the rw FS is still open - AzurePersistenceV8 roPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); - try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { - - PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() - .getRoot() - .getProperty("foo"); - - assertThat(fooProperty, not(nullValue())); - assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); - - assertDoesNotThrow(() -> roFileStore.collectBlobReferences(s -> { - })); - } - } - } - - @Test - public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); - try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { - SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); - NodeBuilder builder = segmentNodeStore.getRoot().builder(); - builder.setProperty("foo", "bar"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - rwFileStore.flush(); - - // file with binary references is not created yet - assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); - - // create read-only FS, while the rw FS is still open - AzurePersistenceV8 roPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); - try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { - - PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() - .getRoot() - .getProperty("foo"); - - assertThat(fooProperty, not(nullValue())); - assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); - - HashSet references = new HashSet<>(); - assertDoesNotThrow(() -> - roFileStore.collectBlobReferences(references::add)); - - assertTrue("No references should have been collected since reference file has not been created", references.isEmpty()); - } - } - } - - @Test - public void testWriteAfterLosingRepoLock() throws Exception { - CloudBlobDirectory oakDirectory = container.getDirectoryReference("oak"); - AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(oakDirectory); - - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - - CloudBlockBlob blobMocked = Mockito.spy(blob); - - Mockito - .doCallRealMethod() - .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); - - AzurePersistenceV8 
mockedRwPersistence = Mockito.spy(rwPersistence); - AzureRepositoryLockV8 azureRepositoryLockV8 = new AzureRepositoryLockV8(blobMocked, () -> {}, writeAccessController); - AzureArchiveManagerV8 azureArchiveManagerV8 = new AzureArchiveManagerV8(oakDirectory, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController); - - - Mockito - .doAnswer(invocation -> azureRepositoryLockV8.lock()) - .when(mockedRwPersistence).lockRepository(); - - Mockito - .doReturn(azureArchiveManagerV8) - .when(mockedRwPersistence).createArchiveManager(Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any(), Mockito.any()); - Mockito - .doReturn(new AzureJournalFileV8(oakDirectory, "journal.log", writeAccessController)) - .when(mockedRwPersistence).getJournalFile(); - - FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(mockedRwPersistence).build(); - SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); - NodeBuilder builder = segmentNodeStore.getRoot().builder(); - - - // simulate operation timeout when trying to renew lease - Mockito.reset(blobMocked); - - StorageException storageException = - new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); - - Mockito.doThrow(storageException).when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); - - - // wait till lease expires - Thread.sleep(17000); - - // try updating repository - Thread thread = new Thread(() -> { - try { - builder.setProperty("foo", "bar"); - segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - rwFileStore.flush(); - } catch (Exception e) { - fail("No Exception expected, but got: " + e.getMessage()); - } - }); - thread.start(); - - Thread.sleep(2000); - - // It should be possible to start another RW file store. 
- FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistenceV8(oakDirectory)).build(); - SegmentNodeStore segmentNodeStore2 = SegmentNodeStoreBuilders.builder(rwFileStore2).build(); - NodeBuilder builder2 = segmentNodeStore2.getRoot().builder(); - - //repository hasn't been updated - assertNull(builder2.getProperty("foo")); - - rwFileStore2.close(); - } - - @Test - public void testListArchivesDoesNotReturnDeletedArchive() throws IOException, URISyntaxException, StorageException { - // The archive manager should not return the archive which has "deleted" marker - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - - // Create an archive - createArchive(manager, "data00000a.tar"); - - // Verify the archive is listed - List archives = manager.listArchives(); - assertTrue("Archive should be listed before deletion", archives.contains("data00000a.tar")); - - // Upload deleted marker for the archive - CloudBlobDirectory archiveDirectory = container.getDirectoryReference("oak/data00000a.tar"); - archiveDirectory.getBlockBlobReference("deleted").openOutputStream().close(); - - // Verify the archive is no longer listed after adding deleted marker - archives = manager.listArchives(); - assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); - } - - @Test - public void testListArchiveWithDeleteMarkerPresentWithWriteAccess() throws Exception{ - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - - createArchive(manager, "data00000a.tar"); - - // Upload deleted marker for the archive - CloudBlobDirectory archiveDirectory = container.getDirectoryReference("oak/data00000a.tar"); - archiveDirectory.getBlockBlobReference("deleted").openOutputStream().close(); - - List archives = manager.listArchives(); - assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); - - assertFalse("Archive should be deleted", container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - } - - - @Test - public void testListArchiveWithDeleteMarkerPresentAndNoWriteAccess() throws Exception{ - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - - createArchive(manager, "data00000a.tar"); - - // Upload deleted marker for the archive - CloudBlobDirectory archiveDirectory = container.getDirectoryReference("oak/data00000a.tar"); - archiveDirectory.getBlockBlobReference("deleted").openOutputStream().close(); - - writeAccessController.disableWriting(); - - List archives = manager.listArchives(); - assertFalse("Archive should not be listed after deleted marker is uploaded", archives.contains("data00000a.tar")); - - assertTrue("Archive should not be deleted", container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - } - - private static void createArchive(SegmentArchiveManager manager, String archiveName) throws IOException { - SegmentArchiveWriter writer = manager.create(archiveName); - UUID u = UUID.randomUUID(); - writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); - 
writer.flush(); - writer.close(); - } - - private PersistentCache createPersistenceCache() { - return new AbstractPersistentCache() { - @Override - protected Buffer readSegmentInternal(long msb, long lsb) { - return null; - } - - @Override - public boolean containsSegment(long msb, long lsb) { - return false; - } - - @Override - public void writeSegment(long msb, long lsb, Buffer buffer) { - - } - - @Override - public void cleanUp() { - - } - }; - } - - private static void assertDoesNotThrow(Executable executable) { - try { - executable.execute(); - } catch (Exception e) { - fail("No Exception expected, but got: " + e.getMessage()); - } - } - - interface Executable { - void execute() throws Exception; - } -}
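AzureArchiveManagerV8Test, deleted above, drove recovery and backup by deleting and listing raw segment blobs through the v8 CloudBlobContainer API. The corresponding v12 calls on the BlobContainerClient that the new AzurePersistence builds on look roughly like this (illustrative only; class name, connection string, and segment path are placeholders):

import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.BlobContainerClientBuilder;
import com.azure.storage.blob.models.BlobItem;

public class SegmentBlobSketch {
    public static void main(String[] args) {
        BlobContainerClient container = new BlobContainerClientBuilder()
                .connectionString("UseDevelopmentStorage=true")
                .containerName("oak-test")
                .buildClient();
        // v12 counterpart of container.getBlockBlobReference("oak/...").delete():
        container.getBlobClient("oak/data00000a.tar/0005.placeholder-uuid").deleteIfExists();
        // v12 counterpart of container.listBlobs("oak/data00000a.tar/"):
        for (BlobItem item : container.listBlobsByHierarchy("oak/data00000a.tar/")) {
            System.out.println(item.getName());
        }
    }
}

diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java deleted file mode 100644 index f431ea194b3..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.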
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.apache.jackrabbit.oak.segment.file.GcJournalTest; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Test; - -import java.net.URISyntaxException; -import java.security.InvalidKeyException; - -public class AzureGCJournalV8Test extends GcJournalTest { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - } - - @Override - protected SegmentNodeStorePersistence getPersistence() throws Exception { - return new AzurePersistenceV8(container.getDirectoryReference("oak")); - } - - @Test - @Ignore - @Override - public void testReadOak16GCLog() throws Exception { - super.testReadOak16GCLog(); - } - - @Test - @Ignore - @Override - public void testUpdateOak16GCLog() throws Exception { - super.testUpdateOak16GCLog(); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java deleted file mode 100644 index 6344b390bb3..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter; -import org.junit.AfterClass; -import org.junit.Assume; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; - -public class AzureJournalFileV8ConcurrencyIT { - private static final Logger log = LoggerFactory.getLogger(AzureJournalFileV8ConcurrencyIT.class); - - private static CloudBlobContainer container; - - private static int suffix; - - private AzurePersistenceV8 persistence; - - @BeforeClass - public static void connectToAzure() throws URISyntaxException, InvalidKeyException, StorageException { - String azureConnectionString = System.getenv("AZURE_CONNECTION"); - Assume.assumeNotNull(azureConnectionString); - CloudBlobClient client = CloudStorageAccount.parse(azureConnectionString).createCloudBlobClient(); - container = client.getContainerReference("oak-test-" + System.currentTimeMillis()); - container.createIfNotExists(); - suffix = 1; - } - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException, IOException, InterruptedException { - persistence = new AzurePersistenceV8(container.getDirectoryReference("oak-" + (suffix++))); - writeJournalLines(300, 0); - log.info("Finished writing initial content to journal!"); - } - - @AfterClass - public static void cleanupContainer() throws StorageException { - if (container != null) { - container.deleteIfExists(); - } - } - - @Test - public void testConcurrency() throws Exception { - AtomicBoolean stop = new AtomicBoolean(); - AtomicReference exContainer = new AtomicReference<>(); - - Thread producer = new Thread(() -> { - try { - while (!stop.get()) { - writeJournalLines(300, 100); - } - } catch(Exception e) { - exContainer.set(e); - stop.set(true); - } - }); - - Thread consumer = new Thread(() -> { - try { - while (!stop.get()) { - readJournal(); - } - } catch (IOException e) { - exContainer.set(e); - stop.set(true); - } - }); - - producer.start(); - consumer.start(); - - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start < 30_000 && !stop.get()) { - Thread.sleep(100); - } - stop.set(true); - - producer.join(); - consumer.join(); - - if (exContainer.get() != null) { - throw exContainer.get(); - } - } - - private void readJournal() throws IOException { - JournalFile file = persistence.getJournalFile(); - try (JournalFileReader reader = file.openJournalReader()) { - String line = null; - while ((line = reader.readLine()) != null) { - log.info(line); - } - } - } - - private void writeJournalLines(int lines, int delayMillis) throws IOException, InterruptedException { - JournalFile file = persistence.getJournalFile(); - try (JournalFileWriter writer = file.openJournalWriter()) { - for (int i = 0; i < lines; i++) { - 
writer.writeLine(String.format("%4X - %s", i, UUID.randomUUID().toString())); - Thread.sleep(delayMillis); - } - } - } - -} \ No newline at end of file diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java deleted file mode 100644 index f0d2a19c9b2..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.ListBlobItem; -import java.util.stream.IntStream; -import org.apache.commons.lang3.time.StopWatch; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter; -import org.apache.jackrabbit.oak.commons.collections.ListUtils; -import org.jetbrains.annotations.NotNull; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.ArrayList; -import java.util.List; - -import static java.util.stream.Collectors.toList; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class AzureJournalFileV8Test { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - private AzureJournalFileV8 journal; - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - WriteAccessController writeAccessController = new WriteAccessController(); - writeAccessController.enableWriting(); - journal = new AzureJournalFileV8(container.getDirectoryReference("journal"), "journal.log", writeAccessController, 50); - } - - @Test - public void testSplitJournalFiles() throws IOException, URISyntaxException, StorageException { - assertFalse(journal.exists()); - - int index = 0; - index = writeNLines(index, 10); // 10 - assertTrue(journal.exists()); - assertEquals(1, countJournalBlobs()); - - index = writeNLines(index, 20); // 30 - assertEquals(1, countJournalBlobs()); - - 
index = writeNLines(index, 30); // 60 - assertEquals(2, countJournalBlobs()); - - index = writeNLines(index, 100); // 160 - assertEquals(4, countJournalBlobs()); - - assertJournalEntriesCount(index); - } - - private int countJournalBlobs() throws URISyntaxException, StorageException { - List result = new ArrayList<>(); - for (ListBlobItem b : container.getDirectoryReference("journal").listBlobs("journal.log")) { - if (b instanceof CloudAppendBlob) { - result.add((CloudAppendBlob) b); - } - } - return result.size(); - } - - private int writeNLines(int index, int n) throws IOException { - try (JournalFileWriter writer = journal.openJournalWriter()) { - for (int i = 0; i < n; i++) { - writer.writeLine("line " + (index++)); - } - } - return index; - } - - @Test - public void testTruncateJournalFile() throws IOException { - assertFalse(journal.exists()); - - List lines = buildLines(0, 100); - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.batchWriteLines(lines); - } - - assertTrue(journal.exists()); - assertJournalEntriesCount(100); - - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.truncate(); - } - - assertTrue(journal.exists()); - assertJournalEntriesCount(0); - } - - @Test - public void testBatchWriteLines() throws IOException { - List lines = buildLines(0, 5000); - - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.batchWriteLines(lines); - } - - List entries = readEntriesFromJournal(); - assertEquals(lines, ListUtils.reverse(entries)); - } - - @Test - public void testEnsureBatchWriteLinesIsFasterThanNaiveImplementation() throws IOException { - List lines = buildLines(0, 100); - - StopWatch watchNaiveImpl = StopWatch.createStarted(); - try (JournalFileWriter writer = journal.openJournalWriter()) { - // Emulating previous naive implementation of 'batchWriteLines', which simply delegated to 'writeLine()' - for (String line : lines) { - writer.writeLine(line); - } - } - watchNaiveImpl.stop(); - - StopWatch watchOptimizedImpl = StopWatch.createStarted(); - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.batchWriteLines(lines); - } - watchOptimizedImpl.stop(); - long optimizedImplTime = watchOptimizedImpl.getTime(); - long naiveImplTime = watchNaiveImpl.getTime(); - assertTrue("batchWriteLines() should be significantly faster (>10x) than the naive implementation, but took " - + optimizedImplTime + "ms while naive implementation took " + naiveImplTime + "ms", optimizedImplTime < naiveImplTime / 10); - } - - @Test - public void testBatchWriteLines_splitJournalFile() throws Exception { - assertFalse(journal.exists()); - - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.batchWriteLines(buildLines(0, 30)); // 30 - } - assertTrue(journal.exists()); - assertEquals(1, countJournalBlobs()); - - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.batchWriteLines(buildLines(30, 40)); // 70 - } - assertEquals(2, countJournalBlobs()); - - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.batchWriteLines(buildLines(70, 30)); // 100 - } - assertEquals(2, countJournalBlobs()); - - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.batchWriteLines(buildLines(100, 1)); // 101 - } - assertEquals(3, countJournalBlobs()); - - try (JournalFileWriter writer = journal.openJournalWriter()) { - writer.batchWriteLines(buildLines(101, 100)); // 201 - } - assertEquals(5, countJournalBlobs()); - - assertJournalEntriesCount(201); - } - - 
private void assertJournalEntriesCount(int index) throws IOException { - List<String> entries = readEntriesFromJournal(); - assertEquals(buildLines(0, index), ListUtils.reverse(entries)); - } - - @NotNull - private static List<String> buildLines(int start, int count) { - return IntStream.range(start, count + start) - .mapToObj(i -> "line " + i) - .collect(toList()); - } - - @NotNull - private List<String> readEntriesFromJournal() throws IOException { - List<String> result = new ArrayList<>(); - try (JournalFileReader reader = journal.openJournalReader()) { - String entry; - while ((entry = reader.readLine()) != null) { - result.add(entry); - } - } - return result; - } -}
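The journal tests deleted above exercised AzureJournalFileV8, which splits journal.log across CloudAppendBlob chunks. The v12 SDK exposes the same storage primitive through AppendBlobClient; the following standalone sketch of appending a journal line is illustrative only (class name, blob name, and connection string are placeholders):

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.azure.storage.blob.BlobContainerClientBuilder;
import com.azure.storage.blob.specialized.AppendBlobClient;

public class JournalAppendSketch {
    public static void main(String[] args) {
        AppendBlobClient journal = new BlobContainerClientBuilder()
                .connectionString("UseDevelopmentStorage=true")
                .containerName("oak-test")
                .buildClient()
                .getBlobClient("journal/journal.log.001")
                .getAppendBlobClient();
        // Append blobs must be created before the first append.
        journal.createIfNotExists();
        byte[] line = "line 0\n".getBytes(StandardCharsets.UTF_8);
        // appendBlock is the v12 counterpart of CloudAppendBlob appends.
        journal.appendBlock(new ByteArrayInputStream(line), line.length);
    }
}

diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8Test.java deleted file mode 100644 index 8dc01a2c26a..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8Test.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.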
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class AzureManifestFileV8Test { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - } - - @Test - public void testManifest() throws URISyntaxException, IOException { - ManifestFile manifestFile = new AzurePersistenceV8(container.getDirectoryReference("oak")).getManifestFile(); - assertFalse(manifestFile.exists()); - - Properties props = new Properties(); - props.setProperty("xyz", "abc"); - props.setProperty("version", "123"); - manifestFile.save(props); - - Properties loaded = manifestFile.load(); - assertEquals(props, loaded); - } - -}
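AzureManifestFileV8Test, removed above, round-tripped a java.util.Properties object through a single manifest blob. The same round trip against the v12 client looks roughly as follows (illustrative only; class name, blob name, and connection string are placeholders):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Properties;

import com.azure.core.util.BinaryData;
import com.azure.storage.blob.BlobClient;
import com.azure.storage.blob.BlobContainerClientBuilder;

public class ManifestSketch {
    public static void main(String[] args) throws Exception {
        BlobClient manifest = new BlobContainerClientBuilder()
                .connectionString("UseDevelopmentStorage=true")
                .containerName("oak-test")
                .buildClient()
                .getBlobClient("oak/manifest");
        // Save: serialize the properties and overwrite the blob.
        Properties props = new Properties();
        props.setProperty("version", "123");
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        props.store(out, null);
        manifest.upload(BinaryData.fromBytes(out.toByteArray()), true);
        // Load: download and parse the same blob.
        Properties loaded = new Properties();
        loaded.load(new ByteArrayInputStream(manifest.downloadContent().toBytes()));
        System.out.println(loaded);
    }
}

diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java deleted file mode 100644 index ff3a2d422f6..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.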
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.SegmentId; -import org.apache.jackrabbit.oak.segment.SegmentNotFoundException; -import org.apache.jackrabbit.oak.segment.file.FileStore; -import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; -import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; -import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; - -public class AzureReadSegmentV8Test { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - } - - @Test(expected = SegmentNotFoundException.class) - public void testReadNonExistentSegmentRepositoryReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException { - AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); - FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - SegmentId id = new SegmentId(fs, 0, 0); - - try { - fs.readSegment(id); - } finally { - fs.close(); - } - } - - @Test(expected = RepositoryNotReachableException.class) - public void testReadExistentSegmentRepositoryNotReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException { - AzurePersistenceV8 p = new ReadFailingAzurePersistenceV8(container.getDirectoryReference("oak")); - FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); - - SegmentId id = new SegmentId(fs, 0, 0); - byte[] buffer = new byte[2]; - - try { - fs.writeSegment(id, buffer, 0, 2); - fs.readSegment(id); - } finally { - fs.close(); - } - } - - static class ReadFailingAzurePersistenceV8 extends AzurePersistenceV8 { - public ReadFailingAzurePersistenceV8(CloudBlobDirectory segmentStoreDirectory) { - super(segmentStoreDirectory); - } - - @Override - public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, - FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { - return new AzureArchiveManagerV8(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController) { - @Override - public SegmentArchiveReader open(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - return new 
AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor) { - @Override - public Buffer readSegment(long msb, long lsb) throws IOException { - throw new RepositoryNotReachableException( - new RuntimeException("Cannot access Azure storage")); - } - }; - } - - @Override - public SegmentArchiveWriter create(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - return new AzureSegmentArchiveWriterV8(archiveDirectory, ioMonitor, fileStoreMonitor, writeAccessController) { - @Override - public Buffer readSegment(long msb, long lsb) throws IOException { - throw new RepositoryNotReachableException( - new RuntimeException("Cannot access Azure storage")); } - }; - } - }; - } - } -}
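AzureReadSegmentV8Test, removed above, pinned down the contract that a missing blob surfaces as SegmentNotFoundException while an unreachable store surfaces as RepositoryNotReachableException. A rough sketch of the raw v12 read underneath that mapping follows (names are placeholders; the exception mapping itself lives in the persistence layer, not here):

import com.azure.core.util.BinaryData;
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.BlobContainerClientBuilder;
import com.azure.storage.blob.models.BlobStorageException;

public class ReadSegmentSketch {
    public static void main(String[] args) {
        BlobContainerClient container = new BlobContainerClientBuilder()
                .connectionString("UseDevelopmentStorage=true")
                .containerName("oak-test")
                .buildClient();
        try {
            BinaryData data = container
                    .getBlobClient("oak/data00000a.tar/0000.placeholder-uuid")
                    .downloadContent();
            System.out.println(data.getLength() + " bytes");
        } catch (BlobStorageException e) {
            // A 404 here means the segment blob is genuinely gone; transport
            // failures surface as different exceptions and are what Oak maps
            // to RepositoryNotReachableException.
            System.out.println("status " + e.getStatusCode());
        }
    }
}

diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java deleted file mode 100644 index d645a1743f0..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.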
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.contrib.java.lang.system.ProvideSystemProperty; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeoutException; - -import static org.junit.Assert.*; - -public class AzureRepositoryLockV8Test { - - private static final Logger log = LoggerFactory.getLogger(AzureRepositoryLockV8Test.class); - public static final String LEASE_DURATION = "15"; - public static final String RENEWAL_INTERVAL = "3"; - public static final String TIME_TO_WAIT_BEFORE_BLOCK = "9"; - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - } - - @Rule - public final ProvideSystemProperty systemPropertyRule = new ProvideSystemProperty(AzureRepositoryLockV8.LEASE_DURATION_PROP, LEASE_DURATION) - .and(AzureRepositoryLockV8.RENEWAL_INTERVAL_PROP, RENEWAL_INTERVAL) - .and(AzureRepositoryLockV8.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, TIME_TO_WAIT_BEFORE_BLOCK); - - @Test - public void testFailingLock() throws URISyntaxException, IOException, StorageException { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock(); - try { - new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock(); - fail("The second lock should fail."); - } catch (IOException e) { - // it's fine - } - } - - @Test - public void testWaitingLock() throws URISyntaxException, IOException, StorageException, InterruptedException { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - Semaphore s = new Semaphore(0); - new Thread(() -> { - try { - RepositoryLock lock = new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock(); - s.release(); - Thread.sleep(1000); - lock.unlock(); - } catch (Exception e) { - log.error("Can't lock or unlock the repo", e); - } - }).start(); - - s.acquire(); - new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController(), 10).lock(); - } - - @Test - public void testLeaseRefreshUnsuccessful() throws URISyntaxException, StorageException, IOException, InterruptedException { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - - CloudBlockBlob blobMocked = Mockito.spy(blob); - - // instrument the mock to throw the exception twice when renewing the lease - StorageException storageException = - new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); - Mockito.doThrow(storageException) - .doThrow(storageException) - .doCallRealMethod() - 
.when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); - - new AzureRepositoryLockV8(blobMocked, () -> {}, new WriteAccessController()).lock(); - - // wait till lease expires - Thread.sleep(16000); - - // reset the mock to default behaviour - Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); - - try { - new AzureRepositoryLockV8(blobMocked, () -> {}, new WriteAccessController()).lock(); - fail("The second lock should fail."); - } catch (IOException e) { - // it's fine - } - } - - @Test - public void testWritesBlockedOnlyAfterFewUnsuccessfulAttempts() throws Exception { - - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - - CloudBlockBlob blobMocked = Mockito.spy(blob); - - // instrument the mock to throw the exception twice when renewing the lease - StorageException storageException = - new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); - Mockito - .doCallRealMethod() - .doThrow(storageException) - .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); - - - WriteAccessController writeAccessController = new WriteAccessController(); - - new AzureRepositoryLockV8(blobMocked, () -> {}, writeAccessController).lock(); - - - Thread thread = new Thread(() -> { - - while (true) { - writeAccessController.checkWritingAllowed(); - - } - }); - - thread.start(); - - Thread.sleep(3000); - assertFalse("after 3 seconds thread should not be in a waiting state", thread.getState().equals(Thread.State.WAITING)); - - Thread.sleep(3000); - assertFalse("after 6 seconds thread should not be in a waiting state", thread.getState().equals(Thread.State.WAITING)); - - Thread.sleep(5000); - assertTrue("after more than 9 seconds thread should be in a waiting state", thread.getState().equals(Thread.State.WAITING)); - - Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java deleted file mode 100644 index eaf7f439b5f..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; -import org.jetbrains.annotations.NotNull; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.mockserver.client.MockServerClient; -import org.mockserver.junit.MockServerRule; -import org.mockserver.matchers.Times; -import org.mockserver.model.BinaryBody; -import org.mockserver.model.HttpRequest; -import shaded_package.org.apache.http.client.utils.URIBuilder; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.UUID; - -import static org.junit.Assert.assertThrows; -import static org.mockserver.model.HttpRequest.request; -import static org.mockserver.model.HttpResponse.response; -import static org.mockserver.verify.VerificationTimes.exactly; - -public class AzureSegmentArchiveWriterV8Test { - public static final String BASE_PATH = "/devstoreaccount1/oak-test"; - public static final int MAX_ATTEMPTS = 3; - private static final String RETRY_ATTEMPTS = "segment.azure.retry.attempts"; - private static final String TIMEOUT_EXECUTION = "segment.timeout.execution"; - private static final String RETRY_INTERVAL_MS = "azure.segment.archive.writer.retries.intervalMs"; - private static final String WRITE_RETRY_ATTEMPTS = "azure.segment.archive.writer.retries.max"; - - @Rule - public MockServerRule mockServerRule = new MockServerRule(this); - - @SuppressWarnings("unused") - private MockServerClient mockServerClient; - - private CloudBlobContainer container; - - @Before - public void setUp() throws Exception { - container = createCloudBlobContainer(); - - System.setProperty(RETRY_INTERVAL_MS, "100"); - System.setProperty(WRITE_RETRY_ATTEMPTS, Integer.toString(MAX_ATTEMPTS)); - - // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter - System.setProperty(RETRY_ATTEMPTS, "0"); - System.setProperty(TIMEOUT_EXECUTION, "1"); - } - - @AfterClass - public static void setDown() { - // resetting the values for the properties set in setUp(). 
otherwise these will apply to all the tests that are executed after - System.clearProperty(RETRY_ATTEMPTS); - System.clearProperty(TIMEOUT_EXECUTION); - System.clearProperty(RETRY_INTERVAL_MS); - System.clearProperty(WRITE_RETRY_ATTEMPTS); - } - - @Test - public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); - - HttpRequest writeBinaryReferencesRequest = getWriteBinaryReferencesRequest(); - // fail twice - mockServerClient - .when(writeBinaryReferencesRequest, Times.exactly(2)) - .respond(response().withStatusCode(500)); - // then succeed - mockServerClient - .when(writeBinaryReferencesRequest, Times.once()) - .respond(response().withStatusCode(201)); - - writer.writeBinaryReferences(new byte[10]); - - mockServerClient.verify(writeBinaryReferencesRequest, exactly(MAX_ATTEMPTS)); - } - - @Test - public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); - - HttpRequest writeGraphRequest = getWriteGraphRequest(); - // fail twice - mockServerClient - .when(writeGraphRequest, Times.exactly(2)) - .respond(response().withStatusCode(500)); - // then succeed - mockServerClient - .when(writeGraphRequest, Times.once()) - .respond(response().withStatusCode(201)); - - writer.writeGraph(new byte[10]); - - mockServerClient.verify(writeGraphRequest, exactly(MAX_ATTEMPTS)); - } - - @Test - public void retryWhenFailureOnClose_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); - - HttpRequest closeArchiveRequest = getCloseArchiveRequest(); - // fail twice - mockServerClient - .when(closeArchiveRequest, Times.exactly(2)) - .respond(response().withStatusCode(500)); - // then succeed - mockServerClient - .when(closeArchiveRequest, Times.once()) - .respond(response().withStatusCode(201)); - - writer.close(); - - mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS)); - } - - @Test - public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); - - HttpRequest closeArchiveRequest = getCloseArchiveRequest(); - // always fail - mockServerClient - .when(closeArchiveRequest, Times.unlimited()) - .respond(response().withStatusCode(500)); - - - assertThrows(IOException.class, writer::close); - - mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS)); - } - - - private void writeAndFlushSegment(SegmentArchiveWriter writer) throws IOException { - expectWriteRequests(); - UUID u = UUID.randomUUID(); - writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); - writer.flush(); - } - - private void expectWriteRequests() { - mockServerClient - .when(getUploadSegmentDataRequest(), Times.once()) - .respond(response().withStatusCode(201)); - - mockServerClient - .when(getUploadSegmentMetadataRequest(), Times.once()) - .respond(response().withStatusCode(200)); - } - - @NotNull - private SegmentArchiveWriter createSegmentArchiveWriter() throws URISyntaxException, IOException { - // Mock the list blobs operation that's called during AzureSegmentArchiveWriterV8 initialization - expectListBlobsRequest(); - - WriteAccessController writeAccessController = new WriteAccessController(); - 
writeAccessController.enableWriting(); - AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); - azurePersistenceV8.setWriteAccessController(writeAccessController); - SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - SegmentArchiveWriter writer = manager.create("data00000a.tar"); - return writer; - } - - private static HttpRequest getCloseArchiveRequest() { - return request() - .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/closed"); - } - - private static HttpRequest getWriteBinaryReferencesRequest() { - return request() - .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.brf"); - } - - private static HttpRequest getWriteGraphRequest() { - return request() - .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.gph"); - } - - private static HttpRequest getUploadSegmentMetadataRequest() { - return request() - .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/.*") - .withQueryStringParameter("comp", "metadata"); - } - - private static HttpRequest getUploadSegmentDataRequest() { - return request() - .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/.*") - .withBody(new BinaryBody(new byte[10])); - } - - private void expectListBlobsRequest() { - mockServerClient - .when(request() - .withMethod("GET") - .withPath(BASE_PATH) - .withQueryStringParameter("comp", "list") - .withQueryStringParameter("prefix", "oak/data00000a.tar/"), Times.once()) - .respond(response() - .withStatusCode(200) - .withHeader("Content-Type", "application/xml") - .withBody("<?xml version=\"1.0\" encoding=\"utf-8\"?>" + - "<EnumerationResults>" + - "<Blobs/>" + - "<NextMarker/>" + - "</EnumerationResults>")); - } - - @NotNull - private CloudBlobContainer createCloudBlobContainer() throws URISyntaxException, StorageException { - URI uri = new URIBuilder() - .setScheme("http") - .setHost(mockServerClient.remoteAddress().getHostName()) - .setPort(mockServerClient.remoteAddress().getPort()) - .setPath(BASE_PATH) - .build(); - - return new CloudBlobContainer(uri); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8Test.java deleted file mode 100644 index 2212536ea7b..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8Test.java +++ /dev/null @@ -1,290 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.*; -import java.io.IOException; -import java.net.URISyntaxException; -import java.time.Duration; -import java.time.Instant; -import java.util.Collections; -import java.util.Date; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.Set; -import java.util.stream.StreamSupport; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.commons.collections.SetUtils; -import org.apache.jackrabbit.oak.segment.azure.AzureSegmentStoreService; -import org.apache.jackrabbit.oak.segment.azure.Configuration; -import org.apache.jackrabbit.oak.segment.azure.util.Environment; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.apache.sling.testing.mock.osgi.junit.OsgiContext; -import org.jetbrains.annotations.NotNull; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.osgi.util.converter.Converters; - -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; - -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ; -import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE; -import static java.util.stream.Collectors.toSet; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeNotNull; - -public class AzureSegmentStoreV8Test { - private static final Environment ENVIRONMENT = new Environment(); - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - @Rule - public final OsgiContext context = new OsgiContext(); - - private static final EnumSet<SharedAccessBlobPermissions> READ_ONLY = EnumSet.of(READ, LIST); - private static final EnumSet<SharedAccessBlobPermissions> READ_WRITE = EnumSet.of(READ, LIST, CREATE, WRITE, ADD); - private static final Set<String> BLOBS = Set.of("blob1", "blob2"); - - private CloudBlobContainer container; - - @Before - public void setup() throws Exception { - container = azurite.getContainer(AzureSegmentStoreService.DEFAULT_CONTAINER_NAME); - for (String blob : BLOBS) { - container.getBlockBlobReference(blob + ".txt").uploadText(blob); - } - } - - @Test - public void connectWithSharedAccessSignatureURL_readOnly() throws Exception { - String sasToken = container.generateSharedAccessSignature(policy(READ_ONLY), null); - - AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); - azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); - - SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); - assertNotNull(persistence); - assertWriteAccessNotGranted(persistence); -
assertReadAccessGranted(persistence, BLOBS); - } - - @Test - public void connectWithSharedAccessSignatureURL_readWrite() throws Exception { - String sasToken = container.generateSharedAccessSignature(policy(READ_WRITE), null); - - AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); - azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); - - SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); - assertNotNull(persistence); - assertWriteAccessGranted(persistence); - assertReadAccessGranted(persistence, concat(BLOBS, "test")); - } - - @Test - public void connectWithSharedAccessSignatureURL_expired() throws Exception { - SharedAccessBlobPolicy expiredPolicy = policy(READ_WRITE, yesterday()); - String sasToken = container.generateSharedAccessSignature(expiredPolicy, null); - - AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); - azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); - - SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); - assertNotNull(persistence); - assertWriteAccessNotGranted(persistence); - assertReadAccessNotGranted(persistence); - } - - @Test - public void connectWithAccessKey() throws Exception { - AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); - azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithAccessKey(AzuriteDockerRule.ACCOUNT_KEY)); - - SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); - assertNotNull(persistence); - assertWriteAccessGranted(persistence); - assertReadAccessGranted(persistence, concat(BLOBS, "test")); - } - - @Test - public void connectWithConnectionURL() throws Exception { - AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); - azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithConfigurationURL(AzuriteDockerRule.ACCOUNT_KEY)); - - SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); - assertNotNull(persistence); - assertWriteAccessGranted(persistence); - assertReadAccessGranted(persistence, concat(BLOBS, "test")); - } - - @Test - public void connectWithServicePrincipal() throws Exception { - // Note: make sure blob1.txt and blob2.txt are uploaded to - // AZURE_ACCOUNT_NAME/oak before running this test - - assumeNotNull(ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME)); - assumeNotNull(ENVIRONMENT.getVariable(AZURE_TENANT_ID)); - assumeNotNull(ENVIRONMENT.getVariable(AZURE_CLIENT_ID)); - assumeNotNull(ENVIRONMENT.getVariable(AZURE_CLIENT_SECRET)); - - AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); - String accountName = ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME); - String tenantId = ENVIRONMENT.getVariable(AZURE_TENANT_ID); - String clientId = ENVIRONMENT.getVariable(AZURE_CLIENT_ID); - String clientSecret = ENVIRONMENT.getVariable(AZURE_CLIENT_SECRET); - azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithServicePrincipal(accountName, clientId, clientSecret, tenantId)); - - SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); - assertNotNull(persistence); - assertWriteAccessGranted(persistence); - assertReadAccessGranted(persistence, concat(BLOBS, 
"test")); - } - - @Test - public void deactivate() throws Exception { - AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); - azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithAccessKey(AzuriteDockerRule.ACCOUNT_KEY)); - assertNotNull(context.getService(SegmentNodeStorePersistence.class)); - - azureSegmentStoreService.deactivate(); - assertNull(context.getService(SegmentNodeStorePersistence.class)); - } - - @NotNull - private static SharedAccessBlobPolicy policy(EnumSet permissions, Instant expirationTime) { - SharedAccessBlobPolicy sharedAccessBlobPolicy = new SharedAccessBlobPolicy(); - sharedAccessBlobPolicy.setPermissions(permissions); - sharedAccessBlobPolicy.setSharedAccessExpiryTime(Date.from(expirationTime)); - return sharedAccessBlobPolicy; - } - - @NotNull - private static SharedAccessBlobPolicy policy(EnumSet permissions) { - return policy(permissions, Instant.now().plus(Duration.ofDays(7))); - } - - private static void assertReadAccessGranted(SegmentNodeStorePersistence persistence, Set expectedBlobs) throws Exception { - CloudBlobContainer container = getContainerFrom(persistence); - Set actualBlobNames = StreamSupport.stream(container.listBlobs().spliterator(), false) - .map(blob -> blob.getUri().getPath()) - .map(path -> path.substring(path.lastIndexOf('/') + 1)) - .filter(name -> name.equals("test.txt") || name.startsWith("blob")) - .collect(toSet()); - Set expectedBlobNames = expectedBlobs.stream().map(name -> name + ".txt").collect(toSet()); - - assertEquals(expectedBlobNames, actualBlobNames); - - Set actualBlobContent = actualBlobNames.stream() - .map(name -> { - try { - return container.getBlockBlobReference(name).downloadText(); - } catch (StorageException | IOException | URISyntaxException e) { - throw new RuntimeException("Error while reading blob " + name, e); - } - }) - .collect(toSet()); - assertEquals(expectedBlobs, actualBlobContent); - } - - private static void assertWriteAccessGranted(SegmentNodeStorePersistence persistence) throws Exception { - getContainerFrom(persistence) - .getBlockBlobReference("test.txt").uploadText("test"); - } - - private static CloudBlobContainer getContainerFrom(SegmentNodeStorePersistence persistence) throws Exception { - return ((AzurePersistenceV8) persistence).getSegmentstoreDirectory().getContainer(); - } - - private static void assertWriteAccessNotGranted(SegmentNodeStorePersistence persistence) { - try { - assertWriteAccessGranted(persistence); - fail("Write access should not be granted, but writing to the storage succeeded."); - } catch (Exception e) { - // successful - } - } - - private static void assertReadAccessNotGranted(SegmentNodeStorePersistence persistence) { - try { - assertReadAccessGranted(persistence, BLOBS); - fail("Read access should not be granted, but reading from the storage succeeded."); - } catch (Exception e) { - // successful - } - } - - private static Instant yesterday() { - return Instant.now().minus(Duration.ofDays(1)); - } - - private static Set concat(Set blobs, String element) { - - final Set set = SetUtils.toLinkedSet(blobs); - set.add(element); - return Collections.unmodifiableSet(set); - } - - private static Configuration getConfigurationWithSharedAccessSignature(String sasToken) { - return getConfiguration(sasToken, AzuriteDockerRule.ACCOUNT_NAME, null, null, null, null, null); - } - - private static Configuration getConfigurationWithAccessKey(String accessKey) { - return getConfiguration(null, AzuriteDockerRule.ACCOUNT_NAME, 
accessKey, null, null, null, null); - } - - private static Configuration getConfigurationWithConfigurationURL(String accessKey) { - String connectionString = "DefaultEndpointsProtocol=https;" - + "BlobEndpoint=" + azurite.getBlobEndpoint() + ';' - + "AccountName=" + AzuriteDockerRule.ACCOUNT_NAME + ';' - + "AccountKey=" + accessKey + ';'; - return getConfiguration(null, AzuriteDockerRule.ACCOUNT_NAME, null, connectionString, null, null, null); - } - - private static Configuration getConfigurationWithServicePrincipal(String accountName, String clientId, String clientSecret, String tenantId) { - return getConfiguration(null, accountName, null, null, clientId, clientSecret, tenantId); - } - - @NotNull - private static Configuration getConfiguration(String sasToken, String accountName, String accessKey, String connectionURL, String clientId, String clientSecret, String tenantId) { - return Converters.standardConverter() - .convert(new HashMap<String, Object>() {{ - put("accountName", accountName); - put("accessKey", accessKey); - put("connectionURL", connectionURL); - put("sharedAccessSignature", sasToken); - put("clientId", clientId); - put("clientSecret", clientSecret); - put("tenantId", tenantId); - put("blobEndpoint", azurite.getBlobEndpoint()); - }}) - .to(Configuration.class); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java deleted file mode 100644 index 55d0d270a6e..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.file.tar.TarFileTest; -import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Test; - -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; - -public class AzureTarFileV8Test extends TarFileTest { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - @Override - public void setUp() throws IOException { - try { - container = azurite.getContainer("oak-test"); - AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); - WriteAccessController writeAccessController = new WriteAccessController(); - writeAccessController.enableWriting(); - azurePersistenceV8.setWriteAccessController(writeAccessController); - archiveManager = azurePersistenceV8.createArchiveManager(true, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - } catch (StorageException | InvalidKeyException | URISyntaxException e) { - throw new IOException(e); - } - } - - @Override - protected long getWriteAndReadExpectedSize() { - return 45; - } - - @Test - @Ignore - @Override - public void graphShouldBeTrimmedDownOnSweep() throws Exception { - super.graphShouldBeTrimmedDownOnSweep(); - } -} \ No newline at end of file diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java deleted file mode 100644 index d17e3862001..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.file.tar.TarFiles; -import org.apache.jackrabbit.oak.segment.file.tar.TarFilesTest; -import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; -import org.junit.Before; -import org.junit.ClassRule; - -public class AzureTarFilesV8Test extends TarFilesTest { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - @Override - public void setUp() throws Exception { - container = azurite.getContainer("oak-test"); - AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); - WriteAccessController writeAccessController = new WriteAccessController(); - writeAccessController.enableWriting(); - azurePersistenceV8.setWriteAccessController(writeAccessController); - tarFiles = TarFiles.builder() - .withDirectory(folder.newFolder()) - .withTarRecovery((id, data, recovery) -> { - // Intentionally left blank - }) - .withIOMonitor(new IOMonitorAdapter()) - .withFileStoreMonitor(new FileStoreMonitorAdapter()) - .withRemoteStoreMonitor(new RemoteStoreMonitorAdapter()) - .withMaxFileSize(MAX_FILE_SIZE) - .withPersistence(azurePersistenceV8) - .build(); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java deleted file mode 100644 index 18421c74e7c..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.azure.v8; - -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.file.tar.TarWriterTest; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; -import org.jetbrains.annotations.NotNull; -import org.junit.Before; -import org.junit.ClassRule; - -import java.io.IOException; - -public class AzureTarWriterV8Test extends TarWriterTest { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - private CloudBlobContainer container; - - @Before - public void setUp() throws Exception { - container = azurite.getContainer("oak-test"); - } - - @NotNull - @Override - protected SegmentArchiveManager getSegmentArchiveManager() throws Exception { - WriteAccessController writeAccessController = new WriteAccessController(); - writeAccessController.enableWriting(); - AzureArchiveManagerV8 azureArchiveManagerV8 = new AzureArchiveManagerV8(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController); - return azureArchiveManagerV8; - } - - @NotNull - @Override - protected SegmentArchiveManager getFailingSegmentArchiveManager() throws Exception { - final WriteAccessController writeAccessController = new WriteAccessController(); - writeAccessController.enableWriting(); - return new AzureArchiveManagerV8(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController) { - @Override - public SegmentArchiveWriter create(String archiveName) throws IOException { - return new AzureSegmentArchiveWriterV8(getDirectory(archiveName), ioMonitor, monitor, writeAccessController) { - @Override - public void writeGraph(@NotNull byte[] data) throws IOException { - throw new IOException("test"); - } - }; - } - }; - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java deleted file mode 100644 index 072890bb398..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.segment.spi.persistence.split.v8; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.HashSet; -import java.util.Random; -import java.util.Set; - -import com.microsoft.azure.storage.StorageException; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.commons.collections.SetUtils; -import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; -import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; -import org.apache.jackrabbit.oak.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; -import org.apache.jackrabbit.oak.segment.file.FileStore; -import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; -import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import static org.junit.Assert.assertEquals; - -public class SplitPersistenceBlobV8Test { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private SegmentNodeStore base; - - private SegmentNodeStore split; - - private FileStore baseFileStore; - - private FileStore splitFileStore; - - private String baseBlobId; - - private SegmentNodeStorePersistence splitPersistence; - - @Before - public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { - SegmentNodeStorePersistence sharedPersistence = - new AzurePersistenceV8(azurite.getContainer("oak-test").getDirectoryReference("oak")); - File dataStoreDir = new File(folder.getRoot(), "blobstore"); - BlobStore blobStore = newBlobStore(dataStoreDir); - - baseFileStore = FileStoreBuilder - .fileStoreBuilder(folder.newFolder()) - .withCustomPersistence(sharedPersistence) - .withBlobStore(blobStore) - .build(); - base = SegmentNodeStoreBuilders.builder(baseFileStore).build(); - - NodeBuilder builder = base.getRoot().builder(); - builder.child("foo").child("bar").setProperty("version", "v1"); - base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - baseBlobId = createLoad(base, baseFileStore).getContentIdentity(); - baseFileStore.flush(); - baseFileStore.close(); - - baseFileStore = FileStoreBuilder - .fileStoreBuilder(folder.newFolder()) - .withCustomPersistence(sharedPersistence) - .withBlobStore(blobStore) - .build(); - base = SegmentNodeStoreBuilders.builder(baseFileStore).build(); - - 
createLoad(base, baseFileStore).getContentIdentity(); - baseFileStore.flush(); - - SegmentNodeStorePersistence localPersistence = new TarPersistence(folder.newFolder()); - splitPersistence = new SplitPersistence(sharedPersistence, localPersistence); - - splitFileStore = FileStoreBuilder - .fileStoreBuilder(folder.newFolder()) - .withCustomPersistence(splitPersistence) - .withBlobStore(blobStore) - .build(); - split = SegmentNodeStoreBuilders.builder(splitFileStore).build(); - } - - @After - public void tearDown() { - baseFileStore.close(); - } - - @Test - public void collectReferences() - throws IOException, CommitFailedException { - String blobId = createLoad(split, splitFileStore).getContentIdentity(); - - assertReferences(2, SetUtils.toSet(baseBlobId, blobId)); - } - - private static Blob createBlob(NodeStore nodeStore, int size) throws IOException { - byte[] data = new byte[size]; - new Random().nextBytes(data); - return nodeStore.createBlob(new ByteArrayInputStream(data)); - } - - private static BlobStore newBlobStore(File directory) { - OakFileDataStore delegate = new OakFileDataStore(); - delegate.setPath(directory.getAbsolutePath()); - delegate.init(null); - return new DataStoreBlobStore(delegate); - } - - private Blob createLoad(SegmentNodeStore store, FileStore fileStore) - throws IOException, CommitFailedException { - NodeBuilder builder = store.getRoot().builder(); - Blob blob = createBlob(store, 18000); - builder.setProperty("bin", blob); - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fileStore.flush(); - return blob; - } - - private void assertReferences(int count, Set<String> blobIds) - throws IOException { - Set<String> actualReferences = new HashSet<>(); - splitFileStore.collectBlobReferences(actualReferences::add); - assertEquals("visible references different", count, actualReferences.size()); - assertEquals("Binary reference returned should be same", blobIds, actualReferences); - } -} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java deleted file mode 100644 index a5af0abb47b..00000000000 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.jackrabbit.oak.segment.spi.persistence.split.v8; - -import com.microsoft.azure.storage.StorageException; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; -import org.apache.jackrabbit.oak.segment.file.FileStore; -import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; -import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; -import org.apache.jackrabbit.oak.segment.file.tar.binaries.BinaryReferencesIndexLoader; -import org.apache.jackrabbit.oak.segment.file.tar.binaries.InvalidBinaryReferencesIndexException; -import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; - -import static org.junit.Assert.assertEquals; - -public class SplitPersistenceV8Test { - - @ClassRule - public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private SegmentNodeStore base; - - private SegmentNodeStore split; - - private FileStore baseFileStore; - - private FileStore splitFileStore; - - private SegmentNodeStorePersistence splitPersistence; - - @Before - public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { - SegmentNodeStorePersistence sharedPersistence = new AzurePersistenceV8(azurite.getContainer("oak-test").getDirectoryReference("oak")); - - baseFileStore = FileStoreBuilder - .fileStoreBuilder(folder.newFolder()) - .withCustomPersistence(sharedPersistence) - .build(); - base = SegmentNodeStoreBuilders.builder(baseFileStore).build(); - - NodeBuilder builder = base.getRoot().builder(); - builder.child("foo").child("bar").setProperty("version", "v1"); - base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - baseFileStore.flush(); - - SegmentNodeStorePersistence localPersistence = new TarPersistence(folder.newFolder()); - splitPersistence = new SplitPersistence(sharedPersistence, localPersistence); - - splitFileStore = FileStoreBuilder - .fileStoreBuilder(folder.newFolder()) - .withCustomPersistence(splitPersistence) - .build(); - split = SegmentNodeStoreBuilders.builder(splitFileStore).build(); - } - - @After - public void tearDown() { - 
if (splitFileStore != null) { - splitFileStore.close(); - } - - if (baseFileStore != null) { - baseFileStore.close(); - } - } - - @Test - public void testBaseNodeAvailable() { - assertEquals("v1", split.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); - } - - @Test - public void testChangesAreLocalForBaseRepository() throws CommitFailedException { - NodeBuilder builder = base.getRoot().builder(); - builder.child("foo").child("bar").setProperty("version", "v2"); - base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - assertEquals("v1", split.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); - } - - @Test - public void testChangesAreLocalForSplitRepository() throws CommitFailedException { - NodeBuilder builder = split.getRoot().builder(); - builder.child("foo").child("bar").setProperty("version", "v2"); - split.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - assertEquals("v1", base.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); - } - - @Test - public void testBinaryReferencesAreNotNull() throws IOException, InvalidBinaryReferencesIndexException { - splitFileStore.close(); - splitFileStore = null; - - SegmentArchiveManager manager = splitPersistence.createArchiveManager(true, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - for (String archive : manager.listArchives()) { - SegmentArchiveReader reader = manager.open(archive); - BinaryReferencesIndexLoader.parseBinaryReferencesIndex(reader.getBinaryReferences()); - reader.close(); - } - } -} diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java index aafebfaaad6..974ba7fb822 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java @@ -16,16 +16,12 @@ */ package org.apache.jackrabbit.oak.upgrade.cli.node; -import com.microsoft.azure.storage.StorageCredentials; -import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.commons.lang3.StringUtils; import org.apache.jackrabbit.oak.commons.pio.Closer; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; -import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; -import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; +import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzurePersistenceManager; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; @@ -33,7 +29,6 @@ import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore; import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.apache.jackrabbit.oak.upgrade.cli.CliUtils; import org.apache.jackrabbit.oak.upgrade.cli.node.FileStoreUtils.NodeStoreWithFileStore; import java.io.File; @@ -56,7 +51,6 @@ public class SegmentAzureFactory 
implements NodeStoreFactory { private int segmentCacheSize; private final boolean readOnly; private static final Environment environment = new Environment(); - private AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public static class Builder { private final String dir; @@ -118,10 +112,10 @@ public SegmentAzureFactory(Builder builder) { @Override public NodeStore create(BlobStore blobStore, Closer closer) throws IOException { - AzurePersistenceV8 azPersistence = null; + AzurePersistence azPersistence = null; try { - azPersistence = createAzurePersistence(closer); - } catch (StorageException | URISyntaxException | InvalidKeyException e) { + azPersistence = createAzurePersistence(); + } catch (BlobStorageException | URISyntaxException | InvalidKeyException e) { throw new IllegalStateException(e); } @@ -152,40 +146,26 @@ public NodeStore create(BlobStore blobStore, Closer closer) throws IOException { } } - private AzurePersistenceV8 createAzurePersistence(Closer closer) throws StorageException, URISyntaxException, InvalidKeyException { - CloudBlobDirectory cloudBlobDirectory = null; - + private AzurePersistence createAzurePersistence() throws URISyntaxException, InvalidKeyException, IOException { // connection string will take precedence over accountkey / sas / service principal if (StringUtils.isNoneBlank(connectionString, containerName)) { - cloudBlobDirectory = AzureUtilitiesV8.cloudBlobDirectoryFrom(connectionString, containerName, dir); - } else if (StringUtils.isNoneBlank(accountName, uri)) { - StorageCredentials credentials = null; + return AzurePersistenceManager.createAzurePersistence(connectionString, null, accountName, containerName, dir, false, true); + } else if (StringUtils.isNotBlank(accountName)) { if (StringUtils.isNotBlank(sasToken)) { - credentials = new StorageCredentialsSharedAccessSignature(sasToken); - } else { - this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); - credentials = azureStorageCredentialManagerV8.getStorageCredentialsFromEnvironment(accountName, environment); - closer.register(azureStorageCredentialManagerV8); + return AzurePersistenceManager.createAzurePersistence(null, sasToken, accountName, containerName, dir, false, true); } - cloudBlobDirectory = AzureUtilitiesV8.cloudBlobDirectoryFrom(credentials, uri, dir); - } - - if (cloudBlobDirectory == null) { - throw new IllegalArgumentException("Could not connect to Azure storage. Too few connection parameters specified!"); + return AzurePersistenceManager.createAzurePersistenceFrom(accountName, containerName, dir, environment); } - return new AzurePersistenceV8(cloudBlobDirectory); + throw new IllegalArgumentException("Could not connect to Azure storage. 
Too few connection parameters specified!"); } @Override public boolean hasExternalBlobReferences() throws IOException { - AzurePersistenceV8 azPersistence = null; - Closer closer = Closer.create(); - CliUtils.handleSigInt(closer); + AzurePersistence azPersistence = null; try { - azPersistence = createAzurePersistence(closer); - } catch (StorageException | URISyntaxException | InvalidKeyException e) { - closer.close(); + azPersistence = createAzurePersistence(); + } catch (BlobStorageException | URISyntaxException | InvalidKeyException e) { throw new IllegalStateException(e); } @@ -201,7 +181,6 @@ public boolean hasExternalBlobReferences() throws IOException { throw new IOException(e); } finally { tmpDir.delete(); - closer.close(); } } diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/parser/StoreType.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/parser/StoreType.java index b9b961b5eb4..da12e71d3a0 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/parser/StoreType.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/parser/StoreType.java @@ -167,6 +167,7 @@ public StoreFactory createFactory(String[] paths, MigrationDirection direction, direction == MigrationDirection.SRC) .connectionString(config.get(KEY_CONNECTION_STRING)) .containerName(config.get(KEY_CONTAINER_NAME)) + .accountName(config.get(KEY_ACCOUNT_NAME)) .build() ); } else { diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/AzuriteDockerRule.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/AzuriteDockerRule.java new file mode 100644 index 00000000000..59deec5df5e --- /dev/null +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/AzuriteDockerRule.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/AzuriteDockerRule.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/AzuriteDockerRule.java
new file mode 100644
index 00000000000..59deec5df5e
--- /dev/null
+++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/AzuriteDockerRule.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.upgrade;
+
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.BlobServiceClient;
+import com.azure.storage.blob.BlobServiceClientBuilder;
+import com.azure.storage.blob.models.BlobStorageException;
+import com.azure.storage.common.policy.RequestRetryOptions;
+import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions;
+import org.junit.Assume;
+import org.junit.rules.ExternalResource;
+import org.junit.runner.Description;
+import org.junit.runners.model.MultipleFailureException;
+import org.junit.runners.model.Statement;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.utility.DockerImageName;
+
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class AzuriteDockerRule extends ExternalResource {
+
+    private static final DockerImageName DOCKER_IMAGE_NAME = DockerImageName.parse("mcr.microsoft.com/azure-storage/azurite:3.31.0");
+    public static final String ACCOUNT_KEY = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==";
+    public static final String ACCOUNT_NAME = "devstoreaccount1";
+    private static final AtomicReference<Exception> STARTUP_EXCEPTION = new AtomicReference<>();
+
+    private GenericContainer<?> azuriteContainer;
+
+    @Override
+    protected void before() throws Throwable {
+        azuriteContainer = new GenericContainer<>(DOCKER_IMAGE_NAME)
+                .withExposedPorts(10000)
+                .withEnv(Map.of("executable", "blob"))
+                .withStartupTimeout(Duration.ofSeconds(30));
+
+        try {
+            azuriteContainer.start();
+        } catch (IllegalStateException e) {
+            STARTUP_EXCEPTION.set(e);
+            throw e;
+        }
+    }
+
+    @Override
+    protected void after() {
+        if (azuriteContainer != null) {
+            azuriteContainer.stop();
+        }
+    }
+
+    @Override
+    public Statement apply(Statement base, Description description) {
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                try {
+                    before();
+                } catch (IllegalStateException e) {
+                    Assume.assumeNoException(STARTUP_EXCEPTION.get());
+                    throw e;
+                }
+
+                List<Throwable> errors = new ArrayList<>();
+                try {
+                    base.evaluate();
+                } catch (Throwable t) {
+                    errors.add(t);
+                } finally {
+                    try {
+                        after();
+                    } catch (Throwable t) {
+                        errors.add(t);
+                    }
+                }
+                MultipleFailureException.assertEmpty(errors);
+            }
+        };
+    }
+
+    public String getBlobEndpoint() {
+        return "http://127.0.0.1:" + getMappedPort() + "/devstoreaccount1";
+    }
+
+    public BlobContainerClient getReadBlobContainerClient(String name) throws BlobStorageException {
+        BlobContainerClient cloud = getCloudStorageAccount(name, AzureRequestOptions.getRetryOptionsDefault());
+        cloud.deleteIfExists();
+        cloud.create();
+        return cloud;
+    }
+
+    public BlobContainerClient getNoRetryBlobContainerClient(String name) throws BlobStorageException {
+        BlobContainerClient cloud = getCloudStorageAccount(name, null);
+        return cloud;
+    }
+
+    public BlobContainerClient getWriteBlobContainerClient(String name) throws BlobStorageException {
+        BlobContainerClient cloud = getCloudStorageAccount(name, AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations());
+        return cloud;
+    }
+
+    public BlobContainerClient getCloudStorageAccount(String containerName, RequestRetryOptions retryOptions) {
+        String blobEndpoint = "BlobEndpoint=" + getBlobEndpoint();
+        String accountName = "AccountName=" + ACCOUNT_NAME;
+        String accountKey = "AccountKey=" + ACCOUNT_KEY;
+
+        BlobServiceClientBuilder builder = new BlobServiceClientBuilder()
+                .endpoint(getBlobEndpoint())
+                .connectionString("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint);
+        if (retryOptions != null) {
+            builder.retryOptions(retryOptions);
+        }
+
+        BlobServiceClient blobServiceClient = builder.buildClient();
+
+        return blobServiceClient.getBlobContainerClient(containerName);
+    }
+
+    public int getMappedPort() {
+        return azuriteContainer.getMappedPort(10000);
+    }
+}
diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentAzureToSegmentTarTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentAzureToSegmentTarTest.java
index 3aa29077111..e566e26f5ee 100644
--- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentAzureToSegmentTarTest.java
+++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentAzureToSegmentTarTest.java
@@ -19,7 +19,7 @@
 
 import java.io.IOException;
 
-import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
+import org.apache.jackrabbit.oak.upgrade.AzuriteDockerRule;
 import org.apache.jackrabbit.oak.upgrade.cli.container.NodeStoreContainer;
 import org.apache.jackrabbit.oak.upgrade.cli.container.SegmentAzureNodeStoreContainer;
 import org.apache.jackrabbit.oak.upgrade.cli.container.SegmentTarNodeStoreContainer;
diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java
index db8e92de54b..1d42f0ee296 100644
--- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java
+++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java
@@ -16,7 +16,7 @@
  */
 package org.apache.jackrabbit.oak.upgrade.cli;
 
-import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8;
+import org.apache.jackrabbit.oak.segment.azure.AzureUtilities;
 import org.apache.jackrabbit.oak.segment.azure.util.Environment;
 import org.apache.jackrabbit.oak.upgrade.cli.container.NodeStoreContainer;
 import org.apache.jackrabbit.oak.upgrade.cli.container.SegmentAzureServicePrincipalNodeStoreContainer;
@@ -34,8 +34,8 @@ public class SegmentTarToSegmentAzureServicePrincipalTest extends AbstractOak2Oa
 
     @Override
     public void prepare() throws Exception {
-        assumeNotNull(ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_ACCOUNT_NAME), ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_TENANT_ID),
-                ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_CLIENT_ID), ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_CLIENT_SECRET));
+        assumeNotNull(ENVIRONMENT.getVariable(AzureUtilities.AZURE_ACCOUNT_NAME), ENVIRONMENT.getVariable(AzureUtilities.AZURE_TENANT_ID),
+                ENVIRONMENT.getVariable(AzureUtilities.AZURE_CLIENT_ID), ENVIRONMENT.getVariable(AzureUtilities.AZURE_CLIENT_SECRET));
         skipTest = false;
         super.prepare();
     }
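The new oak-upgrade copy of AzuriteDockerRule hands out read-, write- and no-retry container clients instead of the old getContainer(). A minimal sketch of how a test consumes it (the test class and assertion are illustrative, not part of the patch):

```java
import com.azure.storage.blob.BlobContainerClient;
import org.apache.jackrabbit.oak.upgrade.AzuriteDockerRule;
import org.junit.ClassRule;
import org.junit.Test;

import static org.junit.Assert.assertTrue;

public class AzuriteSmokeTest {

    // starts the Azurite container once for the class; the rule turns a
    // failed container start into a JUnit assumption, skipping the tests
    @ClassRule
    public static final AzuriteDockerRule azurite = new AzuriteDockerRule();

    @Test
    public void containerIsRecreated() {
        // getReadBlobContainerClient() deletes and re-creates the container,
        // so each call hands back a clean, existing container
        BlobContainerClient container = azurite.getReadBlobContainerClient("oak-test");
        assertTrue(container.exists());
    }
}
```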
diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureTest.java
index f801ca1e7c9..b1deef80797 100644
--- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureTest.java
+++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureTest.java
@@ -19,7 +19,7 @@
 
 import java.io.IOException;
 
-import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
+import org.apache.jackrabbit.oak.upgrade.AzuriteDockerRule;
 import org.apache.jackrabbit.oak.upgrade.cli.container.NodeStoreContainer;
 import org.apache.jackrabbit.oak.upgrade.cli.container.SegmentAzureNodeStoreContainer;
 import org.apache.jackrabbit.oak.upgrade.cli.container.SegmentTarNodeStoreContainer;
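The container diff below swaps the CloudBlobContainer field for a BlobContainerClient and replaces directory references with a plain blob-name prefix. For orientation, a sketch of how such a persistence ends up backing a NodeStore; the helper class is hypothetical, and the FileStoreBuilder wiring follows the withCustomPersistence pattern used elsewhere in this patch:

```java
import java.io.File;

import com.azure.storage.blob.BlobContainerClient;
import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
import org.apache.jackrabbit.oak.segment.azure.AzurePersistence;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeStore;

// Hypothetical helper: wires a v12 AzurePersistence into a segment store.
final class AzureNodeStores {

    static NodeStore open(BlobContainerClient container, String prefix, File localWorkDir) throws Exception {
        // the two-argument constructor takes the container client plus the
        // blob-name prefix that used to be a CloudBlobDirectory reference
        AzurePersistence persistence = new AzurePersistence(container, prefix);
        FileStore fileStore = FileStoreBuilder.fileStoreBuilder(localWorkDir)
                .withCustomPersistence(persistence)
                .build();
        return SegmentNodeStoreBuilders.builder(fileStore).build();
    }
}
```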
"repository" : dir; try { - this.container = azurite.getContainer("oak-test"); + this.blobContainerClient = azurite.getReadBlobContainerClient("oak-test"); this.mappedPort = azurite.getMappedPort(); - } catch (InvalidKeyException | URISyntaxException | StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } @Override public NodeStore open() throws IOException { - AzurePersistenceV8 azPersistence = null; + AzurePersistence azPersistence = null; try { - azPersistence = new AzurePersistenceV8(container.getDirectoryReference(dir)); - } catch (URISyntaxException e) { + azPersistence = new AzurePersistence(blobContainerClient, dir); + } catch (BlobStorageException e) { throw new IllegalStateException(e); } @@ -113,8 +111,10 @@ public void close() { @Override public void clean() throws IOException { try { - AzureUtilitiesV8.deleteAllEntries(container.getDirectoryReference(dir)); - } catch (URISyntaxException e) { + ListBlobsOptions options = new ListBlobsOptions(); + options.setPrefix(dir); + AzureUtilities.deleteAllEntries(blobContainerClient, options); + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -126,7 +126,7 @@ public String getDescription() { description.append("AccountName=").append(AZURE_ACCOUNT_NAME).append(';'); description.append("AccountKey=").append(AZURE_ACCOUNT_KEY).append(';'); description.append("BlobEndpoint=http://127.0.0.1:").append(mappedPort).append("/devstoreaccount1;"); - description.append("ContainerName=").append(container.getName()).append(";"); + description.append("ContainerName=").append(blobContainerClient.getBlobContainerName()).append(";"); description.append("Directory=").append(dir); return description.toString(); diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java index 1114121348b..3de691f83e9 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java @@ -16,35 +16,24 @@ */ package org.apache.jackrabbit.oak.upgrade.cli.node; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.SharedAccessAccountPermissions; -import com.microsoft.azure.storage.SharedAccessAccountPolicy; -import com.microsoft.azure.storage.SharedAccessAccountResourceType; -import com.microsoft.azure.storage.SharedAccessAccountService; import org.apache.commons.lang3.StringUtils; import org.apache.jackrabbit.oak.commons.pio.Closer; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; -import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.upgrade.cli.CliUtils; -import org.jetbrains.annotations.NotNull; +import org.apache.jackrabbit.oak.upgrade.AzuriteDockerRule; import org.junit.ClassRule; import org.junit.Test; import java.io.IOException; -import java.time.Duration; -import java.time.Instant; -import java.util.Date; -import java.util.EnumSet; - -import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; -import 
diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java
index 1114121348b..3de691f83e9 100644
--- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java
+++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java
@@ -16,35 +16,24 @@
  */
 package org.apache.jackrabbit.oak.upgrade.cli.node;
 
-import com.microsoft.azure.storage.CloudStorageAccount;
-import com.microsoft.azure.storage.SharedAccessAccountPermissions;
-import com.microsoft.azure.storage.SharedAccessAccountPolicy;
-import com.microsoft.azure.storage.SharedAccessAccountResourceType;
-import com.microsoft.azure.storage.SharedAccessAccountService;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.jackrabbit.oak.commons.pio.Closer;
-import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
 import org.apache.jackrabbit.oak.segment.azure.AzurePersistence;
 import org.apache.jackrabbit.oak.segment.azure.AzureUtilities;
-import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8;
 import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils;
 import org.apache.jackrabbit.oak.segment.azure.util.Environment;
 import org.apache.jackrabbit.oak.upgrade.cli.CliUtils;
-import org.jetbrains.annotations.NotNull;
+import org.apache.jackrabbit.oak.upgrade.AzuriteDockerRule;
 import org.junit.ClassRule;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.Date;
-import java.util.EnumSet;
-
-import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME;
-import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID;
-import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET;
-import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_SECRET_KEY;
-import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID;
+
+import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME;
+import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID;
+import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET;
+import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_SECRET_KEY;
+import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assume.assumeNotNull;
 import static org.junit.Assume.assumeTrue;
@@ -68,23 +57,7 @@ public void testConnectionWithConnectionString_accessKey() throws IOException {
                 false)
                 .connectionString(connectionString)
                 .containerName(CONTAINER_NAME)
-                .build();
-        Closer closer = Closer.create();
-        CliUtils.handleSigInt(closer);
-        FileStoreUtils.NodeStoreWithFileStore nodeStore = (FileStoreUtils.NodeStoreWithFileStore) segmentAzureFactory.create(null, closer);
-        assertEquals(1, nodeStore.getFileStore().getSegmentCount());
-        closer.close();
-    }
-
-    @Test
-    public void testConnectionWithConnectionString_sas() throws IOException {
-        String sasToken = getAccountSasToken();
-        String connectionStringWithPlaceholder = "DefaultEndpointsProtocol=http;AccountName=%s;SharedAccessSignature=%s;BlobEndpoint=http://127.0.0.1:%s/%s;";
-        String connectionString = String.format(connectionStringWithPlaceholder, AzuriteDockerRule.ACCOUNT_NAME, sasToken, azurite.getMappedPort(), AzuriteDockerRule.ACCOUNT_NAME);
-        SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256,
-                false)
-                .connectionString(connectionString)
-                .containerName(CONTAINER_NAME)
+                .accountName(AzuriteDockerRule.ACCOUNT_NAME)
                 .build();
         Closer closer = Closer.create();
         CliUtils.handleSigInt(closer);
@@ -109,21 +82,19 @@ public void testConnectionWithUri_accessKey() throws IOException {
         String uri = String.format(CONNECTION_URI, ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME), CONTAINER_NAME);
 
         Closer closer = Closer.create();
-        try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) {
-            try {
-                SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256,
-                        false)
-                        .accountName(ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME))
-                        .uri(uri)
-                        .build();
-                closer = Closer.create();
-                CliUtils.handleSigInt(closer);
-                FileStoreUtils.NodeStoreWithFileStore nodeStore = (FileStoreUtils.NodeStoreWithFileStore) segmentAzureFactory.create(null, closer);
-                assertEquals(1, nodeStore.getFileStore().getSegmentCount());
-            } finally {
-                closer.close();
-                cleanup(uri);
-            }
+        try {
+            SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256,
+                    false)
+                    .accountName(ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME))
+                    .uri(uri)
+                    .build();
+            closer = Closer.create();
+            CliUtils.handleSigInt(closer);
+            FileStoreUtils.NodeStoreWithFileStore nodeStore = (FileStoreUtils.NodeStoreWithFileStore) segmentAzureFactory.create(null, closer);
+            assertEquals(1, nodeStore.getFileStore().getSegmentCount());
+        } finally {
+            closer.close();
+            cleanup(uri);
         }
     }
 
@@ -136,21 +107,19 @@ public void testConnectionWithUri_servicePrincipal() throws IOException, Interru
         String uri = String.format(CONNECTION_URI, ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME), CONTAINER_NAME);
 
         Closer closer = Closer.create();
-        try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) {
-            try {
-                SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256,
-                        false)
-                        .accountName(ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME))
-                        .uri(uri)
-                        .build();
-
-                CliUtils.handleSigInt(closer);
-                FileStoreUtils.NodeStoreWithFileStore nodeStore = (FileStoreUtils.NodeStoreWithFileStore) segmentAzureFactory.create(null, closer);
-                assertEquals(1, nodeStore.getFileStore().getSegmentCount());
-            } finally {
-                closer.close();
-                cleanup(uri);
-            }
+        try {
+            SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256,
+                    false)
+                    .accountName(ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME))
+                    .uri(uri)
+                    .build();
+
+            CliUtils.handleSigInt(closer);
+            FileStoreUtils.NodeStoreWithFileStore nodeStore = (FileStoreUtils.NodeStoreWithFileStore) segmentAzureFactory.create(null, closer);
+            assertEquals(1, nodeStore.getFileStore().getSegmentCount());
+        } finally {
+            closer.close();
+            cleanup(uri);
         }
     }
 
@@ -163,33 +132,4 @@ private void cleanup(String uri) {
             throw new IllegalStateException(e);
         }
     }
-
-
-    @NotNull
-    private String getAccountSasToken() {
-        try {
-            CloudStorageAccount cloudStorageAccount = azurite.getCloudStorageAccount();
-            return cloudStorageAccount.generateSharedAccessSignature(getPolicy());
-        } catch (Exception e) {
-            throw new IllegalStateException(e);
-        }
-    }
-
-    @NotNull
-    private SharedAccessAccountPolicy getPolicy() {
-        SharedAccessAccountPolicy sharedAccessAccountPolicy = new SharedAccessAccountPolicy();
-        EnumSet<SharedAccessAccountPermissions> sharedAccessAccountPermissions = EnumSet.of(SharedAccessAccountPermissions.CREATE,
-                SharedAccessAccountPermissions.DELETE, SharedAccessAccountPermissions.READ, SharedAccessAccountPermissions.UPDATE,
-                SharedAccessAccountPermissions.WRITE, SharedAccessAccountPermissions.LIST);
-        EnumSet<SharedAccessAccountService> sharedAccessAccountServices = EnumSet.of(SharedAccessAccountService.BLOB);
-        EnumSet<SharedAccessAccountResourceType> sharedAccessAccountResourceTypes = EnumSet.of(
-                SharedAccessAccountResourceType.CONTAINER, SharedAccessAccountResourceType.OBJECT, SharedAccessAccountResourceType.SERVICE);
-
-        sharedAccessAccountPolicy.setPermissions(sharedAccessAccountPermissions);
-        sharedAccessAccountPolicy.setServices(sharedAccessAccountServices);
-        sharedAccessAccountPolicy.setResourceTypes(sharedAccessAccountResourceTypes);
-        sharedAccessAccountPolicy.setSharedAccessExpiryTime(Date.from(Instant.now().plus(Duration.ofDays(7))));
-        return sharedAccessAccountPolicy;
-    }
-
 }
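The removed testConnectionWithConnectionString_sas relied on the v8 SharedAccessAccountPolicy helpers deleted above and has no v12 replacement in this patch. Should a SAS-based test be reinstated, the account-SAS generation would move to com.azure.storage.common.sas; a sketch mirroring the old policy (same permissions, blob service only, all resource types, seven-day expiry) under the assumption that a BlobServiceClient for the account is already available:

```java
import java.time.OffsetDateTime;

import com.azure.storage.blob.BlobServiceClient;
import com.azure.storage.common.sas.AccountSasPermission;
import com.azure.storage.common.sas.AccountSasResourceType;
import com.azure.storage.common.sas.AccountSasService;
import com.azure.storage.common.sas.AccountSasSignatureValues;

// Hypothetical v12 counterpart of the deleted getAccountSasToken()/getPolicy().
final class AccountSas {

    static String generate(BlobServiceClient serviceClient) {
        // create/delete/read/update/write/list, as in the removed v8 policy
        AccountSasPermission permissions = new AccountSasPermission()
                .setCreatePermission(true)
                .setDeletePermission(true)
                .setReadPermission(true)
                .setUpdatePermission(true)
                .setWritePermission(true)
                .setListPermission(true);
        AccountSasService services = new AccountSasService().setBlobAccess(true);
        AccountSasResourceType resourceTypes = new AccountSasResourceType()
                .setContainer(true)
                .setObject(true)
                .setService(true);
        AccountSasSignatureValues values = new AccountSasSignatureValues(
                OffsetDateTime.now().plusDays(7), permissions, services, resourceTypes);
        return serviceClient.generateAccountSas(values);
    }
}
```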