[Remote Store] Moving stale commit deletion to async flow #8201

Merged
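This PR moves stale-commit cleanup off the synchronous refresh path and onto a dedicated remote-purge thread pool, with an AtomicBoolean guard so at most one cleanup task is in flight at a time. As orientation for the diff below, here is a minimal standalone sketch of that pattern using plain java.util.concurrent stand-ins; the class and method names are illustrative only, not OpenSearch code.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

class AsyncStalePurgeSketch {
    // Stand-in for the REMOTE_PURGE thread pool used in the actual change.
    private final ExecutorService purgeExecutor = Executors.newSingleThreadExecutor();
    // Allows only one purge task to be scheduled at a time.
    private final AtomicBoolean canPurge = new AtomicBoolean(true);

    void deleteStaleFilesAsync(int lastNFilesToKeep) {
        if (canPurge.compareAndSet(true, false)) {
            try {
                purgeExecutor.execute(() -> {
                    try {
                        deleteStaleFiles(lastNFilesToKeep); // slow remote-store I/O happens off the refresh path
                    } catch (Exception e) {
                        // Swallow and log; the next commit triggers another attempt.
                    } finally {
                        canPurge.set(true);
                    }
                });
            } catch (Exception e) {
                // Scheduling failed (e.g. rejected execution): release the guard so a later call can retry.
                canPurge.set(true);
            }
        }
    }

    private void deleteStaleFiles(int lastNFilesToKeep) {
        // Placeholder for the actual deletion logic.
    }
}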
@@ -15,6 +15,7 @@
import org.opensearch.common.util.FeatureFlags;
import org.opensearch.index.IndexModule;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.indices.replication.common.ReplicationType;
import org.opensearch.test.OpenSearchIntegTestCase;

@@ -74,6 +75,13 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas) {
return remoteStoreIndexSettings(numberOfReplicas, 1);
}

protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFieldLimit) {
return Settings.builder()
.put(remoteStoreIndexSettings(numberOfReplicas))
.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), totalFieldLimit)
.build();
}

protected Settings remoteTranslogIndexSettings(int numberOfReplicas, int numberOfShards) {
return Settings.builder()
.put(remoteStoreIndexSettings(numberOfReplicas, numberOfShards))
@@ -17,6 +17,7 @@
import org.opensearch.cluster.routing.RecoverySource;
import org.opensearch.common.UUIDs;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.shard.RemoteStoreRefreshListener;
import org.opensearch.indices.recovery.RecoveryState;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.InternalTestCluster;
@@ -277,4 +278,42 @@ public void testRemoteSegmentCleanup() throws Exception {
public void testRemoteTranslogCleanup() throws Exception {
verifyRemoteStoreCleanup(true);
}

public void testStaleCommitDeletionWithInvokeFlush() throws Exception {
internalCluster().startDataOnlyNodes(3);
createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L));
int numberOfIterations = randomIntBetween(5, 15);
indexData(numberOfIterations, true);
String indexUUID = client().admin()
.indices()
.prepareGetSettings(INDEX_NAME)
.get()
.getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata");
// Delete is async.
assertBusy(() -> {
int actualFileCount = getFileCount(indexPath);
if (numberOfIterations <= RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP) {
assertEquals(numberOfIterations, actualFileCount);
} else {
// As the delete is async, a new metadata file may be created just before or just after
// the purge runs, so the count can be either 10 or 11.
assertTrue(actualFileCount >= 10 && actualFileCount <= 11);
}
}, 30, TimeUnit.SECONDS);
}

public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception {
internalCluster().startDataOnlyNodes(3);
createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L));
int numberOfIterations = randomIntBetween(5, 15);
indexData(numberOfIterations, false);
String indexUUID = client().admin()
.indices()
.prepareGetSettings(INDEX_NAME)
.get()
.getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata");
assertEquals(1, getFileCount(indexPath));
}
}
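The indexData helper used by these tests comes from the shared test base class and is not part of this diff. A hypothetical sketch of its shape follows, so the file-count expectations above are easier to follow: each flush creates a new commit generation and hence a new metadata file in the remote segment store, while refreshes within the same generation replace the existing metadata file rather than adding one.

// Hypothetical helper, assumed shape only; the real implementation lives in the test base class.
private void indexData(int numberOfIterations, boolean invokeFlush) {
    for (int i = 0; i < numberOfIterations; i++) {
        int numberOfDocs = randomIntBetween(1, 100);
        for (int j = 0; j < numberOfDocs; j++) {
            client().prepareIndex(INDEX_NAME).setSource("field", "value" + j).get();
        }
        if (invokeFlush) {
            flush(INDEX_NAME);   // commit: a new metadata file appears in the remote store
        } else {
            refresh(INDEX_NAME); // refresh only: the existing metadata file is replaced
        }
    }
}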
@@ -2632,7 +2632,7 @@
assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: "
+ recoveryState.getRecoverySource();
StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener);
storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener, threadPool);

} catch (Exception e) {
listener.onFailure(e);
}
@@ -85,7 +85,7 @@ public final class RemoteStoreRefreshListener implements ReferenceManager.Refres
// Visible for testing
static final Set<String> EXCLUDE_FILES = Set.of("write.lock");
// Visible for testing
static final int LAST_N_METADATA_FILES_TO_KEEP = 10;
public static final int LAST_N_METADATA_FILES_TO_KEEP = 10;

private final IndexShard indexShard;
private final Directory storeDirectory;
@@ -200,9 +200,8 @@ private synchronized void syncSegments(boolean isRetry) {
// If a new segments_N file is present locally that has not yet been uploaded to the remote store,
// this is treated as the first refresh after a commit and a cleanup of stale commit files is triggered.
// This avoids running the delete after every refresh.
// Ideally, we want this to be done in async flow. (GitHub issue #4315)
if (isRefreshAfterCommit()) {
deleteStaleCommits();
remoteDirectory.deleteStaleSegmentsAsync(LAST_N_METADATA_FILES_TO_KEEP);
}

try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) {
@@ -381,14 +380,6 @@ private String getChecksumOfLocalFile(String file) throws IOException {
return localSegmentChecksumMap.get(file);
}

private void deleteStaleCommits() {
try {
remoteDirectory.deleteStaleSegments(LAST_N_METADATA_FILES_TO_KEEP);
} catch (IOException e) {
logger.info("Exception while deleting stale commits from remote segment store, will retry delete post next commit", e);
}
}

/**
* Updates the last refresh time and refresh seq no which is seen by local store.
*/
@@ -69,6 +69,7 @@
import org.opensearch.repositories.IndexId;
import org.opensearch.repositories.RepositoriesService;
import org.opensearch.repositories.Repository;
import org.opensearch.threadpool.ThreadPool;

import java.io.IOException;
import java.nio.channels.FileChannel;
@@ -356,7 +357,8 @@
final IndexShard indexShard,
Repository repository,
RepositoriesService repositoriesService,
ActionListener<Boolean> listener
ActionListener<Boolean> listener,
ThreadPool threadPool
) {
try {
if (canRecover(indexShard)) {
@@ -384,7 +386,10 @@
remoteStoreRepository = shallowCopyShardMetadata.getRemoteStoreRepository();
}

RemoteSegmentStoreDirectoryFactory directoryFactory = new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService);
RemoteSegmentStoreDirectoryFactory directoryFactory = new RemoteSegmentStoreDirectoryFactory(
() -> repositoriesService,
threadPool
);
RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory(
remoteStoreRepository,
indexUUID,
@@ -27,6 +27,7 @@
import org.opensearch.index.store.lockmanager.RemoteStoreLockManager;
import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata;
import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler;
import org.opensearch.threadpool.ThreadPool;

import java.io.FileNotFoundException;
import java.io.IOException;
@@ -75,6 +76,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement

private final RemoteStoreLockManager mdLockManager;

private final ThreadPool threadPool;

/**
* To prevent explosion of refresh metadata files, we replace refresh files for the given primary term and generation.
* This is achieved by uploading refresh metadata file with the same UUID suffix.
@@ -96,15 +99,23 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement

private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class);

/**
* AtomicBoolean that ensures only one staleCommitDeletion activity is scheduled at a time.
* Visible for testing
*/
protected final AtomicBoolean canDeleteStaleCommits = new AtomicBoolean(true);

public RemoteSegmentStoreDirectory(
RemoteDirectory remoteDataDirectory,
RemoteDirectory remoteMetadataDirectory,
RemoteStoreLockManager mdLockManager
RemoteStoreLockManager mdLockManager,
ThreadPool threadPool
) throws IOException {
super(remoteDataDirectory);
this.remoteDataDirectory = remoteDataDirectory;
this.remoteMetadataDirectory = remoteMetadataDirectory;
this.mdLockManager = mdLockManager;
this.threadPool = threadPool;
init();
}

@@ -574,7 +585,7 @@ public Map<String, UploadedSegmentMetadata> getSegmentsUploadedToRemoteStore(lon
* @param lastNMetadataFilesToKeep number of metadata files to keep
* @throws IOException in case of I/O error while reading from / writing to remote segment store
*/
public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException {
private void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException {
Collection<String> metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX);
List<String> sortedMetadataFileList = metadataFiles.stream().sorted(METADATA_FILENAME_COMPARATOR).collect(Collectors.toList());
if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) {
@@ -656,6 +667,33 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException
}
}

/**
* Delete stale segment and metadata files asynchronously.
* This method calls {@link RemoteSegmentStoreDirectory#deleteStaleSegments(int)} in an async manner.
* @param lastNMetadataFilesToKeep number of metadata files to keep
*/
public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) {
if (canDeleteStaleCommits.compareAndSet(true, false)) {
try {
threadPool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> {
try {
deleteStaleSegments(lastNMetadataFilesToKeep);
} catch (Exception e) {
logger.info(
"Exception while deleting stale commits from remote segment store, will retry delete post next commit",
e
);
} finally {
canDeleteStaleCommits.set(true);
}
});
} catch (Exception e) {
logger.info("Exception occurred while scheduling deleteStaleCommits", e);
canDeleteStaleCommits.set(true);
}
}
}

/*
Tries to delete shard level directory if it is empty
Return true if it deleted it successfully
@@ -680,7 +718,7 @@ private boolean deleteIfEmpty() throws IOException {
}

public void close() throws IOException {
deleteStaleSegments(0);
deleteStaleSegmentsAsync(0);
deleteIfEmpty();
}
}
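For reference, a hedged sketch of how a unit test might exercise the new async path deterministically: stub the REMOTE_PURGE executor to run tasks inline so the purge completes before the assertion. The Mockito setup and the OpenSearchExecutors.newDirectExecutorService() helper are assumptions modeled on the existing directory tests, not part of this diff; the snippet assumes a test in org.opensearch.index.store with the usual Mockito static imports, inside a test method declaring throws Exception.

ThreadPool threadPool = mock(ThreadPool.class);
// Run REMOTE_PURGE tasks on the calling thread so the async delete is deterministic.
when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(OpenSearchExecutors.newDirectExecutorService());

RemoteDirectory remoteDataDirectory = mock(RemoteDirectory.class);
RemoteDirectory remoteMetadataDirectory = mock(RemoteDirectory.class);
RemoteStoreLockManager mdLockManager = mock(RemoteStoreLockManager.class);
// Empty remote metadata: init() finds nothing, and the purge has nothing to delete.
when(remoteMetadataDirectory.listFilesByPrefix(any())).thenReturn(Collections.emptyList());

RemoteSegmentStoreDirectory directory = new RemoteSegmentStoreDirectory(
    remoteDataDirectory,
    remoteMetadataDirectory,
    mdLockManager,
    threadPool
);
directory.deleteStaleSegmentsAsync(10);
// With an inline executor the guard is released again by the time this returns.
assertTrue(directory.canDeleteStaleCommits.get());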
@@ -20,6 +20,7 @@
import org.opensearch.repositories.Repository;
import org.opensearch.repositories.RepositoryMissingException;
import org.opensearch.repositories.blobstore.BlobStoreRepository;
import org.opensearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.function.Supplier;
@@ -34,8 +35,11 @@ public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.Dire

private final Supplier<RepositoriesService> repositoriesService;

public RemoteSegmentStoreDirectoryFactory(Supplier<RepositoriesService> repositoriesService) {
private final ThreadPool threadPool;

public RemoteSegmentStoreDirectoryFactory(Supplier<RepositoriesService> repositoriesService, ThreadPool threadPool) {
this.repositoriesService = repositoriesService;
this.threadPool = threadPool;
}

@Override
@@ -62,7 +66,7 @@ public Directory newDirectory(String repositoryName, String indexUUID, String sh
shardId
);

return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager);
return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool);
} catch (RepositoryMissingException e) {
throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e);
}
3 changes: 2 additions & 1 deletion server/src/main/java/org/opensearch/node/Node.java
@@ -717,7 +717,8 @@ protected Node(
clusterService.setRerouteService(rerouteService);

final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(
repositoriesServiceReference::get
repositoriesServiceReference::get,
threadPool
);

final IndicesService indicesService = new IndicesService(
@@ -252,7 +252,7 @@ private IndexService newIndexService(IndexModule module) throws IOException {
writableRegistry(),
() -> false,
null,
new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService),
new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool),
translogFactorySupplier
);
}
@@ -24,6 +24,7 @@
import org.opensearch.repositories.blobstore.BlobStoreRepository;
import org.opensearch.test.IndexSettingsModule;
import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.threadpool.ThreadPool;

import java.io.IOException;
import java.nio.file.Path;
@@ -41,14 +42,16 @@ public class RemoteSegmentStoreDirectoryFactoryTests extends OpenSearchTestCase

private Supplier<RepositoriesService> repositoriesServiceSupplier;
private RepositoriesService repositoriesService;
private ThreadPool threadPool;
private RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory;

@Before
public void setup() {
repositoriesServiceSupplier = mock(Supplier.class);
repositoriesService = mock(RepositoriesService.class);
threadPool = mock(ThreadPool.class);
when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService);
remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier);
remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier, threadPool);
}

public void testNewDirectory() throws IOException {