Skip to content

Commit

Permalink
Fix flaky test in RetryableS3OutputStreamTest (#16639)
Browse files Browse the repository at this point in the history
As part of #16481, we have started uploading the chunks in parallel.
That means the part that finishes uploading last is no longer guaranteed to be
the final, possibly smaller, part of the file (the final part could have been uploaded earlier).

This made a test in RetryableS3OutputStreamTest flaky: it asserted that only the
last-recorded part could be smaller than the chunk size.

This commit fixes the test, and also adds another test where the file size
is an exact multiple of the chunk size, so that all uploaded chunks are equally sized.
  • Loading branch information
Akshat-Jain authored Jun 24, 2024
1 parent 00c9643 commit 641f739
Showing 1 changed file with 34 additions and 35 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -115,12 +115,8 @@ public void testWriteAndHappy() throws IOException
{
chunkSize = 10;
ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES);
try (RetryableS3OutputStream out = new RetryableS3OutputStream(
config,
s3,
path,
s3UploadManager
)) {
try (RetryableS3OutputStream out =
new RetryableS3OutputStream(config, s3, path, s3UploadManager)) {
for (int i = 0; i < 25; i++) {
bb.clear();
bb.putInt(i);
Expand All @@ -137,12 +133,8 @@ public void testWriteSizeLargerThanConfiguredMaxChunkSizeShouldSucceed() throws
{
chunkSize = 10;
ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES * 3);
try (RetryableS3OutputStream out = new RetryableS3OutputStream(
config,
s3,
path,
s3UploadManager
)) {
try (RetryableS3OutputStream out =
new RetryableS3OutputStream(config, s3, path, s3UploadManager)) {
bb.clear();
bb.putInt(1);
bb.putInt(2);
Expand All @@ -158,12 +150,8 @@ public void testWriteSizeLargerThanConfiguredMaxChunkSizeShouldSucceed() throws
public void testWriteSmallBufferShouldSucceed() throws IOException
{
chunkSize = 128;
try (RetryableS3OutputStream out = new RetryableS3OutputStream(
config,
s3,
path,
s3UploadManager
)) {
try (RetryableS3OutputStream out =
new RetryableS3OutputStream(config, s3, path, s3UploadManager)) {
for (int i = 0; i < 600; i++) {
out.write(i);
}
Expand All @@ -173,19 +161,31 @@ public void testWriteSmallBufferShouldSucceed() throws IOException
s3.assertCompleted(chunkSize, 600);
}

@Test
public void testWriteSmallBufferExactChunkSizeShouldSucceed() throws IOException
{
  chunkSize = 128;
  final int numChunks = 5;
  // File size divides evenly into chunks, so no smaller trailing part is expected.
  final int fileSize = 128 * numChunks;
  try (RetryableS3OutputStream out =
      new RetryableS3OutputStream(config, s3, path, s3UploadManager)) {
    // Feed the stream one byte at a time and let it do its own buffering.
    int written = 0;
    while (written < fileSize) {
      out.write(written);
      written++;
    }
  }
  // Each chunk is 128 bytes, so exactly numChunks (5) parts must have been uploaded.
  Assert.assertEquals(numChunks, s3.partRequests.size());
  s3.assertCompleted(chunkSize, fileSize);
}

@Test
public void testSuccessToUploadAfterRetry() throws IOException
{
final TestAmazonS3 s3 = new TestAmazonS3(1);

chunkSize = 10;
ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES);
try (RetryableS3OutputStream out = new RetryableS3OutputStream(
config,
s3,
path,
s3UploadManager
)) {
try (RetryableS3OutputStream out =
new RetryableS3OutputStream(config, s3, path, s3UploadManager)) {
for (int i = 0; i < 25; i++) {
bb.clear();
bb.putInt(i);
Expand All @@ -203,12 +203,8 @@ public void testFailToUploadAfterRetries() throws IOException
final TestAmazonS3 s3 = new TestAmazonS3(3);

ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES);
try (RetryableS3OutputStream out = new RetryableS3OutputStream(
config,
s3,
path,
s3UploadManager
)) {
try (RetryableS3OutputStream out =
new RetryableS3OutputStream(config, s3, path, s3UploadManager)) {
for (int i = 0; i < 2; i++) {
bb.clear();
bb.putInt(i);
Expand Down Expand Up @@ -286,13 +282,16 @@ private void assertCompleted(long chunkSize, long expectedFileSize)
Set<Integer> partNumbersFromRequest = partRequests.stream().map(UploadPartRequest::getPartNumber).collect(Collectors.toSet());
Assert.assertEquals(partRequests.size(), partNumbersFromRequest.size());

for (int i = 0; i < partRequests.size(); i++) {
if (i < partRequests.size() - 1) {
Assert.assertEquals(chunkSize, partRequests.get(i).getPartSize());
} else {
Assert.assertTrue(chunkSize >= partRequests.get(i).getPartSize());
// Verify sizes of uploaded chunks
int numSmallerChunks = 0;
for (UploadPartRequest part : partRequests) {
Assert.assertTrue(part.getPartSize() <= chunkSize);
if (part.getPartSize() < chunkSize) {
++numSmallerChunks;
}
}
Assert.assertTrue(numSmallerChunks <= 1);

final List<PartETag> eTags = completeRequest.getPartETags();
Assert.assertEquals(partRequests.size(), eTags.size());
Assert.assertEquals(
Expand Down

0 comments on commit 641f739

Please sign in to comment.