[Storage Blobs Service] Only make one HTTP request if the blob size is small for Concurrent Upload API (#419)
* Only make one HTTP request if the blob size is small for Concurrent Upload API
This commit is contained in:
parent
455cf134b3
commit
cc0ff27eec
@ -115,6 +115,17 @@ namespace Azure { namespace Storage { namespace Blobs {
|
||||
chunkSize = (chunkSize + c_grainSize - 1) / c_grainSize * c_grainSize;
|
||||
}
|
||||
|
||||
if (bufferSize <= static_cast<std::size_t>(chunkSize))
|
||||
{
|
||||
Azure::Core::Http::MemoryBodyStream contentStream(buffer, bufferSize);
|
||||
UploadBlockBlobOptions uploadBlockBlobOptions;
|
||||
uploadBlockBlobOptions.Context = options.Context;
|
||||
uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
|
||||
uploadBlockBlobOptions.Metadata = options.Metadata;
|
||||
uploadBlockBlobOptions.Tier = options.Tier;
|
||||
return Upload(&contentStream, uploadBlockBlobOptions);
|
||||
}
|
||||
|
||||
std::vector<std::pair<BlockType, std::string>> blockIds;
|
||||
auto getBlockId = [](int64_t id) {
|
||||
constexpr std::size_t c_blockIdLength = 64;
|
||||
@ -175,6 +186,18 @@ namespace Azure { namespace Storage { namespace Blobs {
|
||||
chunkSize = (chunkSize + c_grainSize - 1) / c_grainSize * c_grainSize;
|
||||
}
|
||||
|
||||
if (fileReader.GetFileSize() <= chunkSize)
|
||||
{
|
||||
Azure::Core::Http::FileBodyStream contentStream(
|
||||
fileReader.GetHandle(), 0, fileReader.GetFileSize());
|
||||
UploadBlockBlobOptions uploadBlockBlobOptions;
|
||||
uploadBlockBlobOptions.Context = options.Context;
|
||||
uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
|
||||
uploadBlockBlobOptions.Metadata = options.Metadata;
|
||||
uploadBlockBlobOptions.Tier = options.Tier;
|
||||
return Upload(&contentStream, uploadBlockBlobOptions);
|
||||
}
|
||||
|
||||
std::vector<std::pair<BlockType, std::string>> blockIds;
|
||||
auto getBlockId = [](int64_t id) {
|
||||
constexpr std::size_t c_blockIdLength = 64;
|
||||
|
||||
@ -424,6 +424,47 @@ namespace Azure { namespace Storage { namespace Test {
|
||||
}
|
||||
}
|
||||
|
||||
// Uploading from a file path that does not exist must fail on the client
// side, and no blob may be created on the service as a result.
TEST_F(BlockBlobClientTest, ConcurrentUploadFromNonExistingFile)
{
  const std::string missingFilename = RandomString();
  auto client = Azure::Storage::Blobs::BlockBlobClient::CreateFromConnectionString(
      StandardStorageConnectionString(), m_containerName, RandomString());
  // Opening the nonexistent file throws locally before any request is sent.
  EXPECT_THROW(client.UploadFromFile(missingFilename), std::runtime_error);
  // Since nothing was uploaded, deleting the blob fails on the service side.
  EXPECT_THROW(client.Delete(), StorageError);
}
|
||||
|
||||
// Downloading a blob that was never uploaded must surface a StorageError for
// both the buffer-based and the file-based concurrent download paths.
TEST_F(BlockBlobClientTest, ConcurrentDownloadNonExistingBlob)
{
  auto client = Azure::Storage::Blobs::BlockBlobClient::CreateFromConnectionString(
      StandardStorageConnectionString(), m_containerName, RandomString());
  const std::string destFilename = RandomString();
  std::vector<uint8_t> destBuffer(100);

  EXPECT_THROW(client.DownloadToBuffer(destBuffer.data(), destBuffer.size()), StorageError);
  EXPECT_THROW(client.DownloadToFile(destFilename), StorageError);
  // Remove any partial file the failed download may have left behind.
  DeleteFile(destFilename);
}
|
||||
|
||||
// A zero-length source — whether an empty buffer or an empty file on disk —
// must upload successfully and produce a deletable blob.
TEST_F(BlockBlobClientTest, ConcurrentUploadEmptyBlob)
{
  auto client = Azure::Storage::Blobs::BlockBlobClient::CreateFromConnectionString(
      StandardStorageConnectionString(), m_containerName, RandomString());

  // Empty in-memory buffer.
  std::vector<uint8_t> noContent;
  client.UploadFromBuffer(noContent.data(), noContent.size());
  EXPECT_NO_THROW(client.Delete());

  // Empty file: create it via FileWriter, closing the handle when the
  // writer goes out of scope, then upload the zero-byte file.
  const std::string zeroByteFilename = RandomString();
  {
    Details::FileWriter writer(zeroByteFilename);
  }
  client.UploadFromFile(zeroByteFilename);
  EXPECT_NO_THROW(client.Delete());

  DeleteFile(zeroByteFilename);
}
|
||||
|
||||
TEST_F(BlockBlobClientTest, ConcurrentDownloadEmptyBlob)
|
||||
{
|
||||
std::string tempFilename = RandomString();
|
||||
@ -544,6 +585,7 @@ namespace Azure { namespace Storage { namespace Test {
|
||||
options.ChunkSize = 1_MB;
|
||||
options.Concurrency = c;
|
||||
options.HttpHeaders = m_blobUploadOptions.HttpHeaders;
|
||||
options.HttpHeaders.ContentMd5.clear();
|
||||
options.Metadata = m_blobUploadOptions.Metadata;
|
||||
options.Tier = m_blobUploadOptions.Tier;
|
||||
{
|
||||
@ -552,9 +594,8 @@ namespace Azure { namespace Storage { namespace Test {
|
||||
EXPECT_FALSE(res->ETag.empty());
|
||||
EXPECT_FALSE(res->LastModified.empty());
|
||||
EXPECT_FALSE(res->SequenceNumber.HasValue());
|
||||
EXPECT_FALSE(res->ContentCrc64.HasValue());
|
||||
EXPECT_FALSE(res->ContentMd5.HasValue());
|
||||
auto properties = *blockBlobClient.GetProperties();
|
||||
properties.HttpHeaders.ContentMd5.clear();
|
||||
EXPECT_EQ(properties.ContentLength, length);
|
||||
EXPECT_EQ(properties.HttpHeaders, options.HttpHeaders);
|
||||
EXPECT_EQ(properties.Metadata, options.Metadata);
|
||||
@ -578,9 +619,8 @@ namespace Azure { namespace Storage { namespace Test {
|
||||
EXPECT_FALSE(res->ETag.empty());
|
||||
EXPECT_FALSE(res->LastModified.empty());
|
||||
EXPECT_FALSE(res->SequenceNumber.HasValue());
|
||||
EXPECT_FALSE(res->ContentCrc64.HasValue());
|
||||
EXPECT_FALSE(res->ContentMd5.HasValue());
|
||||
auto properties = *blockBlobClient.GetProperties();
|
||||
properties.HttpHeaders.ContentMd5.clear();
|
||||
EXPECT_EQ(properties.ContentLength, length);
|
||||
EXPECT_EQ(properties.HttpHeaders, options.HttpHeaders);
|
||||
EXPECT_EQ(properties.Metadata, options.Metadata);
|
||||
|
||||
Loading…
Reference in New Issue
Block a user