diff --git a/sdk/storage/azure-storage-blobs/CHANGELOG.md b/sdk/storage/azure-storage-blobs/CHANGELOG.md index 09c374db7..906f54a32 100644 --- a/sdk/storage/azure-storage-blobs/CHANGELOG.md +++ b/sdk/storage/azure-storage-blobs/CHANGELOG.md @@ -5,6 +5,7 @@ ### Breaking Changes - Renamed `HasMorePages()` in paged response to `HasPage()`. +- Default chunk size for concurrent upload was changed to nullable. ## 12.0.0-beta.10 (2021-04-16) diff --git a/sdk/storage/azure-storage-blobs/inc/azure/storage/blobs/blob_options.hpp b/sdk/storage/azure-storage-blobs/inc/azure/storage/blobs/blob_options.hpp index 712a75845..6c4042cfd 100644 --- a/sdk/storage/azure-storage-blobs/inc/azure/storage/blobs/blob_options.hpp +++ b/sdk/storage/azure-storage-blobs/inc/azure/storage/blobs/blob_options.hpp @@ -743,7 +743,7 @@ namespace Azure { namespace Storage { namespace Blobs { * @brief The maximum number of bytes in a single request. This value cannot be larger than * 4000 MiB. */ - int64_t ChunkSize = 4 * 1024 * 1024; + Azure::Nullable<int64_t> ChunkSize; /** * @brief The maximum number of threads that may be used in a parallel transfer. 
diff --git a/sdk/storage/azure-storage-blobs/src/block_blob_client.cpp b/sdk/storage/azure-storage-blobs/src/block_blob_client.cpp index eeba8f8f6..1f24a2236 100644 --- a/sdk/storage/azure-storage-blobs/src/block_blob_client.cpp +++ b/sdk/storage/azure-storage-blobs/src/block_blob_client.cpp @@ -110,9 +110,26 @@ namespace Azure { namespace Storage { namespace Blobs { const UploadBlockBlobFromOptions& options, const Azure::Core::Context& context) const { + constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL; constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL; + constexpr int64_t MaxBlockNumber = 50000; + constexpr int64_t BlockGrainSize = 1 * 1024 * 1024; - int64_t chunkSize = std::min(MaxStageBlockSize, options.TransferOptions.ChunkSize); + int64_t chunkSize; + if (options.TransferOptions.ChunkSize.HasValue()) + { + chunkSize = options.TransferOptions.ChunkSize.Value(); + } + else + { + int64_t minChunkSize = (bufferSize + MaxBlockNumber - 1) / MaxBlockNumber; + minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize; + chunkSize = std::max(DefaultStageBlockSize, minChunkSize); + } + if (chunkSize > MaxStageBlockSize) + { + throw Azure::Core::RequestFailedException("Block size is too big"); + } if (bufferSize <= static_cast<std::size_t>(options.TransferOptions.SingleUploadThreshold)) { @@ -172,7 +189,10 @@ namespace Azure { namespace Storage { namespace Blobs { const UploadBlockBlobFromOptions& options, const Azure::Core::Context& context) const { + constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL; constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL; + constexpr int64_t MaxBlockNumber = 50000; + constexpr int64_t BlockGrainSize = 1 * 1024 * 1024; { Azure::Core::IO::FileBodyStream contentStream(fileName); @@ -209,7 +229,21 @@ namespace Azure { namespace Storage { namespace Blobs { } }; - int64_t chunkSize = std::min(MaxStageBlockSize, options.TransferOptions.ChunkSize); + int64_t chunkSize; + if 
(options.TransferOptions.ChunkSize.HasValue()) + { + chunkSize = options.TransferOptions.ChunkSize.Value(); + } + else + { + int64_t minChunkSize = (fileReader.GetFileSize() + MaxBlockNumber - 1) / MaxBlockNumber; + minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize; + chunkSize = std::max(DefaultStageBlockSize, minChunkSize); + } + if (chunkSize > MaxStageBlockSize) + { + throw Azure::Core::RequestFailedException("Block size is too big"); + } _internal::ConcurrentTransfer( 0, diff --git a/sdk/storage/azure-storage-files-datalake/CHANGELOG.md b/sdk/storage/azure-storage-files-datalake/CHANGELOG.md index c4d6212bd..30c21cce4 100644 --- a/sdk/storage/azure-storage-files-datalake/CHANGELOG.md +++ b/sdk/storage/azure-storage-files-datalake/CHANGELOG.md @@ -5,6 +5,7 @@ ### Breaking Changes - Renamed `HasMorePages()` in paged response to `HasPage()`. +- Default chunk size for concurrent upload was changed to nullable. ## 12.0.0-beta.10 (2021-04-16) diff --git a/sdk/storage/azure-storage-files-datalake/inc/azure/storage/files/datalake/datalake_options.hpp b/sdk/storage/azure-storage-files-datalake/inc/azure/storage/files/datalake/datalake_options.hpp index 980605859..62e8fda7e 100644 --- a/sdk/storage/azure-storage-files-datalake/inc/azure/storage/files/datalake/datalake_options.hpp +++ b/sdk/storage/azure-storage-files-datalake/inc/azure/storage/files/datalake/datalake_options.hpp @@ -571,7 +571,7 @@ namespace Azure { namespace Storage { namespace Files { namespace DataLake { * @brief The maximum number of bytes in a single request. This value cannot be larger than * 4000 MiB. */ - int64_t ChunkSize = 4 * 1024 * 1024; + Azure::Nullable<int64_t> ChunkSize; /** * @brief The maximum number of threads that may be used in a parallel transfer.