add TransferOptions into download and upload options (#1503)

* transfer options

* changelog

* fix build error

* clang-format

* fix typo

* fix crash
This commit is contained in:
JinmingHu 2021-01-28 23:46:15 +08:00 committed by GitHub
parent 836a8a591a
commit ee156b6505
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 218 additions and 210 deletions

View File

@ -21,6 +21,8 @@
- Removed `PreviousContinuationToken` from `ListBlobContainersSinglePageResult`, `ListBlobsByHierarchySinglePageResult` and `ListBlobsSinglePageResult`.
- `ListBlobContainersIncludeItem` was renamed to `ListBlobContainersIncludeFlags`.
- `ListBlobsIncludeItem` was renamed to `ListBlobsIncludeFlags`.
- `Concurrency`, `ChunkSize` and `InitialChunkSize` were moved into `DownloadBlobToOptions::TransferOptions`.
- `Concurrency`, `ChunkSize` and `SingleUploadThreshold` were moved into `UploadBlockBlobFromOptions::TransferOptions`.
- Removed `TagValue` from `FilterBlobItem`, removed `Where` from `FindBlobsByTagsSinglePageResult`.
- Type for ETag was changed to `Azure::Core::ETag`.
- Removed `BlobPrefix` struct, use `std::string` instead.

View File

@ -631,22 +631,25 @@ namespace Azure { namespace Storage { namespace Blobs {
*/
Azure::Core::Nullable<Core::Http::Range> Range;
/**
* @brief The size of the first range request in bytes. Blobs smaller than this limit will be
* downloaded in a single request. Blobs larger than this limit will continue being downloaded
* in chunks of size ChunkSize.
*/
Azure::Core::Nullable<int64_t> InitialChunkSize;
struct
{
/**
* @brief The size of the first range request in bytes. Blobs smaller than this limit will be
* downloaded in a single request. Blobs larger than this limit will continue being downloaded
* in chunks of size ChunkSize.
*/
int64_t InitialChunkSize = 256 * 1024 * 1024;
/**
* @brief The maximum number of bytes in a single request.
*/
Azure::Core::Nullable<int64_t> ChunkSize;
/**
* @brief The maximum number of bytes in a single request.
*/
int64_t ChunkSize = 4 * 1024 * 1024;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
} TransferOptions;
};
/**
@ -891,15 +894,25 @@ namespace Azure { namespace Storage { namespace Blobs {
*/
Azure::Core::Nullable<Models::AccessTier> Tier;
/**
* @brief The maximum number of bytes in a single request.
*/
Azure::Core::Nullable<int64_t> ChunkSize;
struct
{
/**
* @brief Blob smaller than this will be uploaded with a single upload operation. This value
* cannot be larger than 5000 MiB.
*/
int64_t SingleUploadThreshold = 256 * 1024 * 1024;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
/**
* @brief The maximum number of bytes in a single request. This value cannot be larger than
* 4000 MiB.
*/
int64_t ChunkSize = 4 * 1024 * 1024;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
} TransferOptions;
};
/**

View File

@ -216,17 +216,11 @@ namespace Azure { namespace Storage { namespace Blobs {
std::size_t bufferSize,
const DownloadBlobToOptions& options) const
{
constexpr int64_t DefaultChunkSize = 4 * 1024 * 1024;
// Just start downloading using an initial chunk. If it's a small blob, we'll get the whole
// thing in one shot. If it's a large blob, we'll get its full size in Content-Range and can
// keep downloading it in chunks.
int64_t firstChunkOffset = options.Range.HasValue() ? options.Range.GetValue().Offset : 0;
int64_t firstChunkLength = DefaultChunkSize;
if (options.InitialChunkSize.HasValue())
{
firstChunkLength = options.InitialChunkSize.GetValue();
}
int64_t firstChunkLength = options.TransferOptions.InitialChunkSize;
if (options.Range.HasValue() && options.Range.GetValue().Length.HasValue())
{
firstChunkLength = std::min(firstChunkLength, options.Range.GetValue().Length.GetValue());
@ -319,21 +313,13 @@ namespace Azure { namespace Storage { namespace Blobs {
int64_t remainingOffset = firstChunkOffset + firstChunkLength;
int64_t remainingSize = blobRangeSize - firstChunkLength;
int64_t chunkSize;
if (options.ChunkSize.HasValue())
{
chunkSize = options.ChunkSize.GetValue();
}
else
{
int64_t GrainSize = 4 * 1024;
chunkSize = remainingSize / options.Concurrency;
chunkSize = (std::max(chunkSize, int64_t(1)) + GrainSize - 1) / GrainSize * GrainSize;
chunkSize = std::min(chunkSize, DefaultChunkSize);
}
Storage::Details::ConcurrentTransfer(
remainingOffset, remainingSize, chunkSize, options.Concurrency, downloadChunkFunc);
remainingOffset,
remainingSize,
options.TransferOptions.ChunkSize,
options.TransferOptions.Concurrency,
downloadChunkFunc);
ret->ContentLength = blobRangeSize;
return ret;
}
@ -342,17 +328,11 @@ namespace Azure { namespace Storage { namespace Blobs {
const std::string& fileName,
const DownloadBlobToOptions& options) const
{
constexpr int64_t DefaultChunkSize = 4 * 1024 * 1024;
// Just start downloading using an initial chunk. If it's a small blob, we'll get the whole
// thing in one shot. If it's a large blob, we'll get its full size in Content-Range and can
// keep downloading it in chunks.
int64_t firstChunkOffset = options.Range.HasValue() ? options.Range.GetValue().Offset : 0;
int64_t firstChunkLength = DefaultChunkSize;
if (options.InitialChunkSize.HasValue())
{
firstChunkLength = options.InitialChunkSize.GetValue();
}
int64_t firstChunkLength = options.TransferOptions.InitialChunkSize;
if (options.Range.HasValue() && options.Range.GetValue().Length.HasValue())
{
firstChunkLength = std::min(firstChunkLength, options.Range.GetValue().Length.GetValue());
@ -456,21 +436,13 @@ namespace Azure { namespace Storage { namespace Blobs {
int64_t remainingOffset = firstChunkOffset + firstChunkLength;
int64_t remainingSize = blobRangeSize - firstChunkLength;
int64_t chunkSize;
if (options.ChunkSize.HasValue())
{
chunkSize = options.ChunkSize.GetValue();
}
else
{
int64_t GrainSize = 4 * 1024;
chunkSize = remainingSize / options.Concurrency;
chunkSize = (std::max(chunkSize, int64_t(1)) + GrainSize - 1) / GrainSize * GrainSize;
chunkSize = std::min(chunkSize, DefaultChunkSize);
}
Storage::Details::ConcurrentTransfer(
remainingOffset, remainingSize, chunkSize, options.Concurrency, downloadChunkFunc);
remainingOffset,
remainingSize,
options.TransferOptions.ChunkSize,
options.TransferOptions.Concurrency,
downloadChunkFunc);
ret->ContentLength = blobRangeSize;
return ret;
}

View File

@ -107,23 +107,11 @@ namespace Azure { namespace Storage { namespace Blobs {
std::size_t bufferSize,
const UploadBlockBlobFromOptions& options) const
{
constexpr int64_t DefaultBlockSize = 8 * 1024 * 1024;
constexpr int64_t MaximumNumberBlocks = 50000;
constexpr int64_t GrainSize = 4 * 1024;
constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
int64_t chunkSize = DefaultBlockSize;
if (options.ChunkSize.HasValue())
{
chunkSize = options.ChunkSize.GetValue();
}
else
{
int64_t minBlockSize = (bufferSize + MaximumNumberBlocks - 1) / MaximumNumberBlocks;
chunkSize = std::max(chunkSize, minBlockSize);
chunkSize = (chunkSize + GrainSize - 1) / GrainSize * GrainSize;
}
int64_t chunkSize = std::min(MaxStageBlockSize, options.TransferOptions.ChunkSize);
if (bufferSize <= static_cast<std::size_t>(chunkSize))
if (bufferSize <= static_cast<std::size_t>(options.TransferOptions.SingleUploadThreshold))
{
Azure::Core::Http::MemoryBodyStream contentStream(buffer, bufferSize);
UploadBlockBlobOptions uploadBlockBlobOptions;
@ -154,7 +142,7 @@ namespace Azure { namespace Storage { namespace Blobs {
};
Storage::Details::ConcurrentTransfer(
0, bufferSize, chunkSize, options.Concurrency, uploadBlockFunc);
0, bufferSize, chunkSize, options.TransferOptions.Concurrency, uploadBlockFunc);
for (std::size_t i = 0; i < blockIds.size(); ++i)
{
@ -182,26 +170,13 @@ namespace Azure { namespace Storage { namespace Blobs {
const std::string& fileName,
const UploadBlockBlobFromOptions& options) const
{
constexpr int64_t DefaultBlockSize = 8 * 1024 * 1024;
constexpr int64_t MaximumNumberBlocks = 50000;
constexpr int64_t GrainSize = 4 * 1024;
constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
Storage::Details::FileReader fileReader(fileName);
int64_t chunkSize = DefaultBlockSize;
if (options.ChunkSize.HasValue())
{
chunkSize = options.ChunkSize.GetValue();
}
else
{
int64_t minBlockSize
= (fileReader.GetFileSize() + MaximumNumberBlocks - 1) / MaximumNumberBlocks;
chunkSize = std::max(chunkSize, minBlockSize);
chunkSize = (chunkSize + GrainSize - 1) / GrainSize * GrainSize;
}
int64_t chunkSize = std::min(MaxStageBlockSize, options.TransferOptions.ChunkSize);
if (fileReader.GetFileSize() <= chunkSize)
if (fileReader.GetFileSize() <= options.TransferOptions.SingleUploadThreshold)
{
Azure::Core::Http::FileBodyStream contentStream(
fileReader.GetHandle(), 0, fileReader.GetFileSize());
@ -233,7 +208,11 @@ namespace Azure { namespace Storage { namespace Blobs {
};
Storage::Details::ConcurrentTransfer(
0, fileReader.GetFileSize(), chunkSize, options.Concurrency, uploadBlockFunc);
0,
fileReader.GetFileSize(),
chunkSize,
options.TransferOptions.Concurrency,
uploadBlockFunc);
for (std::size_t i = 0; i < blockIds.size(); ++i)
{

View File

@ -368,15 +368,21 @@ namespace Azure { namespace Storage { namespace Test {
}
downloadBuffer.resize(static_cast<std::size_t>(downloadSize), '\x00');
Blobs::DownloadBlobToOptions options;
options.Concurrency = concurrency;
options.TransferOptions.Concurrency = concurrency;
if (offset.HasValue() || length.HasValue())
{
options.Range = Core::Http::Range();
options.Range.GetValue().Offset = offset.GetValue();
options.Range.GetValue().Length = length;
}
options.InitialChunkSize = initialChunkSize;
options.ChunkSize = chunkSize;
if (initialChunkSize.HasValue())
{
options.TransferOptions.InitialChunkSize = initialChunkSize.GetValue();
}
if (chunkSize.HasValue())
{
options.TransferOptions.ChunkSize = chunkSize.GetValue();
}
if (actualDownloadSize > 0)
{
auto res
@ -432,15 +438,21 @@ namespace Azure { namespace Storage { namespace Test {
}
}
Blobs::DownloadBlobToOptions options;
options.Concurrency = concurrency;
options.TransferOptions.Concurrency = concurrency;
if (offset.HasValue() || length.HasValue())
{
options.Range = Core::Http::Range();
options.Range.GetValue().Offset = offset.GetValue();
options.Range.GetValue().Length = length;
}
options.InitialChunkSize = initialChunkSize;
options.ChunkSize = chunkSize;
if (initialChunkSize.HasValue())
{
options.TransferOptions.InitialChunkSize = initialChunkSize.GetValue();
}
if (chunkSize.HasValue())
{
options.TransferOptions.ChunkSize = chunkSize.GetValue();
}
if (actualDownloadSize > 0)
{
auto res = m_blockBlobClient->DownloadTo(tempFilename, options);
@ -511,7 +523,7 @@ namespace Azure { namespace Storage { namespace Test {
// buffer not big enough
Blobs::DownloadBlobToOptions options;
options.Concurrency = c;
options.TransferOptions.Concurrency = c;
options.Range = Core::Http::Range();
options.Range.GetValue().Offset = 1;
for (int64_t length : {1ULL, 2ULL, 4_KB, 5_KB, 8_KB, 11_KB, 20_KB})
@ -622,9 +634,9 @@ namespace Azure { namespace Storage { namespace Test {
for (int c : {1, 2})
{
Azure::Storage::Blobs::DownloadBlobToOptions options;
options.InitialChunkSize = 10;
options.ChunkSize = 10;
options.Concurrency = c;
options.TransferOptions.InitialChunkSize = 10;
options.TransferOptions.ChunkSize = 10;
options.TransferOptions.Concurrency = c;
res = blockBlobClient.DownloadTo(
emptyContent.data(), static_cast<std::size_t>(8_MB), options);
@ -682,8 +694,8 @@ namespace Azure { namespace Storage { namespace Test {
auto blockBlobClient = m_blobContainerClient->GetBlockBlobClient(RandomString());
Azure::Storage::Blobs::UploadBlockBlobFromOptions options;
options.ChunkSize = 1_MB;
options.Concurrency = concurrency;
options.TransferOptions.ChunkSize = 1_MB;
options.TransferOptions.Concurrency = concurrency;
options.HttpHeaders = m_blobUploadOptions.HttpHeaders;
options.HttpHeaders.ContentHash.Value.clear();
options.Metadata = m_blobUploadOptions.Metadata;
@ -712,8 +724,8 @@ namespace Azure { namespace Storage { namespace Test {
auto blockBlobClient = m_blobContainerClient->GetBlockBlobClient(RandomString());
Azure::Storage::Blobs::UploadBlockBlobFromOptions options;
options.ChunkSize = 1_MB;
options.Concurrency = concurrency;
options.TransferOptions.ChunkSize = 1_MB;
options.TransferOptions.Concurrency = concurrency;
options.HttpHeaders = m_blobUploadOptions.HttpHeaders;
options.HttpHeaders.ContentHash.Value.clear();
options.Metadata = m_blobUploadOptions.Metadata;

View File

@ -433,9 +433,9 @@ namespace Azure { namespace Storage { namespace Test {
std::string downloadBuffer;
downloadBuffer.resize(std::max(primaryContent.size(), secondaryContent.size()));
Blobs::DownloadBlobToOptions options;
options.InitialChunkSize = 2;
options.ChunkSize = 2;
options.Concurrency = 1;
options.TransferOptions.InitialChunkSize = 2;
options.TransferOptions.ChunkSize = 2;
options.TransferOptions.Concurrency = 1;
blobClient.DownloadTo(
reinterpret_cast<uint8_t*>(&downloadBuffer[0]),
static_cast<int64_t>(downloadBuffer.size()),

View File

@ -34,6 +34,8 @@
- Changed all previous `LeaseDuration` members to a new type named `LeaseDurationType`.
- `startsOn` parameter for `GetUserDelegationKey` was changed to optional.
- Removed `PreviousContinuationToken` from `ListFileSystemsSinglePageResult`.
- `Concurrency`, `ChunkSize` and `InitialChunkSize` were moved into `DownloadDataLakeFileToOptions::TransferOptions`.
- `Concurrency`, `ChunkSize` and `SingleUploadThreshold` were moved into `UploadDataLakeFileFromOptions::TransferOptions`.
- Removed `Rename` from `DataLakeDirectoryClient` and `DataLakeFileClient`. Instead, added `RenameFile` and `RenameSubdirectory` to `DataLakeDirectoryClient` and added `RenameFile` and `RenameDirectory` to `DataLakeFileSystemClient`.
- Rename APIs now return the client of the resource it is renaming to.
- Removed `Mode` for rename operations' options, that originally controls the rename mode. Now it is fixed to legacy mode.

View File

@ -741,15 +741,25 @@ namespace Azure { namespace Storage { namespace Files { namespace DataLake {
*/
Storage::Metadata Metadata;
/**
* @brief The maximum number of bytes in a single request.
*/
Azure::Core::Nullable<int64_t> ChunkSize;
struct
{
/**
* @brief File smaller than this will be uploaded with a single upload operation. This value
* cannot be larger than 5000 MiB.
*/
int64_t SingleUploadThreshold = 256 * 1024 * 1024;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
/**
* @brief The maximum number of bytes in a single request. This value cannot be larger than
* 4000 MiB.
*/
int64_t ChunkSize = 4 * 1024 * 1024;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
} TransferOptions;
};
using ScheduleDataLakeFileExpiryOriginType = Blobs::Models::ScheduleBlobExpiryOriginType;

View File

@ -323,10 +323,12 @@ namespace Azure { namespace Storage { namespace Files { namespace DataLake {
{
Blobs::UploadBlockBlobFromOptions blobOptions;
blobOptions.Context = options.Context;
blobOptions.ChunkSize = options.ChunkSize;
blobOptions.TransferOptions.SingleUploadThreshold
= options.TransferOptions.SingleUploadThreshold;
blobOptions.TransferOptions.ChunkSize = options.TransferOptions.ChunkSize;
blobOptions.TransferOptions.Concurrency = options.TransferOptions.Concurrency;
blobOptions.HttpHeaders = FromPathHttpHeaders(options.HttpHeaders);
blobOptions.Metadata = options.Metadata;
blobOptions.Concurrency = options.Concurrency;
return m_blockBlobClient.UploadFrom(fileName, blobOptions);
}
@ -337,10 +339,12 @@ namespace Azure { namespace Storage { namespace Files { namespace DataLake {
{
Blobs::UploadBlockBlobFromOptions blobOptions;
blobOptions.Context = options.Context;
blobOptions.ChunkSize = options.ChunkSize;
blobOptions.TransferOptions.SingleUploadThreshold
= options.TransferOptions.SingleUploadThreshold;
blobOptions.TransferOptions.ChunkSize = options.TransferOptions.ChunkSize;
blobOptions.TransferOptions.Concurrency = options.TransferOptions.Concurrency;
blobOptions.HttpHeaders = FromPathHttpHeaders(options.HttpHeaders);
blobOptions.Metadata = options.Metadata;
blobOptions.Concurrency = options.Concurrency;
return m_blockBlobClient.UploadFrom(buffer, bufferSize, blobOptions);
}

View File

@ -460,8 +460,8 @@ namespace Azure { namespace Storage { namespace Test {
auto fileClient = m_fileSystemClient->GetFileClient(RandomString());
Azure::Storage::Files::DataLake::UploadDataLakeFileFromOptions options;
options.ChunkSize = 1_MB;
options.Concurrency = concurrency;
options.TransferOptions.ChunkSize = 1_MB;
options.TransferOptions.Concurrency = concurrency;
options.HttpHeaders = GetInterestingHttpHeaders();
options.Metadata = RandomMetadata();
auto res
@ -489,8 +489,8 @@ namespace Azure { namespace Storage { namespace Test {
auto fileClient = m_fileSystemClient->GetFileClient(RandomString());
Azure::Storage::Files::DataLake::UploadDataLakeFileFromOptions options;
options.ChunkSize = 1_MB;
options.Concurrency = concurrency;
options.TransferOptions.ChunkSize = 1_MB;
options.TransferOptions.Concurrency = concurrency;
options.HttpHeaders = GetInterestingHttpHeaders();
options.Metadata = RandomMetadata();

View File

@ -16,6 +16,8 @@
- Added `RequestId` in each return types for REST API calls, except for concurrent APIs.
- Removed `PreviousContinuationToken` from `ListFilesAndDirectoriesSinglePageResult` and `ListSharesSinglePageResult`.
- Removed `c_` for constants: `c_FileDefaultTimeValue`, `c_FileCopySourceTime`, `c_FileInheritPermission`, `FilePreserveSmbProperties` and `FileAllHandles`.
- `Concurrency`, `ChunkSize` and `InitialChunkSize` were moved into `DownloadShareFileToOptions::TransferOptions`.
- `Concurrency`, `ChunkSize` and `SingleUploadThreshold` were moved into `UploadShareFileFromOptions::TransferOptions`.
### Other Changes and Improvements

View File

@ -13,8 +13,6 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
constexpr static const char* FileAllHandles = "*";
namespace Details {
constexpr int64_t FileUploadDefaultChunkSize = 4 * 1024 * 1024;
constexpr int64_t FileDownloadDefaultChunkSize = 4 * 1024 * 1024;
constexpr static const char* ShareSnapshotQueryParameter = "sharesnapshot";
// Error codes:

View File

@ -750,22 +750,25 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
*/
Azure::Core::Nullable<Core::Http::Range> Range;
/**
* @brief The size of the first range request in bytes. Files smaller than this limit will be
* downloaded in a single request. Files larger than this limit will continue being downloaded
* in chunks of size ChunkSize.
*/
Azure::Core::Nullable<int64_t> InitialChunkSize;
struct
{
/**
* @brief The size of the first range request in bytes. Files smaller than this limit will be
* downloaded in a single request. Files larger than this limit will continue being downloaded
* in chunks of size ChunkSize.
*/
int64_t InitialChunkSize = 256 * 1024 * 1024;
/**
* @brief The maximum number of bytes in a single request.
*/
Azure::Core::Nullable<int64_t> ChunkSize;
/**
* @brief The maximum number of bytes in a single request.
*/
int64_t ChunkSize = 4 * 1024 * 1024;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
} TransferOptions;
};
/**
@ -788,11 +791,6 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
*/
Storage::Metadata Metadata;
/**
* @brief The maximum number of bytes in a single request.
*/
Azure::Core::Nullable<int64_t> ChunkSize;
/**
* @brief SMB properties to set for the destination file.
*/
@ -806,9 +804,23 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
*/
Azure::Core::Nullable<std::string> FilePermission;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
struct
{
/**
* @brief File smaller than this will be uploaded with a single upload operation. This value
* cannot be larger than 4 MiB.
*/
int64_t SingleUploadThreshold = 4 * 1024 * 1024;
/**
* @brief The maximum number of bytes in a single request.
*/
int64_t ChunkSize = 4 * 1024 * 1024;
/**
* @brief The maximum number of threads that may be used in a parallel transfer.
*/
int Concurrency = 5;
} TransferOptions;
};
}}}} // namespace Azure::Storage::Files::Shares

View File

@ -580,11 +580,8 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
// thing in one shot. If it's a large file, we'll get its full size in Content-Range and can
// keep downloading it in chunks.
int64_t firstChunkOffset = options.Range.HasValue() ? options.Range.GetValue().Offset : 0;
int64_t firstChunkLength = Details::FileDownloadDefaultChunkSize;
if (options.InitialChunkSize.HasValue())
{
firstChunkLength = options.InitialChunkSize.GetValue();
}
int64_t firstChunkLength = options.TransferOptions.InitialChunkSize;
if (options.Range.HasValue() && options.Range.GetValue().Length.HasValue())
{
firstChunkLength = std::min(firstChunkLength, options.Range.GetValue().Length.GetValue());
@ -672,21 +669,13 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
int64_t remainingOffset = firstChunkOffset + firstChunkLength;
int64_t remainingSize = fileRangeSize - firstChunkLength;
int64_t chunkSize;
if (options.ChunkSize.HasValue())
{
chunkSize = options.ChunkSize.GetValue();
}
else
{
int64_t GrainSize = 4 * 1024;
chunkSize = remainingSize / options.Concurrency;
chunkSize = (std::max(chunkSize, int64_t(1)) + GrainSize - 1) / GrainSize * GrainSize;
chunkSize = std::min(chunkSize, Details::FileDownloadDefaultChunkSize);
}
Storage::Details::ConcurrentTransfer(
remainingOffset, remainingSize, chunkSize, options.Concurrency, downloadChunkFunc);
remainingOffset,
remainingSize,
options.TransferOptions.ChunkSize,
options.TransferOptions.Concurrency,
downloadChunkFunc);
ret->ContentLength = fileRangeSize;
return ret;
}
@ -699,11 +688,7 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
// thing in one shot. If it's a large file, we'll get its full size in Content-Range and can
// keep downloading it in chunks.
int64_t firstChunkOffset = options.Range.HasValue() ? options.Range.GetValue().Offset : 0;
int64_t firstChunkLength = Details::FileDownloadDefaultChunkSize;
if (options.InitialChunkSize.HasValue())
{
firstChunkLength = options.InitialChunkSize.GetValue();
}
int64_t firstChunkLength = options.TransferOptions.InitialChunkSize;
if (options.Range.HasValue() && options.Range.GetValue().Length.HasValue())
{
firstChunkLength = std::min(firstChunkLength, options.Range.GetValue().Length.GetValue());
@ -802,21 +787,13 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
int64_t remainingOffset = firstChunkOffset + firstChunkLength;
int64_t remainingSize = fileRangeSize - firstChunkLength;
int64_t chunkSize;
if (options.ChunkSize.HasValue())
{
chunkSize = options.ChunkSize.GetValue();
}
else
{
int64_t GrainSize = 4 * 1024;
chunkSize = remainingSize / options.Concurrency;
chunkSize = (std::max(chunkSize, int64_t(1)) + GrainSize - 1) / GrainSize * GrainSize;
chunkSize = std::min(chunkSize, Details::FileDownloadDefaultChunkSize);
}
Storage::Details::ConcurrentTransfer(
remainingOffset, remainingSize, chunkSize, options.Concurrency, downloadChunkFunc);
remainingOffset,
remainingSize,
options.TransferOptions.ChunkSize,
options.TransferOptions.Concurrency,
downloadChunkFunc);
ret->ContentLength = fileRangeSize;
return ret;
}
@ -900,9 +877,6 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
auto createResult = Details::ShareRestClient::File::Create(
m_shareFileUrl, *m_pipeline, options.Context, protocolLayerOptions);
int64_t chunkSize = options.ChunkSize.HasValue() ? options.ChunkSize.GetValue()
: Details::FileUploadDefaultChunkSize;
auto uploadPageFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
unused(chunkId, numChunks);
Azure::Core::Http::MemoryBodyStream contentStream(buffer + offset, length);
@ -911,8 +885,17 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
UploadRange(offset, &contentStream, uploadRangeOptions);
};
Storage::Details::ConcurrentTransfer(
0, bufferSize, chunkSize, options.Concurrency, uploadPageFunc);
int64_t chunkSize = options.TransferOptions.ChunkSize;
if (bufferSize < static_cast<std::size_t>(options.TransferOptions.SingleUploadThreshold))
{
chunkSize = bufferSize;
}
if (bufferSize > 0)
{
Storage::Details::ConcurrentTransfer(
0, bufferSize, chunkSize, options.TransferOptions.Concurrency, uploadPageFunc);
}
Models::UploadShareFileFromResult result;
result.IsServerEncrypted = createResult->IsServerEncrypted;
@ -1000,9 +983,6 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
auto createResult = Details::ShareRestClient::File::Create(
m_shareFileUrl, *m_pipeline, options.Context, protocolLayerOptions);
int64_t chunkSize = options.ChunkSize.HasValue() ? options.ChunkSize.GetValue()
: Details::FileUploadDefaultChunkSize;
auto uploadPageFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
unused(chunkId, numChunks);
Azure::Core::Http::FileBodyStream contentStream(fileReader.GetHandle(), offset, length);
@ -1011,8 +991,18 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
UploadRange(offset, &contentStream, uploadRangeOptions);
};
Storage::Details::ConcurrentTransfer(
0, fileReader.GetFileSize(), chunkSize, options.Concurrency, uploadPageFunc);
const int64_t fileSize = fileReader.GetFileSize();
int64_t chunkSize = options.TransferOptions.ChunkSize;
if (fileSize < options.TransferOptions.SingleUploadThreshold)
{
chunkSize = fileSize;
}
if (fileSize > 0)
{
Storage::Details::ConcurrentTransfer(
0, fileSize, chunkSize, options.TransferOptions.Concurrency, uploadPageFunc);
}
Models::UploadShareFileFromResult result;
result.IsServerEncrypted = createResult->IsServerEncrypted;

View File

@ -294,8 +294,8 @@ namespace Azure { namespace Storage { namespace Test {
auto fileClient = m_fileShareDirectoryClient->GetFileClient(RandomString());
Files::Shares::UploadShareFileFromOptions options;
options.ChunkSize = 512_KB;
options.Concurrency = concurrency;
options.TransferOptions.ChunkSize = 512_KB;
options.TransferOptions.Concurrency = concurrency;
options.HttpHeaders = GetInterestingHttpHeaders();
options.Metadata = RandomMetadata();
@ -317,8 +317,8 @@ namespace Azure { namespace Storage { namespace Test {
auto fileClient = m_fileShareDirectoryClient->GetFileClient(RandomString());
Files::Shares::UploadShareFileFromOptions options;
options.ChunkSize = 512_KB;
options.Concurrency = concurrency;
options.TransferOptions.ChunkSize = 512_KB;
options.TransferOptions.Concurrency = concurrency;
options.HttpHeaders = GetInterestingHttpHeaders();
options.Metadata = RandomMetadata();
@ -404,7 +404,7 @@ namespace Azure { namespace Storage { namespace Test {
}
downloadBuffer.resize(static_cast<std::size_t>(downloadSize), '\x00');
Files::Shares::DownloadShareFileToOptions options;
options.Concurrency = concurrency;
options.TransferOptions.Concurrency = concurrency;
if (offset.HasValue())
{
options.Range = Core::Http::Range();
@ -412,8 +412,14 @@ namespace Azure { namespace Storage { namespace Test {
options.Range.GetValue().Length = length;
}
options.InitialChunkSize = initialChunkSize;
options.ChunkSize = chunkSize;
if (initialChunkSize.HasValue())
{
options.TransferOptions.InitialChunkSize = initialChunkSize.GetValue();
}
if (chunkSize.HasValue())
{
options.TransferOptions.ChunkSize = chunkSize.GetValue();
}
if (actualDownloadSize > 0)
{
auto res = m_fileClient->DownloadTo(downloadBuffer.data(), downloadBuffer.size(), options);
@ -468,15 +474,21 @@ namespace Azure { namespace Storage { namespace Test {
}
}
Files::Shares::DownloadShareFileToOptions options;
options.Concurrency = concurrency;
options.TransferOptions.Concurrency = concurrency;
if (offset.HasValue())
{
options.Range = Core::Http::Range();
options.Range.GetValue().Offset = offset.GetValue();
options.Range.GetValue().Length = length;
}
options.InitialChunkSize = initialChunkSize;
options.ChunkSize = chunkSize;
if (initialChunkSize.HasValue())
{
options.TransferOptions.InitialChunkSize = initialChunkSize.GetValue();
}
if (chunkSize.HasValue())
{
options.TransferOptions.ChunkSize = chunkSize.GetValue();
}
if (actualDownloadSize > 0)
{
auto res = m_fileClient->DownloadTo(tempFilename, options);
@ -547,7 +559,7 @@ namespace Azure { namespace Storage { namespace Test {
// buffer not big enough
Files::Shares::DownloadShareFileToOptions options;
options.Concurrency = c;
options.TransferOptions.Concurrency = c;
options.Range = Core::Http::Range();
options.Range.GetValue().Offset = 1;
for (int64_t length : {1ULL, 2ULL, 4_KB, 5_KB, 8_KB, 11_KB, 20_KB})