diff --git a/sdk/storage/azure-storage-blobs/src/blob_client.cpp b/sdk/storage/azure-storage-blobs/src/blob_client.cpp index 2bd48887d..57112b514 100644 --- a/sdk/storage/azure-storage-blobs/src/blob_client.cpp +++ b/sdk/storage/azure-storage-blobs/src/blob_client.cpp @@ -255,7 +255,8 @@ namespace Azure { namespace Storage { namespace Blobs { } firstChunkLength = std::min(firstChunkLength, blobRangeSize); - if (static_cast<size_t>(blobRangeSize) > bufferSize) + if (static_cast<uint64_t>(blobRangeSize) > std::numeric_limits<size_t>::max() + || static_cast<size_t>(blobRangeSize) > bufferSize) { throw Azure::Core::RequestFailedException( "Buffer is not big enough, blob range size is " + std::to_string(blobRangeSize) + "."); @@ -366,13 +367,13 @@ namespace Azure { namespace Storage { namespace Blobs { auto bodyStreamToFile = [](Azure::Core::IO::BodyStream& stream, _internal::FileWriter& fileWriter, int64_t offset, - size_t length, + int64_t length, const Azure::Core::Context& context) { constexpr size_t bufferSize = 4 * 1024 * 1024; std::vector<uint8_t> buffer(bufferSize); while (length > 0) { - size_t readSize = std::min(bufferSize, length); + size_t readSize = static_cast<size_t>(std::min<int64_t>(bufferSize, length)); size_t bytesRead = stream.ReadToCount(buffer.data(), readSize, context); if (bytesRead != readSize) { @@ -384,12 +385,7 @@ namespace Azure { namespace Storage { namespace Blobs { } }; - bodyStreamToFile( - *(firstChunk.Value.BodyStream), - fileWriter, - 0, - static_cast<size_t>(firstChunkLength), - context); + bodyStreamToFile(*(firstChunk.Value.BodyStream), fileWriter, 0, firstChunkLength, context); firstChunk.Value.BodyStream.reset(); auto returnTypeConverter = [](Azure::Response<Models::DownloadBlobResult>& response) { @@ -417,7 +413,7 @@ namespace Azure { namespace Storage { namespace Blobs { *(chunk.Value.BodyStream), fileWriter, offset - firstChunkOffset, - static_cast<size_t>(chunkOptions.Range.Value().Length.Value()), + chunkOptions.Range.Value().Length.Value(), context); if (chunkId == numChunks - 1) diff --git 
a/sdk/storage/azure-storage-blobs/src/block_blob_client.cpp b/sdk/storage/azure-storage-blobs/src/block_blob_client.cpp index c74babd00..cdbb9134a 100644 --- a/sdk/storage/azure-storage-blobs/src/block_blob_client.cpp +++ b/sdk/storage/azure-storage-blobs/src/block_blob_client.cpp @@ -127,6 +127,11 @@ namespace Azure { namespace Storage { namespace Blobs { constexpr int64_t MaxBlockNumber = 50000; constexpr int64_t BlockGrainSize = 1 * 1024 * 1024; + if (static_cast<uint64_t>(options.TransferOptions.SingleUploadThreshold) + > std::numeric_limits<size_t>::max()) + { + throw Azure::Core::RequestFailedException("Single upload threshold is too big"); + } if (bufferSize <= static_cast<size_t>(options.TransferOptions.SingleUploadThreshold)) { Azure::Core::IO::MemoryBodyStream contentStream(buffer, bufferSize); @@ -163,8 +168,6 @@ namespace Azure { namespace Storage { namespace Blobs { }; auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) { - // TODO: Investigate changing lambda parameters to be size_t, unless they need to be int64_t - // for some reason. 
Azure::Core::IO::MemoryBodyStream contentStream(buffer + offset, static_cast<size_t>(length)); StageBlockOptions chunkOptions; auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context); diff --git a/sdk/storage/azure-storage-common/src/crypt.cpp b/sdk/storage/azure-storage-common/src/crypt.cpp index f10ce3ae3..9719f48a9 100644 --- a/sdk/storage/azure-storage-common/src/crypt.cpp +++ b/sdk/storage/azure-storage-common/src/crypt.cpp @@ -151,6 +151,10 @@ namespace Azure { namespace Storage { std::vector<uint8_t> Sha256(const std::vector<uint8_t>& data) { + if (data.size() > std::numeric_limits<ULONG>::max()) + { + throw std::runtime_error("Data size is too big."); + } static AlgorithmProviderInstance AlgorithmProvider(AlgorithmType::Sha256); std::string context; @@ -198,6 +202,10 @@ namespace Azure { namespace Storage { const std::vector<uint8_t>& data, const std::vector<uint8_t>& key) { + if (data.size() > std::numeric_limits<ULONG>::max()) + { + throw std::runtime_error("Data size is too big."); + } static AlgorithmProviderInstance AlgorithmProvider(AlgorithmType::HmacSha256); diff --git a/sdk/storage/azure-storage-common/src/file_io.cpp b/sdk/storage/azure-storage-common/src/file_io.cpp index e48fb2d59..96cb8e179 100644 --- a/sdk/storage/azure-storage-common/src/file_io.cpp +++ b/sdk/storage/azure-storage-common/src/file_io.cpp @@ -31,7 +31,12 @@ namespace Azure { namespace Storage { namespace _internal { FileReader::FileReader(const std::string& filename) { int sizeNeeded = MultiByteToWideChar( - CP_UTF8, MB_ERR_INVALID_CHARS, filename.data(), int(filename.length()), nullptr, 0); + CP_UTF8, + MB_ERR_INVALID_CHARS, + filename.data(), + static_cast<int>(filename.length()), + nullptr, + 0); if (sizeNeeded == 0) { throw std::runtime_error("Invalid filename."); } @@ -41,7 +46,7 @@ namespace Azure { namespace Storage { namespace _internal { CP_UTF8, MB_ERR_INVALID_CHARS, filename.data(), - int(filename.length()), + static_cast<int>(filename.length()), &filenameW[0], sizeNeeded) == 0) @@ -85,7 +90,12 @@ 
namespace Azure { namespace Storage { namespace _internal { FileWriter::FileWriter(const std::string& filename) { int sizeNeeded = MultiByteToWideChar( - CP_UTF8, MB_ERR_INVALID_CHARS, filename.data(), int(filename.length()), nullptr, 0); + CP_UTF8, + MB_ERR_INVALID_CHARS, + filename.data(), + static_cast<int>(filename.length()), + nullptr, + 0); if (sizeNeeded == 0) { throw std::runtime_error("Invalid filename."); } @@ -95,7 +105,7 @@ namespace Azure { namespace Storage { namespace _internal { CP_UTF8, MB_ERR_INVALID_CHARS, filename.data(), - int(filename.length()), + static_cast<int>(filename.length()), &filenameW[0], sizeNeeded) == 0) diff --git a/sdk/storage/azure-storage-files-shares/src/share_file_client.cpp b/sdk/storage/azure-storage-files-shares/src/share_file_client.cpp index 761eb5950..6f3d06d91 100644 --- a/sdk/storage/azure-storage-files-shares/src/share_file_client.cpp +++ b/sdk/storage/azure-storage-files-shares/src/share_file_client.cpp @@ -688,7 +688,8 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares { } firstChunkLength = std::min(firstChunkLength, fileRangeSize); - if (static_cast<size_t>(fileRangeSize) > bufferSize) + if (static_cast<uint64_t>(fileRangeSize) > std::numeric_limits<size_t>::max() + || static_cast<size_t>(fileRangeSize) > bufferSize) { throw Azure::Core::RequestFailedException( "Buffer is not big enough, file range size is " + std::to_string(fileRangeSize) + "."); @@ -802,13 +803,13 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares { auto bodyStreamToFile = [](Azure::Core::IO::BodyStream& stream, _internal::FileWriter& fileWriter, int64_t offset, - size_t length, + int64_t length, const Azure::Core::Context& context) { constexpr size_t bufferSize = 4 * 1024 * 1024; std::vector<uint8_t> buffer(bufferSize); while (length > 0) { - size_t readSize = std::min(bufferSize, length); + size_t readSize = static_cast<size_t>(std::min<int64_t>(bufferSize, length)); size_t bytesRead = stream.ReadToCount(buffer.data(), readSize, context); if (bytesRead != 
readSize) { @@ -820,12 +821,7 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares { } }; - bodyStreamToFile( - *(firstChunk.Value.BodyStream), - fileWriter, - 0, - static_cast<size_t>(firstChunkLength), - context); + bodyStreamToFile(*(firstChunk.Value.BodyStream), fileWriter, 0, firstChunkLength, context); firstChunk.Value.BodyStream.reset(); auto returnTypeConverter = [](Azure::Response<Models::DownloadFileResult>& response) { @@ -855,7 +851,7 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares { *(chunk.Value.BodyStream), fileWriter, offset - firstChunkOffset, - static_cast<size_t>(chunkOptions.Range.Value().Length.Value()), + chunkOptions.Range.Value().Length.Value(), context); if (chunkId == numChunks - 1)