Fix potential integer overflow when casting (#2379)

* fix potential overflow during integer cast

* more

* clang-format

* fix
This commit is contained in:
JinmingHu 2021-06-07 10:50:38 +08:00 committed by GitHub
parent 7f21263cc9
commit 15adb632a1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 39 additions and 26 deletions

View File

@ -255,7 +255,8 @@ namespace Azure { namespace Storage { namespace Blobs {
}
firstChunkLength = std::min(firstChunkLength, blobRangeSize);
if (static_cast<size_t>(blobRangeSize) > bufferSize)
if (static_cast<uint64_t>(blobRangeSize) > std::numeric_limits<size_t>::max()
|| static_cast<size_t>(blobRangeSize) > bufferSize)
{
throw Azure::Core::RequestFailedException(
"Buffer is not big enough, blob range size is " + std::to_string(blobRangeSize) + ".");
@ -366,13 +367,13 @@ namespace Azure { namespace Storage { namespace Blobs {
auto bodyStreamToFile = [](Azure::Core::IO::BodyStream& stream,
_internal::FileWriter& fileWriter,
int64_t offset,
size_t length,
int64_t length,
const Azure::Core::Context& context) {
constexpr size_t bufferSize = 4 * 1024 * 1024;
std::vector<uint8_t> buffer(bufferSize);
while (length > 0)
{
size_t readSize = std::min(bufferSize, length);
size_t readSize = static_cast<size_t>(std::min<int64_t>(bufferSize, length));
size_t bytesRead = stream.ReadToCount(buffer.data(), readSize, context);
if (bytesRead != readSize)
{
@ -384,12 +385,7 @@ namespace Azure { namespace Storage { namespace Blobs {
}
};
bodyStreamToFile(
*(firstChunk.Value.BodyStream),
fileWriter,
0,
static_cast<size_t>(firstChunkLength),
context);
bodyStreamToFile(*(firstChunk.Value.BodyStream), fileWriter, 0, firstChunkLength, context);
firstChunk.Value.BodyStream.reset();
auto returnTypeConverter = [](Azure::Response<Models::DownloadBlobResult>& response) {
@ -417,7 +413,7 @@ namespace Azure { namespace Storage { namespace Blobs {
*(chunk.Value.BodyStream),
fileWriter,
offset - firstChunkOffset,
static_cast<size_t>(chunkOptions.Range.Value().Length.Value()),
chunkOptions.Range.Value().Length.Value(),
context);
if (chunkId == numChunks - 1)

View File

@ -127,6 +127,11 @@ namespace Azure { namespace Storage { namespace Blobs {
constexpr int64_t MaxBlockNumber = 50000;
constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;
if (static_cast<uint64_t>(options.TransferOptions.SingleUploadThreshold)
> std::numeric_limits<size_t>::max())
{
throw Azure::Core::RequestFailedException("Single upload threshold is too big");
}
if (bufferSize <= static_cast<size_t>(options.TransferOptions.SingleUploadThreshold))
{
Azure::Core::IO::MemoryBodyStream contentStream(buffer, bufferSize);
@ -163,8 +168,6 @@ namespace Azure { namespace Storage { namespace Blobs {
};
auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
// TODO: Investigate changing lambda parameters to be size_t, unless they need to be int64_t
// for some reason.
Azure::Core::IO::MemoryBodyStream contentStream(buffer + offset, static_cast<size_t>(length));
StageBlockOptions chunkOptions;
auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context);

View File

@ -151,6 +151,10 @@ namespace Azure { namespace Storage {
std::vector<uint8_t> Sha256(const std::vector<uint8_t>& data)
{
if (data.size() > std::numeric_limits<ULONG>::max())
{
throw std::runtime_error("Data size is too big.");
}
static AlgorithmProviderInstance AlgorithmProvider(AlgorithmType::Sha256);
std::string context;
@ -198,6 +202,10 @@ namespace Azure { namespace Storage {
const std::vector<uint8_t>& data,
const std::vector<uint8_t>& key)
{
if (data.size() > std::numeric_limits<ULONG>::max())
{
throw std::runtime_error("Data size is too big.");
}
static AlgorithmProviderInstance AlgorithmProvider(AlgorithmType::HmacSha256);

View File

@ -31,7 +31,12 @@ namespace Azure { namespace Storage { namespace _internal {
FileReader::FileReader(const std::string& filename)
{
int sizeNeeded = MultiByteToWideChar(
CP_UTF8, MB_ERR_INVALID_CHARS, filename.data(), int(filename.length()), nullptr, 0);
CP_UTF8,
MB_ERR_INVALID_CHARS,
filename.data(),
static_cast<int>(filename.length()),
nullptr,
0);
if (sizeNeeded == 0)
{
throw std::runtime_error("Invalid filename.");
@ -41,7 +46,7 @@ namespace Azure { namespace Storage { namespace _internal {
CP_UTF8,
MB_ERR_INVALID_CHARS,
filename.data(),
int(filename.length()),
static_cast<int>(filename.length()),
&filenameW[0],
sizeNeeded)
== 0)
@ -85,7 +90,12 @@ namespace Azure { namespace Storage { namespace _internal {
FileWriter::FileWriter(const std::string& filename)
{
int sizeNeeded = MultiByteToWideChar(
CP_UTF8, MB_ERR_INVALID_CHARS, filename.data(), int(filename.length()), nullptr, 0);
CP_UTF8,
MB_ERR_INVALID_CHARS,
filename.data(),
static_cast<int>(filename.length()),
nullptr,
0);
if (sizeNeeded == 0)
{
throw std::runtime_error("Invalid filename.");
@ -95,7 +105,7 @@ namespace Azure { namespace Storage { namespace _internal {
CP_UTF8,
MB_ERR_INVALID_CHARS,
filename.data(),
int(filename.length()),
static_cast<int>(filename.length()),
&filenameW[0],
sizeNeeded)
== 0)

View File

@ -688,7 +688,8 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
}
firstChunkLength = std::min(firstChunkLength, fileRangeSize);
if (static_cast<size_t>(fileRangeSize) > bufferSize)
if (static_cast<uint64_t>(fileRangeSize) > std::numeric_limits<size_t>::max()
|| static_cast<size_t>(fileRangeSize) > bufferSize)
{
throw Azure::Core::RequestFailedException(
"Buffer is not big enough, file range size is " + std::to_string(fileRangeSize) + ".");
@ -802,13 +803,13 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
auto bodyStreamToFile = [](Azure::Core::IO::BodyStream& stream,
_internal::FileWriter& fileWriter,
int64_t offset,
size_t length,
int64_t length,
const Azure::Core::Context& context) {
constexpr size_t bufferSize = 4 * 1024 * 1024;
std::vector<uint8_t> buffer(bufferSize);
while (length > 0)
{
size_t readSize = std::min(bufferSize, length);
size_t readSize = static_cast<size_t>(std::min<int64_t>(bufferSize, length));
size_t bytesRead = stream.ReadToCount(buffer.data(), readSize, context);
if (bytesRead != readSize)
{
@ -820,12 +821,7 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
}
};
bodyStreamToFile(
*(firstChunk.Value.BodyStream),
fileWriter,
0,
static_cast<size_t>(firstChunkLength),
context);
bodyStreamToFile(*(firstChunk.Value.BodyStream), fileWriter, 0, firstChunkLength, context);
firstChunk.Value.BodyStream.reset();
auto returnTypeConverter = [](Azure::Response<Models::DownloadFileResult>& response) {
@ -855,7 +851,7 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
*(chunk.Value.BodyStream),
fileWriter,
offset - firstChunkOffset,
static_cast<size_t>(chunkOptions.Range.Value().Length.Value()),
chunkOptions.Range.Value().Length.Value(),
context);
if (chunkId == numChunks - 1)