Changed the static methods 'BodyStream::ReadToCount()' and 'BodyStream::ReadToEnd()' into instance methods. (#1863)

Usability studies found that static methods are generally not as user-friendly as instance methods. Folks new to the SDK have a harder time discovering the APIs, static methods reverse the flow of typing, and the calling code ends up a bit more verbose because you have to spell out the whole namespace and type name when there aren't any using directives.

There doesn't seem to be a strong benefit or feasibility reason to keep these methods static, as static methods are typically harder to use and discover.

cc @kyle-patterson
This commit is contained in:
Ahson Khan 2021-03-11 03:58:33 -08:00 committed by GitHub
parent 84df3cefbc
commit 12475350b2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 31 additions and 63 deletions

View File

@ -34,6 +34,7 @@
- Changed type of `Azure::Core::Http::RetryOptions::StatusCodes` from `std::vector` to `std::set`.
- Renamed `Azure::Core::Http::LoggingPolicy` to `LogPolicy`.
- Introduced `Azure::Core::Http::Policies::LogOptions`, a mandatory parameter for `LogPolicy` construction. Entities that are not specified in the allow lists are hidden in the log.
- Changed the static methods `BodyStream::ReadToCount()` and `BodyStream::ReadToEnd()` into instance methods.
- Moved `Azure::Core::Logging` namespace entities to `Azure::Core::Diagnostics::Logger` class.
- Removed `Azure::Core::DateTime::GetRfc3339String()`: `Azure::Core::DateTime::ToString()` was extended to provide the same functionality.
- Changed the constructor of `Azure::IO::FileBodyStream` to accept a file name directly and take ownership of opening/closing the file, instead of accepting a file descriptor, offset, and length.

View File

@ -94,29 +94,23 @@ namespace Azure { namespace Core { namespace IO {
* @brief Read #Azure::Core::IO::BodyStream into a buffer until the buffer is filled, or until
* the stream is read to end.
*
* @param body #Azure::Core::IO::BodyStream to read.
* @param buffer Pointer to a first byte of the byte buffer to read the data into.
* @param count Size of the buffer to read the data into.
* @param context #Azure::Core::Context so that operation can be cancelled.
*
* @return Number of bytes read.
*/
static int64_t ReadToCount(
BodyStream& body,
uint8_t* buffer,
int64_t count,
Azure::Core::Context const& context);
int64_t ReadToCount(uint8_t* buffer, int64_t count, Azure::Core::Context const& context);
/**
* @brief Read #Azure::Core::IO::BodyStream until the stream is read to end, allocating memory
* for the entirety of contents.
*
* @param context #Azure::Core::Context so that operation can be cancelled.
* @param body #Azure::Core::IO::BodyStream to read.
*
* @return A vector of bytes containing the entirety of data read from the \p body.
*/
static std::vector<uint8_t> ReadToEnd(BodyStream& body, Azure::Core::Context const& context);
std::vector<uint8_t> ReadToEnd(Azure::Core::Context const& context);
};
/**

View File

@ -58,7 +58,7 @@ std::unique_ptr<RawResponse> TransportPolicy::Send(
// Using DownloadViaStream and getting an error code would also get to here to download error from
// body
auto bodyStream = response->GetBodyStream();
response->SetBody(BodyStream::ReadToEnd(*bodyStream, ctx));
response->SetBody(bodyStream->ReadToEnd(ctx));
// BodyStream is moved out of response. This makes transport implementation to clean any active
// session with sockets or internal state.
return response;

View File

@ -34,17 +34,13 @@ using Azure::Core::Context;
using namespace Azure::Core::IO;
// Keep reading until the buffer is completely filled, or the end of the stream content is reached
int64_t BodyStream::ReadToCount(
BodyStream& body,
uint8_t* buffer,
int64_t count,
Context const& context)
int64_t BodyStream::ReadToCount(uint8_t* buffer, int64_t count, Context const& context)
{
int64_t totalRead = 0;
for (;;)
{
int64_t readBytes = body.Read(buffer + totalRead, count - totalRead, context);
int64_t readBytes = this->Read(buffer + totalRead, count - totalRead, context);
totalRead += readBytes;
// Reach all of buffer size
if (totalRead == count || readBytes == 0)
@ -54,7 +50,7 @@ int64_t BodyStream::ReadToCount(
}
}
std::vector<uint8_t> BodyStream::ReadToEnd(BodyStream& body, Context const& context)
std::vector<uint8_t> BodyStream::ReadToEnd(Context const& context)
{
constexpr int64_t chunkSize = 1024 * 8;
auto buffer = std::vector<uint8_t>();
@ -63,7 +59,7 @@ std::vector<uint8_t> BodyStream::ReadToEnd(BodyStream& body, Context const& cont
{
buffer.resize((static_cast<decltype(buffer)::size_type>(chunkNumber) + 1) * chunkSize);
int64_t readBytes
= ReadToCount(body, buffer.data() + (chunkNumber * chunkSize), chunkSize, context);
= this->ReadToCount(buffer.data() + (chunkNumber * chunkSize), chunkSize, context);
if (readBytes < chunkSize)
{

View File

@ -123,8 +123,7 @@ namespace Azure { namespace Core { namespace Test {
// Read the bodyStream to get all chunks
EXPECT_THROW(
Azure::Core::IO::BodyStream::ReadToEnd(
*bodyS, Azure::Core::Context::GetApplicationContext()),
bodyS->ReadToEnd(Azure::Core::Context::GetApplicationContext()),
Azure::Core::Http::TransportException);
}
// Clear the connections from the pool to invoke clean routine
@ -200,8 +199,7 @@ namespace Azure { namespace Core { namespace Test {
auto bodyS = response->GetBodyStream();
// Read the bodyStream to get all chunks
EXPECT_NO_THROW(Azure::Core::IO::BodyStream::ReadToEnd(
*bodyS, Azure::Core::Context::GetApplicationContext()));
EXPECT_NO_THROW(bodyS->ReadToEnd(Azure::Core::Context::GetApplicationContext()));
}
// Clear the connections from the pool to invoke clean routine
Azure::Core::Http::CurlConnectionPool::ConnectionPoolIndex.clear();

View File

@ -191,10 +191,7 @@ namespace Azure { namespace Core { namespace Test {
// Change the offset of the stream to be non-zero by reading a byte.
std::vector<uint8_t> temp(2);
EXPECT_EQ(
Azure::Core::IO::BodyStream::ReadToCount(
stream, temp.data(), 1, Context::GetApplicationContext()),
1);
EXPECT_EQ(stream.ReadToCount(temp.data(), 1, Context::GetApplicationContext()), 1);
EXPECT_EQ(temp[0], 1);
EXPECT_EQ(temp[1], 0);
@ -212,10 +209,7 @@ namespace Azure { namespace Core { namespace Test {
// Verify that StartTry rewound the stream back.
auto getStream = req.GetBodyStream();
EXPECT_EQ(
Azure::Core::IO::BodyStream::ReadToCount(
*getStream, temp.data(), 2, Context::GetApplicationContext()),
2);
EXPECT_EQ(getStream->ReadToCount(temp.data(), 2, Context::GetApplicationContext()), 2);
EXPECT_EQ(temp[0], 1);
EXPECT_EQ(temp[1], 2);

View File

@ -556,8 +556,8 @@ namespace Azure { namespace Core { namespace Test {
auto body = response.GetBodyStream();
EXPECT_NE(body, nullptr);
std::vector<uint8_t> bodyVector = Azure::Core::IO::BodyStream::ReadToEnd(
*body, Azure::Core::Context::GetApplicationContext());
std::vector<uint8_t> bodyVector
= body->ReadToEnd(Azure::Core::Context::GetApplicationContext());
int64_t bodySize = body->Length();
EXPECT_EQ(bodySize, size);
bodySize = bodyVector.size();

View File

@ -60,7 +60,7 @@ namespace Azure { namespace Perf { namespace Test {
auto response = _detail::HttpClient->Send(request, ctx);
// Read the body from network
auto bodyStream = response->GetBodyStream();
response->SetBody(Azure::Core::IO::BodyStream::ReadToEnd(*bodyStream, ctx));
response->SetBody(bodyStream->ReadToEnd(ctx));
}
/**

View File

@ -278,8 +278,7 @@ namespace Azure { namespace Storage { namespace Blobs {
"buffer is not big enough, blob range size is " + std::to_string(blobRangeSize));
}
int64_t bytesRead = Azure::Core::IO::BodyStream::ReadToCount(
*(firstChunk->BodyStream), buffer, firstChunkLength, context);
int64_t bytesRead = firstChunk->BodyStream->ReadToCount(buffer, firstChunkLength, context);
if (bytesRead != firstChunkLength)
{
throw Azure::Core::RequestFailedException("error when reading body stream");
@ -310,8 +309,7 @@ namespace Azure { namespace Storage { namespace Blobs {
chunkOptions.AccessConditions.IfMatch = eTag;
}
auto chunk = Download(chunkOptions, context);
int64_t bytesRead = Azure::Core::IO::BodyStream::ReadToCount(
*(chunk->BodyStream),
int64_t bytesRead = chunk->BodyStream->ReadToCount(
buffer + (offset - firstChunkOffset),
chunkOptions.Range.GetValue().Length.GetValue(),
context);
@ -394,8 +392,7 @@ namespace Azure { namespace Storage { namespace Blobs {
while (length > 0)
{
int64_t readSize = std::min(static_cast<int64_t>(bufferSize), length);
int64_t bytesRead
= Azure::Core::IO::BodyStream::ReadToCount(stream, buffer.data(), readSize, context);
int64_t bytesRead = stream.ReadToCount(buffer.data(), readSize, context);
if (bytesRead != readSize)
{
throw Azure::Core::RequestFailedException("error when reading body stream");

View File

@ -369,9 +369,7 @@ namespace Azure { namespace Storage { namespace Test {
EXPECT_FALSE(response->Created);
}
auto downloadStream = std::move(blobClient.Download()->BodyStream);
EXPECT_EQ(
Azure::Core::IO::BodyStream::ReadToEnd(*downloadStream, Azure::Core::Context()),
m_blobContent);
EXPECT_EQ(downloadStream->ReadToEnd(Azure::Core::Context()), m_blobContent);
}
}}} // namespace Azure::Storage::Test

View File

@ -315,9 +315,7 @@ namespace Azure { namespace Storage { namespace Test {
EXPECT_FALSE(response->Created);
}
auto downloadStream = std::move(blobClient.Download()->BodyStream);
EXPECT_EQ(
Azure::Core::IO::BodyStream::ReadToEnd(*downloadStream, Azure::Core::Context()),
m_blobContent);
EXPECT_EQ(downloadStream->ReadToEnd(Azure::Core::Context()), m_blobContent);
}
}}} // namespace Azure::Storage::Test

View File

@ -255,8 +255,7 @@ namespace Azure { namespace Storage { namespace Test {
auto blobClient = Azure::Storage::Blobs::BlobClient::CreateFromConnectionString(
StandardStorageConnectionString(), RandomString(), RandomString(), clientOptions);
auto ret = blobClient.Download();
auto responseBody
= Azure::Core::IO::BodyStream::ReadToEnd(*(ret->BodyStream), Azure::Core::Context());
auto responseBody = ret->BodyStream->ReadToEnd(Azure::Core::Context());
EXPECT_EQ(std::string(responseBody.begin(), responseBody.end()), primaryContent);
}
@ -285,8 +284,7 @@ namespace Azure { namespace Storage { namespace Test {
auto timeBegin = std::chrono::steady_clock::now();
auto ret = blobClient.Download();
auto timeEnd = std::chrono::steady_clock::now();
auto responseBody
= Azure::Core::IO::BodyStream::ReadToEnd(*(ret->BodyStream), Azure::Core::Context());
auto responseBody = ret->BodyStream->ReadToEnd(Azure::Core::Context());
EXPECT_EQ(std::string(responseBody.begin(), responseBody.end()), primaryContent);
EXPECT_EQ(numTrial, 2);
@ -328,8 +326,7 @@ namespace Azure { namespace Storage { namespace Test {
auto blobClient = Azure::Storage::Blobs::BlobClient::CreateFromConnectionString(
StandardStorageConnectionString(), RandomString(), RandomString(), clientOptions);
auto ret = blobClient.Download();
auto responseBody
= Azure::Core::IO::BodyStream::ReadToEnd(*(ret->BodyStream), Azure::Core::Context());
auto responseBody = ret->BodyStream->ReadToEnd(Azure::Core::Context());
EXPECT_EQ(std::string(responseBody.begin(), responseBody.end()), secondaryContent);
}
@ -377,8 +374,7 @@ namespace Azure { namespace Storage { namespace Test {
auto blobClient = Azure::Storage::Blobs::BlobClient::CreateFromConnectionString(
StandardStorageConnectionString(), RandomString(), RandomString(), clientOptions);
auto ret = blobClient.Download();
auto responseBody
= Azure::Core::IO::BodyStream::ReadToEnd(*(ret->BodyStream), Azure::Core::Context());
auto responseBody = ret->BodyStream->ReadToEnd(Azure::Core::Context());
EXPECT_EQ(std::string(responseBody.begin(), responseBody.end()), primaryContent);
EXPECT_EQ(numPrimaryTrial, 3);
EXPECT_EQ(numSecondaryTrial, 1);

View File

@ -68,7 +68,7 @@ namespace Azure { namespace Storage { namespace Test {
inline std::vector<uint8_t> ReadBodyStream(std::unique_ptr<Azure::Core::IO::BodyStream>& stream)
{
Azure::Core::Context context;
return Azure::Core::IO::BodyStream::ReadToEnd(*stream, context);
return stream->ReadToEnd(context);
}
inline std::vector<uint8_t> ReadBodyStream(std::unique_ptr<Azure::Core::IO::BodyStream>&& stream)

View File

@ -80,8 +80,7 @@ void DataLakeGettingStarted()
// Read
auto result = fileClient.Download();
Azure::Core::Context context;
std::vector<uint8_t> downloaded
= Azure::Core::IO::BodyStream::ReadToEnd(*(result->Body), context);
std::vector<uint8_t> downloaded = result->Body->ReadToEnd(context);
// downloaded contains your downloaded data.
std::cout << "Downloaded data was:\n" + std::string(downloaded.begin(), downloaded.end())
<< std::endl;

View File

@ -671,8 +671,7 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
"buffer is not big enough, file range size is " + std::to_string(fileRangeSize));
}
int64_t bytesRead = Azure::Core::IO::BodyStream::ReadToCount(
*(firstChunk->BodyStream), buffer, firstChunkLength, context);
int64_t bytesRead = firstChunk->BodyStream->ReadToCount(buffer, firstChunkLength, context);
if (bytesRead != firstChunkLength)
{
throw Azure::Core::RequestFailedException("error when reading body stream");
@ -697,8 +696,7 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
chunkOptions.Range.GetValue().Offset = offset;
chunkOptions.Range.GetValue().Length = length;
auto chunk = Download(chunkOptions, context);
int64_t bytesRead = Azure::Core::IO::BodyStream::ReadToCount(
*(chunk->BodyStream),
int64_t bytesRead = chunk->BodyStream->ReadToCount(
buffer + (offset - firstChunkOffset),
chunkOptions.Range.GetValue().Length.GetValue(),
context);
@ -781,8 +779,7 @@ namespace Azure { namespace Storage { namespace Files { namespace Shares {
while (length > 0)
{
int64_t readSize = std::min(static_cast<int64_t>(bufferSize), length);
int64_t bytesRead
= Azure::Core::IO::BodyStream::ReadToCount(stream, buffer.data(), readSize, context);
int64_t bytesRead = stream.ReadToCount(buffer.data(), readSize, context);
if (bytesRead != readSize)
{
throw Azure::Core::RequestFailedException("error when reading body stream");

View File

@ -634,7 +634,7 @@ namespace Azure { namespace Storage { namespace Test {
downloadOptions.Range.GetValue().Length = rangeSize;
Files::Shares::Models::DownloadShareFileResult result;
EXPECT_NO_THROW(result = fileClient.Download(downloadOptions).ExtractValue());
auto resultBuffer = Core::IO::BodyStream::ReadToEnd(*(result.BodyStream), Core::Context());
auto resultBuffer = result.BodyStream->ReadToEnd(Core::Context());
EXPECT_EQ(rangeContent, resultBuffer);
EXPECT_EQ(
downloadOptions.Range.GetValue().Length.GetValue(),
@ -872,7 +872,7 @@ namespace Azure { namespace Storage { namespace Test {
Files::Shares::DownloadShareFileOptions downloadOptions;
downloadOptions.Range = destRange;
EXPECT_NO_THROW(result = destFileClient.Download(downloadOptions).ExtractValue());
auto resultBuffer = Core::IO::BodyStream::ReadToEnd(*(result.BodyStream), Core::Context());
auto resultBuffer = result.BodyStream->ReadToEnd(Core::Context());
EXPECT_EQ(fileContent, resultBuffer);
Files::Shares::Models::GetShareFileRangeListResult getRangeResult;
EXPECT_NO_THROW(getRangeResult = destFileClient.GetRangeList().ExtractValue());