Skip to content

Commit

Permalink
Merge pull request pistacheio#1206 from dgreatwood/clangConformance
Browse files Browse the repository at this point in the history
Conforming All Source Files to clang-format
  • Loading branch information
kiplingw authored Apr 19, 2024
2 parents f9acf99 + 9607720 commit 2359c2d
Show file tree
Hide file tree
Showing 6 changed files with 95 additions and 95 deletions.
104 changes: 52 additions & 52 deletions include/pistache/async.h
Original file line number Diff line number Diff line change
Expand Up @@ -217,16 +217,16 @@ namespace Pistache::Async
std::exception_ptr exc;

/*
* We need this lock because a Promise might be resolved or rejected from a
* thread A while a continuation to the same Promise (Core) might be attached
* at the same time from a thread B. If that's the case, then we need to serialize
* operations so that we avoid a race-condition.
*
* Since we have a lock, we have a blocking progress guarantee but I don't
* expect this to be a major bottleneck as I don't expect major contention on
* the lock. If it ends up being a bottleneck, try @improving it by
* experimenting with a lock-free scheme
*/
* We need this lock because a Promise might be resolved or rejected from a
* thread A while a continuation to the same Promise (Core) might be attached
* at the same time from a thread B. If that's the case, then we need to serialize
* operations so that we avoid a race-condition.
*
* Since we have a lock, we have a blocking progress guarantee but I don't
* expect this to be a major bottleneck as I don't expect major contention on
* the lock. If it ends up being a bottleneck, try @improving it by
* experimenting with a lock-free scheme
*/
std::mutex mtx;
std::vector<std::shared_ptr<Request>> requests;
TypeId id;
Expand Down Expand Up @@ -329,7 +329,7 @@ namespace Pistache::Async
{
if (resolveCount_ >= 1)
return; // TODO is this the right thing?
// throw Error("Resolve must not be called more than once");
// throw Error("Resolve must not be called more than once");

++resolveCount_;
doResolve(coreCast(core));
Expand All @@ -339,7 +339,7 @@ namespace Pistache::Async
{
if (rejectCount_ >= 1)
return; // TODO is this the right thing?
// throw Error("Reject must not be called more than once");
// throw Error("Reject must not be called more than once");

++rejectCount_;
try
Expand Down Expand Up @@ -823,10 +823,10 @@ namespace Pistache::Async
: core_(core)
{ }

Resolver(const Resolver& other) = delete;
Resolver(const Resolver& other) = delete;
Resolver& operator=(const Resolver& other) = delete;

Resolver(Resolver&& other) = default;
Resolver(Resolver&& other) = default;
Resolver& operator=(Resolver&& other) = default;

template <typename Arg>
Expand All @@ -841,9 +841,9 @@ namespace Pistache::Async
throw Error("Attempt to resolve a fulfilled promise");

/* In an ideal world, this should be checked at compile-time rather
* than runtime. However, since types are erased, this looks like
* a difficult task
*/
* than runtime. However, since types are erased, this looks like
* a difficult task
*/
if (core_->isVoid())
{
throw Error("Attempt to resolve a void promise with arguments");
Expand Down Expand Up @@ -896,10 +896,10 @@ namespace Pistache::Async
: core_(core)
{ }

Rejection(const Rejection& other) = delete;
Rejection(const Rejection& other) = delete;
Rejection& operator=(const Rejection& other) = delete;

Rejection(Rejection&& other) = default;
Rejection(Rejection&& other) = default;
Rejection& operator=(Rejection&& other) = default;

template <typename Exc>
Expand Down Expand Up @@ -939,10 +939,10 @@ namespace Pistache::Async
, rejection(nullptr)
{ }

Deferred(const Deferred& other) = delete;
Deferred(const Deferred& other) = delete;
Deferred& operator=(const Deferred& other) = delete;

Deferred(Deferred&& other) = default;
Deferred(Deferred&& other) = default;
Deferred& operator=(Deferred&& other) = default;

Deferred(Resolver _resolver, Rejection _reject)
Expand Down Expand Up @@ -990,10 +990,10 @@ namespace Pistache::Async
, rejection(nullptr)
{ }

Deferred(const Deferred& other) = delete;
Deferred(const Deferred& other) = delete;
Deferred& operator=(const Deferred& other) = delete;

Deferred(Deferred&& other) = default;
Deferred(Deferred&& other) = default;
Deferred& operator=(Deferred&& other) = default;

Deferred(Resolver _resolver, Rejection _reject)
Expand All @@ -1019,13 +1019,13 @@ namespace Pistache::Async
{

/*
* Note that we could use std::result_of to SFINAE-out and dispatch to the right
* call. However, gcc 4.7 does not correctly support std::result_of for SFINAE
* purposes, so we use a decltype SFINAE-expression instead.
*
* See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3462.html and
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56283 for reference
*/
* Note that we could use std::result_of to SFINAE-out and dispatch to the right
* call. However, gcc 4.7 does not correctly support std::result_of for SFINAE
* purposes, so we use a decltype SFINAE-expression instead.
*
* See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3462.html and
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56283 for reference
*/
template <typename T, typename Func>
auto callAsync(Func func, Resolver& resolver, Rejection& rejection)
-> decltype(std::declval<Func>()(resolver, rejection), void())
Expand Down Expand Up @@ -1059,10 +1059,10 @@ namespace Pistache::Async
details::callAsync<T>(func, resolver_, rejection_);
}

Promise(const Promise<T>& other) = delete;
Promise(const Promise<T>& other) = delete;
Promise& operator=(const Promise<T>& other) = delete;

Promise(Promise<T>&& other) = default;
Promise(Promise<T>&& other) = default;
Promise& operator=(Promise<T>&& other) = default;

~Promise() override = default;
Expand Down Expand Up @@ -1215,10 +1215,10 @@ namespace Pistache::Async
public:
friend struct Impl::Any;

Any(const Any& other) = default;
Any(const Any& other) = default;
Any& operator=(const Any& other) = default;

Any(Any&& other) = default;
Any(Any&& other) = default;
Any& operator=(Any&& other) = default;

template <typename T>
Expand All @@ -1245,10 +1245,10 @@ namespace Pistache::Async
{

/* Instead of duplicating the code between whenAll and whenAny functions, the
* main implementation is in the When class below and we configure the class
* with a policy instead, depending if we are executing an "all" or "any"
* operation, how cool is that ?
*/
* main implementation is in the When class below and we configure the class
* with a policy instead, depending if we are executing an "all" or "any"
* operation, how cool is that ?
*/
struct All
{

Expand Down Expand Up @@ -1448,17 +1448,17 @@ namespace Pistache::Async
typename std::remove_reference<Args>::type>::Type...>
Results;
/* We need to keep the results alive until the last promise
* finishes its execution
*/
* finishes its execution
*/

/* See the trick here ? Basically, we only have access to the real type of
* the results in this function. The policy classes do not have access to
* the full type (std::tuple), but, instead, take a generic template data
* type as a parameter. They only need to know that results is a tuple, they
* do not need to know the real type of the results.
*
* This is some sort of compile-time template type-erasing, hue
*/
* the results in this function. The policy classes do not have access to
* the full type (std::tuple), but, instead, take a generic template data
* type as a parameter. They only need to know that results is a tuple, they
* do not need to know the real type of the results.
*
* This is some sort of compile-time template type-erasing, hue
*/
struct Data : public ContinuationPolicy::Data
{
Data(size_t total, Resolver resolver, Rejection rejection)
Expand Down Expand Up @@ -1548,9 +1548,9 @@ namespace Pistache::Async
};

/* Ok so apparently I can not fully specialize a template structure
* here, so you know what, compiler ? Take that Dummy type and leave
* me alone
*/
* here, so you know what, compiler ? Take that Dummy type and leave
* me alone
*/
template <typename ValueType, typename Dummy = void>
struct DataT : public Data
{
Expand All @@ -1564,8 +1564,8 @@ namespace Pistache::Async
};

/* For a vector of void promises, we do not have any results, that's
* why we need a distinct specialization for the void case
*/
* why we need a distinct specialization for the void case
*/
template <typename Dummy>
struct DataT<void, Dummy> : public Data
{
Expand Down
8 changes: 4 additions & 4 deletions include/pistache/cookie.h
Original file line number Diff line number Diff line change
Expand Up @@ -64,10 +64,10 @@ namespace Pistache::Http
struct iterator
{
using iterator_category = std::bidirectional_iterator_tag;
using value_type = Cookie;
using difference_type = std::ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;
using value_type = Cookie;
using difference_type = std::ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;

explicit iterator(const Storage::const_iterator& _iterator)
: iter_storage(_iterator)
Expand Down
18 changes: 9 additions & 9 deletions include/pistache/mailbox.h
Original file line number Diff line number Diff line change
Expand Up @@ -326,7 +326,7 @@ namespace Pistache
// reads val. Second time, read fails with errno = EAGAIN /
// EWOULDBLOCK, causing us to break out of the loop and
// return from our "pop" function here.
//
//
// However, in the problem case, very occasionally two
// pushes - and hence two writes to event_fd - occur just
// ahead of the read, and both writes succeed by the time
Expand All @@ -339,7 +339,7 @@ namespace Pistache
// the read succeeds.
if (bytes == (sizeof val))
{ // success

if (!ret)
{
// Have another try at pop, in case there was no
Expand All @@ -348,10 +348,10 @@ namespace Pistache
// but before the read above
ret = Queue<T>::pop();
}

break;
}

if (bytes == -1)
{
if (errno == EAGAIN || errno == EWOULDBLOCK)
Expand Down Expand Up @@ -402,14 +402,14 @@ namespace Pistache
static constexpr size_t Mask = Size - 1;

public:
MPMCQueue(const MPMCQueue& other) = delete;
MPMCQueue(const MPMCQueue& other) = delete;
MPMCQueue& operator=(const MPMCQueue& other) = delete;

/*
* Note that you should not move a queue. This is somehow needed for gcc 4.7,
* otherwise the client won't compile
* @Investigate why
*/
* Note that you should not move a queue. This is somehow needed for gcc 4.7,
* otherwise the client won't compile
* @Investigate why
*/
MPMCQueue(MPMCQueue&& other) { *this = std::move(other); }

MPMCQueue& operator=(MPMCQueue&& other)
Expand Down
2 changes: 1 addition & 1 deletion src/common/net.cc
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ namespace Pistache
char buff[INET6_ADDRSTRLEN];
const auto* addr_sa = reinterpret_cast<const struct sockaddr*>(&addr_);
int err = getnameinfo(
addr_sa, sizeof(addr_), buff, sizeof(buff), NULL, 0, NI_NUMERICHOST);
addr_sa, sizeof(addr_), buff, sizeof(buff), NULL, 0, NI_NUMERICHOST);
if (err) /* [[unlikely]] */
{
throw std::runtime_error(gai_strerror(err));
Expand Down
56 changes: 28 additions & 28 deletions src/common/reactor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,8 @@ namespace Pistache::Aio
};

/* Synchronous implementation of the reactor that polls in the context
* of the same thread
*/
* of the same thread
*/
class SyncImpl : public Reactor::Impl
{
public:
Expand Down Expand Up @@ -244,10 +244,10 @@ namespace Pistache::Aio
std::fill(std::begin(handlers), std::end(handlers), nullptr);
}

HandlerList(const HandlerList& other) = delete;
HandlerList(const HandlerList& other) = delete;
HandlerList& operator=(const HandlerList& other) = delete;

HandlerList(HandlerList&& other) = default;
HandlerList(HandlerList&& other) = default;
HandlerList& operator=(HandlerList&& other) = default;

HandlerList clone() const
Expand Down Expand Up @@ -332,30 +332,30 @@ namespace Pistache::Aio
};

/* Asynchronous implementation of the reactor that spawns a number N of threads
* and creates a polling fd per thread
*
* Implementation detail:
*
* Here is how it works: the implementation simply starts a synchronous variant
* of the implementation in its own std::thread. When adding a handler, it
* will add a clone() of the handler to every worker (thread), and assign its
* own key to the handler. Here is where things start to get interesting. Here
* is how the key encoding works for every handler:
*
* [ handler idx ] [ worker idx ]
* ------------------------ ----------------------------
* ^ 32 bits ^ 32 bits
* -----------------------------------------------------
* ^ 64 bits
*
* Since we have up to 64 bits of data for every key, we encode the index of the
* handler that has been assigned by the SyncImpl in the upper 32 bits, and
* encode the index of the worker thread in the lowest 32 bits.
*
* When registering a fd for a given key, the AsyncImpl then knows which worker
* to use by looking at the lowest 32 bits of the Key's data. The SyncImpl will
* then use the highest 32 bits to retrieve the index of the handler.
*/
* and creates a polling fd per thread
*
* Implementation detail:
*
* Here is how it works: the implementation simply starts a synchronous variant
* of the implementation in its own std::thread. When adding a handler, it
* will add a clone() of the handler to every worker (thread), and assign its
* own key to the handler. Here is where things start to get interesting. Here
* is how the key encoding works for every handler:
*
* [ handler idx ] [ worker idx ]
* ------------------------ ----------------------------
* ^ 32 bits ^ 32 bits
* -----------------------------------------------------
* ^ 64 bits
*
* Since we have up to 64 bits of data for every key, we encode the index of the
* handler that has been assigned by the SyncImpl in the upper 32 bits, and
* encode the index of the worker thread in the lowest 32 bits.
*
* When registering a fd for a given key, the AsyncImpl then knows which worker
* to use by looking at the lowest 32 bits of the Key's data. The SyncImpl will
* then use the highest 32 bits to retrieve the index of the handler.
*/

class AsyncImpl : public Reactor::Impl
{
Expand Down
2 changes: 1 addition & 1 deletion version.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.2.9.20240413
0.2.9.20240419

0 comments on commit 2359c2d

Please sign in to comment.