Add boost and cpprestsdk

This commit is contained in:
2025-05-28 13:59:55 -03:00
parent d3fb88a65d
commit 7079ba75f2
14583 changed files with 3063824 additions and 1 deletions

219
vendor/cpprestsdk/include/pplx/pplx.h vendored Normal file
View File

@ -0,0 +1,219 @@
/***
* Copyright (C) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
*
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* Parallel Patterns Library
*
* For the latest on this and related APIs, please see: https://github.com/Microsoft/cpprestsdk
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
#pragma once
#ifndef _PPLX_H
#define _PPLX_H
#if (defined(_MSC_VER) && (_MSC_VER >= 1800)) && !CPPREST_FORCE_PPLX
#error This file must not be included for Visual Studio 12 or later
#endif
#ifndef _WIN32
#if defined(_WIN32) || defined(__cplusplus_winrt)
#define _WIN32
#endif
#endif // _WIN32
#ifdef _NO_PPLXIMP
#define _PPLXIMP
#else
#ifdef _PPLX_EXPORT
#ifdef _WIN32
#define _PPLXIMP __declspec(dllexport)
#else
#define _PPLXIMP __attribute__((visibility("default")))
#endif
#else
#ifdef _WIN32
#define _PPLXIMP __declspec(dllimport)
#else
#define _PPLXIMP
#endif
#endif
#endif
#include "cpprest/details/cpprest_compat.h"
// Use PPLx
#ifdef _WIN32
#include "pplx/pplxwin.h"
#elif defined(__APPLE__)
#undef _PPLXIMP
#define _PPLXIMP
#include "pplx/pplxlinux.h"
#else
#include "pplx/pplxlinux.h"
#endif // _WIN32
// Common implementation across all the non-concrt versions
#include "pplx/pplxcancellation_token.h"
#include <functional>
// conditional expression is constant
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4127)
#endif
#pragma pack(push, _CRT_PACKING)
/// <summary>
/// The <c>pplx</c> namespace provides classes and functions that give you access to the Concurrency Runtime,
/// a concurrent programming framework for C++. For more information, see <see cref="Concurrency Runtime"/>.
/// </summary>
/**/
namespace pplx
{
/// <summary>
/// Sets the ambient scheduler to be used by the PPL constructs.
/// </summary>
_PPLXIMP void _pplx_cdecl set_ambient_scheduler(std::shared_ptr<pplx::scheduler_interface> _Scheduler);
/// <summary>
/// Gets the ambient scheduler to be used by the PPL constructs
/// </summary>
_PPLXIMP std::shared_ptr<pplx::scheduler_interface> _pplx_cdecl get_ambient_scheduler();
namespace details
{
//
// An internal exception that is used for cancellation. Users do not "see" this exception except through the
// resulting stack unwind. This exception should never be intercepted by user code. It is intended
// for use by the runtime only.
//
class _Interruption_exception : public std::exception
{
public:
    // Stateless by design: the exception *type* is the signal; no message is
    // carried because user code is never supposed to observe it (see the
    // note above this class).
    _Interruption_exception() {}
};
// Minimal RAII scope-guard: takes ownership of a single heap object and
// deletes it when the guard leaves scope (including via stack unwind).
template<typename _T>
struct _AutoDeleter
{
    // The owned object; deleted exactly once by the destructor.
    _T* _Ptr;

    _AutoDeleter(_T* _PPtr) : _Ptr(_PPtr) {}
    ~_AutoDeleter() { delete _Ptr; }
};
struct _TaskProcHandle
{
_TaskProcHandle() {}
virtual ~_TaskProcHandle() {}
virtual void invoke() const = 0;
static void _pplx_cdecl _RunChoreBridge(void* _Parameter)
{
auto _PTaskHandle = static_cast<_TaskProcHandle*>(_Parameter);
_AutoDeleter<_TaskProcHandle> _AutoDeleter(_PTaskHandle);
_PTaskHandle->invoke();
}
};
// Controls whether a chore may be executed synchronously ("inlined") on the
// thread that schedules it, rather than queued to the scheduler. Note that
// _TaskCollectionImpl::_ScheduleTask and _RunTask below only distinguish
// _ForceInline from everything else.
enum _TaskInliningMode
{
    // Disable inline scheduling
    _NoInline = 0,
    // Let runtime decide whether to do inline scheduling or not
    _DefaultAutoInline = 16,
    // Always do inline scheduling
    _ForceInline = -1,
};
// This is an abstraction that is built on top of the scheduler to provide these additional functionalities
// - Ability to wait on a work item
// - Ability to cancel a work item
// - Ability to inline work on invocation of RunAndWait
class _TaskCollectionImpl
{
public:
typedef _TaskProcHandle _TaskProcHandle_t;
_TaskCollectionImpl(scheduler_ptr _PScheduler) : _M_pScheduler(_PScheduler) {}
void _ScheduleTask(_TaskProcHandle_t* _PTaskHandle, _TaskInliningMode _InliningMode)
{
if (_InliningMode == _ForceInline)
{
_TaskProcHandle_t::_RunChoreBridge(_PTaskHandle);
}
else
{
_M_pScheduler->schedule(_TaskProcHandle_t::_RunChoreBridge, _PTaskHandle);
}
}
void _Cancel()
{
// No cancellation support
}
void _RunAndWait()
{
// No inlining support yet
_Wait();
}
void _Wait() { _M_Completed.wait(); }
void _Complete() { _M_Completed.set(); }
scheduler_ptr _GetScheduler() const { return _M_pScheduler; }
// Fire and forget
static void _RunTask(TaskProc_t _Proc, void* _Parameter, _TaskInliningMode _InliningMode)
{
if (_InliningMode == _ForceInline)
{
_Proc(_Parameter);
}
else
{
// Schedule the work on the ambient scheduler
get_ambient_scheduler()->schedule(_Proc, _Parameter);
}
}
static bool _pplx_cdecl _Is_cancellation_requested()
{
// We do not yet have the ability to determine the current task. So return false always
return false;
}
private:
extensibility::event_t _M_Completed;
scheduler_ptr _M_pScheduler;
};
// For create_async lambdas that return a (non-task) result, we oversubscriber the current task for the duration of the
// lambda.
struct _Task_generator_oversubscriber
{
};
typedef _TaskCollectionImpl _TaskCollection_t;
typedef _TaskInliningMode _TaskInliningMode_t;
typedef _Task_generator_oversubscriber _Task_generator_oversubscriber_t;
} // namespace details
} // namespace pplx
#pragma pack(pop)
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#endif // _PPLX_H

View File

@ -0,0 +1,920 @@
/***
* Copyright (C) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
*
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* Parallel Patterns Library : cancellation_token
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
#pragma once
#ifndef _PPLX_H
#error This header must not be included directly
#endif
#ifndef _PPLXCANCELLATION_TOKEN_H
#define _PPLXCANCELLATION_TOKEN_H
#if (defined(_MSC_VER) && (_MSC_VER >= 1800)) && !CPPREST_FORCE_PPLX
#error This file must not be included for Visual Studio 12 or later
#endif
#include "pplx/pplxinterface.h"
#include <cstdlib>
#include <string>
#pragma pack(push, _CRT_PACKING)
// All header files are required to be protected from the macro new
#pragma push_macro("new")
#undef new
namespace pplx
{
/// <summary>
/// This class describes an exception thrown by the PPL tasks layer in order to force the current task
/// to cancel. It is also thrown by the <c>get()</c> method on <see cref="task Class">task</see>, for a
/// canceled task.
/// </summary>
/// <seealso cref="task::get Method"/>
/// <seealso cref="cancel_current_task Method"/>
/**/
class task_canceled : public std::exception
{
public:
    /// <summary>
    /// Constructs a <c>task_canceled</c> object carrying a descriptive message.
    /// </summary>
    /// <param name="_Message">
    /// A descriptive message of the error.
    /// </param>
    /**/
    explicit task_canceled(_In_z_ const char* _Message) throw() : _message(_Message) {}

    /// <summary>
    /// Constructs a <c>task_canceled</c> object with an empty message.
    /// </summary>
    /**/
    task_canceled() throw() : exception() {}

    ~task_canceled() throw() {}

    // Returns the message supplied at construction (empty otherwise).
    const char* what() const CPPREST_NOEXCEPT { return _message.c_str(); }

private:
    std::string _message;
};
/// <summary>
/// This class describes an exception thrown when an invalid operation is performed that is not more accurately
/// described by another exception type thrown by the Concurrency Runtime.
/// </summary>
/// <remarks>
/// The various methods which throw this exception will generally document under what circumstances they will throw
/// it.
/// </remarks>
/**/
class invalid_operation : public std::exception
{
public:
    /// <summary>
    /// Constructs an <c>invalid_operation</c> object carrying a descriptive message.
    /// </summary>
    /// <param name="_Message">
    /// A descriptive message of the error.
    /// </param>
    /**/
    invalid_operation(_In_z_ const char* _Message) throw() : _message(_Message) {}

    /// <summary>
    /// Constructs an <c>invalid_operation</c> object with an empty message.
    /// </summary>
    /**/
    invalid_operation() throw() : exception() {}

    ~invalid_operation() throw() {}

    // Returns the message supplied at construction (empty otherwise).
    const char* what() const CPPREST_NOEXCEPT { return _message.c_str(); }

private:
    std::string _message;
};
namespace details
{
// Base class for all reference counted objects
class _RefCounter
{
public:
    // A counted object must never be destroyed while references remain.
    virtual ~_RefCounter() { _ASSERTE(_M_refCount == 0); }
    // Acquires a reference
    // Returns the new reference count.
    long _Reference()
    {
        long _Refcount = atomic_increment(_M_refCount);
        // 0 - 1 transition is illegal
        _ASSERTE(_Refcount > 1);
        return _Refcount;
    }
    // Releases the reference
    // Returns the new reference count
    long _Release()
    {
        long _Refcount = atomic_decrement(_M_refCount);
        _ASSERTE(_Refcount >= 0);
        if (_Refcount == 0)
        {
            // Last reference dropped: dispose via the (overridable) deleter.
            _Destroy();
        }
        return _Refcount;
    }
protected:
    // Allow derived classes to provide their own deleter
    virtual void _Destroy() { delete this; }
    // Only allow instantiation through derived class
    _RefCounter(long _InitialCount = 1) : _M_refCount(_InitialCount) { _ASSERTE(_M_refCount > 0); }
    // Reference count
    atomic_long _M_refCount;
};
class _CancellationTokenState;
class _CancellationTokenRegistration : public _RefCounter
{
private:
    // _M_state holds either one of these sentinels or the id of the thread
    // currently executing _Invoke(). The `(tid & 0x3) == 0` assertion below
    // is what keeps thread ids distinguishable from these four small values.
    static const long _STATE_CLEAR = 0;
    static const long _STATE_DEFER_DELETE = 1;
    static const long _STATE_SYNCHRONIZE = 2;
    static const long _STATE_CALLED = 3;
public:
    _CancellationTokenRegistration(long _InitialRefs = 1)
        : _RefCounter(_InitialRefs), _M_state(_STATE_CALLED), _M_pTokenState(NULL)
    {
    }
    _CancellationTokenState* _GetToken() const { return _M_pTokenState; }
protected:
    // A registration still in _STATE_CLEAR was registered but neither ran
    // nor was deregistered -- destroying it then indicates a lifetime bug.
    virtual ~_CancellationTokenRegistration() { _ASSERTE(_M_state != _STATE_CLEAR); }
    // Derived classes run the user-supplied callback here.
    virtual void _Exec() = 0;
private:
    friend class _CancellationTokenState;
    // Called by the owning token when cancellation fires. Claims the
    // registration (CAS _STATE_CLEAR -> this thread's id) so a concurrent
    // deregister can see the callback is in flight; after running it,
    // publishes _STATE_CALLED and wakes any deregistering thread that parked
    // on _M_pSyncBlock in the meantime.
    void _Invoke()
    {
        long tid = ::pplx::details::platform::GetCurrentThreadId();
        _ASSERTE((tid & 0x3) == 0); // If this ever fires, we need a different encoding for this.
        long result = atomic_compare_exchange(_M_state, tid, _STATE_CLEAR);
        if (result == _STATE_CLEAR)
        {
            _Exec();
            result = atomic_compare_exchange(_M_state, _STATE_CALLED, tid);
            if (result == _STATE_SYNCHRONIZE)
            {
                // A deregistering thread installed _M_pSyncBlock and is
                // waiting for this callback to finish -- release it.
                _M_pSyncBlock->set();
            }
        }
        _Release();
    }
    atomic_long _M_state;
    // Event supplied by a deregistering thread that must wait out an
    // in-flight callback (see _CancellationTokenState::_DeregisterCallback).
    extensibility::event_t* _M_pSyncBlock;
    _CancellationTokenState* _M_pTokenState;
};
// Registration node that stores a copy of an arbitrary callable and runs it
// when the owning token is canceled.
template<typename _Function>
class _CancellationTokenCallback : public _CancellationTokenRegistration
{
public:
    _CancellationTokenCallback(const _Function& _Func) : _M_callback(_Func) {}

protected:
    // Invoked by the token machinery on cancellation.
    virtual void _Exec() { _M_callback(); }

private:
    _Function _M_callback;
};
class CancellationTokenRegistration_TaskProc : public _CancellationTokenRegistration
{
public:
CancellationTokenRegistration_TaskProc(TaskProc_t proc, _In_ void* pData, int initialRefs)
: _CancellationTokenRegistration(initialRefs), m_proc(proc), m_pData(pData)
{
}
protected:
virtual void _Exec() { m_proc(m_pData); }
private:
TaskProc_t m_proc;
void* m_pData;
};
// The base implementation of a cancellation token.
class _CancellationTokenState : public _RefCounter
{
protected:
    // Singly linked list of registrations, protected externally by
    // _M_listLock. Nodes are allocated with ::malloc/::free.
    // NOTE(review): malloc is presumably used to stay clear of any replaced
    // operator new -- confirm before "modernizing" this to new/delete.
    class TokenRegistrationContainer
    {
    private:
        typedef struct _Node
        {
            _CancellationTokenRegistration* _M_token;
            _Node* _M_next;
        } Node;
    public:
        TokenRegistrationContainer() : _M_begin(nullptr), _M_last(nullptr) {}
        // Frees only the remaining list nodes -- the registrations they point
        // to are released elsewhere (see ~_CancellationTokenState / _Cancel).
        ~TokenRegistrationContainer()
        {
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 6001)
#endif
            auto node = _M_begin;
            while (node != nullptr)
            {
                Node* tmp = node;
                node = node->_M_next;
                ::free(tmp);
            }
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
        }
        // Exchanges the entire contents with `list`; used to "run down" the
        // registration list outside the lock.
        void swap(TokenRegistrationContainer& list)
        {
            std::swap(list._M_begin, _M_begin);
            std::swap(list._M_last, _M_last);
        }
        bool empty() { return _M_begin == nullptr; }
        // Applies `lambda` to every registration, in registration order.
        template<typename T>
        void for_each(T lambda)
        {
            Node* node = _M_begin;
            while (node != nullptr)
            {
                lambda(node->_M_token);
                node = node->_M_next;
            }
        }
        // Appends a registration; throws std::bad_alloc when malloc fails.
        void push_back(_CancellationTokenRegistration* token)
        {
            Node* node = reinterpret_cast<Node*>(::malloc(sizeof(Node)));
            if (node == nullptr)
            {
                throw ::std::bad_alloc();
            }
            node->_M_token = token;
            node->_M_next = nullptr;
            if (_M_begin == nullptr)
            {
                _M_begin = node;
            }
            else
            {
                _M_last->_M_next = node;
            }
            _M_last = node;
        }
        // Unlinks and frees the first node referencing `token`; no-op when
        // the token is not present in the list.
        void remove(_CancellationTokenRegistration* token)
        {
            Node* node = _M_begin;
            Node* prev = nullptr;
            while (node != nullptr)
            {
                if (node->_M_token == token)
                {
                    if (prev == nullptr)
                    {
                        _M_begin = node->_M_next;
                    }
                    else
                    {
                        prev->_M_next = node->_M_next;
                    }
                    if (node->_M_next == nullptr)
                    {
                        _M_last = prev;
                    }
                    ::free(node);
                    break;
                }
                prev = node;
                node = node->_M_next;
            }
        }
    private:
        Node* _M_begin;
        Node* _M_last;
    };
public:
    static _CancellationTokenState* _NewTokenState() { return new _CancellationTokenState(); }
    // Sentinel (not a real object!) meaning "no token"; the address value 2
    // is never dereferenced and _IsValid filters it out.
    static _CancellationTokenState* _None() { return reinterpret_cast<_CancellationTokenState*>(2); }
    static bool _IsValid(_In_opt_ _CancellationTokenState* _PToken) { return (_PToken != NULL && _PToken != _None()); }
    _CancellationTokenState() : _M_stateFlag(0) {}
    // Drops remaining registrations WITHOUT invoking them. Marking each one
    // _STATE_SYNCHRONIZE keeps the "never ran" assertion in
    // ~_CancellationTokenRegistration from firing on the _STATE_CLEAR state.
    ~_CancellationTokenState()
    {
        TokenRegistrationContainer rundownList;
        {
            extensibility::scoped_critical_section_t _Lock(_M_listLock);
            _M_registrations.swap(rundownList);
        }
        rundownList.for_each([](_CancellationTokenRegistration* pRegistration) {
            pRegistration->_M_state = _CancellationTokenRegistration::_STATE_SYNCHRONIZE;
            pRegistration->_Release();
        });
    }
    bool _IsCanceled() const { return (_M_stateFlag != 0); }
    // _M_stateFlag: 0 = not canceled, 1 = cancellation in progress
    // (callbacks running), 2 = cancellation complete. The CAS guarantees
    // exactly one caller performs the rundown.
    void _Cancel()
    {
        if (atomic_compare_exchange(_M_stateFlag, 1l, 0l) == 0)
        {
            TokenRegistrationContainer rundownList;
            {
                extensibility::scoped_critical_section_t _Lock(_M_listLock);
                _M_registrations.swap(rundownList);
            }
            // Callbacks run outside the lock so they may re-enter the token
            // (e.g. deregister from within a callback).
            rundownList.for_each([](_CancellationTokenRegistration* pRegistration) { pRegistration->_Invoke(); });
            _M_stateFlag = 2;
            _M_cancelComplete.set();
        }
    }
    // Convenience wrapper: packages a raw TaskProc_t + context into a
    // registration object and registers it.
    _CancellationTokenRegistration* _RegisterCallback(TaskProc_t _PCallback, _In_ void* _PData, int _InitialRefs = 1)
    {
        _CancellationTokenRegistration* pRegistration =
            new CancellationTokenRegistration_TaskProc(_PCallback, _PData, _InitialRefs);
        _RegisterCallback(pRegistration);
        return pRegistration;
    }
    // Registers a callback. If the token is already canceled, the callback
    // runs immediately and synchronously on this thread; the cancel check is
    // double-checked under the list lock to close the race with _Cancel().
    void _RegisterCallback(_In_ _CancellationTokenRegistration* _PRegistration)
    {
        _PRegistration->_M_state = _CancellationTokenRegistration::_STATE_CLEAR;
        _PRegistration->_Reference();
        _PRegistration->_M_pTokenState = this;
        bool invoke = true;
        if (!_IsCanceled())
        {
            extensibility::scoped_critical_section_t _Lock(_M_listLock);
            if (!_IsCanceled())
            {
                invoke = false;
                _M_registrations.push_back(_PRegistration);
            }
        }
        if (invoke)
        {
            _PRegistration->_Invoke();
        }
    }
    void _DeregisterCallback(_In_ _CancellationTokenRegistration* _PRegistration)
    {
        bool synchronize = false;
        {
            extensibility::scoped_critical_section_t _Lock(_M_listLock);
            //
            // If a cancellation has occurred, the registration list is guaranteed to be empty if we've observed it
            // under the auspices of the lock. In this case, we must synchronize with the canceling thread to guarantee
            // that the cancellation is finished by the time we return from this method.
            //
            if (!_M_registrations.empty())
            {
                _M_registrations.remove(_PRegistration);
                _PRegistration->_M_state = _CancellationTokenRegistration::_STATE_SYNCHRONIZE;
                _PRegistration->_Release();
            }
            else
            {
                synchronize = true;
            }
        }
        //
        // If the list is empty, we are in one of several situations:
        //
        // - The callback has already been made --> do nothing
        // - The callback is about to be made --> flag it so it doesn't happen and return
        // - The callback is in progress elsewhere --> synchronize with it
        // - The callback is in progress on this thread --> do nothing
        //
        if (synchronize)
        {
            long result = atomic_compare_exchange(_PRegistration->_M_state,
                                                  _CancellationTokenRegistration::_STATE_DEFER_DELETE,
                                                  _CancellationTokenRegistration::_STATE_CLEAR);
            switch (result)
            {
                case _CancellationTokenRegistration::_STATE_CLEAR:
                case _CancellationTokenRegistration::_STATE_CALLED: break;
                case _CancellationTokenRegistration::_STATE_DEFER_DELETE:
                case _CancellationTokenRegistration::_STATE_SYNCHRONIZE: _ASSERTE(false); break;
                default:
                {
                    // `result` is the id of the thread currently running the
                    // callback (see _CancellationTokenRegistration::_Invoke).
                    long tid = result;
                    if (tid == ::pplx::details::platform::GetCurrentThreadId())
                    {
                        //
                        // It is entirely legal for a caller to Deregister during a callback instead of having to
                        // provide their own synchronization mechanism between the two. In this case, we do *NOT* need
                        // to explicitly synchronize with the callback as doing so would deadlock. If the call happens
                        // during, skip any extra synchronization.
                        //
                        break;
                    }
                    extensibility::event_t ev;
                    _PRegistration->_M_pSyncBlock = &ev;
                    long result_1 =
                        atomic_exchange(_PRegistration->_M_state, _CancellationTokenRegistration::_STATE_SYNCHRONIZE);
                    if (result_1 != _CancellationTokenRegistration::_STATE_CALLED)
                    {
                        // Block until the in-flight callback signals the
                        // event installed above (see _Invoke).
                        _PRegistration->_M_pSyncBlock->wait(::pplx::extensibility::event_t::timeout_infinite);
                    }
                    break;
                }
            }
        }
    }
private:
    // The flag for the token state (whether it is canceled or not)
    atomic_long _M_stateFlag;
    // Notification of completion of cancellation of this token.
    extensibility::event_t _M_cancelComplete; // Hmm.. where do we wait for it??
    // Lock to protect the registrations list
    extensibility::critical_section_t _M_listLock;
    // The protected list of registrations
    TokenRegistrationContainer _M_registrations;
};
} // namespace details
class cancellation_token_source;
class cancellation_token;
/// <summary>
/// The <c>cancellation_token_registration</c> class represents a callback notification from a
/// <c>cancellation_token</c>. When the <c>register</c> method on a <c>cancellation_token</c> is used to receive
/// notification of when cancellation occurs, a <c>cancellation_token_registration</c> object is returned as a
/// handle to the callback so that the caller can request a specific callback no longer be made through use of the
/// <c>deregister</c> method.
/// </summary>
class cancellation_token_registration
{
public:
    cancellation_token_registration() : _M_pRegistration(NULL) {}
    ~cancellation_token_registration() { _Release_ref(); }

    // Copying shares the underlying registration and bumps its ref count.
    cancellation_token_registration(const cancellation_token_registration& _Src) { _Acquire(_Src._M_pRegistration); }

    // Moving steals the source's pointer; the source is left empty.
    cancellation_token_registration(cancellation_token_registration&& _Src) { _Steal(_Src._M_pRegistration); }

    cancellation_token_registration& operator=(const cancellation_token_registration& _Src)
    {
        if (&_Src != this)
        {
            _Release_ref();
            _Acquire(_Src._M_pRegistration);
        }
        return *this;
    }

    cancellation_token_registration& operator=(cancellation_token_registration&& _Src)
    {
        if (&_Src != this)
        {
            _Release_ref();
            _Steal(_Src._M_pRegistration);
        }
        return *this;
    }

    // Registrations are equal when they refer to the same underlying node.
    bool operator==(const cancellation_token_registration& _Rhs) const
    {
        return _M_pRegistration == _Rhs._M_pRegistration;
    }

    bool operator!=(const cancellation_token_registration& _Rhs) const { return !(*this == _Rhs); }

private:
    friend class cancellation_token;

    // Adopting constructor: takes over the caller's reference as-is.
    cancellation_token_registration(_In_ details::_CancellationTokenRegistration* _PRegistration)
        : _M_pRegistration(_PRegistration)
    {
    }

    // Drops our reference (if any) and becomes empty.
    void _Release_ref()
    {
        if (_M_pRegistration != NULL)
        {
            _M_pRegistration->_Release();
        }
        _M_pRegistration = NULL;
    }

    // Takes a new shared reference on _PRegistration (which may be NULL).
    void _Acquire(_In_ details::_CancellationTokenRegistration* _PRegistration)
    {
        if (_PRegistration != NULL)
        {
            _PRegistration->_Reference();
        }
        _M_pRegistration = _PRegistration;
    }

    // Transfers ownership out of _PRegistration without touching the count.
    void _Steal(_In_ details::_CancellationTokenRegistration*& _PRegistration)
    {
        _M_pRegistration = _PRegistration;
        _PRegistration = NULL;
    }

    details::_CancellationTokenRegistration* _M_pRegistration;
};
/// <summary>
/// The <c>cancellation_token</c> class represents the ability to determine whether some operation has been
/// requested to cancel. A given token can be associated with a <c>task_group</c>, <c>structured_task_group</c>, or
/// <c>task</c> to provide implicit cancellation. It can also be polled for cancellation or have a callback
/// registered for if and when the associated <c>cancellation_token_source</c> is canceled.
/// </summary>
class cancellation_token
{
public:
    typedef details::_CancellationTokenState* _ImplType;
    /// <summary>
    /// Returns a cancellation token which can never be subject to cancellation.
    /// </summary>
    /// <returns>
    /// A cancellation token that cannot be canceled.
    /// </returns>
    static cancellation_token none() { return cancellation_token(); }
    // Copying shares the underlying state and takes a reference on it.
    cancellation_token(const cancellation_token& _Src) { _Assign(_Src._M_Impl); }
    // Moving steals the state pointer; the source becomes a "none" token.
    cancellation_token(cancellation_token&& _Src) { _Move(_Src._M_Impl); }
    cancellation_token& operator=(const cancellation_token& _Src)
    {
        if (this != &_Src)
        {
            _Clear();
            _Assign(_Src._M_Impl);
        }
        return *this;
    }
    cancellation_token& operator=(cancellation_token&& _Src)
    {
        if (this != &_Src)
        {
            _Clear();
            _Move(_Src._M_Impl);
        }
        return *this;
    }
    // Tokens are equal when they share the same state (or are both "none").
    bool operator==(const cancellation_token& _Src) const { return _M_Impl == _Src._M_Impl; }
    bool operator!=(const cancellation_token& _Src) const { return !(operator==(_Src)); }
    ~cancellation_token() { _Clear(); }
    /// <summary>
    /// Returns an indication of whether this token can be canceled or not.
    /// </summary>
    /// <returns>
    /// An indication of whether this token can be canceled or not.
    /// </returns>
    bool is_cancelable() const { return (_M_Impl != NULL); }
    /// <summary>
    /// Returns <c>true</c> if the token has been canceled.
    /// </summary>
    /// <returns>
    /// The value <c>true</c> if the token has been canceled; otherwise, the value <c>false</c>.
    /// </returns>
    bool is_canceled() const { return (_M_Impl != NULL && _M_Impl->_IsCanceled()); }
    /// <summary>
    /// Registers a callback function with the token. If and when the token is canceled, the callback will be made.
    /// Note that if the token is already canceled at the point where this method is called, the callback will be
    /// made immediately and synchronously.
    /// </summary>
    /// <typeparam name="_Function">
    /// The type of the function object that will be called back when this <c>cancellation_token</c> is canceled.
    /// </typeparam>
    /// <param name="_Func">
    /// The function object that will be called back when this <c>cancellation_token</c> is canceled.
    /// </param>
    /// <returns>
    /// A <c>cancellation_token_registration</c> object which can be utilized in the <c>deregister</c> method to
    /// deregister a previously registered callback and prevent it from being made. The method will throw an <see
    /// cref="invalid_operation Class">invalid_operation </see> exception if it is called on a
    /// <c>cancellation_token</c> object that was created using the <see cref="cancellation_token::none
    /// Method">cancellation_token::none </see> method.
    /// </returns>
    template<typename _Function>
    ::pplx::cancellation_token_registration register_callback(const _Function& _Func) const
    {
        if (_M_Impl == NULL)
        {
            // A callback cannot be registered if the token does not have an associated source.
            throw invalid_operation();
        }
#if defined(_MSC_VER)
#pragma warning(suppress : 28197)
#endif
        details::_CancellationTokenCallback<_Function>* _PCallback =
            new details::_CancellationTokenCallback<_Function>(_Func);
        _M_Impl->_RegisterCallback(_PCallback);
        // The registration handle adopts the reference created by `new`.
        return cancellation_token_registration(_PCallback);
    }
    /// <summary>
    /// Removes a callback previously registered via the <c>register</c> method based on the
    /// <c>cancellation_token_registration</c> object returned at the time of registration.
    /// </summary>
    /// <param name="_Registration">
    /// The <c>cancellation_token_registration</c> object corresponding to the callback to be deregistered. This
    /// token must have been previously returned from a call to the <c>register</c> method.
    /// </param>
    void deregister_callback(const cancellation_token_registration& _Registration) const
    {
        // NOTE(review): unlike register_callback, there is no NULL guard
        // here; calling this on a token obtained from cancellation_token::none()
        // would dereference a null _M_Impl -- confirm callers never do that.
        _M_Impl->_DeregisterCallback(_Registration._M_pRegistration);
    }
    _ImplType _GetImpl() const { return _M_Impl; }
    // Like _GetImpl, but maps the "none" token to the _None() sentinel
    // instead of returning NULL.
    _ImplType _GetImplValue() const
    {
        return (_M_Impl == NULL) ? ::pplx::details::_CancellationTokenState::_None() : _M_Impl;
    }
    static cancellation_token _FromImpl(_ImplType _Impl) { return cancellation_token(_Impl); }
private:
    friend class cancellation_token_source;
    _ImplType _M_Impl;
    // Drops this token's reference on the state (if any); becomes "none".
    void _Clear()
    {
        if (_M_Impl != NULL)
        {
            _M_Impl->_Release();
        }
        _M_Impl = NULL;
    }
    // Takes a new shared reference on _Impl (which may be NULL).
    void _Assign(_ImplType _Impl)
    {
        if (_Impl != NULL)
        {
            _Impl->_Reference();
        }
        _M_Impl = _Impl;
    }
    // Transfers ownership out of _Impl without touching the reference count.
    void _Move(_ImplType& _Impl)
    {
        _M_Impl = _Impl;
        _Impl = NULL;
    }
    cancellation_token() : _M_Impl(NULL) {}
    // Normalizes the _None() sentinel to NULL, then takes a reference on any
    // real state.
    cancellation_token(_ImplType _Impl) : _M_Impl(_Impl)
    {
        if (_M_Impl == ::pplx::details::_CancellationTokenState::_None())
        {
            _M_Impl = NULL;
        }
        if (_M_Impl != NULL)
        {
            _M_Impl->_Reference();
        }
    }
};
/// <summary>
/// The <c>cancellation_token_source</c> class represents the ability to cancel some cancelable operation.
/// </summary>
class cancellation_token_source
{
public:
    typedef ::pplx::details::_CancellationTokenState* _ImplType;
    /// <summary>
    /// Constructs a new <c>cancellation_token_source</c>. The source can be used to flag cancellation of some
    /// cancelable operation.
    /// </summary>
    cancellation_token_source() { _M_Impl = new ::pplx::details::_CancellationTokenState; }
    // Copying shares the underlying state and takes a reference on it.
    cancellation_token_source(const cancellation_token_source& _Src) { _Assign(_Src._M_Impl); }
    // Moving steals the state pointer; the moved-from source holds NULL.
    cancellation_token_source(cancellation_token_source&& _Src) { _Move(_Src._M_Impl); }
    cancellation_token_source& operator=(const cancellation_token_source& _Src)
    {
        if (this != &_Src)
        {
            _Clear();
            _Assign(_Src._M_Impl);
        }
        return *this;
    }
    cancellation_token_source& operator=(cancellation_token_source&& _Src)
    {
        if (this != &_Src)
        {
            _Clear();
            _Move(_Src._M_Impl);
        }
        return *this;
    }
    bool operator==(const cancellation_token_source& _Src) const { return _M_Impl == _Src._M_Impl; }
    bool operator!=(const cancellation_token_source& _Src) const { return !(operator==(_Src)); }
    ~cancellation_token_source()
    {
        if (_M_Impl != NULL)
        {
            _M_Impl->_Release();
        }
    }
    /// <summary>
    /// Returns a cancellation token associated with this source. The returned token can be polled for cancellation
    /// or provide a callback if and when cancellation occurs.
    /// </summary>
    /// <returns>
    /// A cancellation token associated with this source.
    /// </returns>
    cancellation_token get_token() const { return cancellation_token(_M_Impl); }
    /// <summary>
    /// Creates a <c>cancellation_token_source</c> which is canceled when the provided token is canceled.
    /// </summary>
    /// <param name="_Src">
    /// A token whose cancellation will cause cancellation of the returned token source. Note that the returned
    /// token source can also be canceled independently of the source contained in this parameter.
    /// </param>
    /// <returns>
    /// A <c>cancellation_token_source</c> which is canceled when the token provided by the <paramref name="_Src"/>
    /// parameter is canceled.
    /// </returns>
    static cancellation_token_source create_linked_source(cancellation_token& _Src)
    {
        cancellation_token_source newSource;
        // The registration handle returned by register_callback is
        // deliberately dropped, so the link can never be deregistered; the
        // by-value capture of newSource keeps the new state referenced by
        // the callback.
        _Src.register_callback([newSource]() { newSource.cancel(); });
        return newSource;
    }
    /// <summary>
    /// Creates a <c>cancellation_token_source</c> which is canceled when one of a series of tokens represented by
    /// an STL iterator pair is canceled.
    /// </summary>
    /// <param name="_Begin">
    /// The STL iterator corresponding to the beginning of the range of tokens to listen for cancellation of.
    /// </param>
    /// <param name="_End">
    /// The STL iterator corresponding to the ending of the range of tokens to listen for cancellation of.
    /// </param>
    /// <returns>
    /// A <c>cancellation_token_source</c> which is canceled when any of the tokens provided by the range described
    /// by the STL iterators contained in the <paramref name="_Begin"/> and <paramref name="_End"/> parameters is
    /// canceled.
    /// </returns>
    template<typename _Iter>
    static cancellation_token_source create_linked_source(_Iter _Begin, _Iter _End)
    {
        cancellation_token_source newSource;
        for (_Iter _It = _Begin; _It != _End; ++_It)
        {
            _It->register_callback([newSource]() { newSource.cancel(); });
        }
        return newSource;
    }
    /// <summary>
    /// Cancels the token. Any <c>task_group</c>, <c>structured_task_group</c>, or <c>task</c> which utilizes the
    /// token will be canceled upon this call and throw an exception at the next interruption point.
    /// </summary>
    // NOTE(review): _M_Impl is NULL after this source has been moved from;
    // calling cancel() on a moved-from source would dereference NULL
    // (get_token() is safe -- the token constructor accepts NULL).
    void cancel() const { _M_Impl->_Cancel(); }
    _ImplType _GetImpl() const { return _M_Impl; }
    static cancellation_token_source _FromImpl(_ImplType _Impl) { return cancellation_token_source(_Impl); }
private:
    _ImplType _M_Impl;
    // Drops this source's reference on the state (if any).
    void _Clear()
    {
        if (_M_Impl != NULL)
        {
            _M_Impl->_Release();
        }
        _M_Impl = NULL;
    }
    // Takes a new shared reference on _Impl (which may be NULL).
    void _Assign(_ImplType _Impl)
    {
        if (_Impl != NULL)
        {
            _Impl->_Reference();
        }
        _M_Impl = _Impl;
    }
    // Transfers ownership out of _Impl without touching the reference count.
    void _Move(_ImplType& _Impl)
    {
        _M_Impl = _Impl;
        _Impl = NULL;
    }
    // Normalizes the _None() sentinel to NULL, then takes a reference on any
    // real state.
    cancellation_token_source(_ImplType _Impl) : _M_Impl(_Impl)
    {
        if (_M_Impl == ::pplx::details::_CancellationTokenState::_None())
        {
            _M_Impl = NULL;
        }
        if (_M_Impl != NULL)
        {
            _M_Impl->_Reference();
        }
    }
};
} // namespace pplx
#pragma pop_macro("new")
#pragma pack(pop)
#endif // _PPLXCANCELLATION_TOKEN_H

View File

@ -0,0 +1,83 @@
/***
* Copyright (C) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
*
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* Utilities to convert between PPL tasks and PPLX tasks
*
* For the latest on this and related APIs, please see: https://github.com/Microsoft/cpprestsdk
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
#pragma once
#ifndef _PPLXCONV_H
#define _PPLXCONV_H
#ifndef _WIN32
#error This is only supported on Windows
#endif
#if defined(_MSC_VER) && (_MSC_VER >= 1700) && (_MSC_VER < 1800) && !CPPREST_FORCE_PPLX
#include "pplx/pplxtasks.h"
#include <ppltasks.h>
namespace pplx
{
namespace _Ppl_conv_helpers
{
// Preferred overload: chosen when the completion event's set() accepts the
// value produced by _Func (i.e. the task yields a result).
template<typename _Tc, typename _F>
auto _Set_value(_Tc _Tcp, const _F& _Func) -> decltype(_Tcp.set(_Func()))
{
    return _Tcp.set(_Func());
}

// Fallback overload (selected through the ellipsis when the one above fails
// to substitute): the task is void-like, so run _Func for its side effects
// only and signal completion without a value.
template<typename _Tc, typename _F>
auto _Set_value(_Tc _Tcp, const _F& _Func, ...) -> decltype(_Tcp.set())
{
    _Func();
    return _Tcp.set();
}

// Bridges a task of one flavor (PPL or PPLX) into the other: a continuation
// on the source task forwards its result -- or its exception -- into a
// completion event of the destination flavor, from which the destination
// task is constructed.
template<typename _TaskType, typename _OtherTaskType, typename _OtherTCEType>
_OtherTaskType _Convert_task(_TaskType _Task)
{
    _OtherTCEType _Completion;
    _Task.then([_Completion](_TaskType _Inner) {
        try
        {
            _Ppl_conv_helpers::_Set_value(_Completion, [=] { return _Inner.get(); });
        }
        catch (...)
        {
            _Completion.set_exception(std::current_exception());
        }
    });
    return _OtherTaskType(_Completion);
}
} // namespace _Ppl_conv_helpers
// Wraps a PPLX task in a native Concurrency Runtime (PPL) task of the same
// result type; _Convert_task forwards the result -- or exception -- through
// a concurrency::task_completion_event.
template<typename _TaskType>
concurrency::task<_TaskType> pplx_task_to_concurrency_task(pplx::task<_TaskType> _Task)
{
    return _Ppl_conv_helpers::_Convert_task<typename pplx::task<_TaskType>,
                                            concurrency::task<_TaskType>,
                                            concurrency::task_completion_event<_TaskType>>(_Task);
}
// Wraps a native Concurrency Runtime (PPL) task in a PPLX task of the same
// result type; _Convert_task forwards the result -- or exception -- through
// a pplx::task_completion_event.
template<typename _TaskType>
pplx::task<_TaskType> concurrency_task_to_pplx_task(concurrency::task<_TaskType> _Task)
{
    return _Ppl_conv_helpers::_Convert_task<typename concurrency::task<_TaskType>,
                                            pplx::task<_TaskType>,
                                            pplx::task_completion_event<_TaskType>>(_Task);
}
} // namespace pplx
#endif
#endif // _PPLXCONV_H

View File

@ -0,0 +1,227 @@
/***
* Copyright (C) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
*
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* PPL interfaces
*
* For the latest on this and related APIs, please see: https://github.com/Microsoft/cpprestsdk
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
#pragma once
#ifndef _PPLXINTERFACE_H
#define _PPLXINTERFACE_H
#if (defined(_MSC_VER) && (_MSC_VER >= 1800)) && !CPPREST_FORCE_PPLX
#error This file must not be included for Visual Studio 12 or later
#endif
#if defined(_CRTBLD)
#elif defined(_WIN32)
#if (_MSC_VER >= 1700)
#define _USE_REAL_ATOMICS
#endif
#else // GCC compiler
#define _USE_REAL_ATOMICS
#endif
#include <memory>
#ifdef _USE_REAL_ATOMICS
#include <atomic>
#endif
#define _pplx_cdecl __cdecl
namespace pplx
{
/// <summary>
/// An elementary abstraction for a task, defined as <c>void (__cdecl * TaskProc_t)(void *)</c>. A <c>TaskProc</c>
/// is called to invoke the body of a task.
/// </summary>
/**/
typedef void(_pplx_cdecl* TaskProc_t)(void*);
/// <summary>
/// Scheduler Interface
/// </summary>
struct __declspec(novtable) scheduler_interface
{
    // Queues a unit of work: implementations are expected to eventually call
    // the TaskProc_t, passing the opaque context pointer back to it. When
    // and on which thread is left entirely to the concrete scheduler.
    // NOTE(review): there is no virtual destructor, so implementations must
    // not be deleted through a scheduler_interface pointer.
    virtual void schedule(TaskProc_t, _In_ void*) = 0;
};
/// <summary>
/// Represents a pointer to a scheduler. This class exists to allow the
/// the specification of a shared lifetime by using shared_ptr or just
/// a plain reference by using raw pointer.
/// </summary>
struct scheduler_ptr
{
/// <summary>
/// Creates a scheduler pointer from shared_ptr to scheduler
/// </summary>
explicit scheduler_ptr(std::shared_ptr<scheduler_interface> scheduler) : m_sharedScheduler(std::move(scheduler))
{
m_scheduler = m_sharedScheduler.get();
}
/// <summary>
/// Creates a scheduler pointer from raw pointer to scheduler
/// </summary>
explicit scheduler_ptr(_In_opt_ scheduler_interface* pScheduler) : m_scheduler(pScheduler) {}
/// <summary>
/// Behave like a pointer
/// </summary>
scheduler_interface* operator->() const { return get(); }
/// <summary>
/// Returns the raw pointer to the scheduler
/// </summary>
scheduler_interface* get() const { return m_scheduler; }
/// <summary>
/// Test whether the scheduler pointer is non-null
/// </summary>
operator bool() const { return get() != nullptr; }
private:
std::shared_ptr<scheduler_interface> m_sharedScheduler;
scheduler_interface* m_scheduler;
};
/// <summary>
/// Describes the execution status of a <c>task_group</c> or <c>structured_task_group</c> object. A value of this
/// type is returned by numerous methods that wait on tasks scheduled to a task group to complete.
/// </summary>
/// <remarks>
/// NOTE(review): enumerator names/order appear intended to mirror the
/// Concurrency Runtime's task_group_status - confirm before reordering.
/// </remarks>
/// <seealso cref="task_group Class"/>
/// <seealso cref="task_group::wait Method"/>
/// <seealso cref="task_group::run_and_wait Method"/>
/// <seealso cref="structured_task_group Class"/>
/// <seealso cref="structured_task_group::wait Method"/>
/// <seealso cref="structured_task_group::run_and_wait Method"/>
/**/
enum task_group_status
{
    /// <summary>
    /// The tasks queued to the <c>task_group</c> object have not completed. Note that this value is not presently
    /// returned by the Concurrency Runtime.
    /// </summary>
    /**/
    not_complete,
    /// <summary>
    /// The tasks queued to the <c>task_group</c> or <c>structured_task_group</c> object completed successfully.
    /// </summary>
    /**/
    completed,
    /// <summary>
    /// The <c>task_group</c> or <c>structured_task_group</c> object was canceled. One or more tasks may not have
    /// executed.
    /// </summary>
    /**/
    canceled
};
namespace details
{
/// <summary>
/// Atomics
/// </summary>
#ifdef _USE_REAL_ATOMICS
typedef std::atomic<long> atomic_long;
typedef std::atomic<size_t> atomic_size_t;
// std::atomic-based helpers. Return conventions mirror the Win32
// Interlocked* functions used by the non-atomic fallback branch of this
// header: compare-exchange and exchange return the PREVIOUS value, while
// increment/decrement/add return the UPDATED value.
template<typename T>
T atomic_compare_exchange(std::atomic<T>& _Target, T _Exchange, T _Comparand)
{
    T observed = _Comparand;
    // On success 'observed' keeps the comparand (== the old value); on
    // failure compare_exchange_strong stores the actual current value in it.
    _Target.compare_exchange_strong(observed, _Exchange);
    return observed;
}
template<typename T>
T atomic_exchange(std::atomic<T>& _Target, T _Value)
{
    return _Target.exchange(_Value);
}
template<typename T>
T atomic_increment(std::atomic<T>& _Target)
{
    return ++_Target; // atomic pre-increment: fetch_add(1) + 1
}
template<typename T>
T atomic_decrement(std::atomic<T>& _Target)
{
    return --_Target; // atomic pre-decrement: fetch_sub(1) - 1
}
template<typename T>
T atomic_add(std::atomic<T>& _Target, T value)
{
    return _Target += value; // atomic operator+= yields the new value
}
#else // not _USE_REAL_ATOMICS
typedef long volatile atomic_long;
typedef size_t volatile atomic_size_t;
// Fallback implementations built on the Win32 Interlocked* intrinsics for
// toolsets without <atomic> (this branch compiles only when
// _USE_REAL_ATOMICS is not defined; see the top of this header).
// Conventions: exchange/compare-exchange return the PREVIOUS value;
// increment/decrement/add return the UPDATED value.
template<class T>
inline T atomic_exchange(T volatile& _Target, T _Value)
{
    // NOTE(review): _InterlockedExchange operates on 32-bit longs; this
    // template presumably is only ever instantiated with long-sized T -
    // confirm before adding new instantiations.
    return _InterlockedExchange(&_Target, _Value);
}
inline long atomic_increment(long volatile& _Target) { return _InterlockedIncrement(&_Target); }
inline long atomic_add(long volatile& _Target, long value) { return _InterlockedExchangeAdd(&_Target, value) + value; }
// size_t overloads dispatch on pointer width: 32-bit intrinsics on x86/ARM,
// 64-bit intrinsics elsewhere.
inline size_t atomic_increment(size_t volatile& _Target)
{
#if (defined(_M_IX86) || defined(_M_ARM))
    return static_cast<size_t>(_InterlockedIncrement(reinterpret_cast<long volatile*>(&_Target)));
#else
    return static_cast<size_t>(_InterlockedIncrement64(reinterpret_cast<__int64 volatile*>(&_Target)));
#endif
}
inline long atomic_decrement(long volatile& _Target) { return _InterlockedDecrement(&_Target); }
inline size_t atomic_decrement(size_t volatile& _Target)
{
#if (defined(_M_IX86) || defined(_M_ARM))
    return static_cast<size_t>(_InterlockedDecrement(reinterpret_cast<long volatile*>(&_Target)));
#else
    return static_cast<size_t>(_InterlockedDecrement64(reinterpret_cast<__int64 volatile*>(&_Target)));
#endif
}
// Returns the value _Target held before the call, whether or not the
// exchange happened (InterlockedCompareExchange semantics).
inline long atomic_compare_exchange(long volatile& _Target, long _Exchange, long _Comparand)
{
    return _InterlockedCompareExchange(&_Target, _Exchange, _Comparand);
}
// Compare-and-swap for size_t: stores _Exchange into _Target iff _Target ==
// _Comparand; returns the value _Target held before the call
// (InterlockedCompareExchange semantics), like the long overload above.
inline size_t atomic_compare_exchange(size_t volatile& _Target, size_t _Exchange, size_t _Comparand)
{
#if (defined(_M_IX86) || defined(_M_ARM))
    // Fix: pass the ADDRESS of _Target (&_Target) to the intrinsic instead of
    // reinterpreting its value as a pointer, matching atomic_increment /
    // atomic_decrement above.
    return static_cast<size_t>(_InterlockedCompareExchange(
        reinterpret_cast<long volatile*>(&_Target), static_cast<long>(_Exchange), static_cast<long>(_Comparand)));
#else
    return static_cast<size_t>(_InterlockedCompareExchange64(reinterpret_cast<__int64 volatile*>(&_Target),
                                                             static_cast<__int64>(_Exchange),
                                                             static_cast<__int64>(_Comparand)));
#endif
}
#endif // _USE_REAL_ATOMICS
} // namespace details
} // namespace pplx
#endif // _PPLXINTERFACE_H

View File

@ -0,0 +1,277 @@
/***
* Copyright (C) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
*
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* Linux specific pplx implementations
*
* For the latest on this and related APIs, please see: https://github.com/Microsoft/cpprestsdk
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
#pragma once
#if (defined(_MSC_VER))
#error This file must not be included for Visual Studio
#endif
#ifndef _WIN32
#include "cpprest/details/cpprest_compat.h"
#include "pthread.h"
#include <signal.h>
#include <atomic>
#include <condition_variable>
#include <mutex>
#include "pplx/pplxinterface.h"
namespace pplx
{
namespace details
{
namespace platform
{
/// <summary>
/// Returns a unique identifier for the execution thread where this routine is invoked
/// </summary>
_PPLXIMP long _pplx_cdecl GetCurrentThreadId();
/// <summary>
/// Yields the execution of the current execution thread - typically when spin-waiting
/// </summary>
_PPLXIMP void _pplx_cdecl YieldExecution();
/// <summary>
/// Captures the callstack. Stubbed out on this platform: always reports zero
/// frames captured, so callers get no stack-trace data here.
/// </summary>
__declspec(noinline) inline static size_t CaptureCallstack(void**, size_t, size_t) { return 0; }
} // namespace platform
/// <summary>
/// Manual reset event built from a mutex/condition-variable pair. Once set()
/// is called, current and future waiters are released until reset() clears
/// the signaled state again.
/// </summary>
class event_impl
{
private:
    std::mutex _lock;
    std::condition_variable _condition;
    bool _signaled;

public:
    // Sentinel meaning "wait forever"; also the value wait() returns on timeout.
    static const unsigned int timeout_infinite = 0xFFFFFFFF;

    /// Creates the event in the non-signaled state.
    event_impl() : _signaled(false) {}

    /// Signals the event and wakes every waiting thread.
    void set()
    {
        std::lock_guard<std::mutex> guard(_lock);
        _signaled = true;
        _condition.notify_all();
    }

    /// Returns the event to the non-signaled state.
    void reset()
    {
        std::lock_guard<std::mutex> guard(_lock);
        _signaled = false;
    }

    /// <summary>
    /// Blocks until the event is signaled or 'timeout' milliseconds elapse.
    /// Returns 0 when the wait ended because the event was signaled, and
    /// timeout_infinite on timeout - consistent with the Windows
    /// implementation, which is based on WaitForSingleObjectEx.
    /// </summary>
    unsigned int wait(unsigned int timeout)
    {
        std::unique_lock<std::mutex> guard(_lock);
        if (timeout != event_impl::timeout_infinite)
        {
            const std::chrono::milliseconds period(timeout);
            const bool signaled_in_time = _condition.wait_for(guard, period, [this]() -> bool { return _signaled; });
            _ASSERTE(signaled_in_time == _signaled);
            return signaled_in_time ? 0 : event_impl::timeout_infinite;
        }
        _condition.wait(guard, [this]() -> bool { return _signaled; });
        return 0;
    }

    /// Blocks until the event is signaled.
    unsigned int wait() { return wait(event_impl::timeout_infinite); }
};
/// <summary>
/// Reader writer lock implemented on top of pthread_rwlock_t. Writers use
/// lock()/unlock(); readers use lock_read()/unlock() or the scoped_lock_read
/// RAII helper.
/// </summary>
class reader_writer_lock_impl
{
private:
    pthread_rwlock_t _M_reader_writer_lock;

public:
    /// <summary>
    /// RAII helper that holds the lock in shared (read) mode for its lifetime.
    /// </summary>
    class scoped_lock_read
    {
    public:
        explicit scoped_lock_read(reader_writer_lock_impl& _Reader_writer_lock)
            : _M_reader_writer_lock(_Reader_writer_lock)
        {
            _M_reader_writer_lock.lock_read();
        }

        ~scoped_lock_read() { _M_reader_writer_lock.unlock(); }

    private:
        reader_writer_lock_impl& _M_reader_writer_lock;

        scoped_lock_read(const scoped_lock_read&);                  // non-copyable
        scoped_lock_read const& operator=(const scoped_lock_read&); // non-assignable
    };

    /// Initializes the underlying pthread rwlock with default attributes.
    reader_writer_lock_impl() { pthread_rwlock_init(&_M_reader_writer_lock, nullptr); }

    ~reader_writer_lock_impl() { pthread_rwlock_destroy(&_M_reader_writer_lock); }

    /// Acquires the lock exclusively (writer).
    void lock() { pthread_rwlock_wrlock(&_M_reader_writer_lock); }

    /// Acquires the lock shared (reader).
    void lock_read() { pthread_rwlock_rdlock(&_M_reader_writer_lock); }

    /// Releases either kind of acquisition (pthread tracks the mode).
    void unlock() { pthread_rwlock_unlock(&_M_reader_writer_lock); }
};
/// <summary>
/// Recursive mutex: may be re-acquired by the thread that already owns it.
/// Ownership is tracked by thread id; the underlying std::mutex is taken on
/// the first acquisition only and released by the matching last unlock.
/// </summary>
class recursive_lock_impl
{
public:
    recursive_lock_impl() : _M_owner(-1), _M_recursionCount(0) {}
    ~recursive_lock_impl()
    {
        // Destroying a lock that is still held (or unbalanced) is a usage error.
        _ASSERTE(_M_owner == -1);
        _ASSERTE(_M_recursionCount == 0);
    }
    void lock()
    {
        auto id = ::pplx::details::platform::GetCurrentThreadId();
        // _M_owner is atomic, so this pre-check reads safely while the owning
        // thread may be writing it; only the current owner can see a match.
        if (_M_owner == id)
        {
            // Re-entrant acquisition by the owning thread: just bump the depth.
            _M_recursionCount++;
        }
        else
        {
            _M_cs.lock();
            _M_owner = id;
            _M_recursionCount = 1;
        }
    }
    void unlock()
    {
        _ASSERTE(_M_owner == ::pplx::details::platform::GetCurrentThreadId());
        _ASSERTE(_M_recursionCount >= 1);
        _M_recursionCount--;
        if (_M_recursionCount == 0)
        {
            // Last matching unlock: relinquish ownership before releasing the mutex.
            _M_owner = -1;
            _M_cs.unlock();
        }
    }
private:
    std::mutex _M_cs;
    std::atomic<long> _M_owner; // thread id of the current owner, -1 when free
    long _M_recursionCount;     // acquisition depth of the owning thread (guarded by ownership)
};
/// <summary>
/// Default scheduler for this platform (named per-OS, identical shape on
/// both): queues proc(param) for asynchronous execution. schedule() is
/// declared _PPLXIMP and defined out of line in the pplx library sources.
/// </summary>
#if defined(__APPLE__)
class apple_scheduler : public pplx::scheduler_interface
#else
class linux_scheduler : public pplx::scheduler_interface
#endif
{
public:
    _PPLXIMP virtual void schedule(TaskProc_t proc, _In_ void* param);
#if defined(__APPLE__)
    virtual ~apple_scheduler() {}
#else
    virtual ~linux_scheduler() {}
#endif
};
} // namespace details
/// <summary>
/// Generic RAII lock guard (same idea as std::lock_guard): acquires the
/// given lock in the constructor and releases it in the destructor. Works
/// with any type exposing the critical_section interface (lock/unlock).
/// </summary>
template<class _Lock>
class scoped_lock
{
public:
    /// Acquires _Critical_section; it remains held for this object's lifetime.
    explicit scoped_lock(_Lock& _Critical_section) : _M_guarded(_Critical_section) { _M_guarded.lock(); }

    /// Releases the lock taken at construction.
    ~scoped_lock() { _M_guarded.unlock(); }

private:
    _Lock& _M_guarded;

    scoped_lock(const scoped_lock&);                  // non-copyable
    scoped_lock const& operator=(const scoped_lock&); // non-assignable
};
// The extensibility namespace contains the type definitions that are used internally
// to abstract over the platform-specific primitives defined above.
namespace extensibility
{
typedef ::pplx::details::event_impl event_t;                   // manual reset event
typedef std::mutex critical_section_t;                         // plain mutex
typedef scoped_lock<critical_section_t> scoped_critical_section_t;
typedef ::pplx::details::reader_writer_lock_impl reader_writer_lock_t;
typedef scoped_lock<reader_writer_lock_t> scoped_rw_lock_t;    // exclusive (write) guard
typedef ::pplx::extensibility::reader_writer_lock_t::scoped_lock_read scoped_read_lock_t;
typedef ::pplx::details::recursive_lock_impl recursive_lock_t;
typedef scoped_lock<recursive_lock_t> scoped_recursive_lock_t;
} // namespace extensibility
/// <summary>
/// Default scheduler type
/// </summary>
#if defined(__APPLE__)
typedef details::apple_scheduler default_scheduler_t;
#else
typedef details::linux_scheduler default_scheduler_t;
#endif
namespace details
{
/// <summary>
/// Terminate the process due to unhandled exception: break into an attached
/// debugger via SIGTRAP, then call std::terminate(). Overridable by defining
/// the macro before including this header.
/// </summary>
#ifndef _REPORT_PPLTASK_UNOBSERVED_EXCEPTION
#define _REPORT_PPLTASK_UNOBSERVED_EXCEPTION()                                                                         \
    do                                                                                                                 \
    {                                                                                                                  \
        raise(SIGTRAP);                                                                                                \
        std::terminate();                                                                                              \
    } while (false)
#endif //_REPORT_PPLTASK_UNOBSERVED_EXCEPTION
} // namespace details
// see: http://gcc.gnu.org/onlinedocs/gcc/Return-Address.html
// this is critical to inline: __builtin_return_address(0) must observe the
// caller's frame, not a helper's.
__attribute__((always_inline)) inline void* _ReturnAddress() { return __builtin_return_address(0); }
} // namespace pplx
#endif // !_WIN32

7600
vendor/cpprestsdk/include/pplx/pplxtasks.h vendored Normal file

File diff suppressed because it is too large Load Diff

268
vendor/cpprestsdk/include/pplx/pplxwin.h vendored Normal file
View File

@ -0,0 +1,268 @@
/***
* Copyright (C) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
*
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* Windows specific pplx implementations
*
* For the latest on this and related APIs, please see: https://github.com/Microsoft/cpprestsdk
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
#pragma once
#if !defined(_WIN32) || _MSC_VER < 1800 || CPPREST_FORCE_PPLX
#include "cpprest/details/cpprest_compat.h"
#include "pplx/pplxinterface.h"
namespace pplx
{
namespace details
{
namespace platform
{
/// <summary>
/// Returns a unique identifier for the execution thread where this routine is invoked
/// </summary>
_PPLXIMP long __cdecl GetCurrentThreadId();
/// <summary>
/// Yields the execution of the current execution thread - typically when spin-waiting
/// </summary>
_PPLXIMP void __cdecl YieldExecution();
/// <summary>
/// Captures the callstack (defined out of line in the pplx library).
/// </summary>
__declspec(noinline) _PPLXIMP size_t __cdecl CaptureCallstack(void**, size_t, size_t);
#if defined(__cplusplus_winrt)
/// <summary>
/// Internal API which retrieves the next async id.
/// </summary>
_PPLXIMP unsigned int __cdecl GetNextAsyncId();
#endif
} // namespace platform
/// <summary>
/// Manual reset event (Windows). Method bodies are declared _PPLXIMP and
/// defined out of line in the pplx library.
/// </summary>
class event_impl
{
public:
    // Sentinel meaning "wait forever"; presumably also the value wait()
    // returns on timeout, mirroring the Linux implementation - confirm in
    // the pplxwin sources.
    static const unsigned int timeout_infinite = 0xFFFFFFFF;
    _PPLXIMP event_impl();
    _PPLXIMP ~event_impl();
    /// Signals the event.
    _PPLXIMP void set();
    /// Returns the event to the non-signaled state.
    _PPLXIMP void reset();
    /// Blocks until signaled or until 'timeout' milliseconds elapse.
    _PPLXIMP unsigned int wait(unsigned int timeout);
    /// Blocks until the event is signaled.
    unsigned int wait() { return wait(event_impl::timeout_infinite); }
private:
    // Windows events
    void* _M_impl;
    event_impl(const event_impl&); // no copy constructor
    event_impl const& operator=(const event_impl&); // no assignment operator
};
/// <summary>
/// Mutex - lock for mutual exclusion. Method bodies are defined out of line
/// in the pplx library.
/// </summary>
class critical_section_impl
{
public:
    _PPLXIMP critical_section_impl();
    _PPLXIMP ~critical_section_impl();
    _PPLXIMP void lock();
    _PPLXIMP void unlock();
private:
    typedef void* _PPLX_BUFFER;
    // Windows critical section; opaque storage sized to hold CRITICAL_SECTION
    // without pulling <windows.h> into this header - do not resize.
    _PPLX_BUFFER _M_impl[8];
    critical_section_impl(const critical_section_impl&); // no copy constructor
    critical_section_impl const& operator=(const critical_section_impl&); // no assignment operator
};
#if _WIN32_WINNT >= _WIN32_WINNT_VISTA
/// <summary>
/// Reader writer lock (Windows). Writers use lock()/unlock(); readers use
/// lock_read()/unlock() or the scoped_lock_read RAII helper. Out-of-line
/// methods are defined in the pplx library.
/// </summary>
class reader_writer_lock_impl
{
public:
    /// <summary>
    /// RAII helper that holds the lock in shared (read) mode for its lifetime.
    /// </summary>
    class scoped_lock_read
    {
    public:
        explicit scoped_lock_read(reader_writer_lock_impl& _Reader_writer_lock)
            : _M_reader_writer_lock(_Reader_writer_lock)
        {
            _M_reader_writer_lock.lock_read();
        }
        ~scoped_lock_read() { _M_reader_writer_lock.unlock(); }
    private:
        reader_writer_lock_impl& _M_reader_writer_lock;
        scoped_lock_read(const scoped_lock_read&); // no copy constructor
        scoped_lock_read const& operator=(const scoped_lock_read&); // no assignment operator
    };
    _PPLXIMP reader_writer_lock_impl();
    _PPLXIMP void lock();
    _PPLXIMP void lock_read();
    _PPLXIMP void unlock();
private:
    // Windows slim reader writer lock
    void* _M_impl;
    // Slim reader writer lock doesn't have a general 'unlock' method.
    // We need to track how it was acquired and release accordingly.
    // true - lock exclusive
    // false - lock shared
    bool m_locked_exclusive;
};
#endif // _WIN32_WINNT >= _WIN32_WINNT_VISTA
/// <summary>
/// Recursive mutex: may be re-acquired by the thread that already owns it.
/// Ownership is tracked by thread id; the underlying critical section is
/// taken on the first acquisition only and released by the last unlock.
/// </summary>
class recursive_lock_impl
{
public:
    recursive_lock_impl() : _M_owner(-1), _M_recursionCount(0) {}
    ~recursive_lock_impl()
    {
        // Destroying a lock that is still held (or unbalanced) is a usage error.
        _ASSERTE(_M_owner == -1);
        _ASSERTE(_M_recursionCount == 0);
    }
    void lock()
    {
        auto id = ::pplx::details::platform::GetCurrentThreadId();
        // NOTE(review): _M_owner is 'volatile long' here, whereas the Linux
        // twin uses std::atomic<long> - relies on MSVC volatile semantics for
        // the cross-thread read; do not port as-is.
        if (_M_owner == id)
        {
            // Re-entrant acquisition by the owning thread: just bump the depth.
            _M_recursionCount++;
        }
        else
        {
            _M_cs.lock();
            _M_owner = id;
            _M_recursionCount = 1;
        }
    }
    void unlock()
    {
        _ASSERTE(_M_owner == ::pplx::details::platform::GetCurrentThreadId());
        _ASSERTE(_M_recursionCount >= 1);
        _M_recursionCount--;
        if (_M_recursionCount == 0)
        {
            // Last matching unlock: relinquish ownership before releasing the lock.
            _M_owner = -1;
            _M_cs.unlock();
        }
    }
private:
    pplx::details::critical_section_impl _M_cs;
    long _M_recursionCount;  // acquisition depth of the owning thread
    volatile long _M_owner;  // thread id of the current owner, -1 when free
};
/// <summary>
/// Default Windows scheduler: queues proc(param) for asynchronous execution.
/// schedule() is defined out of line in the pplx library.
/// NOTE(review): no virtual destructor is declared here (the Linux twin has
/// one) - deleting through scheduler_interface* would be UB; confirm callers
/// never do that.
/// </summary>
class windows_scheduler : public pplx::scheduler_interface
{
public:
    _PPLXIMP virtual void schedule(TaskProc_t proc, _In_ void* param);
};
} // namespace details
/// <summary>
/// RAII guard for any type exposing lock()/unlock() (the critical_section
/// interface); the lock is held for the guard's entire lifetime, in the
/// spirit of std::lock_guard.
/// </summary>
template<class _Lock>
class scoped_lock
{
public:
    /// Acquires _Critical_section; it stays held until destruction.
    explicit scoped_lock(_Lock& _Critical_section) : _M_held(_Critical_section) { _M_held.lock(); }

    /// Releases the lock acquired by the constructor.
    ~scoped_lock() { _M_held.unlock(); }

private:
    _Lock& _M_held;

    scoped_lock(const scoped_lock&);                  // non-copyable
    scoped_lock const& operator=(const scoped_lock&); // non-assignable
};
// The extensibility namespace contains the type definitions that are used internally
// to abstract over the platform-specific primitives defined above.
namespace extensibility
{
typedef ::pplx::details::event_impl event_t;                       // manual reset event
typedef ::pplx::details::critical_section_impl critical_section_t; // mutual-exclusion lock
typedef scoped_lock<critical_section_t> scoped_critical_section_t;
#if _WIN32_WINNT >= _WIN32_WINNT_VISTA
typedef ::pplx::details::reader_writer_lock_impl reader_writer_lock_t;
typedef scoped_lock<reader_writer_lock_t> scoped_rw_lock_t; // exclusive (write) guard
typedef reader_writer_lock_t::scoped_lock_read scoped_read_lock_t;
#endif // _WIN32_WINNT >= _WIN32_WINNT_VISTA
typedef ::pplx::details::recursive_lock_impl recursive_lock_t;
typedef scoped_lock<recursive_lock_t> scoped_recursive_lock_t;
} // namespace extensibility
/// <summary>
/// Default scheduler type
/// </summary>
typedef details::windows_scheduler default_scheduler_t;
namespace details
{
/// <summary>
/// Terminate the process due to unhandled exception: break into an attached
/// debugger via __debugbreak(), then call std::terminate(). Overridable by
/// defining the macro before including this header.
/// </summary>
#ifndef _REPORT_PPLTASK_UNOBSERVED_EXCEPTION
#define _REPORT_PPLTASK_UNOBSERVED_EXCEPTION()                                                                         \
    do                                                                                                                 \
    {                                                                                                                  \
        __debugbreak();                                                                                                \
        std::terminate();                                                                                              \
    } while (false)
#endif // _REPORT_PPLTASK_UNOBSERVED_EXCEPTION
} // namespace details
} // namespace pplx
} // namespace pplx
#endif