root/core/thread_safe_adaptors.hpp

#ifndef libjmmcg_core_thread_safe_adaptors_hpp
#define libjmmcg_core_thread_safe_adaptors_hpp

/******************************************************************************
** $Header: svn+ssh://jmmcg@svn.code.sf.net/p/libjmmcg/code/trunk/libjmmcg/core/thread_safe_adaptors.hpp 2287 2018-06-24 17:53:41Z jmmcg $
**
** Copyright © 2004 by J.M.McGuiness, coder@hussar.me.uk
**
** This library is free software; you can redistribute it and/or
** modify it under the terms of the GNU Lesser General Public
** License as published by the Free Software Foundation; either
** version 2.1 of the License, or (at your option) any later version.
**
** This library is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
** Lesser General Public License for more details.
**
** You should have received a copy of the GNU Lesser General Public
** License along with this library; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#include "exception.hpp"

#include <boost/mpl/assert.hpp>

#include <mutex>

namespace jmmcg { namespace ppd {

	/// Atomically count the amount of work there is to do, and provide access to the lock on the containing collection.
	/**
		This class adds a constant-time counter to safe_colln, queue or funky_queue.
		This uses standard locks.
		\todo I suppose I could use some kind of enable_if to detect if the container has a size() member-method, and only use this if it doesn't. That's a micro-optimisation to do.
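
		A minimal usage sketch (hedged: "lock_t" is a placeholder for any lock type from this library that models the locker_type used here; the members called below are declared in this class):
		\code
		typedef no_signalling<lock_t> sig_t;
		sig_t::atomic_t exit_event;	// The event to signal when work arrives.
		sig_t work(exit_event);
		work.add();	// Record that one item of work has arrived.
		work.remove();	// Consume one item of work.
		assert(work.count()==0);	// count() is a constant zero in this class.
		\endcode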
	*/
	template<class Lk>
	class no_signalling {
	public:
		typedef api_lock_traits<platform_api, sequential_mode>::anon_event_type atomic_t;
		typedef Lk locker_type;
		typedef typename locker_type::lock_traits lock_traits;

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=(
			locker_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& atomic_t::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			? generic_traits::memory_access_modes::crew_memory_access
			: generic_traits::memory_access_modes::erew_memory_access
		);

		constexpr no_signalling() noexcept(true) FORCE_INLINE
		: lock_(), have_work_() {
		}
		explicit no_signalling(atomic_t &ev) noexcept(true) FORCE_INLINE
		: lock_(), have_work_(&ev) {
			assert(dynamic_cast<atomic_t *>(have_work_));
		}
		constexpr no_signalling(no_signalling const &s) noexcept(true) FORCE_INLINE
		: lock_(), have_work_(s.have_work_) {
		}

		atomic_t & __fastcall have_work() noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return *have_work_;
		}
		locker_type & __fastcall locker() const noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<locker_type const *>(&lock_));
			return lock_;
		}
		locker_type & __fastcall locker() noexcept(true) {
			assert(dynamic_cast<locker_type *>(&lock_));
			return lock_;
		}

		void __fastcall add() noexcept(true) FORCE_INLINE {
			if (have_work_) {
				have_work_->set();
			}
		}
		void __fastcall add(typename atomic_t::count_type const c) noexcept(true) FORCE_INLINE {
			if (have_work_) {
				for (typename atomic_t::count_type i=0; i<c; ++i) {
					have_work_->set();
				}
			}
		}
		typename atomic_t::atomic_state_type __fastcall remove() noexcept(true) FORCE_INLINE {
			if (have_work_) {
				return have_work_->lock();
			} else {
				return atomic_t::atom_unset;
			}
		}
		void __fastcall remove(typename atomic_t::count_type const c) noexcept(true) FORCE_INLINE {
			if (have_work_) {
				for (typename atomic_t::count_type i=0; i<c; ++i) {
					have_work_->lock();
				}
			}
		}
		typename atomic_t::atomic_state_type __fastcall try_remove() noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return have_work_->try_lock();
		}
		static constexpr void clear() noexcept(true) FORCE_INLINE {
		}
		static constexpr typename atomic_t::count_type __fastcall count() noexcept(true) FORCE_INLINE {
			return 0;
		}

	private:
		mutable locker_type lock_;
		atomic_t *have_work_;
	};

	/// A flag to atomically signal if the container contains work or not and also count the amount of work that there is to do.
	/**
		This uses standard locks.
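
		A minimal usage sketch (hedged: "event_t" is a placeholder for any event/lock type from this library that provides the interface assumed of atomic_t, including default construction; the members called below are declared in this class):
		\code
		typedef signalling<event_t> sig_t;
		sig_t::atomic_t have_work_event;
		sig_t work(have_work_event);
		work.add(2);	// Signal two items of work; throws exception_type on failure.
		work.remove();	// Consume one item of work.
		work.try_remove();	// Non-blocking attempt to consume another.
		\endcode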
	*/
	template<class Lk>
	class signalling {
	public:
		typedef Lk atomic_t;
		typedef typename atomic_t::lock_traits lock_traits;
		typedef typename lock_traits::exception_type exception_type;
		typedef typename atomic_t::locker_type locker_type;

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=atomic_t::memory_access_mode;

	private:
		atomic_t *have_work_;

	public:
		constexpr __stdcall signalling() noexcept(true) FORCE_INLINE
		: have_work_() {
		}
		explicit signalling(atomic_t &ev) noexcept(true) FORCE_INLINE
		: have_work_(&ev) {
			assert(dynamic_cast<atomic_t *>(have_work_));
		}
		__stdcall signalling(signalling const &s) noexcept(true) FORCE_INLINE
		: have_work_(s.have_work_) {
			assert(dynamic_cast<atomic_t *>(have_work_));
		}

		signalling(signalling &&)=delete;
		void operator=(signalling const &)=delete;
		void operator=(signalling &&)=delete;

		constexpr atomic_t & __fastcall have_work() const noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return *have_work_;
		}
		atomic_t & __fastcall have_work() noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return *have_work_;
		}
		constexpr locker_type & __fastcall locker() const noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t const *>(have_work_));
			return have_work_->locker();
		}
		locker_type & __fastcall locker() noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return have_work_->locker();
		}

		void add() noexcept(false) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			typename lock_traits::atomic_state_type const ret=have_work_->set_nolk(atomic_t::states::new_work_arrived);
			if (ret!=lock_traits::atom_set) {
				throw exception_type(_T("Could not add more work to the atomic object."), jmmcg::info::function(__LINE__, __PRETTY_FUNCTION__, typeid(*this)), JMMCG_REVISION_HDR(_T("$Header: svn+ssh://jmmcg@svn.code.sf.net/p/libjmmcg/code/trunk/libjmmcg/core/thread_safe_adaptors.hpp 2287 2018-06-24 17:53:41Z jmmcg $")));
			}
		}
		void __fastcall add(typename atomic_t::count_type const c) noexcept(false) FORCE_INLINE {
			for (typename atomic_t::count_type i=0; i<c; ++i) {
				add();
			}
		}
		typename atomic_t::lock_result_type __fastcall remove() noexcept(noexcept(have_work_->lock(0))) FORCE_INLINE;
		void __fastcall remove(typename atomic_t::count_type const c) noexcept(false) FORCE_INLINE;
		typename atomic_t::lock_result_type __fastcall try_remove() noexcept(noexcept(have_work_->try_lock())) FORCE_INLINE;
		void clear() noexcept(noexcept(have_work_->clear())) FORCE_INLINE;
		typename atomic_t::count_type __fastcall count() const noexcept(noexcept(have_work_->count())) FORCE_INLINE;
	};

	/// An adaptor for a container that attempts to add some thread-safety to assist in making thread-safe programs.
	/**
		By default the adapted container does not use a read-write lock.
		Note that if the read_lock_type and write_lock_type are the same, i.e. an exclusive lock is used, then the adaptor will exhibit EREW semantics. If a reader-writer lock is used for them, then it will exhibit CREW semantics.

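		A minimal usage sketch (hedged: "lock_t" is a placeholder for a lock type from this library; std::vector is just an example container; the members called below are declared in this class):
		\code
		typedef safe_colln<std::vector<int>, lock_t> colln_t;
		colln_t v;
		v.push_back(42);	// Appends under the write lock.
		assert(v.size()==1);
		v.erase(42);	// Removes the matching element under the write lock.
		v.clear();
		\endcode
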
		\see queue
	*/
	template<
		typename C,
		typename M,
		typename WL=typename M::write_lock_type,
		class Sig=no_signalling<M>,
		class MLk=typename lock::any_order::all<M::lock_traits::api_type, typename M::lock_traits::model_type, M, M>
	>
	class safe_colln : private C ///< We want to be able to pass it as a "C (container)", but we also don't want to expose the unprotected, base functionality.
	{
	public:
		typedef C container_type;	///< The container to be adapted.
		typedef Sig have_work_type;	///< Used to enable functionality to atomically signal if the container contains work or not.
		typedef M atomic_t;	///< The underlying lock object to use that will be locked in some (EREW or CREW or other) manner.
		typedef WL write_lock_type;	///< The type of write-lock to use. This allows the possibility of using a read-write lock.
		typedef typename atomic_t::read_lock_type read_lock_type;	///< The type of read lock to use, by default the write lock. This allows the possibility of using a read-write lock.
		typedef MLk lock_all_type;	///< The multi-lock type to use to ensure that operations on combined safe_collns are thread-safe. Note that locking them in any order reduces the likelihood of deadlock at the cost of performance.
		typedef typename atomic_t::lock_traits lock_traits;
		typedef api_threading_traits<lock_traits::api_type, typename lock_traits::model_type> thread_traits;
		typedef typename container_type::reference reference;
		typedef typename container_type::const_reference const_reference;
		typedef typename container_type::size_type size_type;
		typedef typename container_type::value_type value_type;
		typedef typename lock_traits::exception_type exception_type;
		using exit_requested_type=typename have_work_type::atomic_t;

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=(
			write_lock_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& read_lock_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& lock_all_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
// TODO: some do and some don't have this as a member...			&& value_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& have_work_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			? generic_traits::memory_access_modes::crew_memory_access
			: generic_traits::memory_access_modes::erew_memory_access
		);

		BOOST_MPL_ASSERT((std::is_same<typename write_lock_type::atomic_t, atomic_t>));

		/// A flag to atomically signal whether the container contains work, and how much, plus access to the underlying lock, to assist in writing thread-safe code.
		mutable have_work_type have_work;

		/// Access to the underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// Access to the underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() const noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// Access to the underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// Access to the underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() const noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}

		__stdcall safe_colln() noexcept(noexcept(container_type()) && noexcept(have_work_type())) FORCE_INLINE;
		explicit safe_colln(typename have_work_type::atomic_t &) FORCE_INLINE;
		explicit safe_colln(std::initializer_list<value_type>) FORCE_INLINE;
		explicit __stdcall safe_colln(size_type const sz, value_type const &v=value_type()) FORCE_INLINE;
		template<class T1, class T2>
		__stdcall FORCE_INLINE
		safe_colln(size_type const sz, T1 const &, T2 const &);
		explicit __stdcall safe_colln(const container_type &) FORCE_INLINE;
		__stdcall safe_colln(const safe_colln &) noexcept(false) FORCE_INLINE;
		__stdcall ~safe_colln() FORCE_INLINE;
		safe_colln & __fastcall operator=(const safe_colln &) noexcept(false) FORCE_INLINE;

		bool __fastcall empty() const noexcept(true) FORCE_INLINE;
		size_type __fastcall sync_size() const noexcept(false) FORCE_INLINE;
		size_type __fastcall size() const noexcept(true) FORCE_INLINE;

		value_type __fastcall operator[](size_type s) const noexcept(false) FORCE_INLINE;

		void __fastcall push_back(value_type const &v) noexcept(false) FORCE_INLINE;
		void __fastcall push_back(value_type &&v) noexcept(false) FORCE_INLINE;

		void __fastcall push_front(const value_type &v) noexcept(false) FORCE_INLINE;

		void __fastcall push_front(value_type &&v) noexcept(false) FORCE_INLINE;

		size_type __fastcall erase(const value_type &v) noexcept(false) FORCE_INLINE;

		void __fastcall reserve(size_type sz) noexcept(false) FORCE_INLINE;

		void __fastcall clear() noexcept(false) FORCE_INLINE;

		void __fastcall swap(safe_colln &t) noexcept(false) FORCE_INLINE;

		/// Resize the container to the requested size, but try to minimise (re-)initialising or deleting any of the existing elements.
		/**
			Current C++03 & C++11 containers have an implicit sequential order of initialisation or re-initialisation of the elements they contain. This enforces an O(n) complexity on resize(). To minimise this (re-)initialisation of existing elements, only initialise new elements added to the container, or delete the excess.

			\todo Ideally I want to have an "uninitialized resize()" (reserve() does not set the size), so that I can initialise the elements of the container in the order I wish, using a parallel fill_n() for example.

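			A sketch of the intended effect (hedged; "lock_t" is a placeholder for a lock type from this library):
			\code
			safe_colln<std::vector<int>, lock_t> v(8, 42);	// Eight elements, all initialised to 42.
			v.resize_noinit_nolk(10);	// Only the two new elements are initialised.
			v.resize_noinit_nolk(6);	// Only the four excess elements are destroyed.
			\endcode
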
			\see resize(), reserve()
		*/
		void __fastcall resize_noinit_nolk(const size_type sz) noexcept(false) FORCE_INLINE;

		/// Resize the container to the requested size.
		/**
			\see resize_noinit_nolk(), resize(), reserve()
		*/
		void __fastcall resize(const size_type sz) noexcept(false) FORCE_INLINE;

		bool __fastcall operator==(safe_colln const &) const noexcept(true) FORCE_INLINE;
		template<typename M1, typename WL1, class Sig1, class MLk1>
		bool __fastcall FORCE_INLINE
		operator==(safe_colln<C, M1, WL1, Sig1, MLk1> const &) const noexcept(true);

		container_type const &colln() const noexcept(true) FORCE_INLINE {
			return static_cast<container_type const &>(*this);
		}
		container_type &colln() noexcept(true) FORCE_INLINE {
			return static_cast<container_type &>(*this);
		}

	};

	/// An adaptor to add thread-safety assistance, specifically for queues.
	/**
		Note that this adaptor relies on the standardised behaviour of a sequence (or an adaptor thereof) with respect to invalidating iterators when items are added to or removed from the container. Basically only std::list is guaranteed to satisfy these requirements; std::queue often does too, but that is implementation-dependent.
		This queue operates with one big fat lock.
		The iterators are not exposed, to assist with writing thread-safe code.
		Note that if the read_lock_type and write_lock_type are the same, i.e. an exclusive lock is used, then the adaptor will exhibit EREW semantics. If a reader-writer lock is used for them, then it will exhibit CREW semantics.

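		A minimal usage sketch (hedged: "lock_t" is a placeholder for a lock type from this library; the members called below are declared in this class):
		\code
		typedef queue<std::list<int>, lock_t> queue_t;
		queue_t q;
		q.push_back(1);	// Appends under the single lock.
		q.push_back(2);
		const int i=q.pop_front();	// Removes from the front under the same lock.
		assert(i==1);
		\endcode
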
		\see safe_colln, funky_queue
	*/
	template<
		typename QT,
		typename M,
		typename WL=typename M::write_lock_type,
		class Sig=no_signalling<M>,	///< \todo Should be a template type to ensure that M is a unique type.
		class ValRet=typename QT::value_type,
		class MLk=typename lock::any_order::all<M::lock_traits::api_type, typename M::lock_traits::model_type, M, M>
	>
	class queue : protected QT {
	public:
		typedef QT container_type;	///< The queue to be adapted, usually std::list or std::queue.
		typedef Sig have_work_type;	///< Used to enable functionality to atomically signal if the container contains work or not.
		typedef M atomic_t;	///< The underlying lock object to use.
		typedef WL write_lock_type;	///< The type of write-lock to use. This allows the possibility of using a read-write lock.
		typedef typename atomic_t::read_lock_type read_lock_type;	///< The type of read lock to use, by default the write lock. This allows the possibility of using a read-write lock.
		typedef MLk lock_all_type;	///< The multi-lock type to use to ensure that operations on combined queues are thread-safe. Note that locking them in any order reduces the likelihood of deadlock at the cost of performance.
		typedef typename write_lock_type::lock_traits lock_traits;
		typedef api_threading_traits<lock_traits::api_type, typename lock_traits::model_type> thread_traits;
		typedef typename container_type::reference reference;
		typedef typename container_type::const_reference const_reference;
		typedef typename container_type::size_type size_type;
		typedef typename container_type::value_type value_type;
		typedef ValRet value_ret_type;	///< The type to return when removing items from the queue.
		typedef typename lock_traits::exception_type exception_type;

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=(
			write_lock_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& read_lock_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& lock_all_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& value_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& have_work_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			? generic_traits::memory_access_modes::crew_memory_access
			: generic_traits::memory_access_modes::erew_memory_access
		);

		BOOST_MPL_ASSERT((std::is_same<typename write_lock_type::atomic_t, atomic_t>));

		/// A flag to atomically signal whether the container contains work, and how much, plus access to the underlying lock, to assist in writing thread-safe code.
		mutable have_work_type have_work;

		/// Access to the underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// Access to the underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() const noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// Access to the underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// Access to the underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() const noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}

		__stdcall queue() noexcept(noexcept(container_type()) && noexcept(have_work_type())) FORCE_INLINE;
		explicit queue(typename have_work_type::atomic_t &) FORCE_INLINE;
		__stdcall queue(queue const &) noexcept(false) FORCE_INLINE;
		__stdcall ~queue() noexcept(true) FORCE_INLINE;
		queue &__fastcall operator=(queue const &) noexcept(false) FORCE_INLINE;

		bool __fastcall empty() const noexcept(true) FORCE_INLINE;
		size_type __fastcall sync_size() const noexcept(false) FORCE_INLINE;
		size_type __fastcall size() const noexcept(true) FORCE_INLINE;

		value_type __fastcall front() const noexcept(false) FORCE_INLINE;

		void __fastcall push_back(value_type const &v) noexcept(false) FORCE_INLINE;
		void __fastcall push_back(value_type &&v) noexcept(false) FORCE_INLINE;

		value_ret_type __fastcall pop_front() noexcept(false) FORCE_INLINE;
		void __fastcall push_front(const value_type &v) noexcept(false) FORCE_INLINE;
		void __fastcall push_front(value_type &&v) noexcept(false) FORCE_INLINE;

		size_type __fastcall erase(const value_type &v) noexcept(false) FORCE_INLINE;

		void __fastcall clear() noexcept(false) FORCE_INLINE;

		container_type const &colln() const noexcept(true) FORCE_INLINE {
			return static_cast<container_type const &>(*this);
		}
		container_type &colln() noexcept(true) FORCE_INLINE {
			return static_cast<container_type &>(*this);
		}

		value_ret_type __fastcall pop_front_nolk() noexcept(false) FORCE_INLINE;

		value_type __fastcall pop_front_1_nochk_nolk() noexcept(noexcept(have_work.remove())) FORCE_INLINE;
		value_type __fastcall pop_front_1_nochk_nosig() noexcept(true) FORCE_INLINE;

	protected:
		virtual value_ret_type __fastcall pop_front_nochk_nolk() noexcept(false) FORCE_INLINE;
	};

	/// An adaptor to add thread-safety assistance, specifically for queues.
	/**
		Note that this adaptor relies on the standardised behaviour of a sequence (or an adaptor thereof) with respect to invalidating iterators when items are added to or removed from the container. Basically only std::list is guaranteed to satisfy these requirements; std::queue often does too, but that is implementation-dependent.
		This queue operates with two locks, a pop & a push lock, that operate independently as long as the queue is large enough.
		The operations push() and push_back() take the push lock and are thus serialised.
		The operations pop() and pop_front() take the pop lock and are thus serialised.
		When the queue is too short, these pairs of operations are also mutually serialised.
		By default the adapted queue does not use a read-write lock.
		The iterators are not exposed, to assist with writing thread-safe code.
		Note that if the read_lock_type and write_lock_type are the same, i.e. an exclusive lock is used, then the adaptor will exhibit EREW semantics. If a reader-writer lock is used for them, then it will exhibit CREW semantics.

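		A minimal usage sketch (hedged: "lock_t" is a placeholder for a lock type from this library). Once the queue holds at least serialise_size items, pushes and pops proceed under independent locks:
		\code
		typedef funky_queue<std::list<int>, lock_t> queue_t;
		queue_t q;
		q.push_back(1);	// Takes only the push lock...
		q.push_back(2);	// ...so a concurrent pop_front() need not block it.
		const int i=q.pop_front();	// Takes only the pop lock whilst the queue is long enough.
		assert(i==1);
		\endcode
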
		\see safe_colln, queue
	*/
	template<
		typename QT,
		typename M,
		typename WL=typename M::write_lock_type,
		class Sig=no_signalling<M>,
		class ValRet=typename QT::value_type,
		class MLk=typename lock::any_order::all<M::lock_traits::api_type, typename M::lock_traits::model_type, M, M>
	>
	class funky_queue : private QT {
	public:
		typedef QT container_type;	///< The queue to be adapted, usually std::list or std::queue.
		typedef Sig have_work_type;	///< Used to enable functionality to atomically signal if the container contains work or not.
		typedef M atomic_t;	///< The underlying lock object to use.
		typedef WL write_lock_type;	///< The type of write-lock to use. This allows the possibility of using a read-write lock.
		typedef typename atomic_t::read_lock_type read_lock_type;	///< The type of read lock to use, by default the write lock. This allows the possibility of using a read-write lock.
		typedef MLk lock_all_type;	///< The multi-lock type to use to ensure that operations on combined queues are thread-safe. Note that locking them in any order reduces the likelihood of deadlock at the cost of performance.
		typedef typename write_lock_type::lock_traits lock_traits;
		typedef api_threading_traits<lock_traits::api_type, typename lock_traits::model_type> thread_traits;
		typedef typename container_type::reference reference;
		typedef typename container_type::const_reference const_reference;
		typedef typename container_type::size_type size_type;
		typedef typename container_type::value_type value_type;
		typedef ValRet value_ret_type;	///< The type to return when removing items from the queue.
		typedef typename lock_traits::exception_type exception_type;

		static constexpr size_type serialise_size=2;	///< The size of the queue, below which the push & pop operations serialise.

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=(
			write_lock_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& read_lock_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& lock_all_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& value_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& have_work_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			? generic_traits::memory_access_modes::crew_memory_access
			: generic_traits::memory_access_modes::erew_memory_access
		);

		BOOST_MPL_ASSERT((std::is_same<typename write_lock_type::atomic_t, atomic_t>));

		/// A flag to atomically signal if the container contains work or not, and how much work.
		mutable have_work_type have_work;

		/// The underlying locks, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() noexcept(true) FORCE_INLINE {
			return pop_lock_;
		}
		/// The underlying locks, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() const noexcept(true) FORCE_INLINE {
			return pop_lock_;
		}
		/// The underlying locks, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() noexcept(true) FORCE_INLINE {
			return push_lock_;
		}
		/// The underlying locks, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() const noexcept(true) FORCE_INLINE {
			return push_lock_;
		}

		__stdcall funky_queue() noexcept(noexcept(container_type()) && noexcept(have_work_type())) FORCE_INLINE;
		explicit funky_queue(typename have_work_type::atomic_t &) FORCE_INLINE;
		__stdcall funky_queue(funky_queue const &) noexcept(false) FORCE_INLINE;
		__stdcall ~funky_queue() FORCE_INLINE;
		funky_queue &__fastcall operator=(funky_queue const &) noexcept(false) FORCE_INLINE;

		bool __fastcall empty() const noexcept(true) FORCE_INLINE;
		size_type __fastcall sync_size() const noexcept(false) FORCE_INLINE;
		size_type __fastcall size() const noexcept(true) FORCE_INLINE;

		void __fastcall clear() noexcept(false) FORCE_INLINE;
		/**
			This function is provided to assist with writing thread-safe code.

			\return Returns true if a value was erased, otherwise false.
		*/
		bool __fastcall erase(value_type const &) noexcept(false) FORCE_INLINE;

		/**
			If the queue is long enough, then this function will not block the operation of push() or push_back().

			\return A copy of the value that is on the front of the queue.
		*/
		value_type __fastcall front() const noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\return A copy of the value that is on the back of the queue.
		*/
		value_type __fastcall back() const noexcept(false) FORCE_INLINE;

		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\param v	The value to be added.
		*/
		void __fastcall push(value_type const &v) noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\param v	The value to be added.
		*/
		void __fastcall push(value_type &&v) noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\param v	The value to be added.
		*/
		void __fastcall push_back(value_type const &v) noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\param v	The value to be added.
		*/
		void __fastcall push_back(value_type &&v) noexcept(false) FORCE_INLINE;

		/**
			If the queue is long enough, then this function will not block the operation of push() or push_back().

			\return The value popped off the queue.
		*/
		value_ret_type __fastcall pop() noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of push() or push_back().

			\return The value popped off the queue.
		*/
		value_ret_type __fastcall pop_front() noexcept(false) FORCE_INLINE;

		/**
			\param	e	The item to be removed from the container_type.
		*/
		void __fastcall remove(const value_type &e) noexcept(false) FORCE_INLINE;

		container_type const &colln() const noexcept(true) FORCE_INLINE {
			return static_cast<container_type const &>(*this);
		}
		container_type &colln() noexcept(true) FORCE_INLINE {
			return static_cast<container_type &>(*this);
		}

		value_ret_type __fastcall pop_front_nolk() noexcept(false) FORCE_INLINE;

		value_type __fastcall pop_front_1_nochk_nolk() noexcept(noexcept(have_work.remove())) FORCE_INLINE;
		value_type __fastcall pop_front_1_nochk_nosig() noexcept(true) FORCE_INLINE;

	protected:
		value_type &__fastcall back_nolk() noexcept(true) FORCE_INLINE;
		virtual value_ret_type __fastcall pop_front_nochk_nolk() noexcept(false) FORCE_INLINE;

	private:
		mutable atomic_t push_lock_;
		mutable atomic_t pop_lock_;

		value_ret_type __fastcall pop_nolk() noexcept(false) FORCE_INLINE;

		value_type const &__fastcall back_nolk() const noexcept(true) FORCE_INLINE;
		void __fastcall push_back_nolk(const value_type &e) noexcept(false) FORCE_INLINE;
		void __fastcall push_back_nolk(value_type &&e) noexcept(false) FORCE_INLINE;
		void __fastcall push_nolk(const value_type &e) noexcept(false) FORCE_INLINE;
		void __fastcall push_nolk(value_type &&e) noexcept(false) FORCE_INLINE;
		void __fastcall pop_nochk_nolk() noexcept(false) FORCE_INLINE;
	};

} }

#include "thread_safe_adaptors_impl.hpp"

#endif
