/* Low level locking macros used in NPTL implementation.  Stub version.
   Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <atomic.h>


/* Mutex lock counter:
   bit 31 clear means unlocked;
   bit 31 set means locked.

   All code that looks at bit 31 first increases the 'number of
   interested threads' usage counter, which is in bits 0-30.

   All negative mutex values indicate that the mutex is still locked.  */
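
/* For illustration, a few values under this encoding:
     0x00000000  unlocked, no interested threads
     0x80000000  locked, no other interested threads
     0x80000002  locked, two other threads have registered interest.  */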


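/* Acquire MUTEX.  Fast path: atomically set bit 31; if it was clear we
   own the lock.  Slow path: register interest by incrementing the
   counter, retry the test-and-set, and block on the futex while the
   value stays negative (locked).  */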
static inline void
__generic_mutex_lock (int *mutex)
{
  unsigned int v;

  /* Bit 31 was clear, we got the mutex.  (This is the fast path.)  */
  if (atomic_bit_test_set (mutex, 31) == 0)
    return;

  atomic_increment (mutex);

  while (1)
    {
      if (atomic_bit_test_set (mutex, 31) == 0)
	{
	  atomic_decrement (mutex);
	  return;
	}

      /* We have to wait now.  First make sure the futex value we are
	 monitoring is truly negative (i.e. locked).  V is unsigned, so
	 cast before the sign check; otherwise the comparison is always
	 true and we would never block.  */
      v = *mutex;
      if ((int) v >= 0)
	continue;

      lll_futex_wait (mutex, v,
		      // XYZ check mutex flag
		      LLL_SHARED);
    }
}


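/* Release MUTEX.  The atomic addition of 0x80000000 clears bit 31 and
   yields zero exactly when no other thread has registered interest;
   otherwise wake one waiter.  */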
static inline void
__generic_mutex_unlock (int *mutex)
{
  /* Adding 0x80000000 to the counter results in 0 if and only if
     there are no other interested threads, so we can return (this is
     the fast path).  */
  if (atomic_add_zero (mutex, 0x80000000))
    return;

  /* There are other threads waiting for this mutex, wake one of them
     up.  */
  lll_futex_wake (mutex, 1,
		  // XYZ check mutex flag
		  LLL_SHARED);
}


#define lll_mutex_lock(futex) __generic_mutex_lock (&(futex))
#define lll_mutex_unlock(futex) __generic_mutex_unlock (&(futex))
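

/* A minimal usage sketch, for illustration only: the lock word must
   start at zero (unlocked, no interested threads), per the encoding
   described above.  The names `my_futex' and `critical_section' are
   hypothetical.

     static int my_futex = 0;

     void
     critical_section (void)
     {
       lll_mutex_lock (my_futex);
       ... code that must not run concurrently ...
       lll_mutex_unlock (my_futex);
     }
*/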