Diffstat (limited to 'linden/indra/libgcrypt/libgcrypt-1.2.2/src/secmem.c')
-rw-r--r-- (was -rwxr-xr-x)  linden/indra/libgcrypt/libgcrypt-1.2.2/src/secmem.c | 1300
1 file changed, 650 insertions(+), 650 deletions(-)
diff --git a/linden/indra/libgcrypt/libgcrypt-1.2.2/src/secmem.c b/linden/indra/libgcrypt/libgcrypt-1.2.2/src/secmem.c
old mode 100755
new mode 100644
index c70aca0..37b8bdd
--- a/linden/indra/libgcrypt/libgcrypt-1.2.2/src/secmem.c
+++ b/linden/indra/libgcrypt/libgcrypt-1.2.2/src/secmem.c
@@ -1,650 +1,650 @@
/* secmem.c - memory allocation from a secure heap
 * Copyright (C) 1998, 1999, 2000, 2001, 2002,
 *               2003 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser general Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdarg.h>
#include <unistd.h>
#include <stddef.h>

#if defined(HAVE_MLOCK) || defined(HAVE_MMAP)
#include <sys/mman.h>
#include <sys/types.h>
#include <fcntl.h>
#ifdef USE_CAPABILITIES
#include <sys/capability.h>
#endif
#endif

#include "ath.h"
#include "g10lib.h"
#include "secmem.h"

#if defined (MAP_ANON) && ! defined (MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define DEFAULT_POOL_SIZE 16384
#define DEFAULT_PAGE_SIZE 4096

typedef struct memblock
{
  unsigned size;                /* Size of the memory available to the
                                   user.  */
  int flags;                    /* See below.  */
  PROPERLY_ALIGNED_TYPE aligned;
} memblock_t;

/* This flag specifies that the memory block is in use.  */
#define MB_FLAG_ACTIVE 1 << 0

/* The pool of secure memory.  */
static void *pool;

/* Size of POOL in bytes.  */
static size_t pool_size;

/* True, if the memory pool is ready for use.  May be checked in an
   atexit function.  */
static volatile int pool_okay;

/* True, if the memory pool is mmapped.  */
static volatile int pool_is_mmapped;

/* FIXME?  */
static int disable_secmem;
static int show_warning;
static int no_warning;
static int suspend_warning;

/* Stats.  */
static unsigned int cur_alloced, cur_blocks;

/* Lock protecting accesses to the memory pool.  */
static ath_mutex_t secmem_lock;

/* Convenient macros.  */
#define SECMEM_LOCK   ath_mutex_lock   (&secmem_lock)
#define SECMEM_UNLOCK ath_mutex_unlock (&secmem_lock)

/* The size of the memblock structure; this does not include the
   memory that is available to the user.  */
#define BLOCK_HEAD_SIZE \
  offsetof (memblock_t, aligned)

/* Convert an address into the according memory block structure.  */
#define ADDR_TO_BLOCK(addr) \
  (memblock_t *) ((char *) addr - BLOCK_HEAD_SIZE)

/* Check wether MB is a valid block.  */
#define BLOCK_VALID(mb) \
  (((char *) mb - (char *) pool) < pool_size)

/* Update the stats.  */
static void
stats_update (size_t add, size_t sub)
{
  if (add)
    {
      cur_alloced += add;
      cur_blocks++;
    }
  if (sub)
    {
      cur_alloced -= sub;
      cur_blocks--;
    }
}

/* Return the block following MB or NULL, if MB is the last block.  */
static memblock_t *
mb_get_next (memblock_t *mb)
{
  memblock_t *mb_next;

  mb_next = (memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE + mb->size);

  if (! BLOCK_VALID (mb_next))
    mb_next = NULL;

  return mb_next;
}

/* Return the block preceeding MB or NULL, if MB is the first
   block.  */
static memblock_t *
mb_get_prev (memblock_t *mb)
{
  memblock_t *mb_prev, *mb_next;

  if (mb == pool)
    mb_prev = NULL;
  else
    {
      mb_prev = (memblock_t *) pool;
      while (1)
        {
          mb_next = mb_get_next (mb_prev);
          if (mb_next == mb)
            break;
          else
            mb_prev = mb_next;
        }
    }

  return mb_prev;
}

/* If the preceeding block of MB and/or the following block of MB
   exist and are not active, merge them to form a bigger block.  */
static void
mb_merge (memblock_t *mb)
{
  memblock_t *mb_prev, *mb_next;

  mb_prev = mb_get_prev (mb);
  mb_next = mb_get_next (mb);

  if (mb_prev && (! (mb_prev->flags & MB_FLAG_ACTIVE)))
    {
      mb_prev->size += BLOCK_HEAD_SIZE + mb->size;
      mb = mb_prev;
    }
  if (mb_next && (! (mb_next->flags & MB_FLAG_ACTIVE)))
    mb->size += BLOCK_HEAD_SIZE + mb_next->size;
}

/* Return a new block, which can hold SIZE bytes.  */
static memblock_t *
mb_get_new (memblock_t *block, size_t size)
{
  memblock_t *mb, *mb_split;

  for (mb = block; BLOCK_VALID (mb); mb = mb_get_next (mb))
    if (! (mb->flags & MB_FLAG_ACTIVE) && mb->size >= size)
      {
        /* Found a free block.  */
        mb->flags |= MB_FLAG_ACTIVE;

        if (mb->size - size > BLOCK_HEAD_SIZE)
          {
            /* Split block.  */

            mb_split = (memblock_t *) (((char *) mb) + BLOCK_HEAD_SIZE + size);
            mb_split->size = mb->size - size - BLOCK_HEAD_SIZE;
            mb_split->flags = 0;

            mb->size = size;

            mb_merge (mb_split);

          }

        break;
      }

  if (! BLOCK_VALID (mb))
    mb = NULL;

  return mb;
}

/* Print a warning message.  */
static void
print_warn (void)
{
  if (!no_warning)
    log_info (_("Warning: using insecure memory!\n"));
}

/* Lock the memory pages into core and drop privileges.  */
static void
lock_pool (void *p, size_t n)
{
#if defined(USE_CAPABILITIES) && defined(HAVE_MLOCK)
  int err;

  cap_set_proc (cap_from_text ("cap_ipc_lock+ep"));
  err = mlock (p, n);
  if (err && errno)
    err = errno;
  cap_set_proc (cap_from_text ("cap_ipc_lock+p"));

  if (err)
    {
      if (errno != EPERM
#ifdef EAGAIN  /* OpenBSD returns this */
          && errno != EAGAIN
#endif
#ifdef ENOSYS  /* Some SCOs return this (function not implemented) */
          && errno != ENOSYS
#endif
#ifdef ENOMEM  /* Linux might return this. */
          && errno != ENOMEM
#endif
          )
        log_error ("can't lock memory: %s\n", strerror (err));
      show_warning = 1;
    }

#elif defined(HAVE_MLOCK)
  uid_t uid;
  int err;

  uid = getuid ();

#ifdef HAVE_BROKEN_MLOCK
  /* Under HP/UX mlock segfaults if called by non-root.  Note, we have
     noch checked whether mlock does really work under AIX where we
     also detected a broken nlock.  Note further, that using plock ()
     is not a good idea under AIX. */
  if (uid)
    {
      errno = EPERM;
      err = errno;
    }
  else
    {
      err = mlock (p, n);
      if (err && errno)
        err = errno;
    }
#else /* !HAVE_BROKEN_MLOCK */
  err = mlock (p, n);
  if (err && errno)
    err = errno;
#endif /* !HAVE_BROKEN_MLOCK */

  if (uid && ! geteuid ())
    {
      /* check that we really dropped the privs.
       * Note: setuid(0) should always fail */
      if (setuid (uid) || getuid () != geteuid () || !setuid (0))
        log_fatal ("failed to reset uid: %s\n", strerror (errno));
    }

  if (err)
    {
      if (errno != EPERM
#ifdef EAGAIN  /* OpenBSD returns this. */
          && errno != EAGAIN
#endif
#ifdef ENOSYS  /* Some SCOs return this (function not implemented). */
          && errno != ENOSYS
#endif
#ifdef ENOMEM  /* Linux might return this. */
          && errno != ENOMEM
#endif
          )
        log_error ("can't lock memory: %s\n", strerror (err));
      show_warning = 1;
    }

#elif defined ( __QNX__ )
  /* QNX does not page at all, so the whole secure memory stuff does
   * not make much sense.  However it is still of use because it
   * wipes out the memory on a free().
   * Therefore it is sufficient to suppress the warning
   */
#elif defined (HAVE_DOSISH_SYSTEM) || defined (__CYGWIN__)
  /* It does not make sense to print such a warning, given the fact that
   * this whole Windows !@#$% and their user base are inherently insecure
   */
#elif defined (__riscos__)
  /* no virtual memory on RISC OS, so no pages are swapped to disc,
   * besides we don't have mmap, so we don't use it! ;-)
   * But don't complain, as explained above.
   */
#else
  log_info ("Please note that you don't have secure memory on this system\n");
#endif
}

/* Initialize POOL.  */
static void
init_pool (size_t n)
{
  size_t pgsize;
  memblock_t *mb;

  pool_size = n;

  if (disable_secmem)
    log_bug ("secure memory is disabled");

#ifdef HAVE_GETPAGESIZE
  pgsize = getpagesize ();
#else
  pgsize = DEFAULT_PAGE_SIZE;
#endif

#if HAVE_MMAP
  pool_size = (pool_size + pgsize - 1) & ~(pgsize - 1);
#ifdef MAP_ANONYMOUS
  pool = mmap (0, pool_size, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else /* map /dev/zero instead */
  {
    int fd;

    fd = open ("/dev/zero", O_RDWR);
    if (fd == -1)
      {
        log_error ("can't open /dev/zero: %s\n", strerror (errno));
        pool = (void *) -1;
      }
    else
      {
        pool = mmap (0, pool_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
      }
  }
#endif
  if (pool == (void *) -1)
    log_info ("can't mmap pool of %u bytes: %s - using malloc\n",
              (unsigned) pool_size, strerror (errno));
  else
    {
      pool_is_mmapped = 1;
      pool_okay = 1;
    }

#endif
  if (!pool_okay)
    {
      pool = malloc (pool_size);
      if (!pool)
        log_fatal ("can't allocate memory pool of %u bytes\n",
                   (unsigned) pool_size);
      else
        pool_okay = 1;
    }

  /* Initialize first memory block.  */
  mb = (memblock_t *) pool;
  mb->size = pool_size;
  mb->flags = 0;
}

void
_gcry_secmem_set_flags (unsigned flags)
{
  int was_susp;

  SECMEM_LOCK;

  was_susp = suspend_warning;
  no_warning = flags & GCRY_SECMEM_FLAG_NO_WARNING;
  suspend_warning = flags & GCRY_SECMEM_FLAG_SUSPEND_WARNING;

  /* and now issue the warning if it is not longer suspended */
  if (was_susp && !suspend_warning && show_warning)
    {
      show_warning = 0;
      print_warn ();
    }

  SECMEM_UNLOCK;
}

unsigned
_gcry_secmem_get_flags (void)
{
  unsigned flags;

  SECMEM_LOCK;

  flags = no_warning ? GCRY_SECMEM_FLAG_NO_WARNING : 0;
  flags |= suspend_warning ? GCRY_SECMEM_FLAG_SUSPEND_WARNING : 0;

  SECMEM_UNLOCK;

  return flags;
}

/* Initialize the secure memory system.  If running with the necessary
   privileges, the secure memory pool will be locked into the core in
   order to prevent page-outs of the data.  Furthermore allocated
   secure memory will be wiped out when released.  */
void
_gcry_secmem_init (size_t n)
{
  SECMEM_LOCK;

  if (!n)
    {
#ifdef USE_CAPABILITIES
      /* drop all capabilities */
      cap_set_proc (cap_from_text ("all-eip"));

#elif !defined(HAVE_DOSISH_SYSTEM)
      uid_t uid;

      disable_secmem = 1;
      uid = getuid ();
      if (uid != geteuid ())
        {
          if (setuid (uid) || getuid () != geteuid () || !setuid (0))
            log_fatal ("failed to drop setuid\n");
        }
#endif
    }
  else
    {
      if (n < DEFAULT_POOL_SIZE)
        n = DEFAULT_POOL_SIZE;
      if (! pool_okay)
        {
          init_pool (n);
          lock_pool (pool, n);
        }
      else
        log_error ("Oops, secure memory pool already initialized\n");
    }

  SECMEM_UNLOCK;
}


static void *
_gcry_secmem_malloc_internal (size_t size)
{
  memblock_t *mb;

  if (!pool_okay)
    {
      log_info (_
                ("operation is not possible without initialized secure memory\n"));
      exit (2);
    }
  if (show_warning && !suspend_warning)
    {
      show_warning = 0;
      print_warn ();
    }

  /* Blocks are always a multiple of 32. */
  size = ((size + 31) / 32) * 32;

  mb = mb_get_new ((memblock_t *) pool, size);
  if (mb)
    stats_update (size, 0);

  return mb ? &mb->aligned.c : NULL;
}

void *
_gcry_secmem_malloc (size_t size)
{
  void *p;

  SECMEM_LOCK;
  p = _gcry_secmem_malloc_internal (size);
  SECMEM_UNLOCK;

  return p;
}

static void
_gcry_secmem_free_internal (void *a)
{
  memblock_t *mb;
  int size;

  if (!a)
    return;

  mb = ADDR_TO_BLOCK (a);
  size = mb->size;

  /* This does not make much sense: probably this memory is held in the
   * cache. We do it anyway: */
#define MB_WIPE_OUT(byte) \
  memset ((memblock_t *) ((char *) mb + BLOCK_HEAD_SIZE), (byte), size);

  MB_WIPE_OUT (0xff);
  MB_WIPE_OUT (0xaa);
  MB_WIPE_OUT (0x55);
  MB_WIPE_OUT (0x00);

  stats_update (0, size);

  mb->flags &= ~MB_FLAG_ACTIVE;

  /* Update stats.  */

  mb_merge (mb);
}

/* Wipe out and release memory.  */
void
_gcry_secmem_free (void *a)
{
  SECMEM_LOCK;
  _gcry_secmem_free_internal (a);
  SECMEM_UNLOCK;
}

/* Realloc memory.  */
void *
_gcry_secmem_realloc (void *p, size_t newsize)
{
  memblock_t *mb;
  size_t size;
  void *a;

  SECMEM_LOCK;

  mb = (memblock_t *) ((char *) p - ((size_t) &((memblock_t *) 0)->aligned.c));
  size = mb->size;
  if (newsize < size)
    {
      /* It is easier to not shrink the memory.  */
      a = p;
    }
  else
    {
      a = _gcry_secmem_malloc_internal (newsize);
      if (a)
        {
          memcpy (a, p, size);
          memset ((char *) a + size, 0, newsize - size);
          _gcry_secmem_free_internal (p);
        }
    }

  SECMEM_UNLOCK;

  return a;
}

int
_gcry_private_is_secure (const void *p)
{
  int ret = 0;

  SECMEM_LOCK;

  if (pool_okay && BLOCK_VALID (ADDR_TO_BLOCK (p)))
    ret = 1;

  SECMEM_UNLOCK;

  return ret;
}


/****************
 * Warning:  This code might be called by an interrupt handler
 *           and frankly, there should really be such a handler,
 *           to make sure that the memory is wiped out.
 *           We hope that the OS wipes out mlocked memory after
 *           receiving a SIGKILL - it really should do so, otherwise
 *           there is no chance to get the secure memory cleaned.
 */
void
_gcry_secmem_term ()
{
  if (!pool_okay)
    return;

  wipememory2 (pool, 0xff, pool_size);
  wipememory2 (pool, 0xaa, pool_size);
  wipememory2 (pool, 0x55, pool_size);
  wipememory2 (pool, 0x00, pool_size);
#if HAVE_MMAP
  if (pool_is_mmapped)
    munmap (pool, pool_size);
#endif
  pool = NULL;
  pool_okay = 0;
  pool_size = 0;
}


void
_gcry_secmem_dump_stats ()
{
#if 1
  SECMEM_LOCK;

  if (pool_okay)
    log_info ("secmem usage: %u/%lu bytes in %u blocks\n",
              cur_alloced, (unsigned long)pool_size, cur_blocks);
  SECMEM_UNLOCK;
#else
  memblock_t *mb;
  int i;

  SECMEM_LOCK;

  for (i = 0, mb = (memblock_t *) pool;
       BLOCK_VALID (mb);
       mb = mb_get_next (mb), i++)
    log_info ("SECMEM: [%s] block: %i; size: %i\n",
              (mb->flags & MB_FLAG_ACTIVE) ? "used" : "free",
              i,
              mb->size);
  SECMEM_UNLOCK;
#endif
}