/*
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.1 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/.
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
 * the License for the specific language governing rights and limitations
 * under the License.
 *
 * The Original Code is AOLserver Code and related documentation
 * distributed by AOL.
 *
 * The Initial Developer of the Original Code is America Online,
 * Inc. Portions created by AOL are Copyright (C) 1999 America Online,
 * Inc. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the terms
 * of the GNU General Public License (the "GPL"), in which case the
 * provisions of GPL are applicable instead of those above. If you wish
 * to allow use of your version of this file only under the terms of the
 * GPL and not to allow others to use your version of this file under the
 * License, indicate your decision by deleting the provisions above and
 * replace them with the notice and other provisions required by the GPL.
 * If you do not delete the provisions above, a recipient may use your
 * version of this file under either the License or the GPL.
 */

/*
 * queue.c --
 *
 *      Routines for managing the virtual server connection queue
 *      and service threads.
 */

#include "nsd.h"

/*
 * Local functions defined in this file
 */

static void ConnRun(Conn *connPtr)
    NS_GNUC_NONNULL(1);

static void CreateConnThread(ConnPool *poolPtr)
    NS_GNUC_NONNULL(1);

static void AppendConn(Tcl_DString *dsPtr, const Conn *connPtr, const char *state, bool checkforproxy)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3);
static void AppendConnList(Tcl_DString *dsPtr, const Conn *firstPtr, const char *state, bool checkforproxy)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3);

static bool neededAdditionalConnectionThreads(const ConnPool *poolPtr)
    NS_GNUC_NONNULL(1);

static void WakeupConnThreads(ConnPool *poolPtr)
    NS_GNUC_NONNULL(1);

static Ns_ReturnCode MapspecParse(Tcl_Interp *interp, Tcl_Obj *mapspecObj, char **method, char **url,
                                  NsUrlSpaceContextSpec **specPtr)
    NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);

static int ServerMaxThreadsObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                                  ConnPool *poolPtr, int nargs)
    NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);

static int ServerMinThreadsObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                                  ConnPool *poolPtr, int nargs)
    NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);

static int ServerConnectionRateLimitObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                                           ConnPool *poolPtr, int nargs)
    NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);

static int ServerPoolRateLimitObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                                     ConnPool *poolPtr, int nargs)
    NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);

static int ServerMapObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                           NsServer *servPtr, ConnPool *poolPtr, int nargs)
    NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5) NS_GNUC_NONNULL(6);

static int ServerMappedObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                              NsServer *servPtr, int nargs)
    NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);

static int ServerUnmapObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                             NsServer *servPtr, int nargs)
    NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);

static void ConnThreadSetName(const char *server, const char *pool, uintptr_t threadId, uintptr_t connId)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2);

static int ServerListActiveCmd(Tcl_DString *dsPtr, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                               ConnPool *poolPtr, int nargs)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);
static int ServerListAllCmd(Tcl_DString *dsPtr, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                            ConnPool *poolPtr, int nargs)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);
static int ServerListQueuedCmd(Tcl_DString *dsPtr, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                               ConnPool *poolPtr, int nargs)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2) NS_GNUC_NONNULL(4) NS_GNUC_NONNULL(5);

static void ServerListActive(Tcl_DString *dsPtr, ConnPool *poolPtr, bool checkforproxy)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2);

static void ServerListQueued(Tcl_DString *dsPtr, ConnPool *poolPtr)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(2);

static int SetPoolAttribute(Tcl_Interp *interp, int nargs, ConnPool *poolPtr, int *valuePtr, int value)
    NS_GNUC_NONNULL(1) NS_GNUC_NONNULL(3) NS_GNUC_NONNULL(4);

static Ns_ArgProc WalkCallback;

/*
 * Static variables defined in this file.
 */

static Ns_Tls argtls = NULL;
static int    poolid = 0;

/*
 * Debugging stuff
 */
#define ThreadNr(poolPtr, argPtr) (int)(((argPtr) - (poolPtr)->tqueue.args))

#if 0
static void ConnThreadQueuePrint(ConnPool *poolPtr, char *key) {
    ConnThreadArg *aPtr;

    fprintf(stderr, "%s: thread queue (idle %d): ", key, poolPtr->threads.idle);
    Ns_MutexLock(&poolPtr->tqueue.lock);
    for (aPtr = poolPtr->tqueue.nextPtr; aPtr; aPtr = aPtr->nextPtr) {
        fprintf(stderr, "[%d] state %d, ", ThreadNr(poolPtr, aPtr), aPtr->state);
    }
    Ns_MutexUnlock(&poolPtr->tqueue.lock);
    fprintf(stderr, "\n");
}
#endif

/*
 *----------------------------------------------------------------------
 *
 * NsInitQueue --
 *
 *      Init connection queue.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
NsInitQueue(void)
{
    Ns_TlsAlloc(&argtls, NULL);
    poolid = Ns_UrlSpecificAlloc();
}


/*
 *----------------------------------------------------------------------
 *
 * Ns_GetConn --
 *
 *      Return the current connection in this thread.
 *
 * Results:
 *      Pointer to conn or NULL.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

Ns_Conn *
Ns_GetConn(void)
{
    const ConnThreadArg *argPtr;

    argPtr = Ns_TlsGet(&argtls);
    return ((argPtr != NULL) ? ((Ns_Conn *) argPtr->connPtr) : NULL);
}


/*
 *----------------------------------------------------------------------
 *
 * NsMapPool --
 *
 *      Map a method/URL to the given pool.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Requests for the given URLs will be serviced by the given pool.
 *
 *----------------------------------------------------------------------
 */

void
NsMapPool(ConnPool *poolPtr, const char *mapString, unsigned int flags)
{
    const char            *server;
    char                  *method, *url;
    Tcl_Obj               *mapspecObj;
    NsUrlSpaceContextSpec *specPtr;

    NS_NONNULL_ASSERT(poolPtr != NULL);
    NS_NONNULL_ASSERT(mapString != NULL);

    mapspecObj = Tcl_NewStringObj(mapString, -1);
    server = poolPtr->servPtr->server;

    Tcl_IncrRefCount(mapspecObj);
    if (MapspecParse(NULL, mapspecObj, &method, &url, &specPtr) == NS_OK) {
        Ns_UrlSpecificSet2(server, method, url, poolid, poolPtr, flags, NULL, specPtr);

    } else {
        Ns_Log(Warning,
               "invalid mapspec '%s'; must be 2- or 3-element list "
               "containing HTTP method, URL, and optionally a filtercontext",
               mapString);
    }
    Tcl_DecrRefCount(mapspecObj);
}
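
/*
 * Annotation (not part of the original file): NsMapPool() is the C-level
 * twin of "ns_server map" and is typically invoked during startup for the
 * mappings configured for a pool. A minimal sketch with assumed values:
 *
 *     NsMapPool(poolPtr, "GET /*.png", 0u);
 *     NsMapPool(poolPtr, "GET /admin {user-agent *MSIE*}", 0u);
 *
 * The second form carries a filtercontext (a field/pattern pair), which
 * MapspecParse() below turns into an NsUrlSpaceContextSpec.
 */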

/*
 *----------------------------------------------------------------------
 *
 * NsPoolName --
 *
 *      Return a printable pool name. In essence, it translates the empty pool
 *      name (the default pool) to the string "default" for printing
 *      purposes.
 *
 * Results:
 *      Printable string.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

const char *
NsPoolName(const char *poolName)
{
    const char *result;

    NS_NONNULL_ASSERT(poolName != NULL);

    if (*poolName == '\0') {
        result = "default";
    } else {
        result = poolName;
    }

    return result;
}


/*
 *----------------------------------------------------------------------
 *
 * NsPoolAllocateThreadSlot --
 *
 *      Allocate a thread slot for this pool. When bandwidth management is
 *      activated for a pool, one has to aggregate the pool data from multiple
 *      writer threads. This happens via slots allocated to the
 *      threads. Currently, writer threads only exit when the server goes
 *      down, so there is no need to reuse slots from writer threads, and the
 *      associated slots are stable once allocated. This function will be
 *      called only once per writer thread and pool. The thread ID will become
 *      necessary when the writer threads are made dynamic.
 *
 * Results:
 *      Allocated slot id for this thread.
 *
 * Side effects:
 *      The DList may allocate additional memory.
 *
 *----------------------------------------------------------------------
 */
size_t
NsPoolAllocateThreadSlot(ConnPool *poolPtr, uintptr_t UNUSED(threadID))
{
    Ns_DList *dlPtr;

    dlPtr = &(poolPtr->rate.writerRates);

    /*
     * Appending must be locked, since in rare cases a realloc might happen
     * under the hood when appending.
     */
    Ns_MutexLock(&poolPtr->rate.lock);
    Ns_DListAppend(dlPtr, 0u);
    Ns_MutexUnlock(&poolPtr->rate.lock);

    return (dlPtr->size - 1u);
}

/*
 *----------------------------------------------------------------------
 *
 * NsPoolTotalRate --
 *
 *      Calculate the total rate from all writer threads. The function simply
 *      adds the data from all allocated slots and reports the number of
 *      associated writer threads for estimating rates per writer thread.
 *
 * Results:
 *      Actual total rate for a pool (sum of rates per writer thread).
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
int
NsPoolTotalRate(ConnPool *poolPtr, size_t slot, int rate, int *writerThreadCount)
{
    Ns_DList *dlPtr;
    size_t    i;
    uintptr_t totalRate = 0u;

    dlPtr = &(poolPtr->rate.writerRates);
    dlPtr->data[slot] = (void*)(uintptr_t)rate;

    Ns_MutexLock(&poolPtr->rate.lock);
    for (i = 0u; i < dlPtr->size; i ++) {
        totalRate = totalRate + (uintptr_t)dlPtr->data[i];
    }
    poolPtr->rate.currentRate = (int)totalRate;
    Ns_MutexUnlock(&poolPtr->rate.lock);

    *writerThreadCount = (int)dlPtr->size;

    return (int)totalRate;
}
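
#if 0
/*
 * Usage sketch (not part of the original file): how a writer thread would
 * combine the two functions above. The function name and the literal rate
 * value are hypothetical.
 */
static void ExampleWriterRateUpdate(ConnPool *poolPtr)
{
    size_t slot;
    int    writerCount, totalRate;

    /* Once, at writer-thread startup: reserve this thread's slot. */
    slot = NsPoolAllocateThreadSlot(poolPtr, Ns_ThreadId());

    /* Periodically: publish the own rate and fetch the pool-wide sum. */
    totalRate = NsPoolTotalRate(poolPtr, slot, 500, &writerCount);
    Ns_Log(Debug, "slot %lu: pool total %d across %d writers",
           (unsigned long)slot, totalRate, writerCount);
}
#endif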

void
NsPoolAddBytesSent(ConnPool *poolPtr, Tcl_WideInt bytesSent)
{
    Ns_MutexLock(&poolPtr->rate.lock);
    poolPtr->rate.bytesSent += bytesSent;
    Ns_MutexUnlock(&poolPtr->rate.lock);
}


/*
 *----------------------------------------------------------------------
 *
 * neededAdditionalConnectionThreads --
 *
 *      Decide whether additional connection threads should be
 *      created. This function has to be called under a lock for the
 *      provided queue (such as &poolPtr->wqueue.lock).
 *
 * Results:
 *      Boolean value indicating whether an additional connection thread
 *      should be created.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static bool
neededAdditionalConnectionThreads(const ConnPool *poolPtr) {
    bool wantCreate;

    NS_NONNULL_ASSERT(poolPtr != NULL);

    /*
     * Create new connection threads, if
     *
     * - there is currently no connection thread being created, or
     *   parallel creates are allowed and there are more than
     *   highwatermark requests queued,
     *
     * - AND there are fewer idle threads than min threads (the server
     *   tries to keep min-threads idle to be ready for short peaks),
     *
     * - AND there are not yet max-threads running.
     */
    if ( (poolPtr->threads.creating == 0
          || poolPtr->wqueue.wait.num > poolPtr->wqueue.highwatermark
          )
         && (poolPtr->threads.current < poolPtr->threads.min
             || (poolPtr->wqueue.wait.num > poolPtr->wqueue.lowwatermark)
             )
         && poolPtr->threads.current < poolPtr->threads.max
         ) {

        Ns_MutexLock(&poolPtr->servPtr->pools.lock);
        wantCreate = (!poolPtr->servPtr->pools.shutdown);
        Ns_MutexUnlock(&poolPtr->servPtr->pools.lock);

        /*Ns_Log(Notice, "[%s] wantCreate %d (creating %d current %d idle %d waiting %d)",
               poolPtr->servPtr->server,
               wantCreate,
               poolPtr->threads.creating,
               poolPtr->threads.current,
               poolPtr->threads.idle,
               poolPtr->wqueue.wait.num
               );*/
    } else {
        wantCreate = NS_FALSE;

        /*Ns_Log(Notice, "[%s] do not wantCreate creating %d, idle %d < min %d, current %d < max %d, waiting %d)",
               poolPtr->servPtr->server,
               poolPtr->threads.creating,
               poolPtr->threads.idle,
               poolPtr->threads.min,
               poolPtr->threads.current,
               poolPtr->threads.max,
               poolPtr->wqueue.wait.num);*/
    }

    return wantCreate;
}
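
/*
 * Worked example (annotation, not in the original): assume min 2, max 10,
 * lowwatermark 0 and highwatermark 100. With 5 threads running, nothing
 * currently being created and one queued request, the conditions read
 * (creating == 0) AND (wait.num 1 > lowwatermark 0) AND (current 5 < max 10),
 * so wantCreate becomes NS_TRUE unless the server is shutting down. Once
 * threads.current reaches max, the last conjunct fails and no further
 * thread is created.
 */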


/*
 *----------------------------------------------------------------------
 *
 * NsEnsureRunningConnectionThreads --
 *
 *      Ensure that there are the right number of connection threads
 *      running. The function computes for the provided pool or for
 *      the default pool of the server the number of missing threads
 *      and creates a single connection thread when needed. This
 *      function is typically called from the driver.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Potentially, a created connection thread.
 *
 *----------------------------------------------------------------------
 */

void
NsEnsureRunningConnectionThreads(const NsServer *servPtr, ConnPool *poolPtr) {
    bool create;
    int  waitnum;

    NS_NONNULL_ASSERT(servPtr != NULL);

    if (poolPtr == NULL) {
        /*
         * Use the default pool for the time being, if no pool was
         * provided.
         */
        poolPtr = servPtr->pools.defaultPtr;
    }

    Ns_MutexLock(&poolPtr->wqueue.lock);
    Ns_MutexLock(&poolPtr->threads.lock);
    create = neededAdditionalConnectionThreads(poolPtr);

    if (create) {
        poolPtr->threads.current ++;
        poolPtr->threads.creating ++;
    }
    waitnum = poolPtr->wqueue.wait.num;

    Ns_MutexUnlock(&poolPtr->threads.lock);
    Ns_MutexUnlock(&poolPtr->wqueue.lock);

    if (create) {
        Ns_Log(Notice, "NsEnsureRunningConnectionThreads wantCreate %d waiting %d idle %d current %d",
               (int)create,
               waitnum,
               poolPtr->threads.idle,
               poolPtr->threads.current);
        CreateConnThread(poolPtr);
    }
}


/*
 *----------------------------------------------------------------------
 *
 * NsQueueConn --
 *
 *      Append a connection to the run queue of a connection pool when
 *      possible (e.g. no shutdown, a free connection thread is available,
 *      ...).
 *
 * Results:
 *      NS_OK (queued), NS_ERROR (return error), NS_TIMEOUT (try again).
 *
 * Side effects:
 *      Connection will run shortly.
 *
 *----------------------------------------------------------------------
 */

Ns_ReturnCode
NsQueueConn(Sock *sockPtr, const Ns_Time *nowPtr)
{
    ConnThreadArg *argPtr = NULL;
    NsServer      *servPtr;
    ConnPool      *poolPtr = NULL;
    Conn          *connPtr = NULL;
    bool           create = NS_FALSE;
    int            queued = NS_OK;

    NS_NONNULL_ASSERT(sockPtr != NULL);
    NS_NONNULL_ASSERT(nowPtr != NULL);
    assert(sockPtr->drvPtr != NULL);

    sockPtr->drvPtr->stats.received++;
    servPtr = sockPtr->servPtr;

    /*
     * Perform no queuing on shutdown.
     */
    if (unlikely(servPtr->pools.shutdown)) {
        return NS_ERROR;
    }

    /*
     * Select connection pool. For non-HTTP drivers, the request.method
     * won't be provided.
     */
    if ((sockPtr->poolPtr == NULL)
        && (sockPtr->reqPtr != NULL)
        && (sockPtr->reqPtr->request.method != NULL)) {
        NsUrlSpaceContext ctx;

        ctx.headers = sockPtr->reqPtr->headers;
        if (nsconf.reverseproxymode
            && ((struct sockaddr *)&sockPtr->clientsa)->sa_family != 0
            ) {
            ctx.saPtr = (struct sockaddr *)&(sockPtr->clientsa);
        } else {
            ctx.saPtr = (struct sockaddr *)&(sockPtr->sa);
        }

        /*
         * Here we could fit in the peer addr, when behindproxy is true.
         */
        poolPtr = NsUrlSpecificGet(servPtr,
                                   sockPtr->reqPtr->request.method,
                                   sockPtr->reqPtr->request.url,
                                   poolid, 0u, NS_URLSPACE_DEFAULT,
                                   NsUrlSpaceContextFilter, &ctx);
        sockPtr->poolPtr = poolPtr;
    } else if (sockPtr->poolPtr != NULL) {
        poolPtr = sockPtr->poolPtr;
        Ns_Log(Notice, "=== NsQueueConn URL <%s> was already assigned to pool <%s>",
               sockPtr->reqPtr->request.url, poolPtr->pool);
    }
    if (poolPtr == NULL) {
        poolPtr = servPtr->pools.defaultPtr;
    }

    /*
     * We know the pool. Try to add the connection into the queue of this
     * pool (either into a free slot or into its waiting list); when
     * everything fails, signal an error or timeout (for retry attempts)
     * to the caller.
     */
    Ns_MutexLock(&poolPtr->wqueue.lock);
    if (poolPtr->wqueue.freePtr != NULL) {
        connPtr = poolPtr->wqueue.freePtr;
        poolPtr->wqueue.freePtr = connPtr->nextPtr;
        connPtr->nextPtr = NULL;
    }
    Ns_MutexUnlock(&poolPtr->wqueue.lock);

    if (likely(connPtr != NULL)) {
        /*
         * We have got a free connPtr from the pool. Initialize the
         * connPtr and copy flags from the socket.
         */

        /* ConnThreadQueuePrint(poolPtr, "driver");*/

        Ns_MutexLock(&servPtr->pools.lock);
        connPtr->id = servPtr->pools.nextconnid++;
        poolPtr->stats.processed++;
        Ns_MutexUnlock(&servPtr->pools.lock);

        connPtr->requestQueueTime = *nowPtr;
        connPtr->sockPtr = sockPtr;
        connPtr->drvPtr = sockPtr->drvPtr;
        connPtr->poolPtr = poolPtr;
        connPtr->server = servPtr->server;
        connPtr->location = sockPtr->location;
        connPtr->flags = sockPtr->flags;
        if ((sockPtr->drvPtr->opts & NS_DRIVER_ASYNC) == 0u) {
            connPtr->acceptTime = *nowPtr;
        } else {
            connPtr->acceptTime = sockPtr->acceptTime;
        }
        connPtr->rateLimit = poolPtr->rate.defaultConnectionLimit;

        /*
         * Reset members of sockPtr which have been passed to connPtr.
         */
        sockPtr->acceptTime.sec = 0;
        sockPtr->flags = 0u;

        /*
         * Try to get an entry from the connection thread queue,
         * and dequeue it when possible.
         */
        if (poolPtr->tqueue.nextPtr != NULL) {
            Ns_MutexLock(&poolPtr->tqueue.lock);
            if (poolPtr->tqueue.nextPtr != NULL) {
                argPtr = poolPtr->tqueue.nextPtr;
                poolPtr->tqueue.nextPtr = argPtr->nextPtr;
            }
            Ns_MutexUnlock(&poolPtr->tqueue.lock);
        }

        if (argPtr != NULL) {
            /*
             * We could obtain an idle thread. Dequeue the entry,
             * such that no one else might grab it, and fill in the
             * connPtr that should be run by this thread.
             */

            assert(argPtr->state == connThread_idle);
            argPtr->connPtr = connPtr;

            Ns_MutexLock(&poolPtr->wqueue.lock);
            Ns_MutexLock(&poolPtr->threads.lock);
            create = neededAdditionalConnectionThreads(poolPtr);
            Ns_MutexUnlock(&poolPtr->threads.lock);
            Ns_MutexUnlock(&poolPtr->wqueue.lock);

        } else {
            /*
             * There is no connection thread ready, so we add the
             * connection to the waiting queue.
             */
            Ns_MutexLock(&poolPtr->wqueue.lock);
            if (poolPtr->wqueue.wait.firstPtr == NULL) {
                poolPtr->wqueue.wait.firstPtr = connPtr;
            } else {
                poolPtr->wqueue.wait.lastPtr->nextPtr = connPtr;
            }
            poolPtr->wqueue.wait.lastPtr = connPtr;
            poolPtr->wqueue.wait.num ++;
            Ns_MutexLock(&poolPtr->threads.lock);
            poolPtr->stats.queued++;
            create = neededAdditionalConnectionThreads(poolPtr);
            Ns_MutexUnlock(&poolPtr->threads.lock);
            Ns_MutexUnlock(&poolPtr->wqueue.lock);
        }
    }

    if (unlikely(connPtr == NULL)) {
        /*
         * The connection thread pool queue is full. We can either keep the
         * sockPtr in a waiting state, or we can reject the queue overrun with
         * a 503 - depending on the configuration.
         */
        queued = NS_TIMEOUT;
        create = NS_FALSE;

        if ((sockPtr->flags & NS_CONN_SOCK_WAITING) == 0u) {
            /*
             * The flag NS_CONN_SOCK_WAITING is just used to avoid reporting
             * the same request multiple times as unsuccessful queueing
             * attempts (when rejectoverrun is false).
             */
            sockPtr->flags |= NS_CONN_SOCK_WAITING;
            Ns_Log(Notice, "[%s pool %s] All available connections are used, waiting %d idle %d current %d",
                   poolPtr->servPtr->server,
                   poolPtr->pool,
                   poolPtr->wqueue.wait.num,
                   poolPtr->threads.idle,
                   poolPtr->threads.current);

            if (poolPtr->wqueue.rejectoverrun) {
                Ns_MutexLock(&poolPtr->threads.lock);
                poolPtr->stats.dropped++;
                Ns_MutexUnlock(&poolPtr->threads.lock);
                queued = NS_ERROR;
            }
        }

    } else if (argPtr != NULL) {
        /*
         * We have a connection thread ready.
         *
         * Perform the lock just in the debugging case to avoid a race
         * condition.
         */
        if (Ns_LogSeverityEnabled(Debug)) {
            int idle;

            Ns_MutexLock(&poolPtr->threads.lock);
            idle = poolPtr->threads.idle;
            Ns_MutexUnlock(&poolPtr->threads.lock);

            Ns_Log(Debug, "[%d] dequeue thread connPtr %p idle %d state %d create %d",
                   ThreadNr(poolPtr, argPtr), (void *)connPtr, idle, argPtr->state, (int)create);
        }

        /*
         * Signal the associated thread to start with this request.
         */
        Ns_MutexLock(&argPtr->lock);
        Ns_CondSignal(&argPtr->cond);
        Ns_MutexUnlock(&argPtr->lock);

    } else {
        if (Ns_LogSeverityEnabled(Debug)) {
            Ns_Log(Debug, "add waiting connPtr %p => waiting %d create %d",
                   (void *)connPtr, poolPtr->wqueue.wait.num, (int)create);
        }
    }

    if (create) {
        int idle, current;

        Ns_MutexLock(&poolPtr->threads.lock);
        idle = poolPtr->threads.idle;
        current = poolPtr->threads.current;
        poolPtr->threads.current ++;
        poolPtr->threads.creating ++;
        Ns_MutexUnlock(&poolPtr->threads.lock);

        Ns_Log(Notice, "NsQueueConn wantCreate %d waiting %d idle %d current %d",
               (int)create,
               poolPtr->wqueue.wait.num,
               idle,
               current);

        CreateConnThread(poolPtr);
    }

    return queued;
}
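
#if 0
/*
 * Caller-side sketch (annotation, not in the original): a driver would
 * dispatch on the three return codes documented above; "now" stands for
 * the current time as an Ns_Time.
 */
switch (NsQueueConn(sockPtr, &now)) {
case NS_OK:      /* queued or handed directly to an idle connection thread */
    break;
case NS_TIMEOUT: /* pool overrun with rejectoverrun false: keep the socket, retry */
    break;
default:         /* NS_ERROR: shutdown or overrun; reject, e.g. with a 503 */
    break;
}
#endif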

/*
 *----------------------------------------------------------------------
 *
 * WalkCallback --
 *
 *      Callback for Ns_UrlSpecificWalk() used in "ns_server map". Currently a
 *      placeholder, might output useful information in the future.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static void
WalkCallback(Ns_DString *dsPtr, const void *arg)
{
    const ConnPool *poolPtr = (ConnPool *)arg;
    Tcl_DStringAppendElement(dsPtr, poolPtr->pool);
}


/*
 *----------------------------------------------------------------------
 *
 * SetPoolAttribute --
 *
 *      Helper function to factor out common code when modifying integer
 *      attributes in the pools structure.
 *
 * Results:
 *      Tcl result.
 *
 * Side effects:
 *      Sets interp result.
 *
 *----------------------------------------------------------------------
 */
static int
SetPoolAttribute(Tcl_Interp *interp, int nargs, ConnPool *poolPtr, int *valuePtr, int value) {

    if (nargs == 1) {
        Ns_MutexLock(&poolPtr->threads.lock);
        *valuePtr = value;
        Ns_MutexUnlock(&poolPtr->threads.lock);
    } else {
        /*
         * Called without an argument, just return the current setting.
         */
        assert(nargs == 0);
    }

    Tcl_SetObjResult(interp, Tcl_NewIntObj(*valuePtr));
    return TCL_OK;
}
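
/*
 * Annotation (not in the original): the nargs convention used by the
 * callers below means that e.g. "ns_server minthreads" (nargs == 0) only
 * reads the attribute, while "ns_server minthreads 5" (nargs == 1) updates
 * it under the threads lock; in both cases the current value is returned.
 */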


/*
 *----------------------------------------------------------------------
 *
 * ServerMaxThreadsObjCmd, subcommand of NsTclServerObjCmd --
 *
 *      Implements "ns_server ... maxthreads ...".
 *
 * Results:
 *      Tcl result.
 *
 * Side effects:
 *      Might update the maxthreads setting of a pool.
 *
 *----------------------------------------------------------------------
 */

static int
ServerMaxThreadsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                       ConnPool *poolPtr, int nargs)
{
    int               result = TCL_OK, value = 0;
    Ns_ObjvValueRange range = {poolPtr->threads.min, poolPtr->wqueue.maxconns};
    Ns_ObjvSpec       args[] = {
        {"?maxthreads", Ns_ObjvInt, &value, &range},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    if (Ns_ParseObjv(NULL, args, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        result = SetPoolAttribute(interp, nargs, poolPtr, &poolPtr->threads.max, value);
    }
    return result;
}


/*
 *----------------------------------------------------------------------
 *
 * ServerMinThreadsObjCmd, subcommand of NsTclServerObjCmd --
 *
 *      Implements "ns_server ... minthreads ...".
 *
 * Results:
 *      Tcl result.
 *
 * Side effects:
 *      Might update the minthreads setting of a pool.
 *
 *----------------------------------------------------------------------
 */
static int
ServerMinThreadsObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                       ConnPool *poolPtr, int nargs)
{
    int               result = TCL_OK, value = 0;
    Ns_ObjvValueRange range = {1, poolPtr->threads.max};
    Ns_ObjvSpec       args[] = {
        {"?minthreads", Ns_ObjvInt, &value, &range},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    if (Ns_ParseObjv(NULL, args, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        result = SetPoolAttribute(interp, nargs, poolPtr, &poolPtr->threads.min, value);
    }
    return result;
}

static int
ServerPoolRateLimitObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                          ConnPool *poolPtr, int nargs)
{
    int               result = TCL_OK, value = 0;
    Ns_ObjvValueRange range = {-1, INT_MAX};
    Ns_ObjvSpec       args[] = {
        {"?poolratelimit", Ns_ObjvInt, &value, &range},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    if (Ns_ParseObjv(NULL, args, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        result = SetPoolAttribute(interp, nargs, poolPtr, &poolPtr->rate.poolLimit, value);
    }
    return result;
}

static int
ServerConnectionRateLimitObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                                ConnPool *poolPtr, int nargs)
{
    int               result = TCL_OK, value = 0;
    Ns_ObjvValueRange range = {-1, INT_MAX};
    Ns_ObjvSpec       args[] = {
        {"?connectionratelimit", Ns_ObjvInt, &value, &range},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    if (Ns_ParseObjv(NULL, args, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        result = SetPoolAttribute(interp, nargs, poolPtr, &poolPtr->rate.defaultConnectionLimit, value);
    }
    return result;
}


/*
 *----------------------------------------------------------------------
 *
 * MapspecParse --
 *
 *      Check whether the mapspec Tcl_Obj has the right syntax and return its
 *      components as strings. Note that the lifetime of the returned strings
 *      depends on the lifetime of the mapspecObj argument.
 *
 * Results:
 *      Ns_ReturnCode NS_OK or NS_ERROR.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static Ns_ReturnCode
MapspecParse(Tcl_Interp *interp, Tcl_Obj *mapspecObj, char **method, char **url,
             NsUrlSpaceContextSpec **specPtr) {
    Ns_ReturnCode status = NS_ERROR;
    int           oc;
    Tcl_Obj     **ov;

    NS_NONNULL_ASSERT(mapspecObj != NULL);
    NS_NONNULL_ASSERT(method != NULL);
    NS_NONNULL_ASSERT(url != NULL);
    NS_NONNULL_ASSERT(specPtr != NULL);

    if (Tcl_ListObjGetElements(NULL, mapspecObj, &oc, &ov) == TCL_OK) {
        if (oc == 2 || oc == 3) {
            status = NS_OK;
            *method = Tcl_GetString(ov[0]);
            *url = Tcl_GetString(ov[1]);
            if (oc == 3) {
                int       oc2;
                Tcl_Obj **ov2;

                if (Tcl_ListObjGetElements(NULL, ov[2], &oc2, &ov2) == TCL_OK && oc2 == 2) {
                    *specPtr = NsUrlSpaceContextSpecNew(Tcl_GetString(ov2[0]),
                                                        Tcl_GetString(ov2[1]));

                } else {
                    status = NS_ERROR;
                }
            } else {
                *specPtr = NULL;
            }
        }
    }
    if (unlikely(status == NS_ERROR) && interp != NULL) {
        Ns_TclPrintfResult(interp,
                           "invalid mapspec '%s'; must be 2- or 3-element list "
                           "containing HTTP method, URL, and optionally a filtercontext",
                           Tcl_GetString(mapspecObj));
    }

    return status;
}
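
/*
 * Annotation (not in the original): accepted mapspec forms, written as Tcl
 * lists; the filtercontext is a field/pattern pair (example values):
 *
 *     {GET /*.png}
 *     {GET /admin {user-agent *MSIE*}}
 */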


/*
 *----------------------------------------------------------------------
 *
 * ServerMapObjCmd, subcommand of NsTclServerObjCmd --
 *
 *      Implements "ns_server ... map ...".
 *
 * Results:
 *      Tcl result.
 *
 * Side effects:
 *      Maps method + URL to the specified pool.
 *
 *----------------------------------------------------------------------
 */
static int
ServerMapObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                NsServer *servPtr, ConnPool *poolPtr, int nargs)
{
    int         result = TCL_OK, noinherit = 0;
    Tcl_Obj    *mapspecObj = NULL;
    Ns_ObjvSpec lopts[] = {
        {"-noinherit", Ns_ObjvBool, &noinherit, INT2PTR(NS_TRUE)},
        {NULL, NULL, NULL, NULL}
    };
    Ns_ObjvSpec args[] = {
        {"?mapspec", Ns_ObjvObj, &mapspecObj, NULL},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(servPtr != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    if (Ns_ParseObjv(lopts, args, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else if (mapspecObj != NULL) {
        char                  *method, *url;
        NsUrlSpaceContextSpec *specPtr = NULL;

        if (MapspecParse(interp, mapspecObj, &method, &url, &specPtr) != NS_OK) {
            result = TCL_ERROR;
        } else {
            unsigned int flags = 0u;
            Tcl_DString  ds;

            if (noinherit != 0) {
                flags |= NS_OP_NOINHERIT;
            }

            Ns_MutexLock(&servPtr->urlspace.lock);
            Ns_UrlSpecificSet2(servPtr->server, method, url, poolid, poolPtr, flags, NULL, specPtr);
            Ns_MutexUnlock(&servPtr->urlspace.lock);

            Tcl_DStringInit(&ds);
            Ns_Log(Notice, "pool[%s]: mapped %s %s%s -> %s",
                   servPtr->server, method, url,
                   (specPtr == NULL ? "" : NsUrlSpaceContextSpecAppend(&ds, specPtr)),
                   poolPtr->pool);
            Tcl_DStringFree(&ds);
        }

    } else {
        Tcl_DString  ds, *dsPtr = &ds;
        Tcl_Obj    **ov, *fullListObj;
        int          oc;

        /*
         * Return the current mappings only in the case when the map
         * operation was called without the optional argument.
         */
        Ns_DStringInit(dsPtr);

        Ns_MutexLock(&servPtr->urlspace.lock);
        Ns_UrlSpecificWalk(poolid, servPtr->server, WalkCallback, dsPtr);
        Ns_MutexUnlock(&servPtr->urlspace.lock);

        /*
         * Convert the Tcl_DString into a list, and filter out the elements
         * from different pools.
         */
        fullListObj = Tcl_NewStringObj(dsPtr->string, dsPtr->length);
        Tcl_IncrRefCount(fullListObj);

        result = Tcl_ListObjGetElements(interp, fullListObj, &oc, &ov);
        if (result == TCL_OK) {
            Tcl_Obj *resultObj;
            int      i;

            /*
             * The result should always be a proper list, so the potential
             * error should not occur.
             */
            resultObj = Tcl_NewListObj(0, NULL);

            for (i = 0; i < oc; i++) {
                Tcl_Obj *elemObj = ov[i];
                int      length;

                /*
                 * Get the last element, which is the pool, and compare it
                 * with the current pool name.
                 */
                result = Tcl_ListObjLength(interp, elemObj, &length);
                if (result == TCL_OK) {
                    Tcl_Obj *lastSubElem;

                    result = Tcl_ListObjIndex(interp, elemObj, length-1, &lastSubElem);
                    if (result == TCL_OK) {
                        const char *pool = Tcl_GetString(lastSubElem);

                        if (!STREQ(poolPtr->pool, pool)) {
                            continue;
                        }
                    }
                }
                /*
                 * The element is from the current pool. Remove the last
                 * element (poolname) from the list...
                 */
                if (result == TCL_OK) {
                    result = Tcl_ListObjReplace(interp, elemObj, length-1, 1, 0, NULL);
                }
                /*
                 * ... and append the element.
                 */
                if (result == TCL_OK) {
                    Tcl_ListObjAppendElement(interp, resultObj, elemObj);
                } else {
                    break;
                }
            }
            if (result == TCL_OK) {
                Tcl_SetObjResult(interp, resultObj);
            } else {
                Ns_TclPrintfResult(interp, "invalid result from mapped URLs");
            }
        }
        Tcl_DecrRefCount(fullListObj);
        Tcl_DStringFree(dsPtr);

    }

    return result;
}
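
/*
 * Annotation (not in the original): typical Tcl invocations, assuming a
 * configured pool named "slow":
 *
 *     ns_server -pool slow map "GET /download/*"   ;# add a mapping
 *     ns_server -pool slow map                     ;# list this pool's mappings
 */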


/*
 *----------------------------------------------------------------------
 *
 * ServerMappedObjCmd, subcommand of NsTclServerObjCmd --
 *
 *      Implements "ns_server ... mapped ...".
 *
 * Results:
 *      Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
static int
ServerMappedObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                   NsServer *servPtr, int nargs)
{
    int                    result = TCL_OK, noinherit = 0, exact = 0;
    Tcl_Obj               *mapspecObj = NULL;
    char                  *method, *url;
    NsUrlSpaceContextSpec *specPtr;
    Ns_ObjvSpec            lopts[] = {
        {"-exact",     Ns_ObjvBool, &exact,     INT2PTR(NS_TRUE)},
        {"-noinherit", Ns_ObjvBool, &noinherit, INT2PTR(NS_TRUE)},
        {NULL, NULL, NULL, NULL}
    };
    Ns_ObjvSpec            args[] = {
        {"mapspec", Ns_ObjvObj, &mapspecObj, NULL},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(servPtr != NULL);

    if (Ns_ParseObjv(lopts, args, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else if (MapspecParse(interp, mapspecObj, &method, &url, &specPtr) != NS_OK) {
        result = TCL_ERROR;
    } else {
        unsigned int    flags = 0u;
        const ConnPool *mappedPoolPtr;
        NsUrlSpaceOp    op;

        if (noinherit != 0) {
            flags |= NS_OP_NOINHERIT;
        }

        if (exact == (int)NS_TRUE) {
            op = NS_URLSPACE_EXACT;
        } else {
            op = NS_URLSPACE_DEFAULT;
        }

        Ns_MutexLock(&servPtr->urlspace.lock);
        mappedPoolPtr = (ConnPool *)NsUrlSpecificGet(servPtr, method, url, poolid, flags, op,
                                                     NULL, NULL);
        Ns_MutexUnlock(&servPtr->urlspace.lock);

        if (mappedPoolPtr != NULL) {
            Tcl_SetObjResult(interp, Tcl_NewStringObj(mappedPoolPtr->pool, -1));
        }
    }

    return result;
}


/*
 *----------------------------------------------------------------------
 *
 * ServerUnmapObjCmd, subcommand of NsTclServerObjCmd --
 *
 *      Implements "ns_server ... unmap ...".
 *
 * Results:
 *      Tcl result.
 *
 * Side effects:
 *      Might unmap a method/URL pair.
 *
 *----------------------------------------------------------------------
 */
static int
ServerUnmapObjCmd(ClientData UNUSED(clientData), Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                  NsServer *servPtr, int nargs)
{
    int                    result = TCL_OK, noinherit = 0;
    char                  *method, *url;
    Tcl_Obj               *mapspecObj = NULL;
    NsUrlSpaceContextSpec *specPtr;
    Ns_ObjvSpec            lopts[] = {
        {"-noinherit", Ns_ObjvBool, &noinherit, INT2PTR(NS_TRUE)},
        {NULL, NULL, NULL, NULL}
    };
    Ns_ObjvSpec            args[] = {
        {"mapspec", Ns_ObjvObj, &mapspecObj, NULL},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(servPtr != NULL);

    if (Ns_ParseObjv(lopts, args, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else if (MapspecParse(interp, mapspecObj, &method, &url, &specPtr) != NS_OK) {
        result = TCL_ERROR;
    } else {
        bool         success;
        unsigned int flags = 0u;
        const void  *data;

        if (noinherit != 0) {
            flags |= NS_OP_NOINHERIT;
        }
        // TODO: for the time being
        flags |= NS_OP_ALLFILTERS;

        Ns_MutexLock(&servPtr->urlspace.lock);
        data = Ns_UrlSpecificDestroy(servPtr->server, method, url, poolid, flags);
        Ns_MutexUnlock(&servPtr->urlspace.lock);

        success = (data != NULL);
        // TODO: data is no good indicator when (all) context constraints are deleted.
        //if (success) {
        //    Ns_Log(Notice, "pool[%s]: unmapped %s %s", servPtr->server, method, url);
        //} else {
        //    Ns_Log(Warning, "pool[%s]: could not unmap %s %s", servPtr->server, method, url);
        //}
        Tcl_SetObjResult(interp, Tcl_NewBooleanObj(success));
    }

    return result;
}
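
/*
 * Annotation (not in the original): a round trip over map/mapped/unmap,
 * again with the hypothetical pool "slow":
 *
 *     ns_server -pool slow map "GET /download/*"
 *     ns_server mapped {GET /download/archive.zip}   ;# => slow
 *     ns_server unmap {GET /download/*}              ;# => 1 on success
 */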


/*
 *----------------------------------------------------------------------
 *
 * ServerListActive, ServerListQueued --
 *
 *      Backend for the "ns_server ... active ..." and
 *      the "ns_server ... queued ..." commands.
 *
 * Results:
 *      void
 *
 * Side effects:
 *      Appends list data about active/queued connections to the Tcl_DString
 *      provided in the first argument.
 *
 *----------------------------------------------------------------------
 */
static void
ServerListActive(Tcl_DString *dsPtr, ConnPool *poolPtr, bool checkforproxy)
{
    int i;

    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    Ns_MutexLock(&poolPtr->tqueue.lock);
    for (i = 0; i < poolPtr->threads.max; i++) {
        const ConnThreadArg *argPtr = &poolPtr->tqueue.args[i];

        if (argPtr->connPtr != NULL) {
            AppendConnList(dsPtr, argPtr->connPtr, "running", checkforproxy);
        }
    }
    Ns_MutexUnlock(&poolPtr->tqueue.lock);
}

static void
ServerListQueued(Tcl_DString *dsPtr, ConnPool *poolPtr)
{
    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    Ns_MutexLock(&poolPtr->wqueue.lock);
    AppendConnList(dsPtr, poolPtr->wqueue.wait.firstPtr, "queued", NS_FALSE);
    Ns_MutexUnlock(&poolPtr->wqueue.lock);
}


/*
 *----------------------------------------------------------------------
 *
 * ServerListActiveCmd, ServerListAllCmd, ServerListQueuedCmd --
 *
 *      Stubs for the "ns_server ... active ...", "ns_server ... all ..."
 *      and the "ns_server ... queued ..." commands.
 *
 * Results:
 *      Tcl result.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static int
ServerListActiveCmd(Tcl_DString *dsPtr, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                    ConnPool *poolPtr, int nargs)
{
    int         result = TCL_OK, checkforproxy = (int)NS_FALSE;
    Ns_ObjvSpec opts[] = {
        {"-checkforproxy", Ns_ObjvBool, &checkforproxy, INT2PTR(NS_TRUE)},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    if (Ns_ParseObjv(opts, NULL, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        ServerListActive(dsPtr, poolPtr, (bool)checkforproxy);
    }
    return result;
}

static int
ServerListQueuedCmd(Tcl_DString *dsPtr, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                    ConnPool *poolPtr, int nargs)
{
    int result = TCL_OK;

    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    if (Ns_ParseObjv(NULL, NULL, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        ServerListQueued(dsPtr, poolPtr);
    }
    return result;
}

static int
ServerListAllCmd(Tcl_DString *dsPtr, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv,
                 ConnPool *poolPtr, int nargs)
{
    int         result = TCL_OK, checkforproxy = (int)NS_FALSE;
    Ns_ObjvSpec opts[] = {
        {"-checkforproxy", Ns_ObjvBool, &checkforproxy, INT2PTR(NS_TRUE)},
        {NULL, NULL, NULL, NULL}
    };

    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(interp != NULL);
    NS_NONNULL_ASSERT(objv != NULL);
    NS_NONNULL_ASSERT(poolPtr != NULL);

    if (Ns_ParseObjv(opts, NULL, interp, objc-nargs, objc, objv) != NS_OK) {
        result = TCL_ERROR;
    } else {
        ServerListActive(dsPtr, poolPtr, (bool)checkforproxy);
        ServerListQueued(dsPtr, poolPtr);
    }
    return result;
}
1418 | |
1419 | |
1420 | /* |
1421 | *---------------------------------------------------------------------- |
1422 | * |
1423 | * NsTclServerObjCmd -- |
1424 | * |
1425 | * Implements "ns_server". This command provides configuration and status |
1426 | * information about a server. |
1427 | * |
1428 | * Results: |
1429 | * A standard Tcl result. |
1430 | * |
1431 | * Side effects: |
1432 | * None. |
1433 | * |
1434 | *---------------------------------------------------------------------- |
1435 | */ |
1436 | |
1437 | int |
1438 | NsTclServerObjCmd(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *const* objv) |
1439 | { |
1440 | const NsInterp *itPtr = clientData; |
1441 | int subcmd = 0, result = TCL_OK0, nargs = 0; |
1442 | NsServer *servPtr = NULL((void*)0); |
1443 | ConnPool *poolPtr; |
1444 | char *pool = NULL((void*)0), *optArg = NULL((void*)0); |
1445 | Tcl_DString ds, *dsPtr = &ds; |
1446 | |
1447 | enum { |
1448 | SActiveIdx, SAllIdx, |
1449 | SConnectionsIdx, SConnectionRateLimitIdx, |
1450 | SFiltersIdx, |
1451 | SKeepaliveIdx, |
1452 | SMapIdx, SMappedIdx, |
1453 | SMaxthreadsIdx, SMinthreadsIdx, |
1454 | SPagedirIdx, SPoolRateLimitIdx, SPoolsIdx, |
1455 | SQueuedIdx, |
1456 | SRequestprocsIdx, |
1457 | SServerdirIdx, SStatsIdx, |
1458 | STcllibIdx, SThreadsIdx, STracesIdx, |
1459 | SUnmapIdx, |
1460 | SUrl2fileIdx, SWaitingIdx |
1461 | }; |
1462 | |
1463 | static Ns_ObjvTable subcmds[] = { |
1464 | {"active", (unsigned int)SActiveIdx}, |
1465 | {"all", (unsigned int)SAllIdx}, |
1466 | {"connectionratelimit", (unsigned int)SConnectionRateLimitIdx}, |
1467 | {"connections", (unsigned int)SConnectionsIdx}, |
1468 | {"filters", (unsigned int)SFiltersIdx}, |
1469 | {"keepalive", (unsigned int)SKeepaliveIdx}, |
1470 | {"map", (unsigned int)SMapIdx}, |
1471 | {"mapped", (unsigned int)SMappedIdx}, |
1472 | {"maxthreads", (unsigned int)SMaxthreadsIdx}, |
1473 | {"minthreads", (unsigned int)SMinthreadsIdx}, |
1474 | {"pagedir", (unsigned int)SPagedirIdx}, |
1475 | {"poolratelimit", (unsigned int)SPoolRateLimitIdx}, |
1476 | {"pools", (unsigned int)SPoolsIdx}, |
1477 | {"queued", (unsigned int)SQueuedIdx}, |
1478 | {"requestprocs", (unsigned int)SRequestprocsIdx}, |
1479 | {"serverdir", (unsigned int)SServerdirIdx}, |
1480 | {"stats", (unsigned int)SStatsIdx}, |
1481 | {"tcllib", (unsigned int)STcllibIdx}, |
1482 | {"threads", (unsigned int)SThreadsIdx}, |
1483 | {"traces", (unsigned int)STracesIdx}, |
1484 | {"unmap", (unsigned int)SUnmapIdx}, |
1485 | {"url2file", (unsigned int)SUrl2fileIdx}, |
1486 | {"waiting", (unsigned int)SWaitingIdx}, |
1487 | {NULL((void*)0), 0u} |
1488 | }; |
1489 | Ns_ObjvSpec opts[] = { |
1490 | {"-server", Ns_ObjvServer, &servPtr, NULL((void*)0)}, |
1491 | {"-pool", Ns_ObjvString, &pool, NULL((void*)0)}, |
1492 | {"--", Ns_ObjvBreak, NULL((void*)0), NULL((void*)0)}, |
1493 | {NULL((void*)0), NULL((void*)0), NULL((void*)0), NULL((void*)0)} |
1494 | }; |
1495 | Ns_ObjvSpec args[] = { |
1496 | {"subcmd", Ns_ObjvIndex, &subcmd, subcmds}, |
1497 | {"?args", Ns_ObjvArgs, &nargs, NULL((void*)0)}, |
1498 | {NULL((void*)0), NULL((void*)0), NULL((void*)0), NULL((void*)0)} |
1499 | }; |
1500 | |
    if (unlikely(Ns_ParseObjv(opts, args, interp, 1, objc, objv) != NS_OK)) {
        return TCL_ERROR;
    }

    if ((subcmd == SPoolsIdx
         || subcmd == SFiltersIdx
         || subcmd == SPagedirIdx
         || subcmd == SRequestprocsIdx
         || subcmd == SUrl2fileIdx)
        && pool != NULL) {
        Ns_TclPrintfResult(interp, "option -pool is not allowed for this subcommand");
        return TCL_ERROR;
    }

    if (subcmd != SMinthreadsIdx
        && subcmd != SMaxthreadsIdx
        && subcmd != SMapIdx
        && subcmd != SMappedIdx
        && subcmd != SUnmapIdx
        && subcmd != SActiveIdx
        && subcmd != SQueuedIdx
        && subcmd != SAllIdx
        && subcmd != SPoolRateLimitIdx
        && subcmd != SConnectionRateLimitIdx
        ) {
        /*
         * Just for backwards compatibility.
         */
        if (nargs > 0) {
            Ns_LogDeprecated(objv, objc, "ns_server ?-pool p? ...",
                             "Passing pool as second argument is deprecated.");
            optArg = Tcl_GetString(objv[objc-1]);
            pool = optArg;
        }
    }

    if (servPtr == NULL) {
        servPtr = itPtr->servPtr;
    }

    if (pool != NULL) {
        poolPtr = servPtr->pools.firstPtr;
        while (poolPtr != NULL && !STREQ(poolPtr->pool, pool)) {
            poolPtr = poolPtr->nextPtr;
        }
        if (unlikely(poolPtr == NULL)) {
            Ns_TclPrintfResult(interp, "no such pool '%s' for server '%s'", pool, servPtr->server);
            return TCL_ERROR;
        }
    } else {
        poolPtr = servPtr->pools.defaultPtr;
    }

    switch (subcmd) {
        /*
         * These subcommands are server specific (do not allow the -pool option).
         */
    case SPoolsIdx:
        {
            Tcl_Obj *listObj = Tcl_NewListObj(0, NULL);

            for (poolPtr = servPtr->pools.firstPtr; poolPtr != NULL; poolPtr = poolPtr->nextPtr) {
                Tcl_ListObjAppendElement(interp, listObj, Tcl_NewStringObj(poolPtr->pool, -1));
            }
            Tcl_SetObjResult(interp, listObj);
        }
        break;

    case SFiltersIdx:
        Tcl_DStringInit(dsPtr);
        NsGetFilters(dsPtr, servPtr->server);
        Tcl_DStringResult(interp, dsPtr);
        break;

    case SPagedirIdx:
        Tcl_DStringInit(dsPtr);
        NsPageRoot(dsPtr, servPtr, NULL);
        Tcl_DStringResult(interp, dsPtr);
        break;

    case SServerdirIdx:
        Tcl_DStringInit(dsPtr);
        Tcl_DStringAppend(dsPtr, servPtr->fastpath.serverdir, -1);
        Tcl_DStringResult(interp, dsPtr);
        break;

    case SRequestprocsIdx:
        Tcl_DStringInit(dsPtr);
        NsGetRequestProcs(dsPtr, servPtr->server);
        Tcl_DStringResult(interp, dsPtr);
        break;

    case STracesIdx:
        Tcl_DStringInit(dsPtr);
        NsGetTraces(dsPtr, servPtr->server);
        Tcl_DStringResult(interp, dsPtr);
        break;

    case STcllibIdx:
        Tcl_SetObjResult(interp, Tcl_NewStringObj(servPtr->tcl.library, -1));
        break;

    case SUrl2fileIdx:
        Tcl_DStringInit(dsPtr);
        NsGetUrl2FileProcs(dsPtr, servPtr->server);
        Tcl_DStringResult(interp, dsPtr);
        break;

        /*
         * These subcommands are pool-specific (allow the -pool option).
         */

    case SWaitingIdx:
        Tcl_SetObjResult(interp, Tcl_NewIntObj(poolPtr->wqueue.wait.num));
        break;

    case SKeepaliveIdx:
        Ns_LogDeprecated(objv, objc, "ns_conn keepalive", NULL);
        Tcl_SetObjResult(interp, Tcl_NewIntObj(0));
        break;

    case SMapIdx:
        result = ServerMapObjCmd(clientData, interp, objc, objv, servPtr, poolPtr, nargs);
        break;

    case SMappedIdx:
        result = ServerMappedObjCmd(clientData, interp, objc, objv, servPtr, nargs);
        break;

    case SUnmapIdx:
        result = ServerUnmapObjCmd(clientData, interp, objc, objv, servPtr, nargs);
        break;

    case SMaxthreadsIdx:
        result = ServerMaxThreadsObjCmd(clientData, interp, objc, objv, poolPtr, nargs);
        break;

    case SPoolRateLimitIdx:
        result = ServerPoolRateLimitObjCmd(clientData, interp, objc, objv, poolPtr, nargs);
        break;

    case SConnectionRateLimitIdx:
        result = ServerConnectionRateLimitObjCmd(clientData, interp, objc, objv, poolPtr, nargs);
        break;

    case SMinthreadsIdx:
        result = ServerMinThreadsObjCmd(clientData, interp, objc, objv, poolPtr, nargs);
        break;

    case SConnectionsIdx:
        Tcl_SetObjResult(interp, Tcl_NewLongObj((long)poolPtr->stats.processed));
        break;

    case SStatsIdx:
        Tcl_DStringInit(dsPtr);

        Ns_DStringPrintf(dsPtr, "requests %lu ", poolPtr->stats.processed);
        Ns_DStringPrintf(dsPtr, "spools %lu ", poolPtr->stats.spool);
        Ns_DStringPrintf(dsPtr, "queued %lu ", poolPtr->stats.queued);
        Ns_DStringPrintf(dsPtr, "dropped %lu ", poolPtr->stats.dropped);
        Ns_DStringPrintf(dsPtr, "sent %" TCL_LL_MODIFIER "d ", poolPtr->rate.bytesSent);
        Ns_DStringPrintf(dsPtr, "connthreads %lu", poolPtr->stats.connthreads);

        Ns_DStringAppend(dsPtr, " accepttime ");
        Ns_DStringAppendTime(dsPtr, &poolPtr->stats.acceptTime);

        Ns_DStringAppend(dsPtr, " queuetime ");
        Ns_DStringAppendTime(dsPtr, &poolPtr->stats.queueTime);

        Ns_DStringAppend(dsPtr, " filtertime ");
        Ns_DStringAppendTime(dsPtr, &poolPtr->stats.filterTime);

        Ns_DStringAppend(dsPtr, " runtime ");
        Ns_DStringAppendTime(dsPtr, &poolPtr->stats.runTime);

        Ns_DStringAppend(dsPtr, " tracetime ");
        Ns_DStringAppendTime(dsPtr, &poolPtr->stats.traceTime);

        Tcl_DStringResult(interp, dsPtr);
        break;

    case SThreadsIdx:
        Ns_MutexLock(&poolPtr->threads.lock);
        Ns_TclPrintfResult(interp,
                           "min %d max %d current %d idle %d stopping 0",
                           poolPtr->threads.min, poolPtr->threads.max,
                           poolPtr->threads.current, poolPtr->threads.idle);
        Ns_MutexUnlock(&poolPtr->threads.lock);
        break;

    case SActiveIdx:
        Tcl_DStringInit(dsPtr);
        result = ServerListActiveCmd(dsPtr, interp, objc, objv, poolPtr, nargs);
        if (likely(result == NS_OK)) {
            Tcl_DStringResult(interp, dsPtr);
        } else {
            Tcl_DStringFree(dsPtr);
        }
        break;

    case SQueuedIdx:
        Tcl_DStringInit(dsPtr);
        result = ServerListQueuedCmd(dsPtr, interp, objc, objv, poolPtr, nargs);
        if (likely(result == NS_OK)) {
            Tcl_DStringResult(interp, dsPtr);
        } else {
            Tcl_DStringFree(dsPtr);
        }
        break;

    case SAllIdx:
        Tcl_DStringInit(dsPtr);
        result = ServerListAllCmd(dsPtr, interp, objc, objv, poolPtr, nargs);
        if (likely(result == NS_OK)) {
            Tcl_DStringResult(interp, dsPtr);
        } else {
            Tcl_DStringFree(dsPtr);
        }
        break;

    default:
        /* should never happen */
        assert(subcmd && 0);
    }

    return result;
}
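
/*
 * Illustrative Tcl usage (server, pool and result values below are
 * hypothetical; the result formats follow the code above):
 *
 *     ns_server pools
 *         => default monitor
 *     ns_server -pool monitor threads
 *         => min 2 max 10 current 2 idle 2 stopping 0
 *     ns_server -pool monitor stats
 *         => requests 42 spools 0 queued 3 dropped 0 sent 1405 connthreads 2 ...
 *     ns_server waiting
 *         => 0
 */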


/*
 *----------------------------------------------------------------------
 *
 * NsStartServer --
 *
 *      Start the core connection thread interface.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Minimum connection threads may be created.
 *
 *----------------------------------------------------------------------
 */

void
NsStartServer(const NsServer *servPtr)
{
    ConnPool *poolPtr;
    int       n;

    NS_NONNULL_ASSERT(servPtr != NULL);

    poolPtr = servPtr->pools.firstPtr;
    while (poolPtr != NULL) {
        poolPtr->threads.idle = 0;
        poolPtr->threads.current = poolPtr->threads.min;
        poolPtr->threads.creating = poolPtr->threads.min;
        for (n = 0; n < poolPtr->threads.min; ++n) {
            CreateConnThread(poolPtr);
        }
        poolPtr = poolPtr->nextPtr;
    }
    NsAsyncWriterQueueEnable();
}
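
/*
 * The number of prestarted threads per pool is taken from
 * poolPtr->threads.min, which is derived from the pool's configuration.
 * A configuration sketch (illustrative values; parameter names assumed
 * from the usual NaviServer conventions):
 *
 *     ns_section ns/server/$server
 *     ns_param   minthreads     2
 *     ns_param   maxthreads     10
 *     ns_param   connsperthread 1000
 */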


/*
 *----------------------------------------------------------------------
 *
 * WakeupConnThreads --
 *
 *      Wake up every idle connection thread of the specified pool.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static void
WakeupConnThreads(ConnPool *poolPtr)
{
    int i;

    NS_NONNULL_ASSERT(poolPtr != NULL);

    Ns_MutexLock(&poolPtr->tqueue.lock);
    for (i = 0; i < poolPtr->threads.max; i++) {
        ConnThreadArg *argPtr = &poolPtr->tqueue.args[i];

        if (argPtr->state == connThread_idle) {
            assert(argPtr->connPtr == NULL);
            Ns_MutexLock(&argPtr->lock);
            Ns_CondSignal(&argPtr->cond);
            Ns_MutexUnlock(&argPtr->lock);
        }
    }
    Ns_MutexUnlock(&poolPtr->tqueue.lock);
}
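
/*
 * Note that the signal is sent while holding argPtr->lock, the same mutex
 * an idle conn thread holds around its Ns_CondTimedWait() in
 * NsConnThread(). This pairing prevents lost wakeups: a thread cannot be
 * signaled between enqueueing itself and starting to wait. Simplified
 * sketch of the two sides:
 *
 *     waiter (NsConnThread)                 signaler (WakeupConnThreads)
 *     Ns_MutexLock(&argPtr->lock);          Ns_MutexLock(&argPtr->lock);
 *     ... enqueue in tqueue ...             Ns_CondSignal(&argPtr->cond);
 *     Ns_CondTimedWait(&argPtr->cond,       Ns_MutexUnlock(&argPtr->lock);
 *                      &argPtr->lock, t);
 *     Ns_MutexUnlock(&argPtr->lock);
 */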


/*
 *----------------------------------------------------------------------
 *
 * NsStopServer --
 *
 *      Signal and wait for connection threads to exit.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
NsStopServer(NsServer *servPtr)
{
    ConnPool *poolPtr;

    NS_NONNULL_ASSERT(servPtr != NULL);

    Ns_Log(Notice, "server [%s]: stopping", servPtr->server);
    servPtr->pools.shutdown = NS_TRUE;
    poolPtr = servPtr->pools.firstPtr;
    while (poolPtr != NULL) {
        WakeupConnThreads(poolPtr);
        poolPtr = poolPtr->nextPtr;
    }
}

void
NsWaitServer(NsServer *servPtr, const Ns_Time *toPtr)
{
    ConnPool     *poolPtr;
    Ns_Thread     joinThread;
    Ns_ReturnCode status;

    NS_NONNULL_ASSERT(servPtr != NULL);
    NS_NONNULL_ASSERT(toPtr != NULL);

    status = NS_OK;
    poolPtr = servPtr->pools.firstPtr;
    Ns_MutexLock(&servPtr->pools.lock);
    while (poolPtr != NULL && status == NS_OK) {
        while (status == NS_OK &&
               (poolPtr->wqueue.wait.firstPtr != NULL
                || poolPtr->threads.current > 0)) {
            status = Ns_CondTimedWait(&poolPtr->wqueue.cond,
                                      &servPtr->pools.lock, toPtr);
        }
        poolPtr = poolPtr->nextPtr;
    }
    joinThread = servPtr->pools.joinThread;
    servPtr->pools.joinThread = NULL;
    Ns_MutexUnlock(&servPtr->pools.lock);

    if (status != NS_OK) {
        Ns_Log(Warning, "server [%s]: timeout waiting for connection thread exit", servPtr->server);
    } else {
        if (joinThread != NULL) {
            Ns_ThreadJoin(&joinThread, NULL);
        }
        Ns_Log(Notice, "server [%s]: connection threads stopped", servPtr->server);
    }
}
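
/*
 * NsStopServer() and NsWaitServer() together implement server shutdown:
 * the former sets pools.shutdown and wakes all idle connection threads;
 * the latter then blocks on wqueue.cond (bounded by toPtr) until each
 * pool's waiting queue is drained and threads.current has dropped to
 * zero, finally joining the last exited thread.
 */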


/*
 *----------------------------------------------------------------------
 *
 * NsConnArgProc --
 *
 *      Ns_GetProcInfo callback for a running conn thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      See AppendConn.
 *
 *----------------------------------------------------------------------
 */

void
NsConnArgProc(Tcl_DString *dsPtr, const void *arg)
{
    const ConnThreadArg *argPtr = arg;

    NS_NONNULL_ASSERT(dsPtr != NULL);

    if (arg != NULL) {
        ConnPool *poolPtr = argPtr->poolPtr;

        Ns_MutexLock(&poolPtr->tqueue.lock);
        AppendConn(dsPtr, argPtr->connPtr, "running", NS_FALSE);
        Ns_MutexUnlock(&poolPtr->tqueue.lock);
    } else {
        Tcl_DStringAppendElement(dsPtr, NS_EMPTY_STRING);
    }
}


/*
 *----------------------------------------------------------------------
 *
 * ConnThreadSetName --
 *
 *      Set the conn thread name based on server name, pool name, thread ID
 *      and connection ID. The pool name is always non-NULL, but might be
 *      the empty string.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Updates the thread name (internally just a printf operation).
 *
 *----------------------------------------------------------------------
 */

static void
ConnThreadSetName(const char *server, const char *pool, uintptr_t threadId, uintptr_t connId)
{
    NS_NONNULL_ASSERT(server != NULL);
    NS_NONNULL_ASSERT(pool != NULL);

    Ns_ThreadSetName("-conn:%s:%s:%" PRIuPTR ":%" PRIuPTR "-",
                     server, NsPoolName(pool), threadId, connId);
}
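
/*
 * Example: for server "server1" and pool "fast", thread id 7 serving
 * connection id 123, the resulting name would be "-conn:server1:fast:7:123-"
 * (illustrative names, and assuming NsPoolName() returns the given pool
 * name unchanged for a non-empty pool).
 */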


/*
 *----------------------------------------------------------------------
 *
 * NsConnThread --
 *
 *      Main connection service thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Connections are removed from the waiting queue and serviced.
 *
 *----------------------------------------------------------------------
 */

void
NsConnThread(void *arg)
{
    ConnThreadArg *argPtr;
    ConnPool      *poolPtr;
    NsServer      *servPtr;
    Conn          *connPtr = NULL;
    Ns_Time        wait, *timePtr = &wait;
    uintptr_t      threadId;
    bool           duringShutdown, fromQueue;
    int            cpt, ncons, current;
    Ns_ReturnCode  status = NS_OK;
    Ns_Time        timeout;
    const char    *exitMsg;
    Ns_Thread      joinThread;
    Ns_Mutex      *threadsLockPtr, *tqueueLockPtr, *wqueueLockPtr;

    NS_NONNULL_ASSERT(arg != NULL);

    /*
     * Set the ConnThreadArg into thread local storage and get the id
     * of the thread.
     */
    argPtr = arg;
    poolPtr = argPtr->poolPtr;
    assert(poolPtr != NULL);

    tqueueLockPtr = &poolPtr->tqueue.lock;
    Ns_TlsSet(&argtls, argPtr);

    Ns_MutexLock(tqueueLockPtr);
    argPtr->state = connThread_warmup;
    Ns_MutexUnlock(tqueueLockPtr);

    threadsLockPtr = &poolPtr->threads.lock;

    Ns_MutexLock(threadsLockPtr);
    threadId = poolPtr->threads.nextid++;
    if (poolPtr->threads.creating > 0) {
        poolPtr->threads.creating--;
    }
    Ns_MutexUnlock(threadsLockPtr);

    servPtr = poolPtr->servPtr;
    ConnThreadSetName(servPtr->server, poolPtr->pool, threadId, 0);

    Ns_ThreadSelf(&joinThread);

    cpt = poolPtr->threads.connsperthread;
    ncons = cpt;
    timeout = poolPtr->threads.timeout;

    /*
     * Initialize the connection thread with the blueprint to avoid
     * the initialization delay when the first connection comes in.
     */
    {
        Tcl_Interp *interp;
        Ns_Time     start, end, diff;

        Ns_GetTime(&start);
        interp = NsTclAllocateInterp(servPtr);
        Ns_GetTime(&end);
        Ns_DiffTime(&end, &start, &diff);
        Ns_Log(Notice, "thread initialized (" NS_TIME_FMT " secs)",
               (int64_t)diff.sec, diff.usec);
        Ns_TclDeAllocateInterp(interp);
        argPtr->state = connThread_ready;
    }

    wqueueLockPtr = &poolPtr->wqueue.lock;

    /*
     * Start handling connections.
     */

    for (;;) {

        /*
         * We are ready to process requests. Either pick a request from
         * the waiting queue, or go to a waiting state and add this
         * thread to the conn thread queue.
         */
        assert(argPtr->connPtr == NULL);
        assert(argPtr->state == connThread_ready);

        if (poolPtr->wqueue.wait.firstPtr != NULL) {
            connPtr = NULL;
            Ns_MutexLock(wqueueLockPtr);
            if (poolPtr->wqueue.wait.firstPtr != NULL) {
                /*
                 * There are waiting requests. Pull the first connection off
                 * the waiting list and assign it to the ConnThreadArg.
                 */
                connPtr = poolPtr->wqueue.wait.firstPtr;
                poolPtr->wqueue.wait.firstPtr = connPtr->nextPtr;
                if (poolPtr->wqueue.wait.lastPtr == connPtr) {
                    poolPtr->wqueue.wait.lastPtr = NULL;
                }
                connPtr->nextPtr = NULL;
                poolPtr->wqueue.wait.num --;
            }
            Ns_MutexUnlock(wqueueLockPtr);

            argPtr->connPtr = connPtr;
            fromQueue = NS_TRUE;
        } else {
            fromQueue = NS_FALSE;
        }

        if (argPtr->connPtr == NULL) {
            /*
             * There is nothing urgent to do. We can add ourselves to the
             * conn thread queue.
             */
            Ns_MutexLock(threadsLockPtr);
            poolPtr->threads.idle ++;
            Ns_MutexUnlock(threadsLockPtr);

            Ns_MutexLock(tqueueLockPtr);
            argPtr->state = connThread_idle;
            /*
             * We put an entry into the thread queue. However, we must take
             * care that no signals are sent before this thread is actually
             * waiting for them. Therefore, we acquire the connection thread
             * specific lock right here; the signal sending code uses the
             * same lock.
             */
            Ns_MutexLock(&argPtr->lock);

            argPtr->nextPtr = poolPtr->tqueue.nextPtr;
            poolPtr->tqueue.nextPtr = argPtr;
            Ns_MutexUnlock(tqueueLockPtr);

            while (!servPtr->pools.shutdown) {

                Ns_GetTime(timePtr);
                Ns_IncrTime(timePtr, timeout.sec, timeout.usec);

                /*
                 * Wait until someone wakes us up, or a timeout happens.
                 */
                status = Ns_CondTimedWait(&argPtr->cond, &argPtr->lock, timePtr);

                if (unlikely(status == NS_TIMEOUT)) {
                    if (unlikely(argPtr->connPtr != NULL)) {
                        /*
                         * This should not happen: we had a timeout, but there
                         * is a connection to be handled; when a connection
                         * comes in, we get signaled and should therefore see
                         * no timeout. Maybe the signal was lost?
                         */
                        Ns_Log(Warning, "signal lost, resuming after timeout");
                        status = NS_OK;

                    } else if (poolPtr->threads.current <= poolPtr->threads.min) {
                        /*
                         * We have a timeout, but we should not reduce the
                         * number of threads below min-threads.
                         */
                        NsIdleCallback(servPtr);
                        continue;

                    } else {
                        /*
                         * We have a timeout, and the thread can exit.
                         */
                        break;
                    }
                }

                if (likely(argPtr->connPtr != NULL)) {
                    /*
                     * We got something to do; therefore, leave this loop.
                     */
                    break;
                }

                Ns_Log(Debug, "Unexpected condition after CondTimedWait; maybe shutdown?");
            }

            Ns_MutexUnlock(&argPtr->lock);

            assert(argPtr->state == connThread_idle);

            if (argPtr->connPtr == NULL) {
                /*
                 * We were not signaled deliberately (e.g. we timed out), so
                 * we have to dequeue the current thread ourselves.
                 */
                ConnThreadArg *aPtr, **prevPtr;

                Ns_MutexLock(tqueueLockPtr);
                for (aPtr = poolPtr->tqueue.nextPtr, prevPtr = &poolPtr->tqueue.nextPtr;
                     aPtr != NULL;
                     prevPtr = &aPtr->nextPtr, aPtr = aPtr->nextPtr) {
                    if (aPtr == argPtr) {
                        /*
                         * This entry is ours; unlink it.
                         */
                        *prevPtr = aPtr->nextPtr;
                        argPtr->nextPtr = NULL;
                        break;
                    }
                }
                argPtr->state = connThread_busy;
                Ns_MutexUnlock(tqueueLockPtr);
            } else {
                Ns_MutexLock(tqueueLockPtr);
                argPtr->state = connThread_busy;
                Ns_MutexUnlock(tqueueLockPtr);
            }

            Ns_MutexLock(threadsLockPtr);
            poolPtr->threads.idle --;
            Ns_MutexUnlock(threadsLockPtr);

            if (servPtr->pools.shutdown) {
                exitMsg = "shutdown pending";
                break;
            } else if (status == NS_TIMEOUT) {
                exitMsg = "idle thread terminates";
                break;
            }
        }

        connPtr = argPtr->connPtr;
        assert(connPtr != NULL);

        Ns_GetTime(&connPtr->requestDequeueTime);

        /*
         * Run the connection if possible (requires a valid sockPtr and a
         * successful NsGetRequest() operation).
         */
        if (likely(connPtr->sockPtr != NULL)) {
            /*
             * Get the request from the sockPtr (either from read-ahead or via
             * parsing).
             */
            connPtr->reqPtr = NsGetRequest(connPtr->sockPtr, &connPtr->requestDequeueTime);

            /*
             * If there is no request, produce a warning and close the
             * connection.
             */
            if (connPtr->reqPtr == NULL) {
                Ns_Log(Warning, "connPtr %p has no reqPtr, close this connection", (void *)connPtr);
                (void) Ns_ConnClose((Ns_Conn *)connPtr);
            } else {
                /*
                 * Everything is supplied, run the request. ConnRun()
                 * finally closes the connection.
                 */
                ConnThreadSetName(servPtr->server, poolPtr->pool, threadId, connPtr->id);
                ConnRun(connPtr);
            }
        } else {
            /*
             * If we have no sockPtr, we can't do much here.
             */
            Ns_Log(Warning, "connPtr %p has no socket, close this connection", (void *)connPtr);
            (void) Ns_ConnClose((Ns_Conn *)connPtr);
        }

        /*
         * Protect connPtr->headers (and other members) against other threads,
         * since we are deallocating its content. This is especially important
         * for e.g. "ns_server active" since it accesses the header fields.
         */
        Ns_MutexLock(tqueueLockPtr);
        connPtr->flags &= ~NS_CONN_CONFIGURED;

        /*
         * We are done with the headers; reset them for further reuse.
         */
        Ns_SetTrunc(connPtr->headers, 0);

        argPtr->state = connThread_ready;
        Ns_MutexUnlock(tqueueLockPtr);

        /*
         * Push the connection to the free list.
         */
        argPtr->connPtr = NULL;

        if (connPtr->prevPtr != NULL) {
            connPtr->prevPtr->nextPtr = connPtr->nextPtr;
        }
        if (connPtr->nextPtr != NULL) {
            connPtr->nextPtr->prevPtr = connPtr->prevPtr;
        }
        connPtr->prevPtr = NULL;

        Ns_MutexLock(wqueueLockPtr);
        connPtr->nextPtr = poolPtr->wqueue.freePtr;
        poolPtr->wqueue.freePtr = connPtr;
        Ns_MutexUnlock(wqueueLockPtr);

        if (cpt != 0) {
            int waiting, idle, lowwater;

            --ncons;

            /*
             * Get a consistent snapshot of the controlling variables.
             */
            Ns_MutexLock(wqueueLockPtr);
            Ns_MutexLock(threadsLockPtr);
            waiting = poolPtr->wqueue.wait.num;
            lowwater = poolPtr->wqueue.lowwatermark;
            idle = poolPtr->threads.idle;
            current = poolPtr->threads.current;
            Ns_MutexUnlock(threadsLockPtr);
            Ns_MutexUnlock(wqueueLockPtr);

            if (Ns_LogSeverityEnabled(Debug)) {
                Ns_Time now, acceptTime, queueTime, filterTime, netRunTime, runTime, fullTime;

                Ns_DiffTime(&connPtr->requestQueueTime, &connPtr->acceptTime, &acceptTime);
                Ns_DiffTime(&connPtr->requestDequeueTime, &connPtr->requestQueueTime, &queueTime);
                Ns_DiffTime(&connPtr->filterDoneTime, &connPtr->requestDequeueTime, &filterTime);

                Ns_GetTime(&now);
                Ns_DiffTime(&now, &connPtr->requestDequeueTime, &runTime);
                Ns_DiffTime(&now, &connPtr->filterDoneTime, &netRunTime);
                Ns_DiffTime(&now, &connPtr->requestQueueTime, &fullTime);

                Ns_Log(Debug, "[%d] end of job, waiting %d current %d idle %d ncons %d fromQueue %d"
                       " start " NS_TIME_FMT
                       " " NS_TIME_FMT
                       " accept " NS_TIME_FMT
                       " queue " NS_TIME_FMT
                       " filter " NS_TIME_FMT
                       " run " NS_TIME_FMT
                       " netrun " NS_TIME_FMT
                       " total " NS_TIME_FMT,
                       ThreadNr(poolPtr, argPtr),
                       waiting, poolPtr->threads.current, idle, ncons, fromQueue ? 1 : 0,
                       (int64_t) connPtr->acceptTime.sec, connPtr->acceptTime.usec,
                       (int64_t) connPtr->requestQueueTime.sec, connPtr->requestQueueTime.usec,
                       (int64_t) acceptTime.sec, acceptTime.usec,
                       (int64_t) queueTime.sec, queueTime.usec,
                       (int64_t) filterTime.sec, filterTime.usec,
                       (int64_t) runTime.sec, runTime.usec,
                       (int64_t) netRunTime.sec, netRunTime.usec,
                       (int64_t) fullTime.sec, fullTime.usec
                      );
            }

            if (waiting > 0) {
                /*
                 * There are waiting requests. Work on those unless we are
                 * expiring, we are already under the lowwater mark of
                 * connection threads, or we are the last thread standing.
                 */
                if (ncons > 0 || waiting > lowwater || current <= 1) {
                    continue;
                }
            }

            if (ncons <= 0) {
                exitMsg = "exceeded max connections per thread";
                break;
            }
        } else if (ncons <= 0) {
            /* Served the given number of connections in this thread. */
            exitMsg = "exceeded max connections per thread";
            break;
        }
    }
    argPtr->state = connThread_dead;

    Ns_MutexLock(&servPtr->pools.lock);
    duringShutdown = servPtr->pools.shutdown;
    Ns_MutexUnlock(&servPtr->pools.lock);

    {
        bool wakeup;

        /*
         * Record the fact that this connection thread is exiting by
         * decrementing the number of actually running threads, and wake up
         * the driver to check against thread starvation (due to an
         * insufficient number of connection threads).
         */
        Ns_MutexLock(threadsLockPtr);
        poolPtr->threads.current--;
        wakeup = (poolPtr->threads.current < poolPtr->threads.min);
        Ns_MutexUnlock(threadsLockPtr);

        /*
         * During shutdown, we do not want to restart connection
         * threads. The driver pointer might already be invalid.
         */
        if (wakeup && connPtr != NULL && !duringShutdown) {
            assert(connPtr->drvPtr != NULL);
            NsWakeupDriver(connPtr->drvPtr);
        }
    }

    /*
     * During shutdown, the main thread waits for signals on the
     * condition variable to check whether all threads have terminated
     * already.
     */
    if (duringShutdown) {
        Ns_CondSignal(&poolPtr->wqueue.cond);
    }

    Ns_MutexLock(&servPtr->pools.lock);
    joinThread = servPtr->pools.joinThread;
    Ns_ThreadSelf(&servPtr->pools.joinThread);
    Ns_MutexUnlock(&servPtr->pools.lock);

    if (joinThread != NULL) {
        Ns_ThreadJoin(&joinThread, NULL);
    }

    Ns_Log(Notice, "exiting: %s", exitMsg);

    Ns_MutexLock(tqueueLockPtr);
    argPtr->state = connThread_free;
    Ns_MutexUnlock(tqueueLockPtr);

    Ns_ThreadExit(argPtr);
}
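
/*
 * Summary of the ConnThreadArg state transitions driven above and in
 * CreateConnThread() below:
 *
 *     connThread_free    -> connThread_initial  (slot reserved, CreateConnThread)
 *     connThread_initial -> connThread_warmup   (thread started)
 *     connThread_warmup  -> connThread_ready    (blueprint interp initialized)
 *     connThread_ready   -> connThread_idle     (queued, waiting for work)
 *     connThread_idle    -> connThread_busy     (connection assigned or dequeued)
 *     connThread_busy    -> connThread_ready    (request finished)
 *     connThread_ready   -> connThread_dead     (exit: shutdown, idle timeout,
 *                                                or connsperthread exceeded)
 *     connThread_dead    -> connThread_free     (slot reusable, just before
 *                                                Ns_ThreadExit)
 */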

/*
 *----------------------------------------------------------------------
 *
 * NsHeaderSetGet --
 *
 *      Return an Ns_Set for request headers with some defaults.
 *
 * Results:
 *      Ns_Set *
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */
Ns_Set *
NsHeaderSetGet(size_t size)
{
    Ns_Set *result;

    result = Ns_SetCreateSz(NS_SET_NAME_REQ, MAX(10, size));
#ifdef NS_SET_DSTRING
    Ns_SetDataPrealloc(result, 4095);
#endif

    return result;
}
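
/*
 * Usage sketch: callers pass the expected number of header fields and
 * always get a set with room for at least 10 entries, e.g. as in
 * ConnRun() below:
 *
 *     Ns_Set *hdrs = NsHeaderSetGet(connPtr->reqPtr->headers->maxSize);
 */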


/*
 *----------------------------------------------------------------------
 *
 * ConnRun --
 *
 *      Run the actual (non-null) request and finally close the
 *      connection.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Potential side effects caused by the callbacks.
 *
 *----------------------------------------------------------------------
 */
static void
ConnRun(Conn *connPtr)
{
    Sock           *sockPtr;
    Ns_Conn        *conn;
    const NsServer *servPtr;
    Ns_ReturnCode   status;
    const char     *auth;

    NS_NONNULL_ASSERT(connPtr != NULL);

    conn = (Ns_Conn *)connPtr;
    sockPtr = connPtr->sockPtr;

    assert(sockPtr != NULL);
    assert(sockPtr->reqPtr != NULL);

    /*
     * Make sure we update the peer address with the actual remote IP
     * address.
     */
    (void) Ns_ConnSetPeer(conn,
                          (struct sockaddr *)&(sockPtr->sa),
                          (struct sockaddr *)&(sockPtr->clientsa)
                         );

    /*
     * Get the request data from the reqPtr to ease life-time management in
     * connection threads. It would probably be sufficient to clear just the
     * request line, but we want to play it safe and clear everything.
     */
    connPtr->request = connPtr->reqPtr->request;
    memset(&(connPtr->reqPtr->request), 0, sizeof(struct Ns_Request));

    /*
      Ns_Log(Notice, "ConnRun connPtr %p req %p %s", connPtr, connPtr->request, connPtr->request.line);
    */

    /*
     * Move connPtr->reqPtr->headers to connPtr->headers (named "req") for the
     * delivery thread and get a fresh or preallocated structure for the next
     * request in this connection thread.
     */
    {
        Ns_Set *preallocedHeaders = connPtr->headers;

        if (preallocedHeaders == NULL) {
            preallocedHeaders = NsHeaderSetGet(connPtr->reqPtr->headers->maxSize);
        } else {
#ifdef NS_SET_DSTRING
            Ns_Log(Ns_LogNsSetDebug, "SSS ConnRun REUSE %p '%s': size %lu/%lu buffer %d/%d",
                   (void*)preallocedHeaders, preallocedHeaders->name,
                   preallocedHeaders->size, preallocedHeaders->maxSize,
                   preallocedHeaders->data.length, preallocedHeaders->data.spaceAvl);
#endif
        }
        connPtr->headers = connPtr->reqPtr->headers;
        connPtr->reqPtr->headers = preallocedHeaders;
    }

    /*
     * Flag that the connection is fully configured and that we can use its
     * data.
     */
    connPtr->flags |= NS_CONN_CONFIGURED;
    connPtr->contentLength = connPtr->reqPtr->length;

    connPtr->nContentSent = 0u;
    connPtr->responseStatus = 200;
    connPtr->responseLength = -1;  /* -1 == unknown (stream), 0 == zero bytes. */
    connPtr->recursionCount = 0;
    connPtr->auth = NULL;

    /*
     * keep == -1 means: undecided, the default keep-alive rules are applied.
     */
    connPtr->keep = -1;

    servPtr = connPtr->poolPtr->servPtr;
    Ns_ConnSetCompression(conn, servPtr->compress.enable ? servPtr->compress.level : 0);
    connPtr->compress = -1;

    connPtr->outputEncoding = servPtr->encoding.outputEncoding;
    connPtr->urlEncoding = servPtr->encoding.urlEncoding;

    Tcl_InitHashTable(&connPtr->files, TCL_STRING_KEYS);

    memcpy(connPtr->idstr, "cns", 3u);
    (void)ns_uint64toa(&connPtr->idstr[3], (uint64_t)connPtr->id);

    if (connPtr->outputheaders == NULL) {
        connPtr->outputheaders = Ns_SetCreate(NS_SET_NAME_RESPONSE);
    }

    if (connPtr->request.version < 1.0) {
        conn->flags |= NS_CONN_SKIPHDRS;
    }
    if (servPtr->opts.hdrcase != Preserve) {
        size_t i;

        for (i = 0u; i < Ns_SetSize(connPtr->headers); ++i) {
            if (servPtr->opts.hdrcase == ToLower) {
                Ns_StrToLower(Ns_SetKey(connPtr->headers, i));
            } else {
                Ns_StrToUpper(Ns_SetKey(connPtr->headers, i));
            }
        }
    }
    auth = Ns_SetIGet(connPtr->headers, "authorization");
    if (auth != NULL) {
        NsParseAuth(connPtr, auth);
    }
    if ((conn->request.method != NULL) && STREQ(conn->request.method, "HEAD")) {
        conn->flags |= NS_CONN_SKIPBODY;
    }

    if (sockPtr->drvPtr->requestProc != NULL) {
        /*
         * Run the driver's private handler.
         */
        Ns_GetTime(&connPtr->filterDoneTime);
        status = (*sockPtr->drvPtr->requestProc)(sockPtr->drvPtr->arg, conn);
    } else if ((connPtr->request.protocol != NULL) && (connPtr->request.host != NULL)) {
        /*
         * Run proxy request.
         */
        Ns_GetTime(&connPtr->filterDoneTime);
        status = NsConnRunProxyRequest((Ns_Conn *) connPtr);
    } else {
        /*
         * Run classical HTTP requests.
         */
        status = NsRunFilters(conn, NS_FILTER_PRE_AUTH);
        Ns_GetTime(&connPtr->filterDoneTime);

        if (connPtr->sockPtr == NULL) {
            /*
             * If - for whatever reason - a filter has closed the connection,
             * treat the result as NS_FILTER_RETURN. Other feedback to this
             * connection cannot work anymore.
             */
            Ns_Log(Debug, "Filter closed connection; cancel further request processing");

            status = NS_FILTER_RETURN;
        }

        if (status == NS_OK) {
            status = Ns_AuthorizeRequest(servPtr->server,
                                         connPtr->request.method,
                                         connPtr->request.url,
                                         Ns_ConnAuthUser(conn),
                                         Ns_ConnAuthPasswd(conn),
                                         Ns_ConnPeerAddr(conn));
            switch (status) {
            case NS_OK:
                status = NsRunFilters(conn, NS_FILTER_POST_AUTH);
                Ns_GetTime(&connPtr->filterDoneTime);
                if (status == NS_OK && (connPtr->sockPtr != NULL)) {
                    /*
                     * Run the actual request.
                     */
                    status = Ns_ConnRunRequest(conn);
                }
                break;

            case NS_FORBIDDEN:
                (void) Ns_ConnReturnForbidden(conn);
                break;

            case NS_UNAUTHORIZED:
                (void) Ns_ConnReturnUnauthorized(conn);
                break;

            case NS_ERROR:          NS_FALL_THROUGH; /* fall through */
            case NS_FILTER_BREAK:   NS_FALL_THROUGH; /* fall through */
            case NS_FILTER_RETURN:  NS_FALL_THROUGH; /* fall through */
            case NS_TIMEOUT:
                (void)Ns_ConnTryReturnInternalError(conn, status, "after authorize request");
                break;
            }
        } else if (status != NS_FILTER_RETURN) {
            /*
             * The status is neither NS_OK nor NS_FILTER_RETURN, so a
             * pre-auth filter caught an error. We are not going to proceed,
             * but we also cannot count on the filter having sent a response
             * back to the client. So, send an error response.
             */
            (void)Ns_ConnTryReturnInternalError(conn, status, "after pre_auth filter");
            /*
             * Set the status so that NS_FILTER_TRACE can still run.
             */
            status = NS_FILTER_RETURN;
        }
    }

    /*
     * Update run time statistics to make these usable for traces (e.g. the
     * access log).
     */
    NsConnTimeStatsUpdate(conn);

    if ((status == NS_OK) || (status == NS_FILTER_RETURN)) {
        status = NsRunFilters(conn, NS_FILTER_TRACE);
        if (status == NS_OK) {
            (void) NsRunFilters(conn, NS_FILTER_VOID_TRACE);
            /*
             * Run server traces (e.g. writing access log entries).
             */
            NsRunTraces(conn);
        }
    } else {
        Ns_Log(Notice, "not running NS_FILTER_TRACE status %d", status);
    }

    /*
     * Perform various garbage collection tasks. Note that the order is
     * significant: the driver freeProc could possibly use Tcl, and Tcl
     * deallocate callbacks could possibly access header and/or request data.
     */
    NsRunCleanups(conn);
    NsClsCleanup(connPtr);
    NsFreeConnInterp(connPtr);

    /*
     * In case some leftover is in the buffer, signal the driver to
     * process the remaining bytes.
     */
    {
        bool wakeup;

        Ns_MutexLock(&sockPtr->drvPtr->lock);
        wakeup = (sockPtr->keep && (connPtr->reqPtr->leftover > 0u));
        Ns_MutexUnlock(&sockPtr->drvPtr->lock);

        if (wakeup) {
            NsWakeupDriver(sockPtr->drvPtr);
        }
    }

    /*
     * Close the connection. This might free the content of connPtr->reqPtr
     * as well, so set it to NULL to avoid surprises if someone were to
     * access these structures afterwards.
     */
    (void) Ns_ConnClose(conn);

    Ns_MutexLock(&connPtr->poolPtr->tqueue.lock);
    connPtr->reqPtr = NULL;
    Ns_MutexUnlock(&connPtr->poolPtr->tqueue.lock);

    /*
     * Deactivate the stream writer, if defined.
     */
    if (connPtr->fd != 0) {
        connPtr->fd = 0;
    }
    if (connPtr->strWriter != NULL) {
        void *wrPtr;

        NsWriterLock();
        /*
         * Avoid potential race conditions, so refetch inside the lock.
         */
        wrPtr = connPtr->strWriter;
        if (wrPtr != NULL) {
            NsWriterFinish(wrPtr);
            connPtr->strWriter = NULL;
        }
        NsWriterUnlock();
    }

    /*
     * Free structures.
     */
    Ns_ConnClearQuery(conn);
    Ns_SetFree(connPtr->auth);
    connPtr->auth = NULL;

    Ns_SetTrunc(connPtr->outputheaders, 0);

    if (connPtr->request.line != NULL) {
        /*
         * reqPtr is freed by FreeRequest() in the driver.
         */
        Ns_ResetRequest(&connPtr->request);
        assert(connPtr->request.line == NULL);
    }

    if (connPtr->clientData != NULL) {
        ns_free(connPtr->clientData);
        connPtr->clientData = NULL;
    }

    NsConnTimeStatsFinalize(conn);
}
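
/*
 * Processing order for a classical HTTP request in ConnRun(), as coded
 * above:
 *
 *     NS_FILTER_PRE_AUTH filters
 *     -> Ns_AuthorizeRequest()
 *     -> NS_FILTER_POST_AUTH filters
 *     -> Ns_ConnRunRequest()
 *     -> NS_FILTER_TRACE + NS_FILTER_VOID_TRACE filters, server traces
 *     -> cleanups (NsRunCleanups, NsClsCleanup, NsFreeConnInterp)
 *     -> Ns_ConnClose()
 */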


/*
 *----------------------------------------------------------------------
 *
 * CreateConnThread --
 *
 *      Create a connection thread.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      New thread.
 *
 *----------------------------------------------------------------------
 */

static void
CreateConnThread(ConnPool *poolPtr)
{
    Ns_Thread      thread;
    ConnThreadArg *argPtr = NULL;
    int            i;

#if !defined(NDEBUG)
    {
        const char *threadName = Ns_ThreadGetName();

        assert(strncmp("-driver:", threadName, 8u) == 0
               || strncmp("-main", threadName, 5u) == 0
               || strncmp("-spooler", threadName, 8u) == 0
               || strncmp("-service-", threadName, 9u) == 0
              );
    }
#endif

    NS_NONNULL_ASSERT(poolPtr != NULL);

    /*
     * Get the first free connection thread slot; selecting a slot and
     * occupying it has to be done under a mutex lock, since we do not
     * want someone else to pick the same one. We are potentially competing
     * against driver/spooler threads and the main thread.
     *
     * TODO: Maybe we could do better than the linear search, but the queue
     * is usually short...
     */
    Ns_MutexLock(&poolPtr->tqueue.lock);
    for (i = 0; likely(i < poolPtr->threads.max); i++) {
        if (poolPtr->tqueue.args[i].state == connThread_free) {
            argPtr = &(poolPtr->tqueue.args[i]);
            break;
        }
    }
    if (likely(argPtr != NULL)) {
        argPtr->state = connThread_initial;
        poolPtr->stats.connthreads++;
        Ns_MutexUnlock(&poolPtr->tqueue.lock);

        /* Ns_Log(Notice, "CreateConnThread use thread slot [%d]", i); */

        argPtr->poolPtr = poolPtr;
        argPtr->connPtr = NULL;
        argPtr->nextPtr = NULL;
        argPtr->cond = NULL;

        Ns_ThreadCreate(NsConnThread, argPtr, 0, &thread);
    } else {
        Ns_MutexUnlock(&poolPtr->tqueue.lock);

        Ns_MutexLock(&poolPtr->threads.lock);
        poolPtr->threads.current --;
        poolPtr->threads.creating --;
        Ns_MutexUnlock(&poolPtr->threads.lock);

        Ns_Log(Debug, "Cannot create additional connection thread in pool '%s', "
               "maxthreads (%d) are running", poolPtr->pool, i);
    }
}
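
/*
 * Callers account for the new thread up front (see NsStartServer(), which
 * bumps threads.current and threads.creating before calling in); this is
 * why the failure branch above decrements both counters again.
 */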


/*
 *----------------------------------------------------------------------
 *
 * AppendConn --
 *
 *      Append connection data to a Tcl_DString.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static void
AppendConn(Tcl_DString *dsPtr, const Conn *connPtr, const char *state, bool checkforproxy)
{
    Ns_Time now, diff;

    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(state != NULL);

    /*
     * An annoying race condition can be lethal here.
     *
     * In the state "waiting", we never have a connPtr->reqPtr; therefore, we
     * can't even determine the peer address, nor the request method or the
     * request URL. Furthermore, there is no way to honor the "checkforproxy"
     * flag.
     */
    if (connPtr != NULL) {
        Tcl_DStringStartSublist(dsPtr);

        if (connPtr->reqPtr != NULL) {
            const char *p;

            Tcl_DStringAppendElement(dsPtr, connPtr->idstr);

            if (checkforproxy) {
                /*
                 * The user explicitly requested "checkforproxy", so return
                 * only the proxy value.
                 */
                p = Ns_ConnForwardedPeerAddr((const Ns_Conn *)connPtr);
            } else {
                p = Ns_ConnConfiguredPeerAddr((const Ns_Conn *)connPtr);
            }
            Tcl_DStringAppendElement(dsPtr, p);
        } else {
            /*
             * connPtr->reqPtr == NULL. Having no connPtr->reqPtr is normal
             * for "queued" requests but not for "running" requests. Report
             * this in the error log.
             */
            Tcl_DStringAppendElement(dsPtr, "unknown");
            if (*state == 'r') {
                Ns_Log(Notice,
                       "AppendConn state '%s': request not available, can't determine peer address",
                       state);
            }
        }

        Tcl_DStringAppendElement(dsPtr, state);

        if (connPtr->request.line != NULL) {
            Tcl_DStringAppendElement(dsPtr, (connPtr->request.method != NULL) ? connPtr->request.method : "?");
            Tcl_DStringAppendElement(dsPtr, (connPtr->request.url != NULL) ? connPtr->request.url : "?");
        } else {
            /* Ns_Log(Notice, "AppendConn: no request in state %s; ignore conn in output", state); */
            Tcl_DStringAppendElement(dsPtr, "unknown");
            Tcl_DStringAppendElement(dsPtr, "unknown");
        }
        Ns_GetTime(&now);
        Ns_DiffTime(&now, &connPtr->requestQueueTime, &diff);
        Ns_DStringNAppend(dsPtr, " ", 1);
        Ns_DStringAppendTime(dsPtr, &diff);
        Ns_DStringPrintf(dsPtr, " %" PRIuz, connPtr->nContentSent);

        Tcl_DStringEndSublist(dsPtr);
    } else {
        Tcl_DStringAppendElement(dsPtr, NS_EMPTY_STRING);
    }
}
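
/*
 * Each sublist produced above has the form
 *
 *     {id peer state method url elapsed bytes-sent}
 *
 * e.g. (illustrative values):  {cns37 192.0.2.1 running GET /index.html 0.012 1024}
 *
 * where "elapsed" is the time since the request was queued.
 */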


/*
 *----------------------------------------------------------------------
 *
 * AppendConnList --
 *
 *      Append a list of connection data to a Tcl_DString.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static void
AppendConnList(Tcl_DString *dsPtr, const Conn *firstPtr, const char *state, bool checkforproxy)
{
    NS_NONNULL_ASSERT(dsPtr != NULL);
    NS_NONNULL_ASSERT(state != NULL);

    while (firstPtr != NULL) {
        AppendConn(dsPtr, firstPtr, state, checkforproxy);
        firstPtr = firstPtr->nextPtr;
    }
}

/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 4
 * fill-column: 78
 * indent-tabs-mode: nil
 * End:
 */