File: out/../deps/cares/src/lib/ares_process.c
Warning: line 1328, column 21: The left operand of '==' is a garbage value
1 | ||||
2 | /* Copyright 1998 by the Massachusetts Institute of Technology. | |||
3 | * Copyright (C) 2004-2017 by Daniel Stenberg | |||
4 | * | |||
5 | * Permission to use, copy, modify, and distribute this | |||
6 | * software and its documentation for any purpose and without | |||
7 | * fee is hereby granted, provided that the above copyright | |||
8 | * notice appear in all copies and that both that copyright | |||
9 | * notice and this permission notice appear in supporting | |||
10 | * documentation, and that the name of M.I.T. not be used in | |||
11 | * advertising or publicity pertaining to distribution of the | |||
12 | * software without specific, written prior permission. | |||
13 | * M.I.T. makes no representations about the suitability of | |||
14 | * this software for any purpose. It is provided "as is" | |||
15 | * without express or implied warranty. | |||
16 | */ | |||
17 | ||||
18 | #include "ares_setup.h" | |||
19 | ||||
20 | #ifdef HAVE_SYS_UIO_H
21 | # include <sys/uio.h>
22 | #endif
23 | #ifdef HAVE_NETINET_IN_H
24 | # include <netinet/in.h>
25 | #endif
26 | #ifdef HAVE_NETINET_TCP_H
27 | # include <netinet/tcp.h>
28 | #endif
29 | #ifdef HAVE_NETDB_H
30 | # include <netdb.h>
31 | #endif
32 | #ifdef HAVE_ARPA_INET_H
33 | # include <arpa/inet.h>
34 | #endif
35 |
36 | #include "ares_nameser.h"
37 |
38 | #ifdef HAVE_STRINGS_H
39 | # include <strings.h>
40 | #endif
41 | #ifdef HAVE_SYS_IOCTL_H
42 | # include <sys/ioctl.h>
43 | #endif
44 | #ifdef NETWARE | |||
45 | # include <sys/filio.h> | |||
46 | #endif | |||
47 | ||||
48 | #include <assert.h> | |||
49 | #include <fcntl.h> | |||
50 | #include <limits.h> | |||
51 | ||||
52 | #include "ares.h" | |||
53 | #include "ares_dns.h" | |||
54 | #include "ares_nowarn.h" | |||
55 | #include "ares_private.h" | |||
56 | ||||
57 | ||||
58 | static int try_again(int errnum); | |||
59 | static void write_tcp_data(ares_channel channel, fd_set *write_fds, | |||
60 | ares_socket_t write_fd, struct timeval *now); | |||
61 | static void read_tcp_data(ares_channel channel, fd_set *read_fds, | |||
62 | ares_socket_t read_fd, struct timeval *now); | |||
63 | static void read_udp_packets(ares_channel channel, fd_set *read_fds, | |||
64 | ares_socket_t read_fd, struct timeval *now); | |||
65 | static void advance_tcp_send_queue(ares_channel channel, int whichserver, | |||
66 | ares_ssize_t num_bytes); | |||
67 | static void process_timeouts(ares_channel channel, struct timeval *now); | |||
68 | static void process_broken_connections(ares_channel channel, | |||
69 | struct timeval *now); | |||
70 | static void process_answer(ares_channel channel, unsigned char *abuf, | |||
71 | int alen, int whichserver, int tcp, | |||
72 | struct timeval *now); | |||
73 | static void handle_error(ares_channel channel, int whichserver, | |||
74 | struct timeval *now); | |||
75 | static void skip_server(ares_channel channel, struct query *query, | |||
76 | int whichserver); | |||
77 | static void next_server(ares_channel channel, struct query *query, | |||
78 | struct timeval *now); | |||
79 | static int open_tcp_socket(ares_channel channel, struct server_state *server); | |||
80 | static int open_udp_socket(ares_channel channel, struct server_state *server); | |||
81 | static int same_questions(const unsigned char *qbuf, int qlen, | |||
82 | const unsigned char *abuf, int alen); | |||
83 | static int same_address(struct sockaddr *sa, struct ares_addr *aa); | |||
84 | static int has_opt_rr(const unsigned char *abuf, int alen); | |||
85 | static void end_query(ares_channel channel, struct query *query, int status, | |||
86 | unsigned char *abuf, int alen); | |||
87 | ||||
88 | /* return true if now is exactly check time or later */ | |||
89 | int ares__timedout(struct timeval *now, | |||
90 | struct timeval *check) | |||
91 | { | |||
92 | long secs = (now->tv_sec - check->tv_sec); | |||
93 | ||||
94 | if(secs > 0) | |||
95 | return 1; /* yes, timed out */ | |||
96 | if(secs < 0) | |||
97 | return 0; /* nope, not timed out */ | |||
98 | ||||
99 | /* if the full seconds were identical, check the sub second parts */ | |||
100 | return (now->tv_usec - check->tv_usec >= 0); | |||
101 | } | |||
102 | ||||
103 | /* add the specific number of milliseconds to the time in the first argument */ | |||
104 | static void timeadd(struct timeval *now, int millisecs) | |||
105 | { | |||
106 | now->tv_sec += millisecs/1000; | |||
107 | now->tv_usec += (millisecs%1000)*1000; | |||
108 | ||||
109 | if(now->tv_usec >= 1000000) { | |||
110 | ++(now->tv_sec); | |||
111 | now->tv_usec -= 1000000; | |||
112 | } | |||
113 | } | |||
114 | ||||
115 | /* | |||
116 | * generic process function | |||
117 | */ | |||
118 | static void processfds(ares_channel channel, | |||
119 | fd_set *read_fds, ares_socket_t read_fd, | |||
120 | fd_set *write_fds, ares_socket_t write_fd) | |||
121 | { | |||
122 | struct timeval now = ares__tvnow(); | |||
123 | ||||
124 | write_tcp_data(channel, write_fds, write_fd, &now); | |||
125 | read_tcp_data(channel, read_fds, read_fd, &now); | |||
126 | read_udp_packets(channel, read_fds, read_fd, &now); | |||
127 | process_timeouts(channel, &now); | |||
128 | process_broken_connections(channel, &now); | |||
129 | } | |||
130 | ||||
131 | /* Something interesting happened on the wire, or there was a timeout. | |||
132 | * See what's up and respond accordingly. | |||
133 | */ | |||
134 | void ares_process(ares_channel channel, fd_set *read_fds, fd_set *write_fds) | |||
135 | { | |||
136 | processfds(channel, read_fds, ARES_SOCKET_BAD, write_fds, ARES_SOCKET_BAD);
137 | } | |||
138 | ||||
139 | /* Something interesting happened on the wire, or there was a timeout. | |||
140 | * See what's up and respond accordingly. | |||
141 | */ | |||
142 | void ares_process_fd(ares_channel channel, | |||
143 | ares_socket_t read_fd, /* use ARES_SOCKET_BAD or valid | |||
144 | file descriptors */ | |||
145 | ares_socket_t write_fd) | |||
146 | { | |||
147 | processfds(channel, NULL, read_fd, NULL, write_fd);
148 | } | |||
149 | ||||
150 | ||||
151 | /* Return 1 if the specified error number describes a readiness error, or 0 | |||
152 | * otherwise. This is mostly for HP-UX, which could return EAGAIN or | |||
153 | * EWOULDBLOCK. See this man page | |||
154 | * | |||
155 | * http://devrsrc1.external.hp.com/STKS/cgi-bin/man2html? | |||
156 | * manpage=/usr/share/man/man2.Z/send.2 | |||
157 | */ | |||
158 | static int try_again(int errnum) | |||
159 | { | |||
160 | #if !defined EWOULDBLOCK && !defined EAGAIN
161 | #error "Neither EWOULDBLOCK nor EAGAIN defined" | |||
162 | #endif | |||
163 | switch (errnum) | |||
164 | { | |||
165 | #ifdef EWOULDBLOCK
166 | case EWOULDBLOCK:
167 | return 1;
168 | #endif
169 | #if defined EAGAIN && EAGAIN != EWOULDBLOCK
170 | case EAGAIN:
171 | return 1;
172 | #endif
173 | } | |||
174 | return 0; | |||
175 | } | |||
176 | ||||
177 | static ares_ssize_t socket_writev(ares_channel channel, ares_socket_t s, const struct iovec * vec, int len) | |||
178 | { | |||
179 | if (channel->sock_funcs) | |||
180 | return channel->sock_funcs->asendv(s, vec, len, channel->sock_func_cb_data); | |||
181 | ||||
182 | return writev(s, vec, len); | |||
183 | } | |||
184 | ||||
185 | static ares_ssize_t socket_write(ares_channel channel, ares_socket_t s, const void * data, size_t len) | |||
186 | { | |||
187 | if (channel->sock_funcs) | |||
188 | { | |||
189 | struct iovec vec; | |||
190 | vec.iov_base = (void*)data; | |||
191 | vec.iov_len = len; | |||
192 | return channel->sock_funcs->asendv(s, &vec, 1, channel->sock_func_cb_data); | |||
193 | } | |||
194 | return swrite(s, data, len);
195 | } | |||
196 | ||||
197 | /* If any TCP sockets select true for writing, write out queued data | |||
198 | * we have for them. | |||
199 | */ | |||
200 | static void write_tcp_data(ares_channel channel, | |||
201 | fd_set *write_fds, | |||
202 | ares_socket_t write_fd, | |||
203 | struct timeval *now) | |||
204 | { | |||
205 | struct server_state *server; | |||
206 | struct send_request *sendreq; | |||
207 | struct iovec *vec; | |||
208 | int i; | |||
209 | ares_ssize_t scount; | |||
210 | ares_ssize_t wcount; | |||
211 | size_t n; | |||
212 | ||||
213 | if(!write_fds && (write_fd == ARES_SOCKET_BAD))
214 | /* no possible action */ | |||
215 | return; | |||
216 | ||||
217 | for (i = 0; i < channel->nservers; i++) | |||
218 | { | |||
219 | /* Make sure server has data to send and is selected in write_fds or | |||
220 | write_fd. */ | |||
221 | server = &channel->servers[i]; | |||
222 | if (!server->qhead || server->tcp_socket == ARES_SOCKET_BAD ||
223 | server->is_broken) | |||
224 | continue; | |||
225 | ||||
226 | if(write_fds) { | |||
228 | if(!FD_ISSET(server->tcp_socket, write_fds))
228 | continue; | |||
229 | } | |||
230 | else { | |||
231 | if(server->tcp_socket != write_fd) | |||
232 | continue; | |||
233 | } | |||
234 | ||||
235 | if(write_fds) | |||
236 | /* If there's an error and we close this socket, then open | |||
237 | * another with the same fd to talk to another server, then we | |||
238 | * don't want to think that it was the new socket that was | |||
239 | * ready. This is not disastrous, but is likely to result in | |||
240 | * extra system calls and confusion. */ | |||
241 | FD_CLR(server->tcp_socket, write_fds);
242 | ||||
243 | /* Count the number of send queue items. */ | |||
244 | n = 0; | |||
245 | for (sendreq = server->qhead; sendreq; sendreq = sendreq->next) | |||
246 | n++; | |||
247 | ||||
248 | /* Allocate iovecs so we can send all our data at once. */ | |||
249 | vec = ares_malloc(n * sizeof(struct iovec)); | |||
250 | if (vec) | |||
251 | { | |||
252 | /* Fill in the iovecs and send. */ | |||
253 | n = 0; | |||
254 | for (sendreq = server->qhead; sendreq; sendreq = sendreq->next) | |||
255 | { | |||
256 | vec[n].iov_base = (char *) sendreq->data; | |||
257 | vec[n].iov_len = sendreq->len; | |||
258 | n++; | |||
259 | } | |||
260 | wcount = socket_writev(channel, server->tcp_socket, vec, (int)n); | |||
261 | ares_free(vec); | |||
262 | if (wcount < 0) | |||
263 | { | |||
264 | if (!try_again(SOCKERRNO))
265 | handle_error(channel, i, now); | |||
266 | continue; | |||
267 | } | |||
268 | ||||
269 | /* Advance the send queue by as many bytes as we sent. */ | |||
270 | advance_tcp_send_queue(channel, i, wcount); | |||
271 | } | |||
272 | else | |||
273 | { | |||
274 | /* Can't allocate iovecs; just send the first request. */ | |||
275 | sendreq = server->qhead; | |||
276 | ||||
277 | scount = socket_write(channel, server->tcp_socket, sendreq->data, sendreq->len); | |||
278 | if (scount < 0) | |||
279 | { | |||
280 | if (!try_again(SOCKERRNO))
281 | handle_error(channel, i, now); | |||
282 | continue; | |||
283 | } | |||
284 | ||||
285 | /* Advance the send queue by as many bytes as we sent. */ | |||
286 | advance_tcp_send_queue(channel, i, scount); | |||
287 | } | |||
288 | } | |||
289 | } | |||
290 | ||||
291 | /* Consume the given number of bytes from the head of the TCP send queue. */ | |||
292 | static void advance_tcp_send_queue(ares_channel channel, int whichserver, | |||
293 | ares_ssize_t num_bytes) | |||
294 | { | |||
295 | struct send_request *sendreq; | |||
296 | struct server_state *server = &channel->servers[whichserver]; | |||
297 | while (num_bytes > 0) { | |||
298 | sendreq = server->qhead; | |||
299 | if ((size_t)num_bytes >= sendreq->len) { | |||
300 | num_bytes -= sendreq->len; | |||
301 | server->qhead = sendreq->next; | |||
302 | if (sendreq->data_storage) | |||
303 | ares_free(sendreq->data_storage); | |||
304 | ares_free(sendreq); | |||
305 | if (server->qhead == NULL) {
306 | SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 0);
307 | server->qtail = NULL;
308 | ||||
309 | /* qhead is NULL so we cannot continue this loop */ | |||
310 | break; | |||
311 | } | |||
312 | } | |||
313 | else { | |||
314 | sendreq->data += num_bytes; | |||
315 | sendreq->len -= num_bytes; | |||
316 | num_bytes = 0; | |||
317 | } | |||
318 | } | |||
319 | } | |||
320 | ||||
321 | static ares_ssize_t socket_recvfrom(ares_channel channel, | |||
322 | ares_socket_t s, | |||
323 | void * data, | |||
324 | size_t data_len, | |||
325 | int flags, | |||
326 | struct sockaddr *from, | |||
327 | ares_socklen_t *from_len) | |||
328 | { | |||
329 | if (channel->sock_funcs)
330 | return channel->sock_funcs->arecvfrom(s, data, data_len, | |||
331 | flags, from, from_len, | |||
332 | channel->sock_func_cb_data); | |||
333 | ||||
334 | #ifdef HAVE_RECVFROM
335 | return recvfrom(s, data, data_len, flags, from, from_len); | |||
336 | #else | |||
337 | return sread(s, data, data_len);
338 | #endif | |||
339 | } | |||
340 | ||||
341 | static ares_ssize_t socket_recv(ares_channel channel, | |||
342 | ares_socket_t s, | |||
343 | void * data, | |||
344 | size_t data_len) | |||
345 | { | |||
346 | if (channel->sock_funcs) | |||
347 | return channel->sock_funcs->arecvfrom(s, data, data_len, 0, 0, 0, | |||
348 | channel->sock_func_cb_data); | |||
349 | ||||
350 | return sread(s, data, data_len);
351 | } | |||
352 | ||||
353 | /* If any TCP socket selects true for reading, read some data, | |||
354 | * allocate a buffer if we finish reading the length word, and process | |||
355 | * a packet if we finish reading one. | |||
356 | */ | |||
357 | static void read_tcp_data(ares_channel channel, fd_set *read_fds, | |||
358 | ares_socket_t read_fd, struct timeval *now) | |||
359 | { | |||
360 | struct server_state *server; | |||
361 | int i; | |||
362 | ares_ssize_t count; | |||
363 | ||||
364 | if(!read_fds && (read_fd == ARES_SOCKET_BAD))
365 | /* no possible action */ | |||
366 | return; | |||
367 | ||||
368 | for (i = 0; i < channel->nservers; i++) | |||
369 | { | |||
370 | /* Make sure the server has a socket and is selected in read_fds. */ | |||
371 | server = &channel->servers[i]; | |||
372 | if (server->tcp_socket == ARES_SOCKET_BAD || server->is_broken)
373 | continue; | |||
374 | ||||
375 | if(read_fds) { | |||
376 | if(!FD_ISSET(server->tcp_socket, read_fds))
377 | continue; | |||
378 | } | |||
379 | else { | |||
380 | if(server->tcp_socket != read_fd) | |||
381 | continue; | |||
382 | } | |||
383 | ||||
384 | if(read_fds) | |||
385 | /* If there's an error and we close this socket, then open another | |||
386 | * with the same fd to talk to another server, then we don't want to | |||
387 | * think that it was the new socket that was ready. This is not | |||
388 | * disastrous, but is likely to result in extra system calls and | |||
389 | * confusion. */ | |||
390 | FD_CLR(server->tcp_socket, read_fds);
391 | ||||
392 | if (server->tcp_lenbuf_pos != 2) | |||
393 | { | |||
394 | /* We haven't yet read a length word, so read that (or | |||
395 | * what's left to read of it). | |||
396 | */ | |||
397 | count = socket_recv(channel, server->tcp_socket, | |||
398 | server->tcp_lenbuf + server->tcp_lenbuf_pos, | |||
399 | 2 - server->tcp_lenbuf_pos); | |||
400 | if (count <= 0) | |||
401 | { | |||
402 | if (!(count == -1 && try_again(SOCKERRNO)))
403 | handle_error(channel, i, now); | |||
404 | continue; | |||
405 | } | |||
406 | ||||
407 | server->tcp_lenbuf_pos += (int)count; | |||
408 | if (server->tcp_lenbuf_pos == 2) | |||
409 | { | |||
410 | /* We finished reading the length word. Decode the | |||
411 | * length and allocate a buffer for the data. | |||
412 | */ | |||
413 | server->tcp_length = server->tcp_lenbuf[0] << 8 | |||
414 | | server->tcp_lenbuf[1]; | |||
415 | server->tcp_buffer = ares_malloc(server->tcp_length); | |||
416 | if (!server->tcp_buffer) { | |||
417 | handle_error(channel, i, now); | |||
418 | return; /* bail out on malloc failure. TODO: make this | |||
419 | function return error codes */ | |||
420 | } | |||
421 | server->tcp_buffer_pos = 0; | |||
422 | } | |||
423 | } | |||
424 | else | |||
425 | { | |||
426 | /* Read data into the allocated buffer. */ | |||
427 | count = socket_recv(channel, server->tcp_socket, | |||
428 | server->tcp_buffer + server->tcp_buffer_pos, | |||
429 | server->tcp_length - server->tcp_buffer_pos); | |||
430 | if (count <= 0) | |||
431 | { | |||
432 | if (!(count == -1 && try_again(SOCKERRNO)))
433 | handle_error(channel, i, now); | |||
434 | continue; | |||
435 | } | |||
436 | ||||
437 | server->tcp_buffer_pos += (int)count; | |||
438 | if (server->tcp_buffer_pos == server->tcp_length) | |||
439 | { | |||
440 | /* We finished reading this answer; process it and | |||
441 | * prepare to read another length word. | |||
442 | */ | |||
443 | process_answer(channel, server->tcp_buffer, server->tcp_length, | |||
444 | i, 1, now); | |||
445 | ares_free(server->tcp_buffer); | |||
446 | server->tcp_buffer = NULL;
447 | server->tcp_lenbuf_pos = 0; | |||
448 | server->tcp_buffer_pos = 0; | |||
449 | } | |||
450 | } | |||
451 | } | |||
452 | } | |||
453 | ||||
454 | /* If any UDP sockets select true for reading, process them. */ | |||
455 | static void read_udp_packets(ares_channel channel, fd_set *read_fds, | |||
456 | ares_socket_t read_fd, struct timeval *now) | |||
457 | { | |||
458 | struct server_state *server; | |||
459 | int i; | |||
460 | ares_ssize_t count; | |||
461 | unsigned char buf[MAXENDSSZ + 1];
462 | #ifdef HAVE_RECVFROM
463 | ares_socklen_t fromlen; | |||
464 | union { | |||
465 | struct sockaddr sa; | |||
466 | struct sockaddr_in sa4; | |||
467 | struct sockaddr_in6 sa6; | |||
468 | } from; | |||
469 | #endif | |||
470 | ||||
471 | if(!read_fds && (read_fd == ARES_SOCKET_BAD))
472 | /* no possible action */ | |||
473 | return; | |||
474 | ||||
475 | for (i = 0; i < channel->nservers; i++) | |||
476 | { | |||
477 | /* Make sure the server has a socket and is selected in read_fds. */ | |||
478 | server = &channel->servers[i]; | |||
479 | ||||
480 | if (server->udp_socket == ARES_SOCKET_BAD || server->is_broken)
481 | continue;
482 | ||||
483 | if(read_fds) {
484 | if(!FD_ISSET(server->udp_socket, read_fds))
485 | continue; | |||
486 | } | |||
487 | else { | |||
488 | if(server->udp_socket != read_fd) | |||
489 | continue; | |||
490 | } | |||
491 | ||||
492 | if(read_fds)
493 | /* If there's an error and we close this socket, then open | |||
494 | * another with the same fd to talk to another server, then we | |||
495 | * don't want to think that it was the new socket that was | |||
496 | * ready. This is not disastrous, but is likely to result in | |||
497 | * extra system calls and confusion. */ | |||
498 | FD_CLR(server->udp_socket, read_fds);
499 | ||||
500 | /* To reduce event loop overhead, read and process as many | |||
501 | * packets as we can. */ | |||
502 | do { | |||
503 | if (server->udp_socket == ARES_SOCKET_BAD)
504 | count = 0; | |||
505 | ||||
506 | else { | |||
507 | if (server->addr.family == AF_INET)
508 | fromlen = sizeof(from.sa4); | |||
509 | else | |||
510 | fromlen = sizeof(from.sa6); | |||
511 | count = socket_recvfrom(channel, server->udp_socket, (void *)buf, | |||
512 | sizeof(buf), 0, &from.sa, &fromlen); | |||
513 | } | |||
514 | ||||
515 | if (count == -1 && try_again(SOCKERRNO))
516 | continue; | |||
517 | else if (count <= 0) | |||
518 | handle_error(channel, i, now); | |||
519 | #ifdef HAVE_RECVFROM
520 | else if (!same_address(&from.sa, &server->addr)) | |||
521 | /* The address the response comes from does not match the address we | |||
522 | * sent the request to. Someone may be attempting to perform a cache | |||
523 | * poisoning attack. */ | |||
524 | break; | |||
525 | #endif | |||
526 | else | |||
527 | process_answer(channel, buf, (int)count, i, 0, now); | |||
528 | } while (count > 0); | |||
529 | } | |||
530 | } | |||
531 | ||||
532 | /* If any queries have timed out, note the timeout and move them on. */ | |||
533 | static void process_timeouts(ares_channel channel, struct timeval *now) | |||
534 | { | |||
535 | time_t t; /* the time of the timeouts we're processing */ | |||
536 | struct query *query; | |||
537 | struct list_node* list_head; | |||
538 | struct list_node* list_node; | |||
539 | ||||
540 | /* Process all the timeouts that have fired since the last time we processed | |||
541 | * timeouts. If things are going well, then we'll have hundreds/thousands of | |||
542 | * queries that fall into future buckets, and only a handful of requests | |||
543 | * that fall into the "now" bucket, so this should be quite quick. | |||
544 | */ | |||
545 | for (t = channel->last_timeout_processed; t <= now->tv_sec; t++) | |||
546 | { | |||
547 | list_head = &(channel->queries_by_timeout[t % ARES_TIMEOUT_TABLE_SIZE]);
548 | for (list_node = list_head->next; list_node != list_head; ) | |||
549 | { | |||
550 | query = list_node->data; | |||
551 | list_node = list_node->next; /* in case the query gets deleted */ | |||
552 | if (query->timeout.tv_sec && ares__timedout(now, &query->timeout)) | |||
553 | { | |||
554 | query->error_status = ARES_ETIMEOUT;
555 | ++query->timeouts; | |||
556 | next_server(channel, query, now); | |||
557 | } | |||
558 | } | |||
559 | } | |||
560 | channel->last_timeout_processed = now->tv_sec; | |||
561 | } | |||
562 | ||||
563 | /* Handle an answer from a server. */ | |||
564 | static void process_answer(ares_channel channel, unsigned char *abuf, | |||
565 | int alen, int whichserver, int tcp, | |||
566 | struct timeval *now) | |||
567 | { | |||
568 | int tc, rcode, packetsz; | |||
569 | unsigned short id; | |||
570 | struct query *query; | |||
571 | struct list_node* list_head; | |||
572 | struct list_node* list_node; | |||
573 | ||||
574 | /* If there's no room in the answer for a header, we can't do much | |||
575 | * with it. */ | |||
576 | if (alen < HFIXEDSZ)
577 | return; | |||
578 | ||||
579 | /* Grab the query ID, truncate bit, and response code from the packet. */ | |||
580 | id = DNS_HEADER_QID(abuf);
581 | tc = DNS_HEADER_TC(abuf);
582 | rcode = DNS_HEADER_RCODE(abuf);
583 | ||||
584 | /* Find the query corresponding to this packet. The queries are | |||
585 | * hashed/bucketed by query id, so this lookup should be quick. Note that | |||
586 | * both the query id and the questions must be the same; when the query id | |||
587 | * wraps around we can have multiple outstanding queries with the same query | |||
588 | * id, so we need to check both the id and question. | |||
589 | */ | |||
590 | query = NULL;
591 | list_head = &(channel->queries_by_qid[id % ARES_QID_TABLE_SIZE]);
592 | for (list_node = list_head->next; list_node != list_head; | |||
593 | list_node = list_node->next) | |||
594 | { | |||
595 | struct query *q = list_node->data; | |||
596 | if ((q->qid == id) && same_questions(q->qbuf, q->qlen, abuf, alen)) | |||
597 | { | |||
598 | query = q; | |||
599 | break; | |||
600 | } | |||
601 | } | |||
602 | if (!query) | |||
603 | return; | |||
604 | ||||
605 | packetsz = PACKETSZ;
606 | /* If we use EDNS and server answers with FORMERR without an OPT RR, the protocol | |||
607 | * extension is not understood by the responder. We must retry the query | |||
608 | * without EDNS enabled. */ | |||
609 | if (channel->flags & ARES_FLAG_EDNS)
610 | { | |||
611 | packetsz = channel->ednspsz; | |||
612 | if (rcode == FORMERR && has_opt_rr(abuf, alen) != 1)
613 | {
614 | int qlen = (query->tcplen - 2) - EDNSFIXEDSZ;
615 | channel->flags ^= ARES_FLAG_EDNS;
616 | query->tcplen -= EDNSFIXEDSZ;
617 | query->qlen -= EDNSFIXEDSZ;
618 | query->tcpbuf[0] = (unsigned char)((qlen >> 8) & 0xff);
619 | query->tcpbuf[1] = (unsigned char)(qlen & 0xff);
620 | DNS_HEADER_SET_ARCOUNT(query->tcpbuf + 2, 0);
621 | query->tcpbuf = ares_realloc(query->tcpbuf, query->tcplen); | |||
622 | query->qbuf = query->tcpbuf + 2; | |||
623 | ares__send_query(channel, query, now); | |||
624 | return; | |||
625 | } | |||
626 | } | |||
627 | ||||
628 | /* If we got a truncated UDP packet and are not ignoring truncation, | |||
629 | * don't accept the packet, and switch the query to TCP if we hadn't | |||
630 | * done so already. | |||
631 | */ | |||
632 | if ((tc || alen > packetsz) && !tcp && !(channel->flags & ARES_FLAG_IGNTC))
633 | { | |||
634 | if (!query->using_tcp) | |||
635 | { | |||
636 | query->using_tcp = 1; | |||
637 | ares__send_query(channel, query, now); | |||
638 | } | |||
639 | return; | |||
640 | } | |||
641 | ||||
642 | /* Limit alen to PACKETSZ if we aren't using TCP (only relevant if we | |||
643 | * are ignoring truncation).
644 | */ | |||
645 | if (alen > packetsz && !tcp) | |||
646 | alen = packetsz; | |||
647 | ||||
648 | /* If we aren't passing through all error packets, discard packets | |||
649 | * with SERVFAIL, NOTIMP, or REFUSED response codes. | |||
650 | */ | |||
651 | if (!(channel->flags & ARES_FLAG_NOCHECKRESP))
652 | { | |||
653 | if (rcode == SERVFAIL || rcode == NOTIMP || rcode == REFUSED)
654 | { | |||
655 | skip_server(channel, query, whichserver); | |||
656 | if (query->server == whichserver) | |||
657 | next_server(channel, query, now); | |||
658 | return; | |||
659 | } | |||
660 | } | |||
661 | ||||
662 | end_query(channel, query, ARES_SUCCESS, abuf, alen);
663 | } | |||
664 | ||||
665 | /* Close all the connections that are no longer usable. */ | |||
666 | static void process_broken_connections(ares_channel channel, | |||
667 | struct timeval *now) | |||
668 | { | |||
669 | int i; | |||
670 | for (i = 0; i < channel->nservers; i++) | |||
671 | { | |||
672 | struct server_state *server = &channel->servers[i]; | |||
673 | if (server->is_broken) | |||
674 | { | |||
675 | handle_error(channel, i, now); | |||
676 | } | |||
677 | } | |||
678 | } | |||
679 | ||||
680 | /* Swap the contents of two lists */ | |||
681 | static void swap_lists(struct list_node* head_a, | |||
682 | struct list_node* head_b) | |||
683 | { | |||
684 | int is_a_empty = ares__is_list_empty(head_a); | |||
685 | int is_b_empty = ares__is_list_empty(head_b); | |||
686 | struct list_node old_a = *head_a; | |||
687 | struct list_node old_b = *head_b; | |||
688 | ||||
689 | if (is_a_empty) { | |||
690 | ares__init_list_head(head_b); | |||
691 | } else { | |||
692 | *head_b = old_a; | |||
693 | old_a.next->prev = head_b; | |||
694 | old_a.prev->next = head_b; | |||
695 | } | |||
696 | if (is_b_empty) { | |||
697 | ares__init_list_head(head_a); | |||
698 | } else { | |||
699 | *head_a = old_b; | |||
700 | old_b.next->prev = head_a; | |||
701 | old_b.prev->next = head_a; | |||
702 | } | |||
703 | } | |||
704 | ||||
705 | static void handle_error(ares_channel channel, int whichserver, | |||
706 | struct timeval *now) | |||
707 | { | |||
708 | struct server_state *server; | |||
709 | struct query *query; | |||
710 | struct list_node list_head; | |||
711 | struct list_node* list_node; | |||
712 | ||||
713 | server = &channel->servers[whichserver]; | |||
714 | ||||
715 | /* Reset communications with this server. */ | |||
716 | ares__close_sockets(channel, server); | |||
717 | ||||
718 | /* Tell all queries talking to this server to move on and not try this | |||
719 | * server again. We steal the current list of queries that were in-flight to | |||
720 | * this server, since when we call next_server this can cause the queries to | |||
721 | * be re-sent to this server, which will re-insert these queries in that | |||
722 | * same server->queries_to_server list. | |||
723 | */ | |||
724 | ares__init_list_head(&list_head); | |||
725 | swap_lists(&list_head, &(server->queries_to_server)); | |||
726 | for (list_node = list_head.next; list_node != &list_head; ) | |||
727 | { | |||
728 | query = list_node->data; | |||
729 | list_node = list_node->next; /* in case the query gets deleted */ | |||
730 | assert(query->server == whichserver);
731 | skip_server(channel, query, whichserver); | |||
732 | next_server(channel, query, now); | |||
733 | } | |||
734 | /* Each query should have removed itself from our temporary list as | |||
735 | * it re-sent itself or finished up... | |||
736 | */ | |||
737 | assert(ares__is_list_empty(&list_head));
738 | } | |||
739 | ||||
740 | static void skip_server(ares_channel channel, struct query *query, | |||
741 | int whichserver) | |||
742 | { | |||
743 | /* The given server gave us problems with this query, so if we have the | |||
744 | * luxury of using other servers, then let's skip the potentially broken | |||
745 | * server and just use the others. If we only have one server and we need to | |||
746 | * retry then we should just go ahead and re-use that server, since it's our | |||
747 | * only hope; perhaps we just got unlucky, and retrying will work (eg, the | |||
748 | * server timed out our TCP connection just as we were sending another | |||
749 | * request). | |||
750 | */ | |||
751 | if (channel->nservers > 1) | |||
752 | { | |||
753 | query->server_info[whichserver].skip_server = 1; | |||
754 | } | |||
755 | } | |||
756 | ||||
757 | static void next_server(ares_channel channel, struct query *query, | |||
758 | struct timeval *now) | |||
759 | { | |||
760 | /* We need to try each server channel->tries times. We have channel->nservers | |||
761 | * servers to try. In total, we need to do channel->nservers * channel->tries | |||
762 | * attempts. Use query->try to remember how many times we already attempted | |||
763 | * this query. Use modular arithmetic to find the next server to try. */ | |||
764 | while (++(query->try_count) < (channel->nservers * channel->tries)) | |||
765 | { | |||
766 | struct server_state *server; | |||
767 | ||||
768 | /* Move on to the next server. */ | |||
769 | query->server = (query->server + 1) % channel->nservers; | |||
770 | server = &channel->servers[query->server]; | |||
771 | ||||
772 | /* We don't want to use this server if (1) we decided this connection is | |||
773 | * broken, and thus about to be closed, (2) we've decided to skip this | |||
774 | * server because of earlier errors we encountered, or (3) we already | |||
775 | * sent this query over this exact connection. | |||
776 | */ | |||
777 | if (!server->is_broken && | |||
778 | !query->server_info[query->server].skip_server && | |||
779 | !(query->using_tcp && | |||
780 | (query->server_info[query->server].tcp_connection_generation == | |||
781 | server->tcp_connection_generation))) | |||
782 | { | |||
783 | ares__send_query(channel, query, now); | |||
784 | return; | |||
785 | } | |||
786 | ||||
787 | /* You might think that with TCP we only need one try. However, even | |||
788 | * when using TCP, servers can time-out our connection just as we're | |||
789 | * sending a request, or close our connection because they die, or never | |||
790 | * send us a reply because they get wedged or tickle a bug that drops | |||
791 | * our request. | |||
792 | */ | |||
793 | } | |||
794 | ||||
795 | /* If we are here, all attempts to perform query failed. */ | |||
796 | end_query(channel, query, query->error_status, NULL, 0);
797 | } | |||
798 | ||||
799 | void ares__send_query(ares_channel channel, struct query *query, | |||
800 | struct timeval *now) | |||
801 | { | |||
802 | struct send_request *sendreq; | |||
803 | struct server_state *server; | |||
804 | int timeplus; | |||
805 | ||||
806 | server = &channel->servers[query->server]; | |||
807 | if (query->using_tcp) | |||
808 | { | |||
809 | /* Make sure the TCP socket for this server is set up and queue | |||
810 | * a send request. | |||
811 | */ | |||
812 | if (server->tcp_socket == ARES_SOCKET_BAD)
813 | { | |||
814 | if (open_tcp_socket(channel, server) == -1) | |||
815 | { | |||
816 | skip_server(channel, query, query->server); | |||
817 | next_server(channel, query, now); | |||
818 | return; | |||
819 | } | |||
820 | } | |||
821 | sendreq = ares_malloc(sizeof(struct send_request)); | |||
822 | if (!sendreq) | |||
823 | { | |||
824 | end_query(channel, query, ARES_ENOMEM, NULL, 0);
825 | return; | |||
826 | } | |||
827 | memset(sendreq, 0, sizeof(struct send_request)); | |||
828 | /* To make the common case fast, we avoid copies by using the query's | |||
829 | * tcpbuf for as long as the query is alive. In the rare case where the | |||
830 | * query ends while it's queued for transmission, then we give the | |||
831 | * sendreq its own copy of the request packet and put it in | |||
832 | * sendreq->data_storage. | |||
833 | */ | |||
834 | sendreq->data_storage = NULL;
835 | sendreq->data = query->tcpbuf; | |||
836 | sendreq->len = query->tcplen; | |||
837 | sendreq->owner_query = query; | |||
838 | sendreq->next = NULL;
839 | if (server->qtail) | |||
840 | server->qtail->next = sendreq; | |||
841 | else | |||
842 | { | |||
843 | SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 1);
844 | server->qhead = sendreq; | |||
845 | } | |||
846 | server->qtail = sendreq; | |||
847 | query->server_info[query->server].tcp_connection_generation = | |||
848 | server->tcp_connection_generation; | |||
849 | } | |||
850 | else | |||
851 | { | |||
852 | if (server->udp_socket == ARES_SOCKET_BAD)
853 | { | |||
854 | if (open_udp_socket(channel, server) == -1) | |||
855 | { | |||
856 | skip_server(channel, query, query->server); | |||
857 | next_server(channel, query, now); | |||
858 | return; | |||
859 | } | |||
860 | } | |||
861 | if (socket_write(channel, server->udp_socket, query->qbuf, query->qlen) == -1) | |||
862 | { | |||
863 | /* FIXME: Handle EAGAIN here since it likely can happen. */ | |||
864 | skip_server(channel, query, query->server); | |||
865 | next_server(channel, query, now); | |||
866 | return; | |||
867 | } | |||
868 | } | |||
869 | ||||
870 | /* For each trip through the entire server list, double the channel's | |||
871 | * assigned timeout, avoiding overflow. If channel->timeout is negative, | |||
872 | * leave it as-is, even though that should be impossible here. | |||
873 | */ | |||
874 | timeplus = channel->timeout; | |||
875 | { | |||
876 | /* How many times do we want to double it? Presume sane values here. */ | |||
877 | const int shift = query->try_count / channel->nservers; | |||
878 | ||||
879 | /* Is there enough room to shift timeplus left that many times? | |||
880 | * | |||
881 | * To find out, confirm that all of the bits we'll shift away are zero. | |||
882 | * Stop considering a shift if we get to the point where we could shift | |||
883 | * a 1 into the sign bit (i.e. when shift is within two of the bit | |||
884 | * count). | |||
885 | * | |||
886 | * This has the side benefit of leaving negative numbers unchanged. | |||
887 | */ | |||
888 | if(shift <= (int)(sizeof(int) * CHAR_BIT - 1)
889 | && (timeplus >> (sizeof(int) * CHAR_BIT - 1 - shift)) == 0)
890 | { | |||
891 | timeplus <<= shift; | |||
892 | } | |||
893 | } | |||
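| /* Worked example of the doubling above (illustrative numbers, not taken
|  * from this report): with channel->timeout == 5000 ms and nservers == 3,
|  * attempts 1-3 of a query use a 5000 ms timeout (shift 0), attempts 4-6 use
|  * 10000 ms (shift 1), attempts 7-9 use 20000 ms (shift 2), and so on until
|  * the shift would reach the sign bit, at which point timeplus is left
|  * unchanged. */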
894 | ||||
895 | query->timeout = *now; | |||
896 | timeadd(&query->timeout, timeplus); | |||
897 | /* Keep track of queries bucketed by timeout, so we can process | |||
898 | * timeout events quickly. | |||
899 | */ | |||
900 | ares__remove_from_list(&(query->queries_by_timeout)); | |||
901 | ares__insert_in_list( | |||
902 | &(query->queries_by_timeout), | |||
903 | &(channel->queries_by_timeout[query->timeout.tv_sec % | |||
904 | ARES_TIMEOUT_TABLE_SIZE]));
905 | ||||
906 | /* Keep track of queries bucketed by server, so we can process server | |||
907 | * errors quickly. | |||
908 | */ | |||
909 | ares__remove_from_list(&(query->queries_to_server)); | |||
910 | ares__insert_in_list(&(query->queries_to_server), | |||
911 | &(server->queries_to_server)); | |||
912 | } | |||
913 | ||||
914 | /* | |||
915 | * setsocknonblock sets the given socket to either blocking or non-blocking | |||
916 | * mode based on the 'nonblock' boolean argument. This function is highly | |||
917 | * portable. | |||
918 | */ | |||
919 | static int setsocknonblock(ares_socket_t sockfd, /* operate on this */ | |||
920 | int nonblock /* TRUE or FALSE */) | |||
921 | { | |||
922 | #if defined(USE_BLOCKING_SOCKETS) | |||
923 | ||||
924 | return 0; /* returns success */ | |||
925 | ||||
926 | #elif defined(HAVE_FCNTL_O_NONBLOCK)
927 | ||||
928 | /* most recent unix versions */ | |||
929 | int flags; | |||
930 | flags = fcntl(sockfd, F_GETFL, 0);
931 | if (FALSE != nonblock)
932 | return fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
933 | else
934 | return fcntl(sockfd, F_SETFL, flags & (~O_NONBLOCK)); /* LCOV_EXCL_LINE */
935 | ||||
936 | #elif defined(HAVE_IOCTL_FIONBIO)
937 | ||||
938 | /* older unix versions */ | |||
939 | int flags = nonblock ? 1 : 0; | |||
940 | return ioctl(sockfd, FIONBIO, &flags);
941 | ||||
942 | #elif defined(HAVE_IOCTLSOCKET_FIONBIO) | |||
943 | ||||
944 | #ifdef WATT32 | |||
945 | char flags = nonblock ? 1 : 0; | |||
946 | #else | |||
947 | /* Windows */ | |||
948 | unsigned long flags = nonblock ? 1UL : 0UL; | |||
949 | #endif | |||
950 | return ioctlsocket(sockfd, FIONBIO, &flags);
951 | ||||
952 | #elif defined(HAVE_IOCTLSOCKET_CAMEL_FIONBIO) | |||
953 | ||||
954 | /* Amiga */ | |||
955 | long flags = nonblock ? 1L : 0L; | |||
956 | return IoctlSocket(sockfd, FIONBIO, flags);
957 | ||||
958 | #elif defined(HAVE_SETSOCKOPT_SO_NONBLOCK) | |||
959 | ||||
960 | /* BeOS */ | |||
961 | long b = nonblock ? 1L : 0L; | |||
962 | return setsockopt(sockfd, SOL_SOCKET, SO_NONBLOCK, &b, sizeof(b));
963 | ||||
964 | #else | |||
965 | # error "no non-blocking method was found/used/set" | |||
966 | #endif | |||
967 | } | |||
968 | ||||
969 | static int configure_socket(ares_socket_t s, int family, ares_channel channel) | |||
970 | { | |||
971 | union { | |||
972 | struct sockaddr sa; | |||
973 | struct sockaddr_in sa4; | |||
974 | struct sockaddr_in6 sa6; | |||
975 | } local; | |||
976 | ||||
977 | /* do not set options for user-managed sockets */ | |||
978 | if (channel->sock_funcs) | |||
979 | return 0; | |||
980 | ||||
981 | (void)setsocknonblock(s, TRUE);
982 | ||||
983 | #if defined(FD_CLOEXEC) && !defined(MSDOS)
984 | /* Configure the socket fd as close-on-exec. */
985 | if (fcntl(s, F_SETFD, FD_CLOEXEC) == -1)
986 | return -1; /* LCOV_EXCL_LINE */ | |||
987 | #endif | |||
988 | ||||
989 | /* Set the socket's send and receive buffer sizes. */ | |||
990 | if ((channel->socket_send_buffer_size > 0) && | |||
991 | setsockopt(s, SOL_SOCKET, SO_SNDBUF,
992 | (void *)&channel->socket_send_buffer_size, | |||
993 | sizeof(channel->socket_send_buffer_size)) == -1) | |||
994 | return -1; | |||
995 | ||||
996 | if ((channel->socket_receive_buffer_size > 0) && | |||
997 | setsockopt(s, SOL_SOCKET, SO_RCVBUF,
998 | (void *)&channel->socket_receive_buffer_size, | |||
999 | sizeof(channel->socket_receive_buffer_size)) == -1) | |||
1000 | return -1; | |||
1001 | ||||
1002 | #ifdef SO_BINDTODEVICE
1003 | if (channel->local_dev_name[0]) {
1004 | if (setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE,
1005 | channel->local_dev_name, sizeof(channel->local_dev_name))) { | |||
1006 | /* Only root can do this, and usually not fatal if it doesn't work, so */ | |||
1007 | /* just continue on. */ | |||
1008 | } | |||
1009 | } | |||
1010 | #endif | |||
1011 | ||||
1012 | if (family == AF_INET) {
1013 | if (channel->local_ip4) {
1014 | memset(&local.sa4, 0, sizeof(local.sa4));
1015 | local.sa4.sin_family = AF_INET;
1016 | local.sa4.sin_addr.s_addr = htonl(channel->local_ip4);
1017 | if (bind(s, &local.sa, sizeof(local.sa4)) < 0) | |||
1018 | return -1; | |||
1019 | } | |||
1020 | } | |||
1021 | else if (family == AF_INET6) {
1022 | if (memcmp(channel->local_ip6, &ares_in6addr_any, | |||
1023 | sizeof(channel->local_ip6)) != 0) { | |||
1024 | memset(&local.sa6, 0, sizeof(local.sa6)); | |||
1025 | local.sa6.sin6_family = AF_INET6;
1026 | memcpy(&local.sa6.sin6_addr, channel->local_ip6, | |||
1027 | sizeof(channel->local_ip6)); | |||
1028 | if (bind(s, &local.sa, sizeof(local.sa6)) < 0) | |||
1029 | return -1; | |||
1030 | } | |||
1031 | } | |||
1032 | ||||
1033 | return 0; | |||
1034 | } | |||
1035 | ||||
1036 | static int open_tcp_socket(ares_channel channel, struct server_state *server) | |||
1037 | { | |||
1038 | ares_socket_t s; | |||
1039 | int opt; | |||
1040 | ares_socklen_t salen; | |||
1041 | union { | |||
1042 | struct sockaddr_in sa4; | |||
1043 | struct sockaddr_in6 sa6; | |||
1044 | } saddr; | |||
1045 | struct sockaddr *sa; | |||
1046 | ||||
1047 | switch (server->addr.family) | |||
1048 | { | |||
1049 | case AF_INET:
1050 | sa = (void *)&saddr.sa4;
1051 | salen = sizeof(saddr.sa4);
1052 | memset(sa, 0, salen);
1053 | saddr.sa4.sin_family = AF_INET;
1054 | if (server->addr.tcp_port) { | |||
1055 | saddr.sa4.sin_port = aresx_sitous(server->addr.tcp_port); | |||
1056 | } else { | |||
1057 | saddr.sa4.sin_port = aresx_sitous(channel->tcp_port); | |||
1058 | } | |||
1059 | memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
1060 | sizeof(server->addr.addrV4));
1061 | break; | |||
1062 | case AF_INET6:
1063 | sa = (void *)&saddr.sa6; | |||
1064 | salen = sizeof(saddr.sa6); | |||
1065 | memset(sa, 0, salen); | |||
1066 | saddr.sa6.sin6_family = AF_INET6;
1067 | if (server->addr.tcp_port) { | |||
1068 | saddr.sa6.sin6_port = aresx_sitous(server->addr.tcp_port); | |||
1069 | } else { | |||
1070 | saddr.sa6.sin6_port = aresx_sitous(channel->tcp_port); | |||
1071 | } | |||
1072 | memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
1073 | sizeof(server->addr.addrV6));
1074 | break; | |||
1075 | default: | |||
1076 | return -1; /* LCOV_EXCL_LINE */ | |||
1077 | } | |||
1078 | ||||
1079 | /* Acquire a socket. */ | |||
1080 | s = ares__open_socket(channel, server->addr.family, SOCK_STREAM, 0);
1081 | if (s == ARES_SOCKET_BAD)
1082 | return -1; | |||
1083 | ||||
1084 | /* Configure it. */ | |||
1085 | if (configure_socket(s, server->addr.family, channel) < 0) | |||
1086 | { | |||
1087 | ares__close_socket(channel, s); | |||
1088 | return -1; | |||
1089 | } | |||
1090 | ||||
1091 | #ifdef TCP_NODELAY
1092 | /* | |||
1093 | * Disable the Nagle algorithm (only relevant for TCP sockets, and thus not | |||
1094 | * in configure_socket). In general, in DNS lookups we're pretty much | |||
1095 | * interested in firing off a single request and then waiting for a reply, | |||
1096 | * so batching isn't very interesting. | |||
1097 | */ | |||
1098 | opt = 1; | |||
1099 | if (channel->sock_funcs == 0 | |||
1100 | && | |||
1101 | setsockopt(s, IPPROTO_TCP, TCP_NODELAY,
1102 | (void *)&opt, sizeof(opt)) == -1) | |||
1103 | { | |||
1104 | ares__close_socket(channel, s); | |||
1105 | return -1; | |||
1106 | } | |||
1107 | #endif | |||
1108 | ||||
1109 | if (channel->sock_config_cb) | |||
1110 | { | |||
1111 | int err = channel->sock_config_cb(s, SOCK_STREAM,
1112 | channel->sock_config_cb_data); | |||
1113 | if (err < 0) | |||
1114 | { | |||
1115 | ares__close_socket(channel, s); | |||
1116 | return err; | |||
1117 | } | |||
1118 | } | |||
1119 | ||||
1120 | /* Connect to the server. */ | |||
1121 | if (ares__connect_socket(channel, s, sa, salen) == -1) | |||
1122 | { | |||
1123 | int err = SOCKERRNO;
1124 |
1125 | if (err != EINPROGRESS && err != EWOULDBLOCK)
1126 | { | |||
1127 | ares__close_socket(channel, s); | |||
1128 | return -1; | |||
1129 | } | |||
1130 | } | |||
1131 | ||||
1132 | if (channel->sock_create_cb) | |||
1133 | { | |||
1134 | int err = channel->sock_create_cb(s, SOCK_STREAM,
1135 | channel->sock_create_cb_data); | |||
1136 | if (err < 0) | |||
1137 | { | |||
1138 | ares__close_socket(channel, s); | |||
1139 | return err; | |||
1140 | } | |||
1141 | } | |||
1142 | ||||
1143 | SOCK_STATE_CALLBACK(channel, s, 1, 0);
1144 | server->tcp_buffer_pos = 0; | |||
1145 | server->tcp_socket = s; | |||
1146 | server->tcp_connection_generation = ++channel->tcp_connection_generation; | |||
1147 | return 0; | |||
1148 | } | |||
1149 | ||||
1150 | static int open_udp_socket(ares_channel channel, struct server_state *server) | |||
1151 | { | |||
1152 | ares_socket_t s; | |||
1153 | ares_socklen_t salen; | |||
1154 | union { | |||
1155 | struct sockaddr_in sa4; | |||
1156 | struct sockaddr_in6 sa6; | |||
1157 | } saddr; | |||
1158 | struct sockaddr *sa; | |||
1159 | ||||
1160 | switch (server->addr.family) | |||
1161 | { | |||
1162 | case AF_INET:
1163 | sa = (void *)&saddr.sa4;
1164 | salen = sizeof(saddr.sa4);
1165 | memset(sa, 0, salen);
1166 | saddr.sa4.sin_family = AF_INET;
1167 | if (server->addr.udp_port) { | |||
1168 | saddr.sa4.sin_port = aresx_sitous(server->addr.udp_port); | |||
1169 | } else { | |||
1170 | saddr.sa4.sin_port = aresx_sitous(channel->udp_port); | |||
1171 | } | |||
1172 | memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
1173 | sizeof(server->addr.addrV4));
1174 | break; | |||
1175 | case AF_INET6:
1176 | sa = (void *)&saddr.sa6; | |||
1177 | salen = sizeof(saddr.sa6); | |||
1178 | memset(sa, 0, salen); | |||
1179 | saddr.sa6.sin6_family = AF_INET6;
1180 | if (server->addr.udp_port) { | |||
1181 | saddr.sa6.sin6_port = aresx_sitous(server->addr.udp_port); | |||
1182 | } else { | |||
1183 | saddr.sa6.sin6_port = aresx_sitous(channel->udp_port); | |||
1184 | } | |||
1185 | memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
1186 | sizeof(server->addr.addrV6));
1187 | break; | |||
1188 | default: | |||
1189 | return -1; /* LCOV_EXCL_LINE */ | |||
1190 | } | |||
1191 | ||||
1192 | /* Acquire a socket. */ | |||
1193 | s = ares__open_socket(channel, server->addr.family, SOCK_DGRAM, 0);
1194 | if (s == ARES_SOCKET_BAD)
1195 | return -1; | |||
1196 | ||||
1197 | /* Set the socket non-blocking. */ | |||
1198 | if (configure_socket(s, server->addr.family, channel) < 0) | |||
1199 | { | |||
1200 | ares__close_socket(channel, s); | |||
1201 | return -1; | |||
1202 | } | |||
1203 | ||||
1204 | if (channel->sock_config_cb) | |||
1205 | { | |||
1206 | int err = channel->sock_config_cb(s, SOCK_DGRAM,
1207 | channel->sock_config_cb_data); | |||
1208 | if (err < 0) | |||
1209 | { | |||
1210 | ares__close_socket(channel, s); | |||
1211 | return err; | |||
1212 | } | |||
1213 | } | |||
1214 | ||||
1215 | /* Connect to the server. */ | |||
1216 | if (ares__connect_socket(channel, s, sa, salen) == -1) | |||
1217 | { | |||
1218 | int err = SOCKERRNO;
1219 |
1220 | if (err != EINPROGRESS && err != EWOULDBLOCK)
1221 | { | |||
1222 | ares__close_socket(channel, s); | |||
1223 | return -1; | |||
1224 | } | |||
1225 | } | |||
1226 | ||||
1227 | if (channel->sock_create_cb) | |||
1228 | { | |||
1229 | int err = channel->sock_create_cb(s, SOCK_DGRAM,
1230 | channel->sock_create_cb_data); | |||
1231 | if (err < 0) | |||
1232 | { | |||
1233 | ares__close_socket(channel, s); | |||
1234 | return err; | |||
1235 | } | |||
1236 | } | |||
1237 | ||||
1238 | SOCK_STATE_CALLBACK(channel, s, 1, 0);
1239 | ||||
1240 | server->udp_socket = s; | |||
1241 | return 0; | |||
1242 | } | |||
1243 | ||||
1244 | static int same_questions(const unsigned char *qbuf, int qlen, | |||
1245 | const unsigned char *abuf, int alen) | |||
1246 | { | |||
1247 | struct { | |||
1248 | const unsigned char *p; | |||
1249 | int qdcount; | |||
1250 | char *name; | |||
1251 | long namelen; | |||
1252 | int type; | |||
1253 | int dnsclass; | |||
1254 | } q, a; | |||
1255 | int i, j; | |||
1256 | ||||
1257 | if (qlen < HFIXEDSZ || alen < HFIXEDSZ)
1258 | return 0; | |||
1259 | ||||
1260 | /* Extract qdcount from the request and reply buffers and compare them. */ | |||
1261 | q.qdcount = DNS_HEADER_QDCOUNT(qbuf);
1262 | a.qdcount = DNS_HEADER_QDCOUNT(abuf);
1263 | if (q.qdcount != a.qdcount) | |||
1264 | return 0; | |||
1265 | ||||
1266 | /* For each question in qbuf, find it in abuf. */ | |||
1267 | q.p = qbuf + HFIXEDSZ;
1268 | for (i = 0; i < q.qdcount; i++) | |||
1269 | { | |||
1270 | /* Decode the question in the query. */ | |||
1271 | if (ares_expand_name(q.p, qbuf, qlen, &q.name, &q.namelen)
1272 | != ARES_SUCCESS)
1273 | return 0; | |||
1274 | q.p += q.namelen; | |||
1275 | if (q.p + QFIXEDSZ > qbuf + qlen)
1276 | { | |||
1277 | ares_free(q.name); | |||
1278 | return 0; | |||
1279 | } | |||
1280 | q.type = DNS_QUESTION_TYPE(q.p);
1281 | q.dnsclass = DNS_QUESTION_CLASS(q.p);
1282 | q.p += QFIXEDSZ;
1283 | ||||
1284 | /* Search for this question in the answer. */ | |||
1285 | a.p = abuf + HFIXEDSZ;
1286 | for (j = 0; j < a.qdcount; j++) | |||
1287 | { | |||
1288 | /* Decode the question in the answer. */ | |||
1289 | if (ares_expand_name(a.p, abuf, alen, &a.name, &a.namelen)
1290 | != ARES_SUCCESS)
1291 | { | |||
1292 | ares_free(q.name); | |||
1293 | return 0; | |||
1294 | } | |||
1295 | a.p += a.namelen; | |||
1296 | if (a.p + QFIXEDSZ > abuf + alen)
1297 | { | |||
1298 | ares_free(q.name); | |||
1299 | ares_free(a.name); | |||
1300 | return 0; | |||
1301 | } | |||
1302 | a.type = DNS_QUESTION_TYPE(a.p);
1303 | a.dnsclass = DNS_QUESTION_CLASS(a.p);
1304 | a.p += QFIXEDSZ;
1305 | ||||
1306 | /* Compare the decoded questions. */ | |||
1307 | if (strcasecmp(q.name, a.name) == 0 && q.type == a.type | |||
1308 | && q.dnsclass == a.dnsclass) | |||
1309 | { | |||
1310 | ares_free(a.name); | |||
1311 | break; | |||
1312 | } | |||
1313 | ares_free(a.name); | |||
1314 | } | |||
1315 | ||||
1316 | ares_free(q.name); | |||
1317 | if (j == a.qdcount) | |||
1318 | return 0; | |||
1319 | } | |||
1320 | return 1; | |||
1321 | } | |||
1322 | ||||
1323 | static int same_address(struct sockaddr *sa, struct ares_addr *aa) | |||
1324 | { | |||
1325 | void *addr1; | |||
1326 | void *addr2; | |||
1327 | ||||
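| /* Note on the analyzer warning in the report header ("line 1328, column 21:
|  * the left operand of '==' is a garbage value"): it refers to sa->sa_family
|  * in the comparison below. The flagged path appears to come from
|  * read_udp_packets(), where 'from' is only written by socket_recvfrom() and
|  * the analyzer apparently cannot prove it is initialized on every path
|  * before same_address() reads it. */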
1328 | if (sa->sa_family == aa->family)
1329 | { | |||
1330 | switch (aa->family) | |||
1331 | { | |||
1332 | case AF_INET:
1333 | addr1 = &aa->addrV4;
1334 | addr2 = &(CARES_INADDR_CAST(struct sockaddr_in *, sa))->sin_addr;
1335 | if (memcmp(addr1, addr2, sizeof(aa->addrV4)) == 0)
1336 | return 1; /* match */ | |||
1337 | break; | |||
1338 | case AF_INET6:
1339 | addr1 = &aa->addrV6;
1340 | addr2 = &(CARES_INADDR_CAST(struct sockaddr_in6 *, sa))->sin6_addr;
1341 | if (memcmp(addr1, addr2, sizeof(aa->addrV6)) == 0)
1342 | return 1; /* match */ | |||
1343 | break; | |||
1344 | default: | |||
1345 | break; /* LCOV_EXCL_LINE */ | |||
1346 | } | |||
1347 | } | |||
1348 | return 0; /* different */ | |||
1349 | } | |||
1350 | ||||
1351 | /* search for an OPT RR in the response */ | |||
1352 | static int has_opt_rr(const unsigned char *abuf, int alen) | |||
1353 | { | |||
1354 | unsigned int qdcount, ancount, nscount, arcount, i; | |||
1355 | const unsigned char *aptr; | |||
1356 | int status; | |||
1357 | ||||
1358 | if (alen < HFIXEDSZ)
1359 | return -1; | |||
1360 | ||||
1361 | /* Parse the answer header. */ | |||
1362 | qdcount = DNS_HEADER_QDCOUNT(abuf);
1363 | ancount = DNS_HEADER_ANCOUNT(abuf);
1364 | nscount = DNS_HEADER_NSCOUNT(abuf);
1365 | arcount = DNS_HEADER_ARCOUNT(abuf);
1366 |
1367 | aptr = abuf + HFIXEDSZ;
1368 | ||||
1369 | /* skip the questions */ | |||
1370 | for (i = 0; i < qdcount; i++) | |||
1371 | { | |||
1372 | char* name; | |||
1373 | long len; | |||
1374 | status = ares_expand_name(aptr, abuf, alen, &name, &len); | |||
1375 | if (status != ARES_SUCCESS) | |||
1376 | return -1; | |||
1377 | ares_free_string(name); | |||
1378 | if (aptr + len + QFIXEDSZ > abuf + alen) | |||
1379 | return -1; | |||
1380 | aptr += len + QFIXEDSZ; | |||
1381 | } | |||
1382 | ||||
1383 | /* skip the answer (ancount) and authority (nscount) records */ | |||
1384 | for (i = 0; i < ancount + nscount; i++) | |||
1385 | { | |||
1386 | char* name; | |||
1387 | long len; | |||
1388 | int dlen; | |||
1389 | status = ares_expand_name(aptr, abuf, alen, &name, &len); | |||
1390 | if (status != ARES_SUCCESS) | |||
1391 | return -1; | |||
1392 | ares_free_string(name); | |||
1393 | if (aptr + len + RRFIXEDSZ > abuf + alen) | |||
1394 | return -1; | |||
1395 | aptr += len; | |||
1396 | dlen = DNS_RR_LEN(aptr); | |||
1397 | aptr += RRFIXEDSZ; | |||
1398 | if (aptr + dlen > abuf + alen) | |||
1399 | return -1; | |||
1400 | aptr += dlen; | |||
1401 | } | |||
1402 | ||||
1403 | /* search for rr type (41) - opt */ | |||
1404 | for (i = 0; i < arcount; i++) | |||
1405 | { | |||
1406 | char* name; | |||
1407 | long len; | |||
1408 | int dlen; | |||
1409 | status = ares_expand_name(aptr, abuf, alen, &name, &len); | |||
1410 | if (status != ARES_SUCCESS) | |||
1411 | return -1; | |||
1412 | ares_free_string(name); | |||
1413 | if (aptr + len + RRFIXEDSZ > abuf + alen) | |||
1414 | return -1; | |||
1415 | aptr += len; | |||
1416 | ||||
1417 | if (DNS_RR_TYPE(aptr) == T_OPT) | |||
1418 | return 1; | |||
1419 | ||||
1420 | dlen = DNS_RR_LEN(aptr); | |||
1421 | aptr += RRFIXEDSZ; | |||
1422 | if (aptr + dlen > abuf + alen) | |||
1423 | return -1; | |||
1424 | aptr += dlen; | |||
1425 | } | |||
1426 | ||||
1427 | return 0; | |||
1428 | } | |||
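/* Editorial sketch, not part of this file: every DNS_HEADER_* and DNS_RR_*
 * access used above is the same primitive -- a big-endian 16-bit load at a
 * fixed offset into the wire-format message.  An equivalent helper:
 */
static unsigned short dns_be16(const unsigned char *p)
{
  return (unsigned short)(((unsigned int)p[0] << 8U) | (unsigned int)p[1]);
}
/* For example, qdcount is dns_be16(abuf + 4) and arcount is
 * dns_be16(abuf + 10); the fixed RR header skipped via RRFIXEDSZ is
 * type (2) + class (2) + TTL (4) + rdlength (2) = 10 bytes, with rdlength
 * itself read as dns_be16(aptr + 8).
 */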
1429 | ||||
1430 | static void end_query (ares_channel channel, struct query *query, int status, | |||
1431 | unsigned char *abuf, int alen) | |||
1432 | { | |||
1433 | int i; | |||
1434 | ||||
1435 | /* First we check to see if this query ended while one of our send | |||
1436 | * queues still has pointers to it. | |||
1437 | */ | |||
1438 | for (i = 0; i < channel->nservers; i++) | |||
1439 | { | |||
1440 | struct server_state *server = &channel->servers[i]; | |||
1441 | struct send_request *sendreq; | |||
1442 | for (sendreq = server->qhead; sendreq; sendreq = sendreq->next) | |||
1443 | if (sendreq->owner_query == query) | |||
1444 | { | |||
1445 | sendreq->owner_query = NULL; | |||
1446 | assert(sendreq->data_storage == NULL); | |||
1447 | if (status == ARES_SUCCESS) | |||
1448 | { | |||
1449 | /* We got a reply for this query, but this queued sendreq | |||
1450 | * points into this soon-to-be-gone query's tcpbuf. Probably | |||
1451 | * this means we timed out and queued the query for | |||
1452 | * retransmission, then received a response before actually | |||
1453 | * retransmitting. This is perfectly fine, so we want to keep | |||
1454 | * the connection running smoothly if we can. But in the worst | |||
1455 | * case we may have sent only some prefix of the query, with | |||
1456 | * some suffix of the query left to send. Also, the buffer may | |||
1457 | * be queued on multiple queues. To prevent dangling pointers | |||
1458 | * to the query's tcpbuf and handle these cases, we just give | |||
1459 | * such sendreqs their own copy of the query packet. | |||
1460 | */ | |||
1461 | sendreq->data_storage = ares_malloc(sendreq->len); | |||
1462 | if (sendreq->data_storage != NULL) | |||
1463 | { | |||
1464 | memcpy(sendreq->data_storage, sendreq->data, sendreq->len); | |||
1465 | sendreq->data = sendreq->data_storage; | |||
1466 | } | |||
1467 | } | |||
1468 | if ((status != ARES_SUCCESS) || (sendreq->data_storage == NULL)) | |||
1469 | { | |||
1470 | /* We encountered an error (probably a timeout, suggesting that the | |||
1471 | * DNS server we're talking to is unreachable, | |||
1472 | * wedged, or severely overloaded) or we couldn't copy the | |||
1473 | * request, so mark the connection as broken. When we get to | |||
1474 | * process_broken_connections() we'll close the connection and | |||
1475 | * try to re-send requests to another server. | |||
1476 | */ | |||
1477 | server->is_broken = 1; | |||
1478 | /* Just to be paranoid, zero out this sendreq... */ | |||
1479 | sendreq->data = NULL; | |||
1480 | sendreq->len = 0; | |||
1481 | } | |||
1482 | } | |||
1483 | } | |||
1484 | ||||
1485 | /* Invoke the callback */ | |||
1486 | query->callback(query->arg, status, query->timeouts, abuf, alen); | |||
1487 | ares__free_query(query); | |||
1488 | ||||
1489 | /* Simple cleanup policy: if no queries are remaining, close all network | |||
1490 | * sockets unless STAYOPEN is set. | |||
1491 | */ | |||
1492 | if (!(channel->flags & ARES_FLAG_STAYOPEN) && | |||
1493 | ares__is_list_empty(&(channel->all_queries))) | |||
1494 | { | |||
1495 | for (i = 0; i < channel->nservers; i++) | |||
1496 | ares__close_sockets(channel, &channel->servers[i]); | |||
1497 | } | |||
1498 | } | |||
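/* Editorial sketch, not from this file: the data_storage handling above is a
 * detach-on-free pattern -- before the query that owns tcpbuf is destroyed,
 * any queued send request still pointing into that buffer gets its own copy,
 * or else the connection is marked broken.  Reduced to its essentials, with
 * illustrative names only:
 */
#include <stdlib.h>
#include <string.h>

struct pending_send {
  const unsigned char *data;   /* may point into someone else's buffer */
  size_t               len;
  unsigned char       *owned;  /* non-NULL once this entry owns its bytes */
};

static int detach_pending(struct pending_send *p)
{
  unsigned char *copy = malloc(p->len);
  if (copy == NULL)
    return -1;                 /* caller must treat this entry as broken */
  memcpy(copy, p->data, p->len);
  p->owned = copy;
  p->data  = copy;
  return 0;
}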
1499 | ||||
1500 | void ares__free_query(struct query *query) | |||
1501 | { | |||
1502 | /* Remove the query from all the lists in which it is linked */ | |||
1503 | ares__remove_from_list(&(query->queries_by_qid)); | |||
1504 | ares__remove_from_list(&(query->queries_by_timeout)); | |||
1505 | ares__remove_from_list(&(query->queries_to_server)); | |||
1506 | ares__remove_from_list(&(query->all_queries)); | |||
1507 | /* Zero out some important stuff, to help catch bugs */ | |||
1508 | query->callback = NULL; | |||
1509 | query->arg = NULL; | |||
1510 | /* Deallocate the memory associated with the query */ | |||
1511 | ares_free(query->tcpbuf); | |||
1512 | ares_free(query->server_info); | |||
1513 | ares_free(query); | |||
1514 | } | |||
1515 | ||||
1516 | ares_socket_t ares__open_socket(ares_channel channel, | |||
1517 | int af, int type, int protocol) | |||
1518 | { | |||
1519 | if (channel->sock_funcs) | |||
1520 | return channel->sock_funcs->asocket(af, | |||
1521 | type, | |||
1522 | protocol, | |||
1523 | channel->sock_func_cb_data); | |||
1524 | else | |||
1525 | return socket(af, type, protocol); | |||
1526 | } | |||
1527 | ||||
1528 | int ares__connect_socket(ares_channel channel, | |||
1529 | ares_socket_t sockfd, | |||
1530 | const struct sockaddr *addr, | |||
1531 | ares_socklen_t addrlen) | |||
1532 | { | |||
1533 | if (channel->sock_funcs) | |||
1534 | return channel->sock_funcs->aconnect(sockfd, | |||
1535 | addr, | |||
1536 | addrlen, | |||
1537 | channel->sock_func_cb_data); | |||
1538 | else | |||
1539 | return connect(sockfd, addr, addrlen); | |||
1540 | } | |||
1541 | ||||
1542 | void ares__close_socket(ares_channel channel, ares_socket_t s) | |||
1543 | { | |||
1544 | if (channel->sock_funcs) | |||
1545 | channel->sock_funcs->aclose(s, channel->sock_func_cb_data); | |||
1546 | else | |||
1547 | sclose(s); | |||
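/* Usage sketch (assumes the public ares_set_socket_functions() API declared
 * in ares.h; this is not code from ares_process.c): channel->sock_funcs,
 * checked by the wrapper functions above, becomes non-NULL when the
 * application registers a callback table, after which the wrappers dispatch
 * to these callbacks instead of the plain socket()/connect()/close() calls.
 * A minimal pass-through table might look like this (C99 designated
 * initializers used for clarity):
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>
#include "ares.h"

static ares_socket_t cb_socket(int af, int type, int proto, void *arg)
{ (void)arg; return socket(af, type, proto); }

static int cb_close(ares_socket_t fd, void *arg)
{ (void)arg; return close(fd); }

static int cb_connect(ares_socket_t fd, const struct sockaddr *sa,
                      ares_socklen_t len, void *arg)
{ (void)arg; return connect(fd, sa, (socklen_t)len); }

static ares_ssize_t cb_recvfrom(ares_socket_t fd, void *buf, size_t len,
                                int flags, struct sockaddr *sa,
                                ares_socklen_t *salen, void *arg)
{
  socklen_t sl = (socklen_t)*salen;
  ssize_t rc;
  (void)arg;
  rc = recvfrom(fd, buf, len, flags, sa, &sl);
  *salen = (ares_socklen_t)sl;
  return (ares_ssize_t)rc;
}

static ares_ssize_t cb_sendv(ares_socket_t fd, const struct iovec *vec,
                             int count, void *arg)
{ (void)arg; return (ares_ssize_t)writev(fd, vec, count); }

static const struct ares_socket_functions passthrough_funcs = {
  .asocket   = cb_socket,
  .aclose    = cb_close,
  .aconnect  = cb_connect,
  .arecvfrom = cb_recvfrom,
  .asendv    = cb_sendv
};

/* After a successful ares_init(&channel):
 *   ares_set_socket_functions(channel, &passthrough_funcs, NULL);
 */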
1548 | } |