1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
/*
 * __inet_hash_connect - bind a connecting socket to a local port and insert
 * it into the established hash.
 *
 * @death_row:         holds the hashinfo used for both bind and ehash lookups
 * @sk:                the socket being connected
 * @port_offset:       per-connection offset added to the rotating port hint
 * @check_established: callback that verifies the resulting 4-tuple is unique
 *                     in the established table; may return a conflicting
 *                     timewait sock through its last argument
 *
 * Two cases:
 *  - snum == 0: no local port chosen yet. Scan the local port range
 *    (starting from hint + port_offset) for a port whose bind bucket admits
 *    us, confirming 4-tuple uniqueness via check_established().
 *  - snum != 0: the socket was already bound. If it is the sole owner of its
 *    bind bucket, hash it directly; otherwise fall back to a full
 *    established-table check.
 *
 * Returns 0 on success, -EADDRNOTAVAIL if no usable port was found, or the
 * error from check_established().
 */
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		/* Rotating start point shared across calls; updated below on
		 * success so successive connects spread over the range.
		 * NOTE(review): not serialized — presumably benign racy
		 * updates are acceptable here; confirm against upstream.
		 */
		static u32 hint;
		u32 offset = hint + port_offset;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		/* By starting with offset being an even number,
		 * we tend to leave about 50% of ports for other uses,
		 * like bind(0).
		 */
		offset &= ~1;

		local_bh_disable();
		for (i = 0; i < remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_local_reserved_port(net, port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
							 hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					/* Ports whose bucket still allows
					 * reuse are skipped — leave them to
					 * explicit bind() users.
					 */
					if (tb->fastreuse >= 0 ||
					    tb->fastreuseport >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					/* 0 from check_established means the
					 * 4-tuple is unique; tw may now hold
					 * a displaced timewait sock.
					 */
					if (!check_established(death_row, sk,
							      port, &tw))
						goto ok;
					goto next_port;
				}
			}

			/* No bucket for this port yet: create one and mark it
			 * non-reusable (-1) so later bind() callers cannot
			 * share it.
			 */
			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
						     net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			tb->fastreuseport = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		/* Advance the shared hint past the slot we consumed, keeping
		 * it even to preserve the 50% split established above.
		 */
		hint += (i + 2) & ~1;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			__inet_hash_nolisten(sk, (struct sock *)tw);
		}
		/* Detach the displaced timewait sock from the bind bucket
		 * while we still hold the bucket lock...
		 */
		if (tw)
			inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);

		/* ...but only deschedule/release it after dropping the lock. */
		if (tw)
			inet_twsk_deschedule_put(tw);

		ret = 0;
		/* bh's still disabled from local_bh_disable() above; the
		 * out label below re-enables them.
		 */
		goto out;
	}

	/* Socket already bound to snum: locate its bind bucket. */
	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	/* Sole owner of the bucket: no other socket can conflict on this
	 * port, so hash without an established-table check.
	 */
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		__inet_hash_nolisten(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		/* Drop only the lock here; bh stays disabled until the
		 * local_bh_enable() at out, matching the goto path above.
		 */
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
|