Changeset 3878


Timestamp: Nov 18, 2007, 5:35:28 PM
Author: charles
Message:

back to the "router death" issue: throttle how many connections T will try to open at any one time
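
For context on the technique: the throttle works by capping how many new peer connections the reconnect pulse may start each time it fires. Below is a self-contained illustrative sketch of that idea, not the actual peer-mgr.c code; the cap value, the openConnection helper, and the candidate list are all assumed for the example.

/* Illustrative sketch only -- not the actual peer-mgr.c code.
 * Each time the reconnect pulse fires, dial at most
 * MAX_RECONNECTIONS_PER_PULSE of the known candidates; the rest
 * wait for a later pulse, so connection attempts are spread out
 * over time instead of bursting all at once. */
#include <stdio.h>

enum { MAX_RECONNECTIONS_PER_PULSE = 4 }; /* cap value assumed for illustration */

/* hypothetical stand-in for the code that actually opens a peer connection */
static void openConnection( int peerId )
{
    printf( "dialing peer %d\n", peerId );
}

static void reconnectPulse( const int * candidates, int nCandidates )
{
    int i;
    /* the throttle: stop after MAX_RECONNECTIONS_PER_PULSE attempts,
     * even if many more candidates are available */
    for( i = 0; i < nCandidates && i < MAX_RECONNECTIONS_PER_PULSE; ++i )
        openConnection( candidates[i] );
}

int main( void )
{
    const int candidates[] = { 10, 11, 12, 13, 14, 15, 16, 17 };
    /* eight candidates are known, but only the first four are dialed
     * on this pulse; the rest are retried on later pulses */
    reconnectPulse( candidates, 8 );
    return 0;
}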

File: 1 edited

  • trunk/libtransmission/peer-mgr.c

--- trunk/libtransmission/peer-mgr.c (r3876)
+++ trunk/libtransmission/peer-mgr.c (r3878)
@@ -75,5 +75,5 @@
 
     /* how frequently to decide which peers live and die */
-    RECONNECT_PERIOD_MSEC = (5 * 1000),
+    RECONNECT_PERIOD_MSEC = (6 * 1000),
 
     /* how frequently to refill peers' request lists */
@@ -88,5 +88,5 @@
 
     /* arbitrary */
-    MAX_CONNECTED_PEERS_PER_TORRENT = 60,
+    MAX_CONNECTED_PEERS_PER_TORRENT = 50,
 
     /* when many peers are available, keep idle ones this long */
@@ -98,5 +98,5 @@
     /* how many peers to unchoke per-torrent. */
     /* FIXME: make this user-configurable? */
-    NUM_UNCHOKED_PEERS_PER_TORRENT = 20, /* arbitrary */
+    NUM_UNCHOKED_PEERS_PER_TORRENT = 10, /* arbitrary */
 
     /* set this too high and there will be a lot of churn.
@@ -1808,8 +1808,7 @@
     else
     {
-        int i, nCandidates, nBad, nAdd;
+        int i, nCandidates, nBad;
         struct peer_atom ** candidates = getPeerCandidates( t, &nCandidates );
         struct tr_peer ** connections = getPeersToClose( t, &nBad );
-        const int peerCount = tr_ptrArraySize( t->peers );
 
         if( nBad || nCandidates )
@@ -1835,7 +1834,5 @@
 
         /* add some new ones */
-        nAdd = !peerCount ? MAX_CONNECTED_PEERS_PER_TORRENT
-                          : MAX_RECONNECTIONS_PER_PULSE;
-        for( i=0; i<nAdd && i<nCandidates && i<MAX_RECONNECTIONS_PER_PULSE; ++i )
+        for( i=0; i<nCandidates && i<MAX_RECONNECTIONS_PER_PULSE; ++i )
        {
            tr_peerMgr * mgr = t->manager;
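
A note on the design choice visible in the last two hunks: before this revision, a torrent with no connected peers (peerCount == 0) was allowed to open up to MAX_CONNECTED_PEERS_PER_TORRENT connections in a single pulse via the nAdd special case. Removing that special case means every pulse, including the very first, is bounded by MAX_RECONNECTIONS_PER_PULSE, so the initial burst of simultaneous TCP opens, presumably the trigger for the "router death" named in the commit message, is spread across several pulses instead. The lowered constants (50 connected peers, 10 unchoked) and the slightly longer 6-second reconnect period work in the same direction.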