source: trunk/libtransmission/bandwidth.c @ 12918

Last change on this file since 12918 was 12653, checked in by jordan, 10 years ago

remove dead code

  • Property svn:keywords set to Date Rev Author Id
File size: 11.4 KB
Line 
1/*
2 * This file Copyright (C) Mnemosyne LLC
3 *
4 * This file is licensed by the GPL version 2. Works owned by the
5 * Transmission project are granted a special exemption to clause 2(b)
6 * so that the bulk of its code can remain under the MIT license.
7 * This exemption does not extend to derived works not owned by
8 * the Transmission project.
9 *
10 * $Id: bandwidth.c 12653 2011-08-08 16:58:29Z jordan $
11 */
12
13#include <assert.h>
14#include <limits.h>
15#include <string.h> /* memset() */
16
17#include "transmission.h"
18#include "bandwidth.h"
19#include "crypto.h" /* tr_cryptoWeakRandInt() */
20#include "peer-io.h"
21#include "utils.h"
22
/* Deep-logging helper: forwards printf-style arguments to tr_deepLog()
 * tagged with this file/line, but only when deep logging is enabled.
 * The do/while(0) wrapper makes the macro safe as a single statement. */
#define dbgmsg( ... ) \
    do { \
        if( tr_deepLoggingIsActive( ) ) \
            tr_deepLog( __FILE__, __LINE__, NULL, __VA_ARGS__ ); \
    } while( 0 )
28
29/***
30****
31***/
32
33static unsigned int
34getSpeed_Bps( const struct bratecontrol * r, unsigned int interval_msec, uint64_t now )
35{
36    if( !now )
37        now = tr_time_msec();
38
39    if( now != r->cache_time )
40    {
41        int i = r->newest;
42        uint64_t bytes = 0;
43        const uint64_t cutoff = now - interval_msec;
44        struct bratecontrol * rvolatile = (struct bratecontrol*) r;
45
46        for( ;; )
47        {
48            if( r->transfers[i].date <= cutoff )
49                break;
50
51            bytes += r->transfers[i].size;
52
53            if( --i == -1 ) i = HISTORY_SIZE - 1; /* circular history */
54            if( i == r->newest ) break; /* we've come all the way around */
55        }
56
57        rvolatile->cache_val = (unsigned int)(( bytes * 1000u ) / interval_msec);
58        rvolatile->cache_time = now;
59    }
60
61    return r->cache_val;
62}
63
64static void
65bytesUsed( const uint64_t now, struct bratecontrol * r, size_t size )
66{
67    if( r->transfers[r->newest].date + GRANULARITY_MSEC >= now )
68        r->transfers[r->newest].size += size;
69    else
70    {
71        if( ++r->newest == HISTORY_SIZE ) r->newest = 0;
72        r->transfers[r->newest].date = now;
73        r->transfers[r->newest].size = size;
74    }
75
76    /* invalidate cache_val*/
77    r->cache_time = 0;
78}
79
80/******
81*******
82*******
83******/
84
85static int
86compareBandwidth( const void * va, const void * vb )
87{
88    const tr_bandwidth * a = va;
89    const tr_bandwidth * b = vb;
90    return a->uniqueKey - b->uniqueKey;
91}
92
93/***
94****
95***/
96
97void
98tr_bandwidthConstruct( tr_bandwidth * b, tr_session * session, tr_bandwidth * parent )
99{
100    static unsigned int uniqueKey = 0;
101
102    b->session = session;
103    b->children = TR_PTR_ARRAY_INIT;
104    b->magicNumber = BANDWIDTH_MAGIC_NUMBER;
105    b->uniqueKey = uniqueKey++;
106    b->band[TR_UP].honorParentLimits = true;
107    b->band[TR_DOWN].honorParentLimits = true;
108    tr_bandwidthSetParent( b, parent );
109}
110
111void
112tr_bandwidthDestruct( tr_bandwidth * b )
113{
114    assert( tr_isBandwidth( b ) );
115
116    tr_bandwidthSetParent( b, NULL );
117    tr_ptrArrayDestruct( &b->children, NULL );
118
119    memset( b, ~0, sizeof( tr_bandwidth ) );
120}
121
122/***
123****
124***/
125
126void
127tr_bandwidthSetParent( tr_bandwidth  * b,
128                       tr_bandwidth  * parent )
129{
130    assert( tr_isBandwidth( b ) );
131    assert( b != parent );
132
133    if( b->parent )
134    {
135        void * removed;
136
137        assert( tr_isBandwidth( b->parent ) );
138
139        removed = tr_ptrArrayRemoveSorted( &b->parent->children, b, compareBandwidth );
140        assert( removed == b );
141        assert( tr_ptrArrayFindSorted( &b->parent->children, b, compareBandwidth ) == NULL );
142
143        b->parent = NULL;
144    }
145
146    if( parent )
147    {
148        assert( tr_isBandwidth( parent ) );
149        assert( parent->parent != b );
150
151        assert( tr_ptrArrayFindSorted( &parent->children, b, compareBandwidth ) == NULL );
152        tr_ptrArrayInsertSorted( &parent->children, b, compareBandwidth );
153        assert( tr_ptrArrayFindSorted( &parent->children, b, compareBandwidth ) == b );
154        b->parent = parent;
155    }
156}
157
158/***
159****
160***/
161
162static void
163allocateBandwidth( tr_bandwidth  * b,
164                   tr_priority_t   parent_priority,
165                   tr_direction    dir,
166                   unsigned int    period_msec,
167                   tr_ptrArray   * peer_pool )
168{
169    const tr_priority_t priority = MAX( parent_priority, b->priority );
170
171    assert( tr_isBandwidth( b ) );
172    assert( tr_isDirection( dir ) );
173
174    /* set the available bandwidth */
175    if( b->band[dir].isLimited )
176    {
177        const unsigned int nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
178        b->band[dir].bytesLeft = ( nextPulseSpeed * period_msec ) / 1000u;
179    }
180
181    /* add this bandwidth's peer, if any, to the peer pool */
182    if( b->peer != NULL ) {
183        b->peer->priority = priority;
184        tr_ptrArrayAppend( peer_pool, b->peer );
185    }
186
187    /* traverse & repeat for the subtree */
188    if( 1 ) {
189        int i;
190        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
191        const int n = tr_ptrArraySize( &b->children );
192        for( i=0; i<n; ++i )
193            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
194    }
195}
196
197static void
198phaseOne( tr_ptrArray * peerArray, tr_direction dir )
199{
200    int i, n;
201    int peerCount = tr_ptrArraySize( peerArray );
202    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );
203
204    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
205     * peers from starving the others. Loop through the peers, giving each a
206     * small chunk of bandwidth. Keep looping until we run out of bandwidth
207     * and/or peers that can use it */
208    n = peerCount;
209    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
210    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
211    while( n > 0 )
212    {
213        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
214         * frame right away and leave enough buffered data for the next frame to go
215         * out in a timely manner. */
216        const size_t increment = 3000;
217
218        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );
219
220        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );
221
222        if( bytesUsed == (int)increment )
223            ++i;
224        else {
225            /* peer is done writing for now; move it to the end of the list */
226            tr_peerIo * pio = peers[i];
227            peers[i] = peers[n-1];
228            peers[n-1] = pio;
229            --n;
230        }
231
232        if( i >= n )
233            i = 0;
234    }
235}
236
237void
238tr_bandwidthAllocate( tr_bandwidth  * b,
239                      tr_direction    dir,
240                      unsigned int    period_msec )
241{
242    int i, peerCount;
243    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
244    tr_ptrArray low = TR_PTR_ARRAY_INIT;
245    tr_ptrArray high = TR_PTR_ARRAY_INIT;
246    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
247    struct tr_peerIo ** peers;
248
249    /* allocateBandwidth() is a helper function with two purposes:
250     * 1. allocate bandwidth to b and its subtree
251     * 2. accumulate an array of all the peerIos from b and its subtree. */
252    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
253    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
254    peerCount = tr_ptrArraySize( &tmp );
255
256    for( i=0; i<peerCount; ++i )
257    {
258        tr_peerIo * io = peers[i];
259        tr_peerIoRef( io );
260
261        tr_peerIoFlushOutgoingProtocolMsgs( io );
262
263        switch( io->priority ) {
264            case TR_PRI_HIGH:   tr_ptrArrayAppend( &high,   io ); /* fall through */
265            case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */
266            default:            tr_ptrArrayAppend( &low,    io );
267        }
268    }
269
270    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
271     * peers from starving the others. Loop through the peers, giving each a
272     * small chunk of bandwidth. Keep looping until we run out of bandwidth
273     * and/or peers that can use it */
274    phaseOne( &high, dir );
275    phaseOne( &normal, dir );
276    phaseOne( &low, dir );
277
278    /* Second phase of IO. To help us scale in high bandwidth situations,
279     * enable on-demand IO for peers with bandwidth left to burn.
280     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
281     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
282    for( i=0; i<peerCount; ++i )
283        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );
284
285    for( i=0; i<peerCount; ++i )
286        tr_peerIoUnref( peers[i] );
287
288    /* cleanup */
289    tr_ptrArrayDestruct( &normal, NULL );
290    tr_ptrArrayDestruct( &high, NULL );
291    tr_ptrArrayDestruct( &low, NULL );
292    tr_ptrArrayDestruct( &tmp, NULL );
293}
294
295void
296tr_bandwidthSetPeer( tr_bandwidth * b, tr_peerIo * peer )
297{
298    assert( tr_isBandwidth( b ) );
299    assert( ( peer == NULL ) || tr_isPeerIo( peer ) );
300
301    b->peer = peer;
302}
303
304/***
305****
306***/
307
308static unsigned int
309bandwidthClamp( const tr_bandwidth  * b,
310                uint64_t              now,
311                tr_direction          dir,
312                unsigned int          byteCount )
313{
314    assert( tr_isBandwidth( b ) );
315    assert( tr_isDirection( dir ) );
316
317    if( b )
318    {
319        if( b->band[dir].isLimited )
320        {
321            byteCount = MIN( byteCount, b->band[dir].bytesLeft );
322
323            /* if we're getting close to exceeding the speed limit,
324             * clamp down harder on the bytes available */
325            if( byteCount > 0 )
326            {
327                double current;
328                double desired;
329                double r;
330
331                if( now == 0 )
332                    now = tr_time_msec( );
333
334                current = tr_bandwidthGetRawSpeed_Bps( b, now, TR_DOWN );
335                desired = tr_bandwidthGetDesiredSpeed_Bps( b, TR_DOWN );
336                r = desired >= 1 ? current / desired : 0;
337
338                     if( r > 1.0 ) byteCount = 0;
339                else if( r > 0.9 ) byteCount *= 0.8;
340                else if( r > 0.8 ) byteCount *= 0.9;
341            }
342        }
343
344        if( b->parent && b->band[dir].honorParentLimits && ( byteCount > 0 ) )
345            byteCount = bandwidthClamp( b->parent, now, dir, byteCount );
346    }
347
348    return byteCount;
349}
350unsigned int
351tr_bandwidthClamp( const tr_bandwidth  * b,
352                   tr_direction          dir,
353                   unsigned int          byteCount )
354{
355    return bandwidthClamp( b, 0, dir, byteCount );
356}
357
358
359unsigned int
360tr_bandwidthGetRawSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
361{
362    assert( tr_isBandwidth( b ) );
363    assert( tr_isDirection( dir ) );
364
365    return getSpeed_Bps( &b->band[dir].raw, HISTORY_MSEC, now );
366}
367
368unsigned int
369tr_bandwidthGetPieceSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
370{
371    assert( tr_isBandwidth( b ) );
372    assert( tr_isDirection( dir ) );
373
374    return getSpeed_Bps( &b->band[dir].piece, HISTORY_MSEC, now );
375}
376
377void
378tr_bandwidthUsed( tr_bandwidth  * b,
379                  tr_direction    dir,
380                  size_t          byteCount,
381                  bool         isPieceData,
382                  uint64_t        now )
383{
384    struct tr_band * band;
385
386    assert( tr_isBandwidth( b ) );
387    assert( tr_isDirection( dir ) );
388
389    band = &b->band[dir];
390
391    if( band->isLimited && isPieceData )
392        band->bytesLeft -= MIN( band->bytesLeft, byteCount );
393
394#ifdef DEBUG_DIRECTION
395if( ( dir == DEBUG_DIRECTION ) && ( band->isLimited ) )
396fprintf( stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n",
397         b, byteCount, (isPieceData?"piece":"raw"), oldBytesLeft, band->bytesLeft );
398#endif
399
400    bytesUsed( now, &band->raw, byteCount );
401
402    if( isPieceData )
403        bytesUsed( now, &band->piece, byteCount );
404
405    if( b->parent != NULL )
406        tr_bandwidthUsed( b->parent, dir, byteCount, isPieceData, now );
407}
Note: See TracBrowser for help on using the repository browser.