# min max from tuples in list

Peter Otten __peter__ at web.de
Thu Dec 12 13:54:10 CET 2013

```Steven D'Aprano wrote:

> In any case, sorting in Python is amazingly fast. You may be pleasantly
> surprised that a version that sorts your data, while nominally
> O(N log N), may be much faster than an O(N) solution that doesn't require
> sorted data. If I were a betting man, I'd be willing to wager a shiny new
> dollar[1] that sorting works out faster for reasonable sized sets of data.

Well, that was my first reaction, too. But then

$ cat keyminmax.py
import operator
import itertools
import collections

def minmax_groupby(items):
    """Yield (key, min_value, max_value) for each distinct key in *items*.

    *items* is an iterable of (key, value) pairs.  The pairs are sorted
    first, so within each group the first pair carries the smallest value
    and the last pair the largest.  Keys are yielded in ascending order.

    (Indentation restored — the archived post had it stripped.)
    """
    for key, group in itertools.groupby(sorted(items),
                                        key=operator.itemgetter(0)):
        # First pair of the sorted group holds the minimum value.
        minpair = maxpair = next(group)
        # Exhaust the group; the loop variable retains the last (max) pair.
        for maxpair in group:
            pass
        yield key, minpair[1], maxpair[1]

def minmax_dict(items):
    """Yield (key, min_value, max_value) for each distinct key in *items*.

    Groups values per key with a defaultdict instead of sorting; keys are
    therefore yielded in first-seen (insertion) order, not sorted order.

    (Indentation restored — the archived post had it stripped.)
    """
    d = collections.defaultdict(list)
    for key, value in items:
        d[key].append(value)
    for key, values in d.items():
        yield key, min(values), max(values)

# Benchmark dataset: (key, value) pairs with keys 36..52, several values per
# key, deliberately unsorted so every variant pays its own grouping cost.
a = [(52, 193), (52, 193), (52, 192), (51, 193), (51, 191), (51, 190),
(51, 189), (51, 188), (50, 194), (50, 187),(50, 186), (50, 185),
(50, 184), (49, 194), (49, 183), (49, 182), (49, 181), (48, 194),
(48, 180), (48, 179), (48, 178), (48, 177), (47, 194), (47, 176),
(47, 175), (47, 174), (47, 173), (46, 195), (46, 172), (46, 171),
(46, 170), (46, 169), (45, 195), (45, 168), (45, 167), (45, 166),
(44, 195), (44, 165), (44, 164), (44, 163), (44, 162), (43, 195),
(43, 161), (43, 160), (43, 159), (43, 158), (42, 196), (42, 157),
(42, 156), (42, 155), (41, 196), (41, 154), (41, 153), (41, 152),
(41, 151), (40, 196), (40, 150), (40, 149), (40, 148), (40, 147),
(39, 196), (39, 146), (39, 145), (39, 144), (39, 143), (38, 196),
(38, 142), (38, 141), (38, 140), (37, 197), (37, 139), (37, 138),
(37, 137), (37, 136), (36, 197), (36, 135), (36, 134), (36, 133)]

from collections import deque
from itertools import groupby
from operator import itemgetter

def collect(data):
    """Yield (key, smallest, largest) per key — Steven D'Aprano's variant.

    Sorts the data, then for each group takes the first value as the
    smallest and drains the rest of the group into a maxlen=1 deque, so
    the deque retains only the final (largest) pair without a
    Python-level loop.  A single-element group leaves the deque empty,
    and the IndexError fallback keeps smallest == largest.

    (Indentation restored — the archived post had it stripped.)
    """
    data = sorted(data)
    groups = groupby(data, itemgetter(0))
    d = deque([], maxlen=1)
    for key, subiter in groups:
        # Sorted group: first pair has the minimum value for this key.
        smallest = largest = next(subiter)[1]
        d.extend(subiter)  # maxlen=1 keeps only the last (largest) pair
        try:
            largest = d.pop()[1]
        except IndexError:
            pass  # group had exactly one element; largest stays == smallest
        yield (key, smallest, largest)

def time_dict():
    """Timing driver: fully consume minmax_dict over the module dataset `a`."""
    for item in minmax_dict(a):
        pass

def time_groupby():
    """Timing driver: fully consume minmax_groupby over the module dataset `a`."""
    for item in minmax_groupby(a):
        pass

def time_daprano():
    """Timing driver: fully consume collect over the module dataset `a`."""
    for item in collect(a):
        pass

$ python -m timeit -s 'from keyminmax import time_groupby as t' 't()'
10000 loops, best of 3: 68.6 usec per loop
$ python -m timeit -s 'from keyminmax import time_dict as t' 't()'
10000 loops, best of 3: 53.3 usec per loop
$ python -m timeit -s 'from keyminmax import time_daprano as t' 't()'
10000 loops, best of 3: 75.7 usec per loop

So yes, sorting seems to be slower even for small datasets.

```