This is an auto-generated version of Numpy Example List with added documentation from doc strings and arguments specification for methods and functions of Numpy 1.0.4.
Please do not edit this page directly. To update this page just follow the instructions.
Contents
- ...
- []
- T
- abs()
- absolute()
- accumulate
- add()
- alen()
- all()
- allclose()
- alltrue()
- alterdot()
- amax()
- amin()
- angle()
- any()
- append()
- apply_along_axis()
- apply_over_axes()
- arange()
- arccos()
- arccosh()
- arcsin()
- arcsinh()
- arctan()
- arctan2()
- arctanh()
- argmax()
- argmin()
- argsort()
- argwhere()
- around()
- array()
- array2string()
- array_equal()
- array_equiv()
- array_repr()
- array_split()
- array_str()
- arrayrange
- asanyarray()
- asarray()
- asarray_chkfinite()
- ascontiguousarray()
- asfarray()
- asfortranarray()
- asmatrix()
- asscalar()
- astype()
- atleast_1d()
- atleast_2d()
- atleast_3d()
- average()
- bartlett()
- base_repr()
- beta()
- binary_repr()
- bincount()
- binomial()
- bitwise_and()
- bitwise_not()
- bitwise_or()
- bitwise_xor()
- blackman()
- bmat()
- broadcast()
- byte_bounds()
- bytes()
- byteswap()
- c_
- cast[]()
- can_cast()
- ceil()
- choose()
- clip()
- column_stack()
- common_type()
- compare_chararrays()
- compress()
- concatenate()
- conj()
- conjugate()
- convolve()
- copy()
- corrcoef()
- correlate()
- cos()
- cosh()
- cov()
- cross()
- cumprod()
- cumproduct()
- cumsum()
- delete()
- deprecate()
- diag()
- diagflat()
- diagonal()
- diff()
- digitize()
- disp()
- divide()
- dot()
- dsplit()
- dstack()
- dtype() or .dtype
- dump()
- dumps()
- ediff1d()
- empty()
- empty_like()
- equal()
- exp()
- expand_dims()
- expm1()
- extract()
- eye()
- fabs()
- fastCopyAndTranspose()
- fft
- fftfreq
- fftshift
- fill()
- finfo()
- fix()
- flat
- flatnonzero()
- flatten()
- fliplr()
- flipud()
- floor()
- floor_divide()
- fmod()
- frexp()
- fromarrays()
- frombuffer()
- fromfile()
- fromfunction()
- fromiter()
- frompyfunc()
- fromrecords()
- fromstring()
- generic()
- get_array_wrap()
- get_include()
- get_numarray_include()
- get_numpy_include()
- get_printoptions()
- getbuffer()
- getbufsize()
- geterr()
- geterrcall()
- geterrobj()
- getfield()
- gradient()
- greater()
- greater_equal()
- gumbel()
- hamming()
- hanning()
- histogram()
- histogram2d()
- histogramdd()
- hsplit()
- hstack()
- hypot()
- i0()
- identity()
- ifft
- imag() or .imag
- index_exp
- indices()
- inf
- info() or .info
- inner()
- insert()
- int_asbuffer()
- interp()
- intersect1d()
- intersect1d_nu()
- inv()
- invert()
- iscomplex()
- iscomplexobj()
- isfinite()
- isfortran()
- isinf()
- isnan()
- isneginf()
- isposinf()
- isreal()
- isrealobj()
- isscalar()
- issctype()
- issubclass_()
- issubdtype()
- issubsctype()
- item()
- itemset()
- iterable()
- ix_()
- kaiser()
- kron()
- ldexp()
- left_shift()
- less()
- less_equal()
- lexsort()
- linspace()
- load()
- loads()
- loadtxt()
- log()
- log10()
- log1p()
- log2()
- logical_and()
- logical_not()
- logical_or()
- logical_xor()
- logspace()
- lstsq()
- mat()
- matrix()
- max()
- maximum()
- maximum_sctype()
- may_share_memory()
- mean()
- median()
- meshgrid()
- mgrid
- min()
- minimum()
- mintypecode()
- mod()
- modf()
- msort()
- multiply()
- nan
- nan_to_num()
- nanargmax()
- nanargmin()
- nanmax()
- nanmin()
- nansum()
- ndenumerate()
- ndim() or .ndim
- ndindex()
- negative()
- newaxis
- newbuffer()
- newbyteorder()
- nonzero()
- not_equal()
- obj2sctype()
- ogrid
- ones()
- ones_like()
- outer()
- permutation()
- piecewise()
- pinv()
- pkgload()
- place()
- poisson()
- poly()
- poly1d()
- polyadd()
- polyder()
- polydiv()
- polyfit()
- polyint()
- polymul()
- polysub()
- polyval()
- power()
- prod()
- product()
- ptp()
- put()
- putmask()
- r_
- randint()
- random_integers()
- random_sample()
- ranf()
- rank()
- ravel()
- real() or .real
- real_if_close()
- recarray()
- reciprocal()
- reduce
- remainder()
- repeat()
- require()
- reshape()
- resize()
- restoredot()
- right_shift()
- rint()
- roll()
- rollaxis()
- roots()
- rot90()
- round()
- round_()
- row_stack()
- s_
- sample()
- savetxt()
- sctype2char()
- searchsorted()
- seed()
- select()
- set_numeric_ops()
- set_printoptions()
- set_string_function()
- setbufsize()
- setdiff1d()
- seterr()
- seterrcall()
- seterrobj()
- setfield()
- setflags()
- setmember1d()
- setxor1d()
- shape() or .shape
- show_config()
- shuffle()
- sign()
- signbit()
- sin()
- sinc()
- sinh()
- size() or .size
- slice
- solve()
- sometrue()
- sort()
- sort_complex()
- source()
- split()
- sqrt()
- square()
- squeeze()
- standard_normal()
- std()
- subtract()
- sum()
- svd()
- swapaxes()
- take()
- tan()
- tanh()
- tensordot()
- test()
- tile()
- tofile()
- tolist()
- tostring()
- trace()
- transpose()
- trapz()
- tri()
- tril()
- trim_zeros()
- triu()
- true_divide()
- typeDict
- typename()
- uniform()
- union1d()
- unique()
- unique1d()
- unravel_index()
- unwrap()
- vander()
- var()
- vdot()
- vectorize()
- view()
- vonmises()
- vsplit()
- vstack()
- weibull()
- where()
- who()
- zeros()
- zeros_like()
...
>>> from numpy import *
>>> a = arange(12)
>>> a = a.reshape(3,2,2)
>>> print a
[[[ 0 1]
[ 2 3]]
[[ 4 5]
[ 6 7]]
[[ 8 9]
[10 11]]]
>>> a[...,0] # same as a[:,:,0]
array([[ 0, 2],
[ 4, 6],
[ 8, 10]])
>>> a[1:,...] # same as a[1:,:,:] or just a[1:]
array([[[ 4, 5],
[ 6, 7]],
[[ 8, 9],
[10, 11]]])
[]
>>> from numpy import *
>>> a = array([ [ 0, 1, 2, 3, 4],
... [10,11,12,13,14],
... [20,21,22,23,24],
... [30,31,32,33,34] ])
>>>
>>> a[0,0] # indices start by zero
0
>>> a[-1] # last row
array([30, 31, 32, 33, 34])
>>> a[1:3,1:4] # subarray
array([[11, 12, 13],
[21, 22, 23]])
>>>
>>> i = array([0,1,2,1]) # array of indices for the first axis
>>> j = array([1,2,3,4]) # array of indices for the second axis
>>> a[i,j]
array([ 1, 12, 23, 14])
>>>
>>> a[a<13] # boolean indexing
array([ 0, 1, 2, 3, 4, 10, 11, 12])
>>>
>>> b1 = array( [True,False,True,False] ) # boolean row selector
>>> a[b1,:]
array([[ 0, 1, 2, 3, 4],
[20, 21, 22, 23, 24]])
>>>
>>> b2 = array( [False,True,True,False,True] ) # boolean column selector
>>> a[:,b2]
array([[ 1, 2, 4],
[11, 12, 14],
[21, 22, 24],
[31, 32, 34]])
See also: ..., newaxis, ix_, indices, nonzero, where, slice
T
ndarray.T
Same as self.transpose() except self is returned for self.ndim < 2.
>>> from numpy import *
>>> x = array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T # shortcut for transpose()
array([[ 1., 3.],
[ 2., 4.]])
See also: transpose
abs()
numpy.abs(...)
y = absolute(x) takes |x| elementwise.
>>> from numpy import *
>>> abs(-1)
1
>>> abs(array([-1.2, 1.2]))
array([ 1.2, 1.2])
>>> abs(1.2+1j)
1.5620499351813308
absolute()
numpy.absolute(...)
y = absolute(x) takes |x| elementwise.
Synonym for abs()
See abs
accumulate
>>> from numpy import *
>>> add.accumulate(array([1.,2.,3.,4.])) # like reduce() but also gives intermediate results
array([ 1., 3., 6., 10.])
>>> array([1., 1.+2., (1.+2.)+3., ((1.+2.)+3.)+4.]) # this is what it computed
array([ 1., 3., 6., 10.])
>>> multiply.accumulate(array([1.,2.,3.,4.])) # works also with other operands
array([ 1., 2., 6., 24.])
>>> array([1., 1.*2., (1.*2.)*3., ((1.*2.)*3.)*4.]) # this is what it computed
array([ 1., 2., 6., 24.])
>>> add.accumulate(array([[1,2,3],[4,5,6]]), axis = 0) # accumulate every column separately
array([[1, 2, 3],
[5, 7, 9]])
>>> add.accumulate(array([[1,2,3],[4,5,6]]), axis = 1) # accumulate every row separately
array([[ 1, 3, 6],
[ 4, 9, 15]])
See also: reduce, cumprod, cumsum
add()
numpy.add(...)
y = add(x1,x2) adds the arguments elementwise.
>>> from numpy import *
>>> add(array([-1.2, 1.2]), array([1,3]))
array([-0.2, 4.2])
>>> array([-1.2, 1.2]) + array([1,3])
array([-0.2, 4.2])
alen()
numpy.alen(a)
Return the length of a Python object interpreted as an array of at least 1 dimension. Blah, Blah.
all()
numpy.all(a, axis=None, out=None)
Return true if all elements of x are true: *See Also*: `ndarray.all` : equivalent method `alltrue` : equivalent function
ndarray.all(...)
a.all(axis=None)
>>> from numpy import *
>>> a = array([True, False, True])
>>> a.all() # if all elements of a are True: return True; otherwise False
False
>>> all(a) # this form also exists
False
>>> a = array([1,2,3])
>>> all(a > 0) # equivalent to (a > 0).all()
True
See also: any, alltrue, sometrue
allclose()
numpy.allclose(a, b, rtol=1.0000000000000001e-005, atol=1e-008)
Returns True if all components of a and b are equal subject to given tolerances. The relative error rtol must be positive and << 1.0 The absolute error atol usually comes into play for those elements of b that are very small or zero; it says how small a must be also.
>>> allclose(array([1e10,1e-7]), array([1.00001e10,1e-8]))
False
>>> allclose(array([1e10,1e-8]), array([1.00001e10,1e-9]))
True
>>> allclose(array([1e10,1e-8]), array([1.0001e10,1e-9]))
False
alltrue()
numpy.alltrue(a, axis=None, out=None)
Perform a logical_and over the given axis. *See Also*: `ndarray.all` : equivalent method `all` : equivalent function
>>> from numpy import *
>>> b = array([True, False, True, True])
>>> alltrue(b)
False
>>> a = array([1, 5, 2, 7])
>>> alltrue(a >= 5)
False
alterdot()
numpy.alterdot(...)
alterdot() changes all dot functions to use blas.
amax()
numpy.amax(a, axis=None, out=None)
Return the maximum of 'a' along dimension axis. Blah, Blah.
amin()
numpy.amin(a, axis=None, out=None)
Return the minimum of a along dimension axis. Blah, Blah.
angle()
numpy.angle(z, deg=0)
Return the angle of the complex argument z.
>>> from numpy import *
>>> angle(1+1j) # in radians
0.78539816339744828
>>> angle(1+1j,deg=True) # in degrees
45.0
any()
numpy.any(a, axis=None, out=None)
Return true if any elements of x are true. *See Also*: `ndarray.any` : equivalent method
ndarray.any(...)
a.any(axis=None, out=None)
>>> from numpy import *
>>> a = array([True, False, True])
>>> a.any() # gives True if at least 1 element of a is True, otherwise False
True
>>> any(a) # this form also exists
True
>>> a = array([1,2,3])
>>> (a >= 1).any() # equivalent to any(a >= 1)
True
See also: all, alltrue, sometrue
append()
numpy.append(arr, values, axis=None)
Append to the end of an array along axis (ravel first if None)
>>> from numpy import *
>>> a = array([10,20,30,40])
>>> append(a,50)
array([10, 20, 30, 40, 50])
>>> append(a,[50,60])
array([10, 20, 30, 40, 50, 60])
>>> a = array([[10,20,30],[40,50,60],[70,80,90]])
>>> append(a,[[15,15,15]],axis=0)
array([[10, 20, 30],
[40, 50, 60],
[70, 80, 90],
[15, 15, 15]])
>>> append(a,[[15],[15],[15]],axis=1)
array([[10, 20, 30, 15],
[40, 50, 60, 15],
[70, 80, 90, 15]])
See also: insert, delete, concatenate
apply_along_axis()
numpy.apply_along_axis(func1d, axis, arr, *args)
Execute func1d(arr[i],*args) where func1d takes 1-D arrays and arr is an N-d array. i varies so as to apply the function along the given axis for each 1-d subarray in arr.
>>> from numpy import *
>>> def myfunc(a): # function works on 1d arrays, takes the average of the 1st and last element
... return (a[0]+a[-1])/2
...
>>> b = array([[1,2,3],[4,5,6],[7,8,9]])
>>> apply_along_axis(myfunc,0,b) # apply myfunc to each column (axis=0) of b
array([4, 5, 6])
>>> apply_along_axis(myfunc,1,b) # apply myfunc to each row (axis=1) of b
array([2, 5, 8])
See also: apply_over_axes, vectorize
apply_over_axes()
numpy.apply_over_axes(func, a, axes)
Apply a function repeatedly over multiple axes, keeping the same shape for the resulting array. func is called as res = func(a, axis). The result is assumed to be either the same shape as a or have one less dimension. This call is repeated for each axis in the axes sequence.
>>> from numpy import *
>>> a = arange(24).reshape(2,3,4) # a has 3 axes: 0,1 and 2
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> apply_over_axes(sum, a, [0,2]) # sum over all axes except axis=1, result has same shape as original
array([[[ 60],
[ 92],
[124]]])
See also: apply_along_axis, vectorize
arange()
numpy.arange(...)
arange([start,] stop[, step,], dtype=None) For integer arguments, just like range() except it returns an array whose type can be specified by the keyword argument dtype. If dtype is not specified, the type of the result is deduced from the type of the arguments. For floating point arguments, the length of the result is ceil((stop - start)/step). This rule may result in the last element of the result being greater than stop.
>>> from numpy import *
>>> arange(3)
array([0, 1, 2])
>>> arange(3.0)
array([ 0., 1., 2.])
>>> arange(3, dtype=float)
array([ 0., 1., 2.])
>>> arange(3,10) # start,stop
array([3, 4, 5, 6, 7, 8, 9])
>>> arange(3,10,2) # start,stop,step
array([3, 5, 7, 9])
See also: r_, linspace, logspace, mgrid, ogrid
arccos()
numpy.arccos(...)
y = arccos(x) inverse cosine elementwise.
>>> from numpy import *
>>> arccos(array([0, 1]))
array([ 1.57079633, 0. ])
See also: arcsin, arccosh, arctan, arctan2
arccosh()
numpy.arccosh(...)
y = arccosh(x) inverse hyperbolic cosine elementwise.
>>> from numpy import *
>>> arccosh(array([e, 10.0]))
array([ 1.65745445, 2.99322285])
See also: arccos, arcsinh, arctanh
arcsin()
numpy.arcsin(...)
y = arcsin(x) inverse sine elementwise.
>>> from numpy import *
>>> arcsin(array([0, 1]))
array([ 0. , 1.57079633])
See also: arccos, arctan, arcsinh
arcsinh()
numpy.arcsinh(...)
y = arcsinh(x) inverse hyperbolic sine elementwise.
>>> from numpy import *
>>> arcsinh(array([e, 10.0]))
array([ 1.72538256, 2.99822295])
See also: arccosh, arcsin, arctanh
arctan()
numpy.arctan(...)
y = arctan(x) inverse tangent elementwise.
>>> from numpy import *
>>> arctan(array([0, 1]))
array([ 0. , 0.78539816])
See also: arccos, arcsin, arctanh
arctan2()
numpy.arctan2(...)
y = arctan2(x1,x2) a safe and correct arctan(x1/x2)
>>> from numpy import *
>>> arctan2(array([0, 1]), array([1, 0]))
array([ 0. , 1.57079633])
See also: arcsin, arccos, arctan, arctanh
arctanh()
numpy.arctanh(...)
y = arctanh(x) inverse hyperbolic tangent elementwise.
>>> from numpy import *
>>> arctanh(array([0, -0.5]))
array([ 0. , -0.54930614])
See also: arcsinh, arccosh, arctan, arctan2
argmax()
numpy.argmax(a, axis=None)
Returns array of indices of the maximum values of along the given axis. *Parameters*: a : {array_like} Array to look in. axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis *Returns*: index_array : {integer_array} *Examples* >>> a = arange(6).reshape(2,3) >>> argmax(a) 5 >>> argmax(a,0) array([1, 1, 1]) >>> argmax(a,1) array([2, 2])
ndarray.argmax(...)
a.argmax(axis=None, out=None)
>>> from numpy import *
>>> a = array([10,20,30])
>>> maxindex = a.argmax()
>>> a[maxindex]
30
>>> a = array([[10,50,30],[60,20,40]])
>>> maxindex = a.argmax()
>>> maxindex
3
>>> a.ravel()[maxindex]
60
>>> a.argmax(axis=0) # for each column: the row index of the maximum value
array([1, 0, 1])
>>> a.argmax(axis=1) # for each row: the column index of the maximum value
array([1, 0])
>>> argmax(a) # also exists, slower, default is axis=-1
array([1, 0])
See also: argmin, nan, min, max, maximum, minimum
argmin()
numpy.argmin(a, axis=None)
Return array of indices to the minimum values along the given axis. *Parameters*: a : {array_like} Array to look in. axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis *Returns*: index_array : {integer_array} *Examples* >>> a = arange(6).reshape(2,3) >>> argmin(a) 0 >>> argmin(a,0) array([0, 0, 0]) >>> argmin(a,1) array([0, 0])
ndarray.argmin(...)
a.argmin(axis=None, out=None)
>>> from numpy import *
>>> a = array([10,20,30])
>>> minindex = a.argmin()
>>> a[minindex]
10
>>> a = array([[10,50,30],[60,20,40]])
>>> minindex = a.argmin()
>>> minindex
0
>>> a.ravel()[minindex]
10
>>> a.argmin(axis=0) # for each column: the row index of the minimum value
array([0, 1, 0])
>>> a.argmin(axis=1) # for each row: the column index of the minimum value
array([0, 1])
>>> argmin(a) # also exists, slower, default is axis=-1
array([0, 1])
See also: argmax, nan, min, max, maximum, minimum
argsort()
numpy.argsort(a, axis=-1, kind='quicksort', order=None)
Returns array of indices that index 'a' in sorted order. Perform an indirect sort along the given axis using the algorithm specified by the kind keyword. It returns an array of indices of the same shape as a that index data along the given axis in sorted order. *Parameters*: a : array Array to be sorted. axis : {None, int} optional Axis along which to sort. None indicates that the flattened array should be used. kind : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm to use. order : {None, list type}, optional When a is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. *Returns*: index_array : {integer_array} Array of indices that sort 'a' along the specified axis. *See Also*: `lexsort` : Indirect stable sort with multiple keys. `sort` : Inplace sort. *Notes* The various sorts are characterized by average speed, worst case performance, need for work space, and whether they are stable. A stable sort keeps items with the same key in the same relative order. The three available algorithms have the following properties: +-----------+-------+-------------+------------+-------+ | kind | speed | worst case | work space | stable| +===========+=======+=============+============+=======+ | quicksort | 1 | O(n^2) | 0 | no | +-----------+-------+-------------+------------+-------+ | mergesort | 2 | O(n*log(n)) | ~n/2 | yes | +-----------+-------+-------------+------------+-------+ | heapsort | 3 | O(n*log(n)) | 0 | no | +-----------+-------+-------------+------------+-------+ All the sort algorithms make temporary copies of the data when the sort is not along the last axis. Consequently, sorts along the last axis are faster and use less space than sorts along other axis.
ndarray.argsort(...)
a.argsort(axis=-1, kind='quicksort', order=None) -> indices Perform an indirect sort along the given axis using the algorithm specified by the kind keyword. It returns an array of indices of the same shape as 'a' that index data along the given axis in sorted order. :Parameters: axis : integer Axis to be indirectly sorted. None indicates that the flattened array should be used. Default is -1. kind : string Sorting algorithm to use. Possible values are 'quicksort', 'mergesort', or 'heapsort'. Default is 'quicksort'. order : list type or None When a is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. :Returns: indices : integer array Array of indices that sort 'a' along the specified axis. :SeeAlso: - lexsort : indirect stable sort with multiple keys - sort : inplace sort :Notes: ------ The various sorts are characterized by average speed, worst case performance, need for work space, and whether they are stable. A stable sort keeps items with the same key in the same relative order. The three available algorithms have the following properties: |------------------------------------------------------| | kind | speed | worst case | work space | stable| |------------------------------------------------------| |'quicksort'| 1 | O(n^2) | 0 | no | |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | |'heapsort' | 3 | O(n*log(n)) | 0 | no | |------------------------------------------------------| All the sort algorithms make temporary copies of the data when the sort is not along the last axis. Consequently, sorts along the last axis are faster and use less space than sorts along other axis.
argsort(axis=-1, kind="quicksort")
>>> from numpy import *
>>> a = array([2,0,8,4,1])
>>> ind = a.argsort() # indices of sorted array using quicksort (default)
>>> ind
array([1, 4, 0, 3, 2])
>>> a[ind] # same effect as a.sort()
array([0, 1, 2, 4, 8])
>>> ind = a.argsort(kind='merge') # algorithm options are 'quicksort', 'mergesort' and 'heapsort'
>>> a = array([[8,4,1],[2,0,9]])
>>> ind = a.argsort(axis=0) # sorts on columns. NOT the same as a.sort(axis=1)
>>> ind
array([[1, 1, 0],
[0, 0, 1]])
>>> a[ind,[[0,1,2],[0,1,2]]] # 2-D arrays need fancy indexing if you want to sort them.
array([[2, 0, 1],
[8, 4, 9]])
>>> ind = a.argsort(axis=1) # sort along rows. Can use a.argsort(axis=-1) for last axis.
>>> ind
array([[2, 1, 0],
[1, 0, 2]])
>>> a = ones(17)
>>> a.argsort() # quicksort doesn't preserve original order.
array([ 0, 14, 13, 12, 11, 10, 9, 15, 8, 6, 5, 4, 3, 2, 1, 7, 16])
>>> a.argsort(kind="mergesort") # mergesort preserves order when possible. It is a stable sort.
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
>>> ind = argsort(a) # there is a functional form
argwhere()
numpy.argwhere(a)
Return a 2-d array of shape N x a.ndim where each row is a sequence of indices into a. This sequence must be converted to a tuple in order to be used to index into a.
around()
numpy.around(a, decimals=0, out=None)
Round a to the given number of decimals. The real and imaginary parts of complex numbers are rounded separately. The result of rounding a float is a float so the type must be cast if integers are desired. Nothing is done if the input is an integer array and the decimals parameter has a value >= 0. *Parameters*: a : {array_like} Array containing numbers whose rounded values are desired. If a is not an array, a conversion is attempted. decimals : {0, int}, optional Number of decimal places to round to. When decimals is negative it specifies the number of positions to the left of the decimal point. out : {None, array}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. Numpy rounds floats to floats by default. *Returns*: rounded_array : {array} If out=None, returns a new array of the same type as a containing the rounded values, otherwise a reference to the output array is returned. *See Also*: `round_` : equivalent function `ndarray.round` : equivalent method *Notes* Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due to the inexact representation of decimal fractions in IEEE floating point and the errors introduced when scaling by powers of ten. *Examples* >>> around([.5, 1.5, 2.5, 3.5, 4.5]) array([ 0., 2., 2., 4., 4.]) >>> around([1,2,3,11], decimals=1) array([ 1, 2, 3, 11]) >>> around([1,2,3,11], decimals=-1) array([ 0, 0, 0, 10])
array()
numpy.array(...)
array(object, dtype=None, copy=1,order=None, subok=0,ndmin=0) Return an array from object with the specified date-type. Inputs: object - an array, any object exposing the array interface, any object whose __array__ method returns an array, or any (nested) sequence. dtype - The desired data-type for the array. If not given, then the type will be determined as the minimum type required to hold the objects in the sequence. This argument can only be used to 'upcast' the array. For downcasting, use the .astype(t) method. copy - If true, then force a copy. Otherwise a copy will only occur if __array__ returns a copy, obj is a nested sequence, or a copy is needed to satisfy any of the other requirements order - Specify the order of the array. If order is 'C', then the array will be in C-contiguous order (last-index varies the fastest). If order is 'FORTRAN', then the returned array will be in Fortran-contiguous order (first-index varies the fastest). If order is None, then the returned array may be in either C-, or Fortran-contiguous order or even discontiguous. subok - If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array ndmin - Specifies the minimum number of dimensions that the resulting array should have. 1's will be pre-pended to the shape as needed to meet this requirement.
numpy.core.records.array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, copy=True)
Construct a record array from a wide-variety of objects.
>>> from numpy import *
>>> array([1,2,3]) # conversion from a list to an array
array([1, 2, 3])
>>> array([1,2,3], dtype=complex) # output type is specified
array([ 1.+0.j, 2.+0.j, 3.+0.j])
>>> array(1, copy=0, subok=1, ndmin=1) # basically equivalent to atleast_1d
array([1])
>>> array(1, copy=0, subok=1, ndmin=2) # basically equivalent to atleast_2d
array([[1]])
>>> array(1, subok=1, ndmin=2) # like atleast_2d but always makes a copy
array([[1]])
>>> mydescriptor = {'names': ('gender','age','weight'), 'formats': ('S1', 'f4', 'f4')} # one way of specifying the data type
>>> a = array([('M',64.0,75.0),('F',25.0,60.0)], dtype=mydescriptor) # recarray
>>> print a
[('M', 64.0, 75.0) ('F', 25.0, 60.0)]
>>> a['weight']
array([ 75., 60.], dtype=float32)
>>> a.dtype.names # Access to the ordered field names
('gender','age','weight')
>>> mydescriptor = [('age',int16),('Nchildren',int8),('weight',float32)] # another way of specifying the data type
>>> a = array([(64,2,75.0),(25,0,60.0)], dtype=mydescriptor)
>>> a['Nchildren']
array([2, 0], dtype=int8)
>>> mydescriptor = dtype([('x', 'f4'),('y', 'f4'), # nested recarray
... ('nested', [('i', 'i2'),('j','i2')])])
>>> array([(1.0, 2.0, (1,2))], dtype=mydescriptor) # input one row
array([(1.0, 2.0, (1, 2))],
dtype=[('x', '<f4'), ('y', '<f4'), ('nested', [('i', '<i2'), ('j', '<i2')])])
>>> array([(1.0, 2.0, (1,2)), (2.1, 3.2, (3,2))], dtype=mydescriptor) # input two rows
array([(1.0, 2.0, (1, 2)), (2.0999999046325684, 3.2000000476837158, (3, 2))],
dtype=[('x', '<f4'), ('y', '<f4'), ('nested', [('i', '<i2'), ('j', '<i2')])])
>>> a=array([(1.0, 2.0, (1,2)), (2.1, 3.2, (3,2))], dtype=mydescriptor) # getting some columns
>>> a['x'] # a plain column
array([ 1. , 2.0999999], dtype=float32)
>>> a['nested'] # a nested column
array([(1, 2), (3, 2)],
dtype=[('i', '<i2'), ('j', '<i2')])
>>> a['nested']['i'] # a plain column inside a nested column
array([1, 3], dtype=int16)
array2string()
numpy.array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", style=<built-in function repr>)
Return a string representation of an array. :Parameters: a : ndarray Input array. max_line_width : int The maximum number of columns the string should span. Newline characters splits the string appropriately after array elements. precision : int Floating point precision. suppress_small : bool Represent very small numbers as zero. separator : string Inserted between elements. prefix : string An array is typically printed as 'prefix(' + array2string(a) + ')' The length of the prefix string is used to align the output correctly. style : function Examples -------- >>> x = N.array([1e-16,1,2,3]) >>> print array2string(x,precision=2,separator=',',suppress_small=True) [ 0., 1., 2., 3.]
array_equal()
numpy.array_equal(a1, a2)
array_equiv()
numpy.array_equiv(a1, a2)
array_repr()
numpy.array_repr(arr, max_line_width=None, precision=None, suppress_small=None)
array_split()
numpy.array_split(ary, indices_or_sections, axis=0)
Divide an array into a list of sub-arrays. Description: Divide ary into a list of sub-arrays along the specified axis. If indices_or_sections is an integer, ary is divided into that many equally sized arrays. If it is impossible to make an equal split, each of the leading arrays in the list have one additional member. If indices_or_sections is a list of sorted integers, its entries define the indexes where ary is split. Arguments: ary -- N-D array. Array to be divided into sub-arrays. indices_or_sections -- integer or 1D array. If integer, defines the number of (close to) equal sized sub-arrays. If it is a 1D array of sorted indices, it defines the indexes at which ary is divided. Any empty list results in a single sub-array equal to the original array. axis -- integer. default=0. Specifies the axis along which to split ary. Caveats: Currently, the default for axis is 0. This means a 2D array is divided into multiple groups of rows. This seems like the appropriate default,
>>> from numpy import *
>>> a = array([[1,2,3,4],[5,6,7,8]])
>>> array_split(a,2,axis=0) # split a in 2 parts. row-wise
[array([[1, 2, 3, 4]]), array([[5, 6, 7, 8]])]
>>> array_split(a,4,axis=1) # split a in 4 parts, column-wise
[array([[1],
[5]]), array([[2],
[6]]), array([[3],
[7]]), array([[4],
[8]])]
>>> array_split(a,3,axis=1) # impossible to split in 3 equal parts -> first part(s) are bigger
[array([[1, 2],
[5, 6]]), array([[3],
[7]]), array([[4],
[8]])]
>>> array_split(a,[2,3],axis=1) # make a split before the 2nd and the 3rd column
[array([[1, 2],
[5, 6]]), array([[3],
[7]]), array([[4],
[8]])]
See also: dsplit, hsplit, vsplit, split, concatenate
array_str()
numpy.array_str(a, max_line_width=None, precision=None, suppress_small=None)
arrayrange
Synonym for arange()
See arange
asanyarray()
numpy.asanyarray(a, dtype=None, order=None)
Returns a as an array, but will pass subclasses through.
>>> from numpy import *
>>> a = array([[1,2],[5,8]])
>>> a
array([[1, 2],
[5, 8]])
>>> m = matrix('1 2; 5 8')
>>> m
matrix([[1, 2],
[5, 8]])
>>> asanyarray(a) # the array a is returned unmodified
array([[1, 2],
[5, 8]])
>>> asanyarray(m) # the matrix m is returned unmodified
matrix([[1, 2],
[5, 8]])
>>> asanyarray([1,2,3]) # a new array is constructed from the list
array([1, 2, 3])
See also: asmatrix, asarray, array, mat
asarray()
numpy.asarray(a, dtype=None, order=None)
Returns a as an array. Unlike array(), no copy is performed if a is already an array. Subclasses are converted to base class ndarray.
>>> from numpy import *
>>> m = matrix('1 2; 5 8')
>>> m
matrix([[1, 2],
[5, 8]])
>>> a = asarray(m) # a is array type with same contents as m -- data is not copied
>>> a
array([[1, 2],
[5, 8]])
>>> m[0,0] = -99
>>> m
matrix([[-99, 2],
[ 5, 8]])
>>> a # no copy was made, so modifying m modifies a, and vice versa
array([[-99, 2],
[ 5, 8]])
See also: asmatrix, array, matrix, mat
asarray_chkfinite()
numpy.asarray_chkfinite(a)
Like asarray, but check that no NaNs or Infs are present.
ascontiguousarray()
numpy.ascontiguousarray(a, dtype=None)
Return 'a' as an array contiguous in memory (C order).
asfarray()
numpy.asfarray(a, dtype=<type 'numpy.float64'>)
asfarray(a,dtype=None) returns a as a float array.
asfortranarray()
numpy.asfortranarray(a, dtype=None)
Return 'a' as an array laid out in Fortran-order in memory.
asmatrix()
numpy.asmatrix(data, dtype=None)
Returns 'data' as a matrix. Unlike matrix(), no copy is performed if 'data' is already a matrix or array. Equivalent to: matrix(data, copy=False)
>>> from numpy import *
>>> a = array([[1,2],[5,8]])
>>> a
array([[1, 2],
[5, 8]])
>>> m = asmatrix(a) # m is matrix type with same contents as a -- data is not copied
>>> m
matrix([[1, 2],
[5, 8]])
>>> a[0,0] = -99
>>> a
array([[-99, 2],
[ 5, 8]])
>>> m # no copy was made so modifying a modifies m, and vice versa
matrix([[-99, 2],
[ 5, 8]])
See also: asarray, array, matrix, mat
asscalar()
numpy.asscalar(a)
Convert an array of size 1 to its scalar equivalent.
astype()
ndarray.astype(...)
a.astype(t) -> Copy of array cast to type t. t can be either a string representing a typecode, or a python type object of type int, float, or complex.
>>> from numpy import *
>>> x = array([1,2,3])
>>> y = x.astype(float64) # convert from int32 to float64
>>> type(y[0])
<type 'numpy.float64'>
>>> x.astype(None) # None implies converting to the default (float64)
array([1., 2., 3.])
See also: cast, dtype, ceil, floor, round_, fix
atleast_1d()
numpy.atleast_1d(*arys)
Force a sequence of arrays to each be at least 1D. Description: Force an array to be at least 1D. If an array is 0D, the array is converted to a single row of values. Otherwise, the array is unaltered. Arguments: *arys -- arrays to be converted to 1 or more dimensional array. Returns: input array converted to at least 1D array.
>>> from numpy import *
>>> a = 1 # 0-d array
>>> b = array([2,3]) # 1-d array
>>> c = array([[4,5],[6,7]]) # 2-d array
>>> d = arange(8).reshape(2,2,2) # 3-d array
>>> d
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> atleast_1d(a,b,c,d) # all output arrays have dim >= 1
[array([1]), array([2, 3]), array([[4, 5],
[6, 7]]), array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])]
See also: atleast_2d, atleast_3d, newaxis, expand_dims
atleast_2d()
numpy.atleast_2d(*arys)
Force a sequence of arrays to each be at least 2D. Description: Force an array to each be at least 2D. If the array is 0D or 1D, the array is converted to a single row of values. Otherwise, the array is unaltered. Arguments: arys -- arrays to be converted to 2 or more dimensional array. Returns: input array converted to at least 2D array.
>>> from numpy import *
>>> a = 1 # 0-d array
>>> b = array([2,3]) # 1-d array
>>> c = array([[4,5],[6,7]]) # 2-d array
>>> d = arange(8).reshape(2,2,2) # 3-d array
>>> d
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> atleast_2d(a,b,c,d) # all output arrays have dim >= 2
[array([[1]]), array([[2, 3]]), array([[4, 5],
[6, 7]]), array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])]
See also: atleast_1d, atleast_3d, newaxis, expand_dims
atleast_3d()
numpy.atleast_3d(*arys)
Force a sequence of arrays to each be at least 3D. Description: Force an array to each be at least 3D. If the array is 0D or 1D, the array is converted to a single 1xNx1 array of values where N is the original length of the array. If the array is 2D, the array is converted to a single MxNx1 array of values where MxN is the original shape of the array. Otherwise, the array is unaltered. Arguments: arys -- arrays to be converted to 3 or more dimensional array. Returns: input array converted to at least 3D array.
>>> from numpy import *
>>> a = 1 # 0-d array
>>> b = array([2,3]) # 1-d array
>>> c = array([[4,5],[6,7]]) # 2-d array
>>> d = arange(8).reshape(2,2,2) # 3-d array
>>> d
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> atleast_3d(a,b,c,d) # all output arrays have dim >= 3
[array([[[1]]]), array([[[2],
[3]]]), array([[[4],
[5]],
[[6],
[7]]]), array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])]
See also: atleast_1d, atleast_2d, newaxis, expand_dims
average()
numpy.average(a, axis=None, weights=None, returned=False)
Average the array over the given axis. If the axis is None, average over all dimensions of the array. Equivalent to a.mean(axis) and to a.sum(axis) / size(a, axis) If weights are given, result is: sum(a * weights,axis) / sum(weights,axis), where the weights must have a's shape or be 1D with length the size of a in the given axis. Integer weights are converted to Float. Not specifying weights is equivalent to specifying weights that are all 1. If 'returned' is True, return a tuple: the result and the sum of the weights or count of values. The shape of these two results will be the same. Raises ZeroDivisionError if appropriate. (The version in MA does not -- it returns masked values).
>>> from numpy import *
>>> a = array([1,2,3,4,5])
>>> w = array([0.1, 0.2, 0.5, 0.2, 0.2]) # weights, not necessarily normalized
>>> average(a) # plain mean value
3.0
>>> average(a,weights=w) # weighted average
3.1666666666666665
>>> average(a,weights=w,returned=True) # output = weighted average, sum of weights
(3.1666666666666665, 1.2)
bartlett()
numpy.bartlett(M)
bartlett(M) returns the M-point Bartlett window.
base_repr()
numpy.base_repr(number, base=2, padding=0)
Return the representation of a number in the given base. Base can't be larger than 36.
beta()
numpy.random.beta(...)
Beta distribution over [0, 1]. beta(a, b, size=None) -> random values
>>> from numpy import *
>>> from numpy.random import *
>>> beta(a=1,b=10,size=(2,2)) # Beta distribution alpha=1, beta=10
array([[ 0.02571091, 0.04973536],
[ 0.04887027, 0.02382052]])
See also: seed
binary_repr()
numpy.binary_repr(num, width=None)
Return the binary representation of the input number as a string. This is equivalent to using base_repr with base 2, but about 25x faster. For negative numbers, if width is not given, a - sign is added to the front. If width is given, the two's complement of the number is returned, with respect to that width.
>>> from numpy import *
>>> a = 25
>>> binary_repr(a) # binary representation of 25
'11001'
>>> b = float_(pi) # numpy float has extra functionality ...
>>> b.nbytes # ... like the number of bytes it takes
8
>>> binary_repr(b.view('u8')) # view float number as an 8 byte integer, then get binary bitstring
'100000000001001001000011111101101010100010001000010110100011000'
bincount()
numpy.bincount(...)
bincount(x,weights=None) Return the number of occurrences of each value in x. x must be a list of non-negative integers. The output, b[i], represents the number of times that i is found in x. If weights is specified, every occurrence of i at a position p contributes weights[p] instead of 1. See also: histogram, digitize, unique.
>>> from numpy import *
>>> a = array([1,1,1,1,2,2,4,4,5,6,6,6]) # doesn't need to be sorted
>>> bincount(a) # 0 occurs 0 times, 1 occurs 4 times, 2 occurs twice, 3 occurs 0 times, ...
array([0, 4, 2, 0, 2, 1, 3])
>>> a = array([5,4,4,2,2])
>>> w = array([0.1, 0.2, 0.1, 0.3, 0.5])
>>> bincount(a) # 0 & 1 don't occur, 2 occurs twice, 3 doesn't occur, 4 occurs twice, 5 once
array([0, 0, 2, 0, 2, 1])
>>> bincount(a, weights=w)
array([ 0. , 0. , 0.8, 0. , 0.3, 0.1])
>>> # 0 occurs 0 times -> result[0] = 0
>>> # 1 occurs 0 times -> result[1] = 0
>>> # 2 occurs at indices 3 & 4 -> result[2] = w[3] + w[4]
>>> # 3 occurs 0 times -> result[3] = 0
>>> # 4 occurs at indices 1 & 2 -> result[4] = w[1] + w[2]
>>> # 5 occurs at index 0 -> result[5] = w[0]
binomial()
numpy.random.binomial(...)
Binomial distribution of n trials and p probability of success. binomial(n, p, size=None) -> random values
>>> from numpy import *
>>> from numpy.random import *
>>> binomial(n=100,p=0.5,size=(2,3)) # binomial distribution n trials, p= success probability
array([[38, 50, 53],
[56, 48, 54]])
>>> from pylab import * # histogram plot example
>>> hist(binomial(100,0.5,(1000)), 20)
See also: random_sample, uniform, standard_normal, seed
bitwise_and()
numpy.bitwise_and(...)
y = bitwise_and(x1,x2) computes x1 & x2 elementwise.
>>> from numpy import *
>>> bitwise_and(array([2,5,255]), array([4,4,4]))
array([0, 4, 4])
>>> bitwise_and(array([2,5,255,2147483647L],dtype=int32), array([4,4,4,2147483647L],dtype=int32))
array([ 0, 4, 4, 2147483647])
See also: bitwise_or, bitwise_xor, logical_and
bitwise_not()
numpy.bitwise_not(...)
y = invert(x) computes ~x (bit inversion) elementwise.
bitwise_or()
numpy.bitwise_or(...)
y = bitwise_or(x1,x2) computes x1 | x2 elementwise.
>>> from numpy import *
>>> bitwise_or(array([2,5,255]), array([4,4,4]))
array([ 6, 5, 255])
>>> bitwise_or(array([2,5,255,2147483647L],dtype=int32), array([4,4,4,2147483647L],dtype=int32))
array([ 6, 5, 255, 2147483647])
See also: bitwise_and, bitwise_xor, logical_or
bitwise_xor()
numpy.bitwise_xor(...)
y = bitwise_xor(x1,x2) computes x1 ^ x2 elementwise.
>>> from numpy import *
>>> bitwise_xor(array([2,5,255]), array([4,4,4]))
array([ 6, 1, 251])
>>> bitwise_xor(array([2,5,255,2147483647L],dtype=int32), array([4,4,4,2147483647L],dtype=int32))
array([ 6, 1, 251, 0])
See also: bitwise_and, bitwise_or, logical_xor
blackman()
numpy.blackman(M)
blackman(M) returns the M-point Blackman window.
bmat()
numpy.bmat(obj, ldict=None, gdict=None)
Build a matrix object from string, nested sequence, or array. Ex: F = bmat('A, B; C, D') F = bmat([[A,B],[C,D]]) F = bmat(r_[c_[A,B],c_[C,D]]) all produce the same Matrix Object [ A B ] [ C D ] if A, B, C, and D are appropriately shaped 2-d arrays.
>>> from numpy import *
>>> a = mat('1 2; 3 4')
>>> b = mat('5 6; 7 8')
>>> bmat('a b; b a') # all elements must be existing symbols
matrix([[1, 2, 5, 6],
[3, 4, 7, 8],
[5, 6, 1, 2],
[7, 8, 3, 4]])
See also: mat
broadcast()
numpy.broadcast(...)
>>> from numpy import *
>>> a = array([[1,2],[3,4]])
>>> b = array([5,6])
>>> c = broadcast(a,b)
>>> c.nd # the number of dimensions in the broadcasted result
2
>>> c.shape # the shape of the broadcasted result
(2, 2)
>>> c.size # total size of the broadcasted result
4
>>> for value in c: print value
...
(1, 5)
(2, 6)
(3, 5)
(4, 6)
>>> c.reset() # reset the iterator to the beginning
>>> c.next() # next element
(1, 5)
See also: ndenumerate, ndindex, flat
byte_bounds()
numpy.byte_bounds(a)
(low, high) are pointers to the end-points of an array: low is the first byte; high is just *past* the last byte. If the array is not single-segment, then it may not actually use every byte between these bounds. The array provided must conform to the Python-side of the array interface.
bytes()
numpy.random.bytes(...)
Return random bytes. bytes(length) -> str
>>> from numpy import *
>>> from numpy.random import bytes
>>> print repr(bytes(5)) # string of 5 random bytes
'o\x07\x9f\xdf\xdf'
>>> print repr(bytes(5)) # another string of 5 random bytes
'\x98\xc9KD\xe0'
See also: shuffle, permutation, seed
byteswap()
ndarray.byteswap(...)
a.byteswap(False) -> View or copy. Swap the bytes in the array. Return the byteswapped array. If the first argument is True, byteswap in-place and return a reference to self.
c_
numpy.c_
Translates slice objects to concatenation along the second axis.
>>> from numpy import *
>>> c_[1:5] # for single ranges, c_ works like r_
array([1, 2, 3, 4])
>>> c_[1:5,2:6] # for comma separated values, c_ stacks column-wise
array([[1, 2],
[2, 3],
[3, 4],
[4, 5]])
>>> a = array([[1,2,3],[4,5,6]])
>>> c_[a,a] # concatenation along last (default) axis (column-wise, that's why it's called c_)
array([[1, 2, 3, 1, 2, 3],
[4, 5, 6, 4, 5, 6]])
>>> c_['0',a,a] # concatenation along 1st axis, equivalent to r_[a,a]
array([[1, 2, 3],
[4, 5, 6],
[1, 2, 3],
[4, 5, 6]])
See also: r_, hstack, vstack, column_stack, concatenate, bmat, s_
cast[]()
>>> from numpy import *
>>> x = arange(3)
>>> x.dtype
dtype('int32')
>>> cast['int64'](x)
array([0, 1, 2], dtype=int64)
>>> cast['uint'](x)
array([0, 1, 2], dtype=uint32)
>>> cast[float128](x)
array([0.0, 1.0, 2.0], dtype=float128)
>>> cast.keys() # list dtype cast possibilities
<snip>
can_cast()
numpy.can_cast(...)
can_cast(from=d1, to=d2) Returns True if data type d1 can be cast to data type d2 without losing precision.
ceil()
numpy.ceil(...)
y = ceil(x) elementwise smallest integer >= x.
>>> from numpy import *
>>> a = array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7])
>>> ceil(a) # nearest integers greater-than or equal to a
array([-1., -1., -0., 1., 2., 2.])
See also: floor, round_, fix, astype
choose()
numpy.choose(a, choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices. Given an array of integers in {0, 1, ..., n-1} and a set of n choice arrays, this function will create a new array that merges each of the choice arrays. Where a value in `a` is i, then the new array will have the value that choices[i] contains in the same place. *Parameters*: a : int array This array must contain integers in [0, n-1], where n is the number of choices. choices : sequence of arrays Each of the choice arrays should have the same shape as the index array. out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. 'raise' : raise an error 'wrap' : wrap around 'clip' : clip to the range *Returns*: merged_array : array *See Also*: `ndarray.choose` : equivalent method *Examples* >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], ... [20, 21, 22, 23], [30, 31, 32, 33]] >>> choose([2, 3, 1, 0], choices) array([20, 31, 12, 3]) >>> choose([2, 4, 1, 0], choices, mode='clip') array([20, 31, 12, 3]) >>> choose([2, 4, 1, 0], choices, mode='wrap') array([20, 1, 12, 3])
ndarray.choose(...)
a.choose(b0, b1, ..., bn, out=None, mode='raise') Return an array that merges the b_i arrays together using 'a' as the index. The b_i arrays and 'a' must all be broadcastable to the same shape. The output at a particular position is the input array b_i at that position depending on the value of 'a' at that position. Therefore, 'a' must be an integer array with entries from 0 to n.
>>> from numpy import *
>>> choice0 =array([10,12,14,16]) # selector and choice arrays must be equally sized
>>> choice1 =array([20,22,24,26])
>>> choice2 =array([30,32,34,36])
>>> selector = array([0,0,2,1]) # selector can only contain integers in range(number_of_choice_arrays)
>>> selector.choose(choice0,choice1,choice2)
array([10, 12, 34, 26])
>>> a = arange(4)
>>> choose(a >= 2, (choice0, choice1)) # separate function also exists
array([10, 12, 24, 26])
See also: compress, take, where, select
clip()
numpy.clip(a, a_min, a_max)
Limit the values of a to [a_min, a_max]. Equivalent to: a[a < a_min] = a_min; a[a > a_max] = a_max.
ndarray.clip(...)
a.clip(min=, max=, out=None)
>>> from numpy import *
>>> a = array([5,15,25,3,13])
>>> a.clip(min=10,max=20)
array([10, 15, 20, 10, 13])
>>> clip(a,10,20) # this syntax also exists
column_stack()
numpy.column_stack(tup)
Stack 1D arrays as columns into a 2D array Description: Take a sequence of 1D arrays and stack them as columns to make a single 2D array. All arrays in the sequence must have the same first dimension. 2D arrays are stacked as-is, just like with hstack. 1D arrays are turned into 2D columns first. Arguments: tup -- sequence of 1D or 2D arrays. All arrays must have the same first dimension. Examples: >>> import numpy >>> a = array((1,2,3)) >>> b = array((2,3,4)) >>> numpy.column_stack((a,b)) array([[1, 2], [2, 3], [3, 4]])
>>> from numpy import *
>>> a = array([1,2])
>>> b = array([3,4])
>>> c = array([5,6])
>>> column_stack((a,b,c)) # a,b,c are 1-d arrays with equal length
array([[1, 3, 5],
[2, 4, 6]])
See also: concatenate, dstack, hstack, vstack, c_
common_type()
numpy.common_type(*arrays)
Given a sequence of arrays as arguments, return the best inexact scalar type which is "most" common amongst them. The return type will always be a inexact scalar type, even if all the arrays are integer arrays.
compare_chararrays()
numpy.compare_chararrays(...)
compress()
numpy.compress(condition, a, axis=None, out=None)
Return a where condition is true. Equivalent to a[condition].
ndarray.compress(...)
a.compress(condition=, axis=None, out=None)
>>> from numpy import *
>>> a = array([10, 20, 30, 40])
>>> condition = (a > 15) & (a < 35)
>>> condition
array([False, True, True, False], dtype=bool)
>>> a.compress(condition)
array([20, 30])
>>> a[condition] # same effect
array([20, 30])
>>> compress(a >= 30, a) # this form also exists
array([30, 40])
>>> b = array([[10,20,30],[40,50,60]])
>>> b.compress(b.ravel() >= 22)
array([30, 40, 50, 60])
>>> x = array([3,1,2])
>>> y = array([50, 101])
>>> b.compress(x >= 2, axis=1) # illustrates the use of the axis keyword
array([[10, 30],
[40, 60]])
>>> b.compress(y >= 100, axis=0)
array([[40, 50, 60]])
See also: choose, take, where, trim_zeros, unique
concatenate()
numpy.concatenate(...)
concatenate((a1, a2, ...), axis=0) Join arrays together. The tuple of sequences (a1, a2, ...) are joined along the given axis (default is the first one) into a single numpy array. Example: >>> concatenate( ([0,1,2], [5,6,7]) ) array([0, 1, 2, 5, 6, 7])
>>> from numpy import *
>>> x = array([[1,2],[3,4]])
>>> y = array([[5,6],[7,8]])
>>> concatenate((x,y)) # default is axis=0
array([[1, 2],
[3, 4],
[5, 6],
[7, 8]])
>>> concatenate((x,y),axis=1)
array([[1, 2, 5, 6],
[3, 4, 7, 8]])
See also: append, column_stack, dstack, hstack, vstack, array_split
conj()
numpy.conj(...)
y = conjugate(x) takes the conjugate of x elementwise.
ndarray.conj(...)
a.conj()
Synonym for conjugate()
See conjugate()
conjugate()
numpy.conjugate(...)
y = conjugate(x) takes the conjugate of x elementwise.
ndarray.conjugate(...)
a.conjugate()
>>> a = array([1+2j,3-4j])
>>> a.conj() # .conj() and .conjugate() are the same
array([ 1.-2.j, 3.+4.j])
>>> a.conjugate()
array([ 1.-2.j, 3.+4.j])
>>> conj(a) # is also possible
>>> conjugate(a) # is also possible
See also: vdot
convolve()
numpy.convolve(a, v, mode='full')
Returns the discrete, linear convolution of 1-D sequences a and v; mode can be 'valid', 'same', or 'full' to specify size of the resulting sequence.
copy()
numpy.copy(a)
Return an array copy of the given object.
ndarray.copy(...)
a.copy(|order) -> copy, possibly with different order. Return a copy of the array. Argument: order -- Order of returned copy (default 'C') If order is 'C' (False) then the result is contiguous (default). If order is 'Fortran' (True) then the result has fortran order. If order is 'Any' (None) then the result has fortran order only if m is already in fortran order.;
>>> from numpy import *
>>> a = array([1,2,3])
>>> a
array([1, 2, 3])
>>> b = a # b is a reference to a
>>> b[1] = 4
>>> a
array([1, 4, 3])
>>> a = array([1,2,3])
>>> b = a.copy() # b is now an independent copy of a
>>> b[1] = 4
>>> a
array([1, 2, 3])
>>> b
array([1, 4, 3])
See also: view
corrcoef()
numpy.corrcoef(x, y=None, rowvar=1, bias=0)
The correlation coefficients
>>> from numpy import *
>>> T = array([1.3, 4.5, 2.8, 3.9]) # temperature measurements
>>> P = array([2.7, 8.7, 4.7, 8.2]) # corresponding pressure measurements
>>> print corrcoef([T,P]) # correlation matrix of temperature and pressure
[[ 1. 0.98062258]
[ 0.98062258 1. ]]
>>> rho = array([8.5, 5.2, 6.9, 6.5]) # corresponding density measurements
>>> data = column_stack([T,P,rho])
>>> print corrcoef([T,P,rho]) # correlation matrix of T,P and rho
[[ 1. 0.98062258 -0.97090288]
[ 0.98062258 1. -0.91538464]
[-0.97090288 -0.91538464 1. ]]
correlate()
numpy.correlate(a, v, mode='valid')
Return the discrete, linear correlation of 1-D sequences a and v; mode can be 'valid', 'same', or 'full' to specify the size of the resulting sequence
cos()
numpy.cos(...)
y = cos(x) cosine elementwise.
>>> cos(array([0, pi/2, pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
cosh()
numpy.cosh(...)
y = cosh(x) hyperbolic cosine elementwise.
cov()
numpy.cov(m, y=None, rowvar=1, bias=0)
Estimate the covariance matrix. If m is a vector, return the variance. For matrices return the covariance matrix. If y is given it is treated as an additional (set of) variable(s). Normalization is by (N-1) where N is the number of observations (unbiased estimate). If bias is 1 then normalization is by N. If rowvar is non-zero (default), then each row is a variable with observations in the columns, otherwise each column is a variable and the observations are in the rows.
>>> from numpy import *
>>> x = array([1., 3., 8., 9.])
>>> variance = cov(x) # normalized by N-1
>>> variance = cov(x, bias=1) # normalized by N
>>> T = array([1.3, 4.5, 2.8, 3.9]) # temperature measurements
>>> P = array([2.7, 8.7, 4.7, 8.2]) # corresponding pressure measurements
>>> cov(T,P) # covariance between temperature and pressure
3.9541666666666657
>>> rho = array([8.5, 5.2, 6.9, 6.5]) # corresponding density measurements
>>> data = column_stack([T,P,rho])
>>> print cov(data) # covariance matrix of T,P and rho
[[ 1.97583333 3.95416667 -1.85583333]
[ 3.95416667 8.22916667 -3.57083333]
[-1.85583333 -3.57083333 1.84916667]]
cross()
numpy.cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None)
Return the cross product of two (arrays of) vectors. The cross product is performed over the last axis of a and b by default, and can handle axes with dimensions 2 and 3. For a dimension of 2, the z-component of the equivalent three-dimensional cross product is returned.
>>> from numpy import *
>>> x = array([1,2,3])
>>> y = array([4,5,6])
>>> cross(x,y) # vector cross-product
array([-3, 6, -3])
cumprod()
numpy.cumprod(a, axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis. If axis is None, the product is taken over the flattened array.
ndarray.cumprod(...)
a.cumprod(axis=None, dtype=None)
>>> from numpy import *
>>> a = array([1,2,3])
>>> a.cumprod() # total product 1*2*3 = 6, and intermediate results 1, 1*2
array([1, 2, 6])
>>> cumprod(a) # also exists
array([1, 2, 6])
>>> a = array([[1,2,3],[4,5,6]])
>>> a.cumprod(dtype=float) # specify type of output
array([1., 2., 6., 24., 120., 720.])
>>> a.cumprod(axis=0) # for each of the 3 columns: product and intermediate results
array([[ 1, 2, 3],
[ 4, 10, 18]])
>>> a.cumprod(axis=1) # for each of the two rows: product and intermediate results
array([[ 1, 2, 6],
[ 4, 20, 120]])
See also: accumulate, prod, cumsum
cumproduct()
numpy.cumproduct(a, axis=None, dtype=None, out=None)
Return the cumulative product over the given axis. If axis is None, the product is taken over the flattened array.
cumsum()
numpy.cumsum(a, axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis. If axis is None, the sum is taken over the flattened array.
ndarray.cumsum(...)
a.cumsum(axis=None, dtype=None, out=None)
>>> from numpy import *
>>> a = array([1,2,3]) # cumulative sum = intermediate summing results & total sum
>>> a.cumsum()
array([1, 3, 6])
>>> cumsum(a) # also exists
array([1, 3, 6])
>>> a = array([[1,2,3],[4,5,6]])
>>> a.cumsum(dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> a.cumsum(axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> a.cumsum(axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
See also: accumulate, sum, cumprod
delete()
numpy.delete(arr, obj, axis=None)
Return a new array with sub-arrays along an axis deleted. Return a new array with the sub-arrays (i.e. rows or columns) deleted along the given axis as specified by obj obj may be a slice_object (s_[3:5:2]) or an integer or an array of integers indicated which sub-arrays to remove. If axis is None, then ravel the array first. Example: >>> arr = [[3,4,5], ... [1,2,3], ... [6,7,8]] >>> delete(arr, 1, 1) array([[3, 5], [1, 3], [6, 8]]) >>> delete(arr, 1, 0) array([[3, 4, 5], [6, 7, 8]])
>>> from numpy import *
>>> a = array([0, 10, 20, 30, 40])
>>> delete(a, [2,4]) # remove a[2] and a[4]
array([ 0, 10, 30])
>>> a = arange(16).reshape(4,4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> delete(a, s_[1:3], axis=0) # remove rows 1 and 2
array([[ 0, 1, 2, 3],
[12, 13, 14, 15]])
>>> delete(a, s_[1:3], axis=1) # remove columns 1 and 2
array([[ 0, 3],
[ 4, 7],
[ 8, 11],
[12, 15]])
deprecate()
numpy.deprecate(func, oldname, newname)
diag()
numpy.diag(v, k=0)
returns a copy of the k-th diagonal if v is a 2-d array or returns a 2-d array with v as the k-th diagonal if v is a 1-d array.
>>> from numpy import *
>>> a = arange(12).reshape(4,3)
>>> print a
[[ 0 1 2]
[ 3 4 5]
[ 6 7 8]
[ 9 10 11]]
>>> print diag(a,k=0)
[0 4 8]
>>> print diag(a,k=1)
[1 5]
>>> print diag(array([1,4,5]),k=0)
[[1 0 0]
[0 4 0]
[0 0 5]]
>>> print diag(array([1,4,5]),k=1)
[[0 1 0 0]
[0 0 4 0]
[0 0 0 5]
[0 0 0 0]]
See also: diagonal, diagflat, trace
diagflat()
numpy.diagflat(v, k=0)
>>> from numpy import *
>>> x = array([[5,6],[7,8]])
>>> diagflat(x) # flatten x, then put elements on diagonal
array([[5, 0, 0, 0],
[0, 6, 0, 0],
[0, 0, 7, 0],
[0, 0, 0, 8]])
See also: diag, diagonal, flatten
diagonal()
numpy.diagonal(a, offset=0, axis1=0, axis2=1)
Return specified diagonals. If a is 2-d, returns the diagonal of self with the given offset, i.e., the collection of elements of the form a[i,i+offset]. If a has more than two dimensions, then the axes specified by axis1 and axis2 are used to determine the 2-d subarray whose diagonal is returned. The shape of the resulting array can be determined by removing axis1 and axis2 and appending an index to the right equal to the size of the resulting diagonals. *Parameters*: a : {array_like} Array from which the diagonals are taken. offset : {0, integer}, optional Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to main diagonal. axis1 : {0, integer}, optional Axis to be used as the first axis of the 2-d subarrays from which the diagonals should be taken. Defaults to first axis. axis2 : {1, integer}, optional Axis to be used as the second axis of the 2-d subarrays from which the diagonals should be taken. Defaults to second axis. *Returns*: array_of_diagonals : array of same type as a If a is 2-d, a 1-d array containing the diagonal is returned. If a has larger dimensions, then an array of diagonals is returned. *See Also*: `diag` : Matlab workalike for 1-d and 2-d arrays. `diagflat` : Create diagonal arrays. `trace` : Sum along diagonals. *Examples* >>> a = arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> a.diagonal() array([0, 3]) >>> a.diagonal(1) array([1]) >>> a = arange(8).reshape(2,2,2) >>> a array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> a.diagonal(0,-2,-1) array([[0, 3], [4, 7]])
ndarray.diagonal(...)
a.diagonal(offset=0, axis1=0, axis2=1) -> diagonals If a is 2-d, return the diagonal of self with the given offset, i.e., the collection of elements of the form a[i,i+offset]. If a is n-d with n > 2, then the axes specified by axis1 and axis2 are used to determine the 2-d subarray whose diagonal is returned. The shape of the resulting array can be determined by removing axis1 and axis2 and appending an index to the right equal to the size of the resulting diagonals. :Parameters: offset : integer Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to main diagonal. axis1 : integer Axis to be used as the first axis of the 2-d subarrays from which the diagonals should be taken. Defaults to first index. axis2 : integer Axis to be used as the second axis of the 2-d subarrays from which the diagonals should be taken. Defaults to second index. :Returns: array_of_diagonals : same type as original array If a is 2-d, then a 1-d array containing the diagonal is returned. If a is n-d, n > 2, then an array of diagonals is returned. :SeeAlso: - diag : matlab workalike for 1-d and 2-d arrays. - diagflat : creates diagonal arrays - trace : sum along diagonals Examples -------- >>> a = arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> a.diagonal() array([0, 3]) >>> a.diagonal(1) array([1]) >>> a = arange(8).reshape(2,2,2) >>> a array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> a.diagonal(0,-2,-1) array([[0, 3], [4, 7]])
>>> from numpy import *
>>> a = arange(12).reshape(3,4)
>>> print a
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
>>> a.diagonal()
array([ 0, 5, 10])
>>> a.diagonal(offset=1)
array([ 1, 6, 11])
>>> diagonal(a) # Also this form exists
array([ 0, 5, 10])
See also: diag, diagflat, trace
diff()
numpy.diff(a, n=1, axis=-1)
Calculate the nth order discrete difference along given axis.
>>> from numpy import *
>>> x = array([0,1,3,9,5,10])
>>> diff(x) # 1st-order differences between the elements of x
array([ 1, 2, 6, -4, 5])
>>> diff(x,n=2) # 2nd-order differences, equivalent to diff(diff(x))
array([ 1, 4, -10, 9])
>>> x = array([[1,3,6,10],[0,5,6,8]])
>>> diff(x) # 1st-order differences between the columns (default: axis=-1)
array([[2, 3, 4],
[5, 1, 2]])
>>> diff(x,axis=0) # 1st-order difference between the rows
array([[-1, 2, 0, -2]])
digitize()
numpy.digitize(...)
digitize(x,bins) Return the index of the bin to which each value of x belongs. Each index i returned is such that bins[i-1] <= x < bins[i] if bins is monotonically increasing, or bins [i-1] > x >= bins[i] if bins is monotonically decreasing. Beyond the bounds of the bins 0 or len(bins) is returned as appropriate.
>>> from numpy import *
>>> x = array([0.2, 6.4, 3.0, 1.6])
>>> bins = array([0.0, 1.0, 2.5, 4.0, 10.0]) # monotonically increasing
>>> d = digitize(x,bins) # in which bin falls each value of x?
>>> d
array([1, 4, 3, 2])
>>> for n in range(len(x)):
... print bins[d[n]-1], "<=", x[n], "<", bins[d[n]]
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
disp()
numpy.disp(mesg, device=None, linefeed=True)
Display a message to the given device (default is sys.stdout) with or without a linefeed.
divide()
numpy.divide(...)
y = divide(x1,x2) divides the arguments elementwise.
dot()
numpy.dot(...)
dot(a,b) Returns the dot product of a and b for arrays of floating point types. Like the generic numpy equivalent the product sum is over the last dimension of a and the second-to-last dimension of b. NB: The first argument is not conjugated.
>>> from numpy import *
>>> x = array([[1,2,3],[4,5,6]])
>>> x.shape
(2, 3)
>>> y = array([[1,2],[3,4],[5,6]])
>>> y.shape
(3, 2)
>>> dot(x,y) # matrix multiplication (2,3) x (3,2) -> (2,2)
array([[22, 28],
[49, 64]])
>>>
>>> import numpy
>>> if id(dot) == id(numpy.core.multiarray.dot): # A way to know if you use fast blas/lapack or not.
... print "Not using blas/lapack!"
See also: vdot, inner, multiply
dsplit()
numpy.dsplit(ary, indices_or_sections)
Split ary into multiple sub-arrays along the 3rd axis (depth) Description: Split a single array into multiple sub arrays. The array is divided into groups along the 3rd axis. If indices_or_sections is an integer, ary is divided into that many equally sized sub arrays. If it is impossible to make the sub-arrays equally sized, the operation throws a ValueError exception. See array_split and split for other options on indices_or_sections. Arguments: ary -- N-D array. Array to be divided into sub-arrays. indices_or_sections -- integer or 1D array. If integer, defines the number of (close to) equal sized sub-arrays. If it is a 1D array of sorted indices, it defines the indexes at which ary is divided. Any empty list results in a single sub-array equal to the original array. Returns: sequence of sub-arrays. The returned arrays have the same number of dimensions as the input array. Caveats: See vsplit caveats. Related: dstack, split, array_split, hsplit, vsplit. Examples: >>> a = array([[[1,2,3,4],[1,2,3,4]]]) >>> dsplit(a,2) [array([[[1, 2], [1, 2]]]), array([[[3, 4], [3, 4]]])]
>>> from numpy import *
>>> a = array([[1,2],[3,4]])
>>> b = dstack((a,a,a,a))
>>> b.shape # stacking in depth: for k in (0,..,3): b[:,:,k] = a
(2, 2, 4)
>>> c = dsplit(b,2) # split, depth-wise, in 2 equal parts
>>> print c[0].shape, c[1].shape # for k in (0,1): c[0][:,:,k] = a and c[1][:,:,k] = a
(2, 2, 2) (2, 2, 2)
>>> d = dsplit(b,[1,2]) # split before [:,:,1] and before [:,:,2]
>>> print d[0].shape, d[1].shape, d[2].shape # for any of the parts: d[.][:,:,k] = a
(2, 2, 1) (2, 2, 1) (2, 2, 2)
See also: split, array_split, hsplit, vsplit, dstack
dstack()
numpy.dstack(tup)
Stack arrays in sequence depth wise (along third dimension) Description: Take a sequence of arrays and stack them along the third axis. All arrays in the sequence must have the same shape along all but the third axis. This is a simple way to stack 2D arrays (images) into a single 3D array for processing. dstack will rebuild arrays divided by dsplit. Arguments: tup -- sequence of arrays. All arrays must have the same shape. Examples: >>> import numpy >>> a = array((1,2,3)) >>> b = array((2,3,4)) >>> numpy.dstack((a,b)) array([[[1, 2], [2, 3], [3, 4]]]) >>> a = array([[1],[2],[3]]) >>> b = array([[2],[3],[4]]) >>> numpy.dstack((a,b)) array([[[1, 2]], <BLANKLINE> [[2, 3]], <BLANKLINE> [[3, 4]]])
>>> from numpy import *
>>> a = array([[1,2],[3,4]]) # shapes of a and b can only differ in the 3rd dimension (if present)
>>> b = array([[5,6],[7,8]])
>>> dstack((a,b)) # stack arrays along a third axis (depth wise)
array([[[1, 5],
[2, 6]],
[[3, 7],
[4, 8]]])
See also: column_stack, concatenate, hstack, vstack, dsplit
dtype() or .dtype
numpy.dtype(...)
ndarray.dtype
Data-type for the array.
>>> from numpy import *
>>> dtype('int16') # using array-scalar type
dtype('int16')
>>> dtype([('f1', 'int16')]) # record, 1 field named 'f1', containing int16
dtype([('f1', '<i2')])
>>> dtype([('f1', [('f1', 'int16')])]) # record, 1 field named 'f1' containing a record that has 1 field.
dtype([('f1', [('f1', '<i2')])])
>>> dtype([('f1', 'uint'), ('f2', 'int32')]) # record with 2 fields: field 1 contains an unsigned int, 2nd field an int32
dtype([('f1', '<u4'), ('f2', '<i4')])
>>> dtype([('a','f8'),('b','S10')]) # using array-protocol type strings
dtype([('a', '<f8'), ('b', '|S10')])
>>> dtype("i4, (2,3)f8") # using comma-separated field formats. (2,3) is the shape
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
>>> dtype([('hello',('int',3)),('world','void',10)]) # using tuples. int is fixed-type: 3 is shape; void is flex-type: 10 is size.
dtype([('hello', '<i4', 3), ('world', '|V10')])
>>> dtype(('int16', {'x':('int8',0), 'y':('int8',1)})) # subdivide int16 in 2 int8, called x and y. 0 and 1 are the offsets in bytes
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
>>> dtype({'names':['gender','age'], 'formats':['S1',uint8]}) # using dictionaries. 2 fields named 'gender' and 'age'
dtype([('gender', '|S1'), ('age', '|u1')])
>>> dtype({'surname':('S25',0),'age':(uint8,25)}) # 0 and 25 are offsets in bytes
dtype([('surname', '|S25'), ('age', '|u1')])
>>>
>>> a = dtype('int32')
>>> a
dtype('int32')
>>> a.type # type object
<type 'numpy.int32'>
>>> a.kind # character code (one of 'biufcSUV') to identify general type
'i'
>>> a.char # unique char code of each of the 21 built-in types
'l'
>>> a.num # unique number of each of the 21 built-in types
7
>>> a.str # array-protocol typestring
'<i4'
>>> a.name # name of this datatype
'int32'
>>> a.byteorder # '=':native, '<':little endian, '>':big endian, '|':not applicable
'='
>>> a.itemsize # item size in bytes
4
>>> a = dtype({'surname':('S25',0),'age':(uint8,25)})
>>> a.fields.keys()
['age', 'surname']
>>> a.fields.values()
[(dtype('uint8'), 25), (dtype('|S25'), 0)]
>>> a = dtype([('x', 'f4'),('y', 'f4'), # nested field
... ('nested', [('i', 'i2'),('j','i2')])])
>>> a.fields['nested'] # access nested fields
(dtype([('i', '<i2'), ('j', '<i2')]), 8)
>>> a.fields['nested'][0].fields['i'] # access nested fields
(dtype('int16'), 0)
>>> a.fields['nested'][0].fields['i'][0].type
<type 'numpy.int16'>
See also: array, typeDict, astype
dump()
ndarray.dump(...)
a.dump(file) Dump a pickle of the array to the specified file. The array can be read back with pickle.load or numpy.load Arguments: file -- string naming the dump file.
dumps()
ndarray.dumps(...)
a.dumps() returns the pickle of the array as a string. pickle.loads or numpy.loads will convert the string back to an array.
ediff1d()
numpy.ediff1d(ary, to_end=None, to_begin=None)
The differences between consecutive elements of an array, possibly with prefixed and/or appended values. :Parameters: - `ary` : array This array will be flattened before the difference is taken. - `to_end` : number, optional If provided, this number will be tacked onto the end of the returned differences. - `to_begin` : number, optional If provided, this number will be tacked onto the beginning of the returned differences. :Returns: - `ed` : array The differences. Loosely, this will be (ary[1:] - ary[:-1]).
empty()
numpy.empty(...)
empty((d1,...,dn),dtype=float,order='C') Return a new array of shape (d1,...,dn) and given type with all its entries uninitialized. This can be faster than zeros.
>>> from numpy import *
>>> empty(3) # uninitialized array, size=3, dtype = float
array([ 6.08581638e+000, 3.45845952e-323, 4.94065646e-324])
>>> empty((2,3),int) # uninitialized array, dtype = int
array([[1075337192, 1075337192, 135609024],
[1084062604, 1197436517, 1129066306]])
See also: ones, zeros, eye, identity
empty_like()
numpy.empty_like(a)
Return an empty (uninitialized) array of the shape and typecode of a. Note that this does NOT initialize the returned array. If you require your array to be initialized, you should use zeros_like().
>>> from numpy import *
>>> a = array([[1,2,3],[4,5,6]])
>>> empty_like(a) # uninitialized array with the same shape and datatype as 'a'
array([[ 0, 25362433, 6571520],
[ 21248, 136447968, 4]])
See also: ones_like, zeros_like
equal()
numpy.equal(...)
y = equal(x1,x2) returns elementwise x1 == x2 in a bool array
exp()
numpy.exp(...)
y = exp(x) e**x elementwise.
expand_dims()
numpy.expand_dims(a, axis)
Expand the shape of a by including newaxis before given axis.
>>> from numpy import *
>>> x = array([1,2])
>>> expand_dims(x,axis=0) # Equivalent to x[newaxis,:] or x[None] or x[newaxis]
array([[1, 2]])
>>> expand_dims(x,axis=1) # Equivalent to x[:,newaxis]
array([[1],
[2]])
See also: newaxis, atleast_1d, atleast_2d, atleast_3d
expm1()
numpy.expm1(...)
y = expm1(x) e**x-1 elementwise.
extract()
numpy.extract(condition, arr)
Return the elements of ravel(arr) where ravel(condition) is True (in 1D). Equivalent to compress(ravel(condition), ravel(arr)).
eye()
numpy.eye(N, M=None, k=0, dtype=<type 'float'>)
eye returns a N-by-M 2-d array where the k-th diagonal is all ones, and everything else is zeros.
>>> from numpy import *
>>> eye(3,4,0,dtype=float) # a 3x4 matrix containing zeros except for the 0th diagonal that contains ones
array([[ 1., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 1., 0.]])
>>> eye(3,4,1,dtype=float) # a 3x4 matrix containing zeros except for the 1st diagonal that contains ones
array([[ 0., 1., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 1.]])
See also: ones, zeros, empty, identity
fabs()
numpy.fabs(...)
y = fabs(x) absolute values.
fastCopyAndTranspose()
numpy.fastCopyAndTranspose(...)
_fastCopyAndTranspose(a)
fft
numpy.fft
Core FFT routines ================== Standard FFTs fft ifft fft2 ifft2 fftn ifftn Real FFTs rfft irfft rfft2 irfft2 rfftn irfftn Hermite FFTs hfft ihfft
>>> from numpy import *
>>> from numpy.fft import *
>>> signal = array([-2., 8., -6., 4., 1., 0., 3., 5.]) # could also be complex
>>> fourier = fft(signal)
>>> fourier
array([ 13. +0.j , 3.36396103 +4.05025253j,
2. +1.j , -9.36396103-13.94974747j,
-21. +0.j , -9.36396103+13.94974747j,
2. -1.j , 3.36396103 -4.05025253j])
>>>
>>> N = len(signal)
>>> fourier = empty(N,complex)
>>> for k in range(N): # equivalent but much slower
... fourier[k] = sum(signal * exp(-1j*2*pi*k*arange(N)/N))
...
>>> timestep = 0.1 # if unit=day -> freq unit=cycles/day
>>> fftfreq(N, d=timestep) # freqs corresponding to 'fourier'
array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])
See also: ifft, fftfreq, fftshift
fftfreq
>>> from numpy import *
>>> from numpy.fft import *
>>> signal = array([-2., 8., -6., 4., 1., 0., 3., 5.])
>>> fourier = fft(signal)
>>> N = len(signal)
>>> timestep = 0.1 # if unit=day -> freq unit=cycles/day
>>> freq = fftfreq(N, d=timestep) # freqs corresponding to 'fourier'
>>> freq
array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])
>>>
>>> fftshift(freq) # freqs in ascending order
array([-5. , -3.75, -2.5 , -1.25, 0. , 1.25, 2.5 , 3.75])
fftshift
>>> from numpy import *
>>> from numpy.fft import *
>>> signal = array([-2., 8., -6., 4., 1., 0., 3., 5.])
>>> fourier = fft(signal)
>>> N = len(signal)
>>> timestep = 0.1 # if unit=day -> freq unit=cycles/day
>>> freq = fftfreq(N, d=timestep) # freqs corresponding to 'fourier'
>>> freq
array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])
>>>
>>> freq = fftshift(freq) # freqs in ascending order
>>> freq
array([-5. , -3.75, -2.5 , -1.25, 0. , 1.25, 2.5 , 3.75])
>>> fourier = fftshift(fourier) # adjust fourier to new freq order
>>>
>>> freq = ifftshift(freq) # undo previous frequency shift
>>> fourier = ifftshift(fourier) # undo previous fourier shift
fill()
ndarray.fill(...)
a.fill(value) -> None. Fill the array with the scalar value.
>>> from numpy import *
>>> a = arange(4, dtype=int)
>>> a
array([0, 1, 2, 3])
>>> a.fill(7) # replace all elements with the number 7
>>> a
array([7, 7, 7, 7])
>>> a.fill(6.5) # fill value is converted to dtype of a
>>> a
array([6, 6, 6, 6])
See also: empty, zeros, ones, repeat
finfo()
numpy.finfo(...)
Machine limits for floating point types. :Parameters: dtype : floating point type or instance :SeeAlso: - numpy.lib.machar.MachAr
>>> from numpy import *
>>> f = finfo(float) # the numbers given are machine dependent
>>> f.nmant, f.nexp # nr of bits in the mantissa and in the exponent
(52, 11)
>>> f.machep # most negative n so that 1.0 + 2**n != 1.0
-52
>>> f.eps # floating point precision: 2**machep
array(2.2204460492503131e-16)
>>> f.precision # nr of precise decimal digits: int(-log10(eps))
15
>>> f.resolution # 10**(-precision)
array(1.0000000000000001e-15)
>>> f.negep # most negative n so that 1.0 - 2**n != 1.0
-53
>>> f.epsneg # floating point precision: 2**negep
array(1.1102230246251565e-16)
>>> f.minexp # most negative n so that 2**n gives normal numbers
-1022
>>> f.tiny # smallest usable floating point nr: 2**minexp
array(2.2250738585072014e-308)
>>> f.maxexp # smallest positive n so that 2**n causes overflow
1024
>>> f.min, f.max # the most negative and most positive usable floating number
(-1.7976931348623157e+308, array(1.7976931348623157e+308))
fix()
numpy.fix(x, y=None)
Round x to nearest integer towards zero.
>>> from numpy import *
>>> a = array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7])
>>> fix(a) # round a to nearest integer towards zero
array([-1., -1., 0., 0., 1., 1.])
See also: round_, ceil, floor, astype
flat
ndarray.flat
A 1-d flat iterator.
>>> from numpy import *
>>> a = array([[10,30],[40,60]])
>>> iter = a.flat # .flat returns an iterator
>>> iter.next() # cycle through array with .next()
10
>>> iter.next()
30
>>> iter.next()
40
flatnonzero()
numpy.flatnonzero(a)
Return indices that are non-zero in the flattened version of a Equivalent to a.ravel().nonzero()[0]
flatten()
ndarray.flatten(...)
a.flatten([fortran]) return a 1-d array (always copy)
>>> from numpy import *
>>> a = array([[[1,2]],[[3,4]]])
>>> print a
[[[1 2]]
[[3 4]]]
>>> b = a.flatten() # b is now a 1-d version of a, a new array, not a reference
>>> print b
[1 2 3 4]
fliplr()
numpy.fliplr(m)
returns an array m with the rows preserved and columns flipped in the left/right direction. Works on the first two dimensions of m.
>>> from numpy import *
>>> a = arange(12).reshape(4,3)
>>> a
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
>>> fliplr(a) # flip left-right
array([[ 2, 1, 0],
[ 5, 4, 3],
[ 8, 7, 6],
[11, 10, 9]])
flipud()
numpy.flipud(m)
returns an array with the columns preserved and rows flipped in the up/down direction. Works on the first dimension of m.
>>> from numpy import *
>>> a = arange(12).reshape(4,3)
>>> a
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
>>> flipud(a) # flip up-down
array([[ 9, 10, 11],
[ 6, 7, 8],
[ 3, 4, 5],
[ 0, 1, 2]])
floor()
numpy.floor(...)
y = floor(x) elementwise largest integer <= x
>>> from numpy import *
>>> a = array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7])
>>> floor(a)
array([-2., -2., -1., 0., 1., 1.]) # nearest integers smaller-than or equal to a
See also: ceil, round_, fix, astype
floor_divide()
numpy.floor_divide(...)
y = floor_divide(x1,x2) floor divides the arguments elementwise.
fmod()
numpy.fmod(...)
y = fmod(x1,x2) computes (C-like) x1 % x2 elementwise.
frexp()
numpy.frexp(...)
y1,y2 = frexp(x) Split the number, x, into a normalized fraction (y1) and exponent (y2)
fromarrays()
numpy.core.records.fromarrays(arrayList, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None)
create a record array from a (flat) list of arrays >>> x1=N.array([1,2,3,4]) >>> x2=N.array(['a','dd','xyz','12']) >>> x3=N.array([1.1,2,3,4]) >>> r = fromarrays([x1,x2,x3],names='a,b,c') >>> print r[1] (2, 'dd', 2.0) >>> x1[1]=34 >>> r.a array([1, 2, 3, 4])
>>> from numpy import *
>>> x = array(['Smith','Johnson','McDonald']) # datatype is string
>>> y = array(['F','F','M'], dtype='S1') # datatype is a single character
>>> z = array([20,25,23]) # datatype is integer
>>> data = rec.fromarrays([x,y,z], names='surname, gender, age') # convert to record array
>>> data[0]
('Smith', 'F', 20)
>>> data.age # names are available as attributes
array([20, 25, 23])
See also: view
frombuffer()
numpy.frombuffer(...)
frombuffer(buffer=, dtype=float, count=-1, offset=0) Returns a 1-d array of data type dtype from buffer. The buffer argument must be an object that exposes the buffer interface. If count is -1 then the entire buffer is used, otherwise, count is the size of the output. If offset is given then jump that far into the buffer. If the buffer has data that is not in machine byte-order, then use a proper data type descriptor. The data will not be byteswapped, but the array will manage it in future operations.
>>> from numpy import *
>>> buffer = "\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x08\
... @\x00\x00\x00\x00\x00\x00\x10@\x00\x00\x00\x00\x00\x00\x14@\x00\x00\x00\x00\x00\x00\x18@"
>>> a = frombuffer(buffer, complex128)
>>> a
array([ 1.+2.j, 3.+4.j, 5.+6.j])
See also: fromfunction, fromfile
fromfile()
numpy.fromfile(...)
fromfile(file=, dtype=float, count=-1, sep='') -> array. Required arguments: file -- open file object or string containing file name. Keyword arguments: dtype -- type and order of the returned array (default float) count -- number of items to input (default all) sep -- separator between items if file is a text file (default "") Return an array of the given data type from a text or binary file. The 'file' argument can be an open file or a string with the name of a file to read from. If 'count' == -1 the entire file is read, otherwise count is the number of items of the given type to read in. If 'sep' is "" it means to read binary data from the file using the specified dtype, otherwise it gives the separator between elements in a text file. The 'dtype' value is also used to determine the size and order of the items in binary files. Data written using the tofile() method can be conveniently recovered using this function. WARNING: This function should be used sparingly as the binary files are not platform independent. In particular, they contain no endianess or datatype information. Nevertheless it can be useful for reading in simply formatted or binary data quickly.
numpy.core.records.fromfile(fd, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None)
Create an array from binary file data If file is a string then that file is opened, else it is assumed to be a file object. >>> from tempfile import TemporaryFile >>> a = N.empty(10,dtype='f8,i4,a5') >>> a[5] = (0.5,10,'abcde') >>> >>> fd=TemporaryFile() >>> a = a.newbyteorder('<') >>> a.tofile(fd) >>> >>> fd.seek(0) >>> r=fromfile(fd, formats='f8,i4,a5', shape=10, byteorder='<') >>> print r[5] (0.5, 10, 'abcde') >>> r.shape (10,)
>>> from numpy import *
>>> y = array([2.,4.,6.,8.])
>>> y.tofile("myfile.dat") # binary format
>>> y.tofile("myfile.txt", sep='\n', format = "%e") # ascii format, one column, exponential notation
>>> fromfile('myfile.dat', dtype=float)
array([ 2., 4., 6., 8.])
>>> fromfile('myfile.txt', dtype=float, sep='\n')
array([ 2., 4., 6., 8.])
See also: loadtxt, fromfunction, tofile, frombuffer, savetxt
fromfunction()
numpy.fromfunction(function, shape, **kwargs)
Returns an array constructed by calling a function on a tuple of number grids. The function should accept as many arguments as the length of shape and work on array inputs. The shape argument is a sequence of numbers indicating the length of the desired output for each axis. The function can also accept keyword arguments (except dtype), which will be passed through fromfunction to the function itself. The dtype argument (default float) determines the data-type of the index grid passed to the function.
>>> from numpy import *
>>> def f(i,j):
... return i**2 + j**2
...
>>> fromfunction(f, (3,3)) # evaluate function for all combinations of indices [0,1,2]x[0,1,2]
array([[0, 1, 4],
[1, 2, 5],
[4, 5, 8]])
See also: fromfile, frombuffer
fromiter()
numpy.fromiter(...)
fromiter(iterable, dtype, count=-1) Return a new 1d array initialized from iterable. If count is nonnegative, the new array will have count elements, otherwise its size is determined by the generator.
>>> from numpy import *
>>> import itertools
>>> mydata = [[55.5, 40],[60.5, 70]] # List of lists
>>> mydescriptor = {'names': ('weight','age'), 'formats': (float32, int32)} # Descriptor of the data
>>> myiterator = itertools.imap(tuple,mydata) # Clever way of putting list of lists into iterator
# of tuples. E.g.: myiterator.next() == (55.5, 40.)
>>> a = fromiter(myiterator, dtype = mydescriptor)
>>> a
array([(55.5, 40), (60.5, 70)],
dtype=[('weight', '<f4'), ('age', '<i4')])
See also: fromarrays, frombuffer, fromfile, fromfunction
frompyfunc()
numpy.frompyfunc(...)
frompyfunc(func, nin, nout) take an arbitrary python function that takes nin objects as input and returns nout objects and return a universal function (ufunc). This ufunc always returns PyObject arrays
fromrecords()
numpy.rec.fromrecords(...)
fromrecords(recList, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None) create a recarray from a list of records in text form The data in the same field can be heterogeneous, they will be promoted to the highest data type. This method is intended for creating smaller record arrays. If used to create large array without formats defined r=fromrecords([(2,3.,'abc')]*1000000 it can be slow. If formats is None, then this will auto-detect formats. Use list of tuples rather than list of lists for faster processing.
>>> r=fromrecords([(456,'dbe',1.2),(2,'de',1.3)],names='col1,col2,col3')
>>> print r[0]
(456, 'dbe', 1.2)
>>> r.col1
array([456, 2])
>>> r.col2
chararray(['dbe', 'de'],
dtype='|S3')
>>> import cPickle
>>> print cPickle.loads(cPickle.dumps(r))
[(456, 'dbe', 1.2) (2, 'de', 1.3)]
See also: fromarrays
fromstring()
numpy.fromstring(...)
fromstring(string, dtype=float, count=-1, sep='') Return a new 1d array initialized from the raw binary data in string. If count is positive, the new array will have count elements, otherwise its size is determined by the size of string. If sep is not empty then the string is interpreted in ASCII mode and converted to the desired number type using sep as the separator between elements (extra whitespace is ignored).
generic()
numpy.generic(...)
>>> from numpy import *
>>> numpyscalar = string_('7') # Convert to numpy scalar
>>> numpyscalar # Looks like a built-in scalar...
'7'
>>> type(numpyscalar) # ... but it isn't
<type 'numpy.string_'>
>>> buildinscalar = '7' # Built-in python scalar
>>> type(buildinscalar)
<type 'str'>
>>> isinstance(numpyscalar, generic) # Check if scalar is a NumPy one
True
>>> isinstance(buildinscalar, generic) # Example on how to recognize NumPy scalars
False
get_array_wrap()
numpy.get_array_wrap(*args)
Find the wrapper for the array with the highest priority. In case of ties, leftmost wins. If no wrapper is found, return None
get_include()
numpy.get_include()
Return the directory in the package that contains the numpy/*.h header files. Extension modules that need to compile against numpy should use this function to locate the appropriate include directory. Using distutils: import numpy Extension('extension_name', ... include_dirs=[numpy.get_include()])
get_numarray_include()
numpy.get_numarray_include(type=None)
Return the directory in the package that contains the numpy/*.h header files. Extension modules that need to compile against numpy should use this function to locate the appropriate include directory. Using distutils: import numpy Extension('extension_name', ... include_dirs=[numpy.get_numarray_include()])
get_numpy_include()
numpy.get_numpy_include(*args, **kwds)
get_numpy_include is DEPRECATED in numpy: use get_include instead Return the directory in the package that contains the numpy/*.h header files. Extension modules that need to compile against numpy should use this function to locate the appropriate include directory. Using distutils: import numpy Extension('extension_name', ... include_dirs=[numpy.get_include()])
get_printoptions()
numpy.get_printoptions()
Return the current print options. :Returns: dictionary of current print options with keys - precision : int - threshold : int - edgeitems : int - linewidth : int - suppress : bool - nanstr : string - infstr : string :SeeAlso: - set_printoptions : parameter descriptions
getbuffer()
numpy.getbuffer(...)
getbuffer(obj [,offset[, size]]) Create a buffer object from the given object referencing a slice of length size starting at offset. Default is the entire buffer. A read-write buffer is attempted followed by a read-only buffer.
getbufsize()
numpy.getbufsize()
Return the size of the buffer used in ufuncs.
geterr()
numpy.geterr()
Get the current way of handling floating-point errors. Returns a dictionary with entries "divide", "over", "under", and "invalid", whose values are from the strings "ignore", "print", "log", "warn", "raise", and "call".
geterrcall()
numpy.geterrcall()
Return the current callback function used on floating-point errors.
geterrobj()
numpy.geterrobj(...)
getfield()
ndarray.getfield(...)
a.getfield(dtype, offset) -> field of array as given type. Returns a field of the given array as a certain type. A field is a view of the array data with each itemsize determined by the given type and the offset into the current array.
gradient()
numpy.gradient(f, *varargs)
Calculate the gradient of an N-dimensional scalar function. Uses central differences on the interior and first differences on boundaries to give the same shape. Inputs: f -- An N-dimensional array giving samples of a scalar function varargs -- 0, 1, or N scalars giving the sample distances in each direction Outputs: N arrays of the same shape as f giving the derivative of f with respect to each dimension.
greater()
numpy.greater(...)
y = greater(x1,x2) returns elementwise x1 > x2 in a bool array.
greater_equal()
numpy.greater_equal(...)
y = greater_equal(x1,x2) returns elementwise x1 >= x2 in a bool array.
gumbel()
numpy.random.gumbel(...)
Gumbel distribution. gumbel(loc=0.0, scale=1.0, size=None)
>>> from numpy import *
>>> from numpy.random import *
>>> gumbel(loc=0.0,scale=1.0,size=(2,3)) # Gumbel distribution location=0.0, scale=1.0
array([[-1.25923601, 1.68758144, 1.76620507],
[ 1.96820048, -0.21219499, 1.83579566]])
>>> from pylab import * # histogram plot example
>>> hist(gumbel(0,1,(1000)), 50)
See also: random_sample, uniform, poisson, seed
hamming()
numpy.hamming(M)
hamming(M) returns the M-point Hamming window.
hanning()
numpy.hanning(M)
hanning(M) returns the M-point Hanning window.
histogram()
numpy.histogram(a, bins=10, range=None, normed=False)
Compute the histogram from a set of data. Parameters: a : array The data to histogram. n-D arrays will be flattened. bins : int or sequence of floats If an int, then the number of equal-width bins in the given range. Otherwise, a sequence of the lower bound of each bin. range : (float, float) The lower and upper range of the bins. If not provided, then (a.min(), a.max()) is used. Values outside of this range are allocated to the closest bin. normed : bool If False, the result array will contain the number of samples in each bin. If True, the result array is the value of the probability *density* function at the bin normalized such that the *integral* over the range is 1. Note that the sum of all of the histogram values will not usually be 1; it is not a probability *mass* function. Returns: hist : array The values of the histogram. See `normed` for a description of the possible semantics. lower_edges : float array The lower edges of each bin. SeeAlso: histogramdd
>>> from numpy import *
>>> x = array([0.2, 6.4, 3.0, 1.6, 0.9, 2.3, 1.6, 5.7, 8.5, 4.0, 12.8])
>>> bins = array([0.0, 1.0, 2.5, 4.0, 10.0]) # increasing monotonically
>>> N,bins = histogram(x,bins)
>>> N,bins
(array([2, 3, 1, 4, 1]), array([ 0. , 1. , 2.5, 4. , 10. ]))
>>> for n in range(len(bins)):
... if n < len(bins)-1:
... print "# ", N[n], "numbers fall into bin [", bins[n], ",", bins[n+1], "["
... else:
... print "# ", N[n], "numbers fall outside the bin range"
...
# 2 numbers fall into bin [ 0.0 , 1.0 [
# 3 numbers fall into bin [ 1.0 , 2.5 [
# 1 numbers fall into bin [ 2.5 , 4.0 [
# 4 numbers fall into bin [ 4.0 , 10.0 [
# 1 numbers fall outside the bin range
>>> N,bins = histogram(x,5,range=(0.0, 10.0)) # 5 bin boundaries in the range (0,10)
>>> N,bins
(array([4, 2, 2, 1, 2]), array([ 0., 2., 4., 6., 8.]))
>>> N,bins = histogram(x,5,range=(0.0, 10.0), normed=True) # normalize histogram, i.e. divide by len(x)
>>> N,bins
(array([ 0.18181818, 0.09090909, 0.09090909, 0.04545455, 0.09090909]), array([ 0., 2., 4., 6., 8.]))
histogram2d()
numpy.histogram2d(x, y, bins=10, range=None, normed=False, weights=None)
histogram2d(x,y, bins=10, range=None, normed=False) -> H, xedges, yedges Compute the 2D histogram from samples x,y. :Parameters: - `x,y` : Sample arrays (1D). - `bins` : Number of bins -or- [nbin x, nbin y] -or- [bin edges] -or- [x bin edges, y bin edges]. - `range` : A sequence of lower and upper bin edges (default: [min, max]). - `normed` : Boolean, if False, return the number of samples in each bin, if True, returns the density. - `weights` : An array of weights. The weights are normed only if normed is True. Should weights.sum() not equal N, the total bin count will not be equal to the number of samples. :Return: - `hist` : Histogram array. - `xedges, yedges` : Arrays defining the bin edges. Example: >>> x = random.randn(100,2) >>> hist2d, xedges, yedges = histogram2d(x, bins = (6, 7)) :SeeAlso: histogramdd
histogramdd()
numpy.histogramdd(sample, bins=10, range=None, normed=False, weights=None)
histogramdd(sample, bins=10, range=None, normed=False, weights=None) Return the N-dimensional histogram of the sample. Parameters: sample : sequence or array A sequence containing N arrays or an NxM array. Input data. bins : sequence or scalar A sequence of edge arrays, a sequence of bin counts, or a scalar which is the bin count for all dimensions. Default is 10. range : sequence A sequence of lower and upper bin edges. Default is [min, max]. normed : boolean If False, return the number of samples in each bin, if True, returns the density. weights : array Array of weights. The weights are normed only if normed is True. Should the sum of the weights not equal N, the total bin count will not be equal to the number of samples. Returns: hist : array Histogram array. edges : list List of arrays defining the lower bin edges. SeeAlso: histogram Example >>> x = random.randn(100,3) >>> hist3d, edges = histogramdd(x, bins = (5, 6, 7))
hsplit()
numpy.hsplit(ary, indices_or_sections)
Split ary into multiple columns of sub-arrays Description: Split a single array into multiple sub arrays. The array is divided into groups of columns. If indices_or_sections is an integer, ary is divided into that many equally sized sub arrays. If it is impossible to make the sub-arrays equally sized, the operation throws a ValueError exception. See array_split and split for other options on indices_or_sections. Arguments: ary -- N-D array. Array to be divided into sub-arrays. indices_or_sections -- integer or 1D array. If integer, defines the number of (close to) equal sized sub-arrays. If it is a 1D array of sorted indices, it defines the indexes at which ary is divided. Any empty list results in a single sub-array equal to the original array. Returns: sequence of sub-arrays. The returned arrays have the same number of dimensions as the input array. Related: hstack, split, array_split, vsplit, dsplit. Examples: >>> import numpy >>> a= array((1,2,3,4)) >>> numpy.hsplit(a,2) [array([1, 2]), array([3, 4])] >>> a = array([[1,2,3,4],[1,2,3,4]]) >>> hsplit(a,2) [array([[1, 2], [1, 2]]), array([[3, 4], [3, 4]])]
>>> from numpy import *
>>> a = array([[1,2,3,4],[5,6,7,8]])
>>> hsplit(a,2) # split, column-wise, in 2 equal parts
[array([[1, 2],
[5, 6]]), array([[3, 4],
[7, 8]])]
>>> hsplit(a,[1,2]) # split before column 1 and before column 2
[array([[1],
[5]]), array([[2],
[6]]), array([[3, 4],
[7, 8]])]
See also: split, array_split, dsplit, vsplit, hstack
hstack()
numpy.hstack(tup)
Stack arrays in sequence horizontally (column wise) Description: Take a sequence of arrays and stack them horizontally to make a single array. All arrays in the sequence must have the same shape along all but the second axis. hstack will rebuild arrays divided by hsplit. Arguments: tup -- sequence of arrays. All arrays must have the same shape. Examples: >>> import numpy >>> a = array((1,2,3)) >>> b = array((2,3,4)) >>> numpy.hstack((a,b)) array([1, 2, 3, 2, 3, 4]) >>> a = array([[1],[2],[3]]) >>> b = array([[2],[3],[4]]) >>> numpy.hstack((a,b)) array([[1, 2], [2, 3], [3, 4]])
>>> from numpy import *
>>> a =array([[1],[2]]) # 2x1 array
>>> b = array([[3,4],[5,6]]) # 2x2 array
>>> hstack((a,b,a)) # only the 2nd dimension of the arrays is allowed to be different
array([[1, 3, 4, 1],
[2, 5, 6, 2]])
See also: column_stack, concatenate, dstack, vstack, hsplit
hypot()
numpy.hypot(...)
y = hypot(x1,x2) sqrt(x1**2 + x2**2) elementwise
>>> from numpy import *
>>> hypot(3.,4.) # hypotenuse: sqrt(3**2 + 4**2) = 5
5.0
>>> z = array([2+3j, 3+4j])
>>> hypot(z.real, z.imag) # norm of complex numbers
array([ 3.60555128, 5. ])
i0()
numpy.i0(x)
identity()
numpy.identity(n, dtype=None)
Returns the identity 2-d array of shape n x n. identity(n)[i,j] == 1 for all i == j == 0 for all i != j
>>> from numpy import *
>>> identity(3,float)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
See also: empty, eye, ones, zeros
ifft
>>> from numpy import *
>>> from numpy.fft import *
>>> signal = array([-2., 8., -6., 4., 1., 0., 3., 5.])
>>> fourier = fft(signal)
>>> ifft(fourier) # Inverse fourier transform
array([-2. +0.00000000e+00j, 8. +1.51410866e-15j, -6. +3.77475828e-15j,
4. +2.06737026e-16j, 1. +0.00000000e+00j, 0. -1.92758271e-15j,
3. -3.77475828e-15j, 5. +2.06737026e-16j])
>>>
>>> allclose(signal.astype(complex), ifft(fft(signal))) # ifft(fft()) = original signal
True
>>>
>>> N = len(fourier)
>>> signal = empty(N,complex)
>>> for k in range(N): # equivalent but much slower
... signal[k] = sum(fourier * exp(+1j*2*pi*k*arange(N)/N)) / N
See also: fft, fftfreq, fftshift
imag() or .imag
numpy.imag(val)
Return the imaginary part of val. Useful if val may be a scalar or an array.
ndarray.imag
Imaginary part of the array.
>>> from numpy import *
>>> a = array([1+2j,3+4j,5+6j])
>>> a.imag
array([ 2., 4., 6.])
>>> a.imag = 9
>>> a
array([ 1.+9.j, 3.+9.j, 5.+9.j])
>>> a.imag = array([9,8,7])
>>> a
array([ 1.+9.j, 3.+8.j, 5.+7.j])
index_exp
numpy.index_exp
A nicer way to build up index tuples for arrays. For any index combination, including slicing and axis insertion, 'a[indices]' is the same as 'a[index_exp[indices]]' for any array 'a'. However, 'index_exp[indices]' can be used anywhere in Python code and returns a tuple of slice objects that can be used in the construction of complex index expressions.
>>> from numpy import *
>>> myslice = index_exp[2:4,...,4,::-1] # myslice could now be passed to a function, for example.
>>> print myslice
(slice(2, 4, None), Ellipsis, 4, slice(None, None, -1))
indices()
numpy.indices(dimensions, dtype=<type 'int'>)
Returns an array representing a grid of indices with row-only, and column-only variation.
>>> from numpy import *
>>> indices((2,3))
array([[[0, 0, 0],
[1, 1, 1]],
[[0, 1, 2],
[0, 1, 2]]])
>>> a = array([ [ 0, 1, 2, 3, 4],
... [10,11,12,13,14],
... [20,21,22,23,24],
... [30,31,32,33,34] ])
>>> i,j = indices((2,3))
>>> a[i,j]
array([[ 0, 1, 2],
[10, 11, 12]])
See also: mgrid, [], ix_, slice
inf
numpy.inf
float(x) -> floating point number Convert a string or number to a floating point number, if possible.
>>> from numpy import *
>>> exp(array([1000.])) # inf = infinite = number too large to represent, machine dependent
array([ inf])
>>> x = array([2,-inf,1,inf])
>>> isfinite(x) # show which elements are not nan/inf/-inf
array([True, False, True, False], dtype=bool)
>>> isinf(x) # show which elements are inf/-inf
array([False, True, False, True], dtype=bool)
>>> isposinf(x) # show which elements are inf
array([False, False, False, True], dtype=bool)
>>> isneginf(x) # show which elements are -inf
array([False, True, False, False], dtype=bool)
>>> nan_to_num(x) # replace -inf/inf with most negative/positive representable number
array([ 2.00000000e+000, -1.79769313e+308, 1.00000000e+000,
1.79769313e+308])
info() or .info
numpy.info(object=None, maxwidth=76, output=<open file '<stdout>', mode 'w' at 0x00A5F068>, toplevel='numpy')
Get help information for a function, class, or module. Example: >>> from numpy import * >>> info(polyval) # doctest: +SKIP polyval(p, x) Evaluate the polynomial p at x. Description: If p is of length N, this function returns the value: p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
inner()
numpy.inner(...)
innerproduct(a,b) Returns the inner product of a and b for arrays of floating point types. Like the generic NumPy equivalent the product sum is over the last dimension of a and b. NB: The first argument is not conjugated.
>>> from numpy import *
>>> x = array([1,2,3])
>>> y = array([10,20,30])
>>> inner(x,y) # 1x10+2x20+3x30 = 140
140
insert()
numpy.insert(arr, obj, values, axis=None)
Return a new array with values inserted along the given axis before the given indices If axis is None, then ravel the array first. The obj argument can be an integer, a slice, or a sequence of integers. Example: >>> a = array([[1,2,3], ... [4,5,6], ... [7,8,9]]) >>> insert(a, [1,2], [[4],[5]], axis=0) array([[1, 2, 3], [4, 4, 4], [4, 5, 6], [5, 5, 5], [7, 8, 9]])
>>> from numpy import *
>>> a = array([10,20,30,40])
>>> insert(a,[1,3],50) # insert value 50 before elements [1] and [3]
array([10, 50, 20, 30, 50, 40])
>>> insert(a,[1,3],[50,60]) # insert value 50 before element [1] and value 60 before element [3]
array([10, 50, 20, 30, 60, 40])
>>> a = array([[10,20,30],[40,50,60],[70,80,90]])
>>> insert(a, [1,2], 100, axis=0) # insert row with values 100 before row[1] and before row[2]
array([[ 10, 20, 30],
[100, 100, 100],
[ 40, 50, 60],
[100, 100, 100],
[ 70, 80, 90]])
>>> insert(a, [0,1], [[100],[200]], axis=0)
array([[100, 100, 100],
[ 10, 20, 30],
[200, 200, 200],
[ 40, 50, 60],
[ 70, 80, 90]])
>>> insert(a, [0,1], [100,200], axis=1)
array([[100, 10, 200, 20, 30],
[100, 40, 200, 50, 60],
[100, 70, 200, 80, 90]])
int_asbuffer()
numpy.int_asbuffer(...)
interp()
numpy.interp(...)
interp(x, xp, fp, left=None, right=None) Return the value of a piecewise-linear function at each value in x. The piecewise-linear function, f, is defined by the known data-points fp=f(xp). The xp points must be sorted in increasing order but this is not checked. For values of x < xp[0] return the value given by left. If left is None, then return fp[0]. For values of x > xp[-1] return the value given by right. If right is None, then return fp[-1].
intersect1d()
numpy.intersect1d(ar1, ar2)
Intersection of 1D arrays with unique elements. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. Alternatively, use intersect1d_nu() which will find the unique values for you. :Parameters: - `ar1` : array - `ar2` : array :Returns: - `intersection` : array :See also: numpy.lib.arraysetops has a number of other functions for performing set operations on arrays.
intersect1d_nu()
numpy.intersect1d_nu(ar1, ar2)
Intersection of 1D arrays with any elements. The input arrays do not have unique elements like intersect1d() requires. :Parameters: - `ar1` : array - `ar2` : array :Returns: - `intersection` : array :See also: numpy.lib.arraysetops has a number of other functions for performing set operations on arrays.
inv()
numpy.linalg.inv(a)
>>> from numpy import *
>>> from numpy.linalg import inv
>>> a = array([[3,1,5],[1,0,8],[2,1,4]])
>>> print a
[[3 1 5]
[1 0 8]
[2 1 4]]
>>> inva = inv(a) # Inverse matrix
>>> print inva
[[ 1.14285714 -0.14285714 -1.14285714]
[-1.71428571 -0.28571429 2.71428571]
[-0.14285714 0.14285714 0.14285714]]
>>> dot(a,inva) # Check the result, should be eye(3) within machine precision
array([[ 1.00000000e-00, 2.77555756e-17, 3.60822483e-16],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ -1.11022302e-16, 0.00000000e+00, 1.00000000e+00]])
invert()
numpy.invert(...)
y = invert(x) computes ~x (bit inversion) elementwise.
iscomplex()
numpy.iscomplex(x)
Return a boolean array where elements are True if that element is complex (has non-zero imaginary part). For scalars, return a boolean.
iscomplexobj()
numpy.iscomplexobj(x)
Return True if x is a complex type or an array of complex numbers. Unlike iscomplex(x), complex(3.0) is considered a complex object.
isfinite()
numpy.isfinite(...)
y = isfinite(x) returns True where x is finite
isfortran()
numpy.isfortran(a)
Returns True if 'a' is arranged in Fortran-order in memory with a.ndim > 1
isinf()
numpy.isinf(...)
y = isinf(x) returns True where x is +inf or -inf
isnan()
numpy.isnan(...)
y = isnan(x) returns True where x is Not-A-Number
isneginf()
numpy.isneginf(x, y=None)
Return a boolean array y with y[i] True for x[i] = -Inf. If y is an array, the result replaces the contents of y.
isposinf()
numpy.isposinf(x, y=None)
Return a boolean array y with y[i] True for x[i] = +Inf. If y is an array, the result replaces the contents of y.
isreal()
numpy.isreal(x)
Return a boolean array where elements are True if that element is real (has zero imaginary part). For scalars, return a boolean.
isrealobj()
numpy.isrealobj(x)
Return True if x is not a complex type. Unlike isreal(x), complex(3.0) is considered a complex object.
isscalar()
numpy.isscalar(num)
Returns True if the type of num is a scalar type.
issctype()
numpy.issctype(rep)
Determines whether the given object represents a numeric array type.
issubclass_()
numpy.issubclass_(arg1, arg2)
issubdtype()
numpy.issubdtype(arg1, arg2)
issubsctype()
numpy.issubsctype(arg1, arg2)
item()
ndarray.item(...)
a.item() -> copy of first array item as Python scalar. Copy the first element of array to a standard Python scalar and return it. The array must be of size one.
>>> from numpy import *
>>> a = array([5])
>>> type(a[0])
<type 'numpy.int32'>
>>> a.item() # Conversion of array of size 1 to Python scalar
5
>>> type(a.item())
<type 'int'>
>>> b = array([2,3,4])
>>> b[1].item() # Conversion of 2nd element to Python scalar
3
>>> type(b[1].item())
<type 'int'>
>>> b.item(2) # Return 3rd element converted to Python scalar
4
>>> type(b.item(2))
<type 'int'>
>>> type(b[2]) # b[2] is slower than b.item(2), and there is no conversion
<type 'numpy.int32'>
See also: []
itemset()
ndarray.itemset(...)
iterable()
numpy.iterable(y)
ix_()
numpy.ix_(*args)
Construct an open mesh from multiple sequences. This function takes n 1-d sequences and returns n outputs with n dimensions each such that the shape is 1 in all but one dimension and the dimension with the non-unit shape value cycles through all n dimensions. Using ix_() one can quickly construct index arrays that will index the cross product. a[ix_([1,3,7],[2,5,8])] returns the array a[1,2] a[1,5] a[1,8] a[3,2] a[3,5] a[3,8] a[7,2] a[7,5] a[7,8]
>>> from numpy import *
>>> a = arange(9).reshape(3,3)
>>> print a
[[0 1 2]
[3 4 5]
[6 7 8]]
>>> indices = ix_([0,1,2],[1,2,0]) # trick to be used with array broadcasting
>>> print indices
(array([[0],
[1],
[2]]), array([[1, 2, 0]]))
>>> print a[indices]
[[1 2 0]
[4 5 3]
[7 8 6]]
>>> # The latter array is the cross-product:
>>> # [[ a[0,1] a[0,2] a[0,0]]
... # [ a[1,1] a[1,2] a[1,0]]
... # [ a[2,1] a[2,2] a[2,0]]]
...
See also: [], indices, cross, outer
kaiser()
numpy.kaiser(M, beta)
kaiser(M, beta) returns a Kaiser window of length M with shape parameter beta.
kron()
numpy.kron(a, b)
kronecker product of a and b Kronecker product of two arrays is block array [[ a[ 0 ,0]*b, a[ 0 ,1]*b, ... , a[ 0 ,n-1]*b ], [ ... ... ], [ a[m-1,0]*b, a[m-1,1]*b, ... , a[m-1,n-1]*b ]]
ldexp()
numpy.ldexp(...)
y = ldexp(x1,x2) Compute y = x1 * 2**x2.
left_shift()
numpy.left_shift(...)
y = left_shift(x1,x2) computes x1 << x2 (x1 shifted to left by x2 bits) elementwise.
less()
numpy.less(...)
y = less(x1,x2) returns elementwise x1 < x2 in a bool array.
less_equal()
numpy.less_equal(...)
y = less_equal(x1,x2) returns elementwise x1 <= x2 in a bool array
lexsort()
numpy.lexsort(...)
lexsort(keys=, axis=-1) -> array of indices. Argsort with list of keys. Perform an indirect sort using a list of keys. The first key is sorted, then the second, and so on through the list of keys. At each step the previous order is preserved when equal keys are encountered. The result is a sort on multiple keys. If the keys represented columns of a spreadsheet, for example, this would sort using multiple columns (the last key being used for the primary sort order, the second-to-last key for the secondary sort order, and so on). The keys argument must be a sequence of things that can be converted to arrays of the same shape. Parameters: a : array type Array containing values that the returned indices should sort. axis : integer Axis to be indirectly sorted. None indicates that the flattened array should be used. Default is -1. Returns: indices : integer array Array of indices that sort the keys along the specified axis. The array has the same shape as the keys. SeeAlso: argsort : indirect sort sort : inplace sort
>>> from numpy import *
>>> serialnr = array([1023, 5202, 6230, 1671, 1682, 5241])
>>> height = array([40., 42., 60., 60., 98., 40.])
>>> width = array([50., 20., 70., 60., 15., 30.])
>>>
>>> # We want to sort the serial numbers with increasing height, _AND_
>>> # serial numbers with equal heights should be sorted with increasing width.
>>>
>>> indices = lexsort(keys = (width, height)) # mind the order!
>>> indices
array([5, 0, 1, 3, 2, 4])
>>> for n in indices:
... print serialnr[n], height[n], width[n]
...
5241 40.0 30.0
1023 40.0 50.0
5202 42.0 20.0
1671 60.0 60.0
6230 60.0 70.0
1682 98.0 15.0
>>>
>>> a = vstack([serialnr,width,height]) # Alternatively: all data in one big matrix
>>> print a # Mind the order of the rows!
[[ 1023. 5202. 6230. 1671. 1682. 5241.]
[ 50. 20. 70. 60. 15. 30.]
[ 40. 42. 60. 60. 98. 40.]]
>>> indices = lexsort(a) # Sort on last row, then on 2nd last row, etc.
>>> a.take(indices, axis=-1)
array([[ 5241., 1023., 5202., 1671., 6230., 1682.],
[ 30., 50., 20., 60., 70., 15.],
[ 40., 40., 42., 60., 60., 98.]])
linspace()
numpy.linspace(start, stop, num=50, endpoint=True, retstep=False)
Return evenly spaced numbers. Return num evenly spaced samples from start to stop. If endpoint is True, the last sample is stop. If retstep is True then return (seq, step_value), where step_value is the spacing used. :Parameters: start : {float} The value the sequence starts at. stop : {float} The value the sequence stops at. If ``endpoint`` is false, then this is not included in the sequence. Otherwise it is guaranteed to be the last value. num : {integer} Number of samples to generate. Default is 50. endpoint : {boolean} If true, ``stop`` is the last sample. Otherwise, it is not included. Default is true. retstep : {boolean} If true, return ``(samples, step)``, where ``step`` is the spacing used in generating the samples. :Returns: samples : {array} ``num`` equally spaced samples from the range [start, stop] or [start, stop). step : {float} (Only if ``retstep`` is true) Size of spacing between samples. :See Also: `arange` : Similar to linspace, however, when used with a float endpoint, that endpoint may or may not be included. `logspace`
>>> from numpy import *
>>> linspace(0,5,num=6) # 6 evenly spaced numbers between 0 and 5 incl.
array([ 0., 1., 2., 3., 4., 5.])
>>> linspace(0,5,num=10) # 10 evenly spaced numbers between 0 and 5 incl.
array([ 0. , 0.55555556, 1.11111111, 1.66666667, 2.22222222,
2.77777778, 3.33333333, 3.88888889, 4.44444444, 5. ])
>>> linspace(0,5,num=10,endpoint=False) # 10 evenly spaced numbers between 0 and 5 EXCL.
array([ 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5])
>>> stepsize = linspace(0,5,num=10,endpoint=False,retstep=True) # besides the usual array, also return the step size
>>> stepsize
(array([ 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5]), 0.5)
>>> myarray, stepsize = linspace(0,5,num=10,endpoint=False,retstep=True)
>>> stepsize
0.5
See also: arange, logspace, r_
load()
numpy.load(file)
Wrapper around cPickle.load which accepts either a file-like object or a filename.
loads()
numpy.loads(...)
loads(string) -- Load a pickle from the given string
loadtxt()
numpy.loadtxt(fname, dtype=<type 'float'>, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False)
Load ASCII data from fname into an array and return the array. The data must be regular, same number of values in every row fname can be a filename or a file handle. Support for gzipped files is automatic, if the filename ends in .gz See scipy.loadmat to read and write matfiles. Example usage: X = loadtxt('test.dat') # data in two columns t = X[:,0] y = X[:,1] Alternatively, you can do the same with "unpack"; see below X = loadtxt('test.dat') # a matrix of data x = loadtxt('test.dat') # a single column of data dtype - the data-type of the resulting array. If this is a record data-type, the resulting array will be 1-d and each row will be interpreted as an element of the array. The number of columns used must match the number of fields in the data-type in this case. comments - the character used to indicate the start of a comment in the file delimiter is a string-like character used to separate values in the file. If delimiter is unspecified or none, any whitespace string is a separator. converters, if not None, is a dictionary mapping column number to a function that will convert that column to a float. Eg, if column 0 is a date string: converters={0:datestr2num} skiprows is the number of rows from the top to skip usecols, if not None, is a sequence of integer column indexes to extract where 0 is the first column, eg usecols=(1,4,5) to extract just the 2nd, 5th and 6th columns unpack, if True, will transpose the matrix allowing you to unpack into named arguments on the left hand side t,y = load('test.dat', unpack=True) # for two column data x,y,z = load('somefile.dat', usecols=(3,5,7), unpack=True)
>>> from numpy import *
>>>
>>> data = loadtxt("myfile.txt") # myfile.txt contains 4 columns of numbers
>>> t,z = data[:,0], data[:,3] # data is 2D numpy array
>>>
>>> t,x,y,z = loadtxt("myfile.txt", unpack=True) # to unpack all columns
>>> t,z = loadtxt("myfile.txt", usecols = (0,3), unpack=True) # to select just a few columns
>>> data = loadtxt("myfile.txt", skiprows = 7) # to skip 7 rows from top of file
>>> data = loadtxt("myfile.txt", comments = '!') # use '!' as comment char instead of '#'
>>> data = loadtxt("myfile.txt", delimiter=';') # use ';' as column separator instead of whitespace
>>> data = loadtxt("myfile.txt", dtype = int) # file contains integers instead of floats
log()
numpy.log(...)
y = log(x) logarithm base e elementwise.
log10()
numpy.log10(...)
y = log10(x) logarithm base 10 elementwise.
log1p()
numpy.log1p(...)
y = log1p(x) log(1+x) to base e elementwise.
log2()
numpy.log2(x, y=None)
Returns the base 2 logarithm of x If y is an array, the result replaces the contents of y.
logical_and()
numpy.logical_and(...)
y = logical_and(x1,x2) returns x1 and x2 elementwise.
>>> from numpy import *
>>> logical_and(array([0,0,1,1]), array([0,1,0,1]))
array([False, False, False, True], dtype=bool)
>>> logical_and(array([False,False,True,True]), array([False,True,False,True]))
array([False, False, False, True], dtype=bool)
See also: logical_or, logical_not, logical_xor, bitwise_and
logical_not()
numpy.logical_not(...)
y = logical_not(x) returns not x elementwise.
>>> from numpy import *
>>> logical_not(array([0,1]))
>>> logical_not(array([False,True]))
See also: logical_or, logical_not, logical_xor, bitwise_and
logical_or()
numpy.logical_or(...)
y = logical_or(x1,x2) returns x1 or x2 elementwise.
>>> from numpy import *
>>> logical_or(array([0,0,1,1]), array([0,1,0,1]))
>>> logical_or(array([False,False,True,True]), array([False,True,False,True]))
See also: logical_and, logical_not, logical_xor, bitwise_or
logical_xor()
numpy.logical_xor(...)
y = logical_xor(x1,x2) returns x1 xor x2 elementwise.
>>> from numpy import *
>>> logical_xor(array([0,0,1,1]), array([0,1,0,1]))
>>> logical_xor(array([False,False,True,True]), array([False,True,False,True]))
See also: logical_or, logical_not, logical_or, bitwise_xor
logspace()
numpy.logspace(start, stop, num=50, endpoint=True, base=10.0)
Evenly spaced numbers on a logarithmic scale. Computes int(num) evenly spaced exponents from base**start to base**stop. If endpoint=True, then last number is base**stop
>>> from numpy import *
>>> logspace(-2, 3, num = 6) # 6 evenly spaced pts on a logarithmic scale, from 10^{-2} to 10^3 incl.
array([ 1.00000000e-02, 1.00000000e-01, 1.00000000e+00,
1.00000000e+01, 1.00000000e+02, 1.00000000e+03])
>>> logspace(-2, 3, num = 10) # 10 evenly spaced pts on a logarithmic scale, from 10^{-2} to 10^3 incl.
array([ 1.00000000e-02, 3.59381366e-02, 1.29154967e-01,
4.64158883e-01, 1.66810054e+00, 5.99484250e+00,
2.15443469e+01, 7.74263683e+01, 2.78255940e+02,
1.00000000e+03])
>>> logspace(-2, 3, num = 6, endpoint=False) # 6 evenly spaced pts on a logarithmic scale, from 10^{-2} to 10^3 EXCL.
array([ 1.00000000e-02, 6.81292069e-02, 4.64158883e-01,
3.16227766e+00, 2.15443469e+01, 1.46779927e+02])
See also: arange, linspace, r_
lstsq()
numpy.linalg.lstsq(a, b, rcond=-1)
returns x,resids,rank,s where x minimizes 2-norm(|b - Ax|) resids is the sum square residuals rank is the rank of A s contains the singular values of A in descending order If b is a matrix then x is also a matrix with corresponding columns. If the rank of A is less than the number of columns of A or greater than the number of rows, then residuals will be returned as an empty array otherwise resids = sum((b-dot(A,x))**2). Singular values less than s[0]*rcond are treated as zero.
lstsq() is most often used in the context of least-squares fitting of data. Suppose you obtain some noisy data y as a function of a variable t, e.g. velocity as a function of time. You can use lstsq() to fit a model to the data, if the model is linear in its parameters, that is if
y = p0 * f0(t) + p1 * f1(t) + ... + pN-1 * fN-1(t) + noise
where the pi are the parameters you want to obtain through fitting and the fi(t) are known functions of t. What follows is an example how you can do this.
First, for the example's sake, some data is simulated:
>>> from numpy import *
>>> from numpy.random import normal
>>> t = arange(0.0, 10.0, 0.05) # independent variable
>>> y = 2.0 * sin(2.*pi*t*0.6) + 2.7 * cos(2.*pi*t*0.6) + normal(0.0, 1.0, len(t))
We would like to fit this data with: model(t) = p0 * sin(2.*pi*t*0.6) + p1 * cos(2.*pi*t*0.6), where p0 and p1 are the unknown fit parameters. Here we go:
>>> from numpy.linalg import lstsq
>>> Nparam = 2 # we want to estimate 2 parameters: p_0 and p_1
>>> A = zeros((len(t),Nparam), float) # one big array with all the f_i(t)
>>> A[:,0] = sin(2.*pi*t*0.6) # f_0(t) stored
>>> A[:,1] = cos(2.*pi*t*0.6) # f_1(t) stored
>>> (p, residuals, rank, s) = lstsq(A,y)
>>> p # our final estimate of the parameters using noisy data
array([ 1.9315685 , 2.71165171])
>>> residuals # sum of the residuals: sum((p[0] * A[:,0] + p[1] * A[:,1] - y)**2)
array([ 217.23783374])
>>> rank # rank of the array A
2
>>> s # singular values of A
array([ 10., 10.])
See also: pinv, polyfit, solve
mat()
numpy.mat(data, dtype=None)
Returns 'data' as a matrix. Unlike matrix(), no copy is performed if 'data' is already a matrix or array. Equivalent to: matrix(data, copy=False)
>>> from numpy import *
>>> mat('1 3 4; 5 6 9') # matrices are always 2-dimensional
matrix([[1, 3, 4],
[5, 6, 9]])
>>> a = array([[1,2],[3,4]])
>>> m = mat(a) # convert 2-d array to matrix
>>> m
matrix([[1, 2],
[3, 4]])
>>> a[0] # result is 1-dimensional
array([1, 2])
>>> m[0] # result is 2-dimensional
matrix([[1, 2]])
>>> a.ravel() # result is 1-dimensional
array([1, 2, 3, 4])
>>> m.ravel() # result is 2-dimensional
matrix([[1, 2, 3, 4]])
>>> a*a # element-by-element multiplication
array([[ 1, 4],
[ 9, 16]])
>>> m*m # (algebraic) matrix multiplication
matrix([[ 7, 10],
[15, 22]])
>>> a**3 # element-wise power
array([[ 1, 8],
[27, 64]])
>>> m**3 # matrix multiplication m*m*m
matrix([[ 37, 54],
[ 81, 118]])
>>> m.T # transpose of the matrix
matrix([[1, 3],
[2, 4]])
>>> m.H # conjugate transpose (differs from .T for complex matrices)
matrix([[1, 3],
[2, 4]])
>>> m.I # inverse matrix
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
See also: bmat, array, dot, asmatrix
matrix()
numpy.matrix(...)
>>> from numpy import *
>>> matrix('1 3 4; 5 6 9') # matrix is synonymous with mat
matrix([[1, 3, 4],
[5, 6, 9]])
max()
numpy.max(a, axis=None, out=None)
Return the maximum of 'a' along dimension axis.
ndarray.max(...)
a.max(axis=None)
>>> from numpy import *
>>> a = array([10,20,30])
>>> a.max()
30
>>> a = array([[10,50,30],[60,20,40]])
>>> a.max()
60
>>> a.max(axis=0) # for each of the columns, find the maximum
array([60, 50, 40])
>>> a.max(axis=1) # for each of the rows, find the maximum
array([50, 60])
>>> max(a) # also exists, but is slower
See also: nan, argmax, maximum, ptp
maximum()
numpy.maximum(...)
y = maximum(x1,x2) returns maximum (if x1 > x2: x1; else: x2) elementwise.
>>> from numpy import *
>>> a = array([1,0,5])
>>> b = array([3,2,4])
>>> maximum(a,b) # element-by-element comparison
array([3, 2, 5])
>>> max(a.tolist(),b.tolist()) # standard Python function does not give the same!
[3, 2, 4]
See also: minimum, max, argmax
maximum_sctype()
numpy.maximum_sctype(t)
returns the sctype of highest precision of the same general kind as 't'
may_share_memory()
numpy.may_share_memory(a, b)
Determine if two arrays can share memory The memory-bounds of a and b are computed. If they overlap then this function returns True. Otherwise, it returns False. A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*.
mean()
numpy.mean(a, axis=None, dtype=None, out=None)
Compute the mean along the specified axis. Returns the average of the array elements. The average is taken over the flattened array by default, otherwise over the specified axis. The dtype returned for integer type arrays is float *Parameters*: a : {array_like} Array containing numbers whose mean is desired. If a is not an array, a conversion is attempted. axis : {None, integer}, optional Axis along which the means are computed. The default is to compute the standard deviation of the flattened array. dtype : {None, dtype}, optional Type to use in computing the means. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : {None, array}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. *Returns*: mean : {array, scalar}, see dtype parameter above If out=None, returns a new array containing the mean values, otherwise a reference to the output array is returned. *See Also*: `var` : Variance `std` : Standard deviation *Notes* The mean is the sum of the elements along the axis divided by the number of elements. *Examples* >>> a = array([[1,2],[3,4]]) >>> mean(a) 2.5 >>> mean(a,0) array([ 2., 3.]) >>> mean(a,1) array([ 1.5, 3.5])
ndarray.mean(...)
a.mean(axis=None, dtype=None, out=None) -> mean Returns the average of the array elements. The average is taken over the flattened array by default, otherwise over the specified axis. :Parameters: axis : integer Axis along which the means are computed. The default is to compute the standard deviation of the flattened array. dtype : type Type to use in computing the means. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : ndarray Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. :Returns: mean : The return type varies, see above. A new array holding the result is returned unless out is specified, in which case a reference to out is returned. :SeeAlso: - var : variance - std : standard deviation Notes ----- The mean is the sum of the elements along the axis divided by the number of elements.
>>> from numpy import *
>>> a = array([1,2,7])
>>> a.mean()
3.3333333333333335
>>> a = array([[1,2,7],[4,9,6]])
>>> a.mean()
4.833333333333333
>>> a.mean(axis=0) # the mean of each of the 3 columns
array([ 2.5, 5.5, 6.5])
>>> a.mean(axis=1) # the mean of each of the 2 rows
array([ 3.33333333, 6.33333333])
See also: average, median, var, std, sum
median()
numpy.median(m)
median(m) returns a median of m along the first dimension of m.
>>> from numpy import *
>>> a = array([1,2,3,4,9])
>>> median(a)
3
>>> a = array([1,2,3,4,9,0])
>>> median(a)
2.5
See also: average, mean, var, std
meshgrid()
numpy.meshgrid(x, y)
For vectors x, y with lengths Nx=len(x) and Ny=len(y), return X, Y where X and Y are (Ny, Nx) shaped arrays with the elements of x and y repeated to fill the matrix EG, [X, Y] = meshgrid([1,2,3], [4,5,6,7]) X = 1 2 3 1 2 3 1 2 3 1 2 3 Y = 4 4 4 5 5 5 6 6 6 7 7 7
mgrid
numpy.mgrid
Construct a "meshgrid" in N-dimensions. grid = nd_grid() creates an instance which will return a mesh-grid when indexed. The dimension and number of the output arrays are equal to the number of indexing dimensions. If the step length is not a complex number, then the stop is not inclusive. However, if the step length is a COMPLEX NUMBER (e.g. 5j), then the integer part of its magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value IS INCLUSIVE. If instantiated with an argument of sparse=True, the mesh-grid is open (or not fleshed out) so that only one-dimension of each returned argument is greater than 1. Example: >>> mgrid = nd_grid() >>> mgrid[0:5,0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]], <BLANKLINE> [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]) >>> mgrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. ]) >>> ogrid = nd_grid(sparse=True) >>> ogrid[0:5,0:5] [array([[0], [1], [2], [3], [4]]), array([[0, 1, 2, 3, 4]])]
>>> from numpy import *
>>> m = mgrid[1:3,2:5] # rectangular mesh grid with x-values [1,2] and y-values [2,3,4]
>>> print m
[[[1 1 1]
[2 2 2]]
[[2 3 4]
[2 3 4]]]
>>> m[0,1,2] # x-value of grid point with index coordinates (1,2)
2
>>> m[1,1,2] # y-value of grid point with index coordinates (1,2)
4
min()
numpy.min(a, axis=None, out=None)
Return the minimum of a along dimension axis.
ndarray.min(...)
a.min(axis=None)
>>> from numpy import *
>>> a = array([10,20,30])
>>> a.min()
10
>>> a = array([[10,50,30],[60,20,40]])
>>> a.min()
10
>>> a.min(axis=0) # for each of the columns, find the minimum
array([10, 20, 30])
>>> a.min(axis=1) # for each of the rows, find the minimum
array([10, 20])
>>> min(a) # also exists, but is slower
See also: nan, max, minimum, argmin, ptp
minimum()
numpy.minimum(...)
y = minimum(x1,x2) returns minimum (if x1 < x2: x1; else: x2) elementwise
>>> from numpy import *
>>> a = array([1,0,5])
>>> b = array([3,2,4])
>>> minimum(a,b) # element-by-element comparison
array([1, 0, 4])
>>> min(a.tolist(),b.tolist()) # Standard Python function does not give the same!
[1, 0, 5]
See also: min, maximum, argmin
mintypecode()
numpy.mintypecode(typechars, typeset='GDFgdf', default='d')
Return a minimum data type character from typeset that handles all typechars given The returned type character must be the smallest size such that an array of the returned type can handle the data from an array of type t for each t in typechars (or if typechars is an array, then its dtype.char). If the typechars does not intersect with the typeset, then default is returned. If t in typechars is not a string then t=asarray(t).dtype.char is applied.
mod()
numpy.mod(...)
y = remainder(x1,x2) computes x1-n*x2 where n is floor(x1 / x2)
modf()
numpy.modf(...)
y1,y2 = modf(x) breaks x into fractional (y1) and integral (y2) parts. Each output has the same sign as the input.
msort()
numpy.msort(a)
multiply()
numpy.multiply(...)
y = multiply(x1,x2) multiplies the arguments elementwise.
>>> from numpy import *
>>> multiply(array([3,6]), array([4,7]))
array([12, 42])
See also: dot
nan
numpy.nan
float(x) -> floating point number Convert a string or number to a floating point number, if possible.
>>> from numpy import *
>>> sqrt(array([-1.0]))
array([ nan]) # nan = NaN = Not A Number
>>> x = array([2, nan, 1])
>>> isnan(x) # show which elements are nan
array([False, True, False], dtype=bool)
>>> isfinite(x) # show which elements are not nan/inf/-inf
array([True, False, True], dtype=bool)
>>> nansum(x) # same as sum() but ignore nan elements
3.0
>>> nanmax(x) # same as max() but ignore nan elements
2.0
>>> nanmin(x) # same as min() but ignore nan elements
1.0
>>> nanargmin(x) # same as argmin() but ignore nan elements
2
>>> nanargmax(x) # same as argmax() but ignore nan elements
0
>>> nan_to_num(x) # replace all nan elements with 0.0
array([ 2., 0., 1.])
See also: inf
nan_to_num()
numpy.nan_to_num(x)
Returns a copy of x, replacing NaN's with 0 and Infs with large numbers The following mappings are applied: NaN -> 0 Inf -> limits.double_max -Inf -> limits.double_min
nanargmax()
numpy.nanargmax(a, axis=None)
Find the maximum over the given axis ignoring NaNs.
nanargmin()
numpy.nanargmin(a, axis=None)
Find the indices of the minimum over the given axis ignoring NaNs.
nanmax()
numpy.nanmax(a, axis=None)
Find the maximum over the given axis ignoring NaNs.
nanmin()
numpy.nanmin(a, axis=None)
Find the minimum over the given axis, ignoring NaNs.
nansum()
numpy.nansum(a, axis=None)
Sum the array over the given axis, treating NaNs as 0.
ndenumerate()
numpy.ndenumerate(...)
A simple nd index iterator over an array. Example: >>> a = array([[1,2],[3,4]]) >>> for index, x in ndenumerate(a): ... print index, x (0, 0) 1 (0, 1) 2 (1, 0) 3 (1, 1) 4
>>> from numpy import *
>>> a = arange(9).reshape(3,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15],
[16, 17, 18]])
>>> b = ndenumerate(a)
>>> for position,value in b: print position,value # position is the N-dimensional index
...
(0, 0) 10
(0, 1) 11
(0, 2) 12
(1, 0) 13
(1, 1) 14
(1, 2) 15
(2, 0) 16
(2, 1) 17
(2, 2) 18
ndim() or .ndim
numpy.ndim(a)
Return the number of dimensions of a. If a is not already an array, a conversion is attempted. Scalars are zero dimensional. *Parameters*: a : {array_like} Array whose number of dimensions are desired. If a is not an array, a conversion is attempted. *Returns*: number_of_dimensions : {integer} Returns the number of dimensions. *See Also*: `rank` : equivalent function. `ndarray.ndim` : equivalent method `shape` : dimensions of array `ndarray.shape` : dimensions of array *Examples* >>> ndim([[1,2,3],[4,5,6]]) 2 >>> ndim(array([[1,2,3],[4,5,6]])) 2 >>> ndim(1) 0
ndarray.ndim
Number of array dimensions.
>>> from numpy import *
>>> a = arange(12).reshape(3,4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> a.ndim # a has 2 axes
2
>>> a.shape = (2,2,3)
array([[[ 0, 1, 2],
[ 3, 4, 5]],
[[ 6, 7, 8],
[ 9, 10, 11]]])
>>> a.ndim # now a has 3 axes
3
>>> len(a.shape) # same as ndim
3
See also: shape
ndindex()
numpy.ndindex(...)
Pass in a sequence of integers corresponding to the number of dimensions in the counter. This iterator will then return an N-dimensional counter. Example: >>> for index in ndindex(3,2,1): ... print index (0, 0, 0) (0, 1, 0) (1, 0, 0) (1, 1, 0) (2, 0, 0) (2, 1, 0)
>>> for index in ndindex(4,3,2):
print index
(0,0,0)
(0,0,1)
(0,1,0)
...
(3,1,1)
(3,2,0)
(3,2,1)
See also: broadcast, ndenumerate
negative()
numpy.negative(...)
y = negative(x) determines -x elementwise
newaxis
numpy.newaxis
>>> from numpy import *
>>> x = arange(3)
>>> x
array([0, 1, 2])
>>> x[:,newaxis] # add a new dimension/axis
array([[0],
[1],
[2]])
>>> x[:,newaxis,newaxis] # add two new dimensions/axes
array([[[0]],
[[1]],
[[2]]])
>>> x[:,newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
>>> y = arange(3,6)
>>> x[:,newaxis] * y # outer product, same as outer(x,y)
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
>>> x.shape
(3,)
>>> x[newaxis,:].shape # x[newaxis,:] is equivalent to x[newaxis] and x[None]
(1,3)
>>> x[:,newaxis].shape
(3,1)
See also: [], atleast_1d, atleast_2d, atleast_3d, expand_dims
newbuffer()
numpy.newbuffer(...)
newbuffer(size) Return a new uninitialized buffer object of size bytes
newbyteorder()
ndarray.newbyteorder(...)
a.newbyteorder(<byteorder>) is equivalent to a.view(a.dtype.newbyteorder(<byteorder>))
nonzero()
numpy.nonzero(a)
Return the indices of the elements of a which are not zero. *Parameters*: a : {array_like} *Returns*: tuple_of_arrays : {tuple} *Examples* >>> eye(3)[nonzero(eye(3))] array([ 1., 1., 1.]) >>> nonzero(eye(3)) (array([0, 1, 2]), array([0, 1, 2])) >>> eye(3)[nonzero(eye(3))] array([ 1., 1., 1.])
ndarray.nonzero(...)
a.nonzero() returns a tuple of arrays Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with a[a.nonzero()]. To group the indices by element, rather than dimension, use transpose(a.nonzero()) instead. The result of this is always a 2d array, with a row for each non-zero element.;
>>> from numpy import *
>>> x = array([1,0,2,-1,0,0,8])
>>> indices = x.nonzero() # find the indices of the nonzero elements
>>> indices
(array([0, 2, 3, 6]),)
>>> x[indices]
array([1, 2, -1, 8])
>>> y = array([[0,1,0],[2,0,3]])
>>> indices = y.nonzero()
>>> indices
(array([0, 1, 1]), array([1, 0, 2]))
>>> y[indices[0],indices[1]] # one way of doing it, explains what's in indices[0] and indices[1]
array([1, 2, 3])
>>> y[indices] # this way is shorter
array([1, 2, 3])
>>> y = array([1,3,5,7])
>>> indices = (y >= 5).nonzero()
>>> y[indices]
array([5, 7])
>>> nonzero(y) # function also exists
(array([0, 1, 2, 3]),)
See also: [], where, compress, choose, take
not_equal()
numpy.not_equal(...)
y = not_equal(x1,x2) returns elementwise x1 != x2
obj2sctype()
numpy.obj2sctype(rep, default=None)
ogrid
numpy.ogrid
Construct a "meshgrid" in N-dimensions. grid = nd_grid() creates an instance which will return a mesh-grid when indexed. The dimension and number of the output arrays are equal to the number of indexing dimensions. If the step length is not a complex number, then the stop is not inclusive. However, if the step length is a COMPLEX NUMBER (e.g. 5j), then the integer part of it's magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value IS INCLUSIVE. If instantiated with an argument of sparse=True, the mesh-grid is open (or not fleshed out) so that only one-dimension of each returned argument is greater than 1 Example: >>> mgrid = nd_grid() >>> mgrid[0:5,0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]], <BLANKLINE> [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]) >>> mgrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. ]) >>> ogrid = nd_grid(sparse=True) >>> ogrid[0:5,0:5] [array([[0], [1], [2], [3], [4]]), array([[0, 1, 2, 3, 4]])]
>>> from numpy import *
>>> x,y = ogrid[0:3,0:3] # x and y are useful to use with broadcasting rules
>>> x
array([[0],
[1],
[2]])
>>> y
array([[0, 1, 2]])
>>> print x*y # example how to use broadcasting rules
[[0 0 0]
[0 1 2]
[0 2 4]]
See also: mgrid
ones()
numpy.ones(shape, dtype=None, order='C')
Returns an array of the given dimensions which is initialized to all ones.
>>> from numpy import *
>>> ones(5)
array([ 1., 1., 1., 1., 1.])
>>> ones((2,3), int)
array([[1, 1, 1],
[1, 1, 1]])
See also: ones_like, zeros, empty, eye, identity
ones_like()
numpy.ones_like(...)
y = ones_like(x) returns an array of ones of the shape and typecode of x.
>>> from numpy import *
>>> a = array([[1,2,3],[4,5,6]])
>>> ones_like(a) # ones initialised array with the same shape and datatype as 'a'
array([[1, 1, 1],
[1, 1, 1]])
See also: ones, zeros_like
outer()
numpy.outer(a, b)
Returns the outer product of two vectors. result[i,j] = a[i]*b[j] when a and b are vectors. Will accept any arguments that can be made into vectors.
>>> from numpy import *
>>> x = array([1,2,3])
>>> y = array([10,20,30])
>>> outer(x,y) # outer product
array([[10, 20, 30],
[20, 40, 60],
[30, 60, 90]])
permutation()
numpy.random.permutation(...)
Given an integer, return a shuffled sequence of integers >= 0 and < x; given a sequence, return a shuffled array copy. permutation(x)
>>> from numpy import *
>>> from numpy.random import permutation
>>> permutation(4) # permutation of integers from 0 to 3
array([0, 3, 1, 2])
>>> permutation(4) # another permutation of integers from 0 to 3
array([2, 1, 0, 3])
>>> permutation(4) # yet another permutation of integers from 0 to 3
array([3, 0, 2, 1])
See also: shuffle, bytes, seed
piecewise()
numpy.piecewise(x, condlist, funclist, *args, **kw)
Return a piecewise-defined function. x is the domain condlist is a list of boolean arrays or a single boolean array The length of the condition list must be n2 or n2-1 where n2 is the length of the function list. If len(condlist)==n2-1, then an 'otherwise' condition is formed by |'ing all the conditions and inverting. funclist is a list of functions to call of length (n2). Each function should return an array output for an array input Each function can take (the same set) of extra arguments and keyword arguments which are passed in after the function list. A constant may be used in funclist for a function that returns a constant (e.g. val and lambda x: val are equivalent in a funclist). The output is the same shape and type as x and is found by calling the functions on the appropriate portions of x. Note: This is similar to choose or select, except the the functions are only evaluated on elements of x that satisfy the corresponding condition. The result is |-- | f1(x) for condition1 y = --| f2(x) for condition2 | ... | fn(x) for conditionn |--
>>> from numpy import *
>>> f1 = lambda x: x*x
>>> f2 = lambda x: 2*x
>>> x = arange(-2.,3.,0.1)
>>> condition = (x>1)&(x<2) # boolean array
>>> y = piecewise(x,condition, [f1,1.]) # if condition is true, return f1, otherwise 1.
>>> y = piecewise(x, fabs(x)<=1, [f1,0]) + piecewise(x, x>1, [f2,0]) # 0. in ]-inf,-1[, f1 in [-1,+1], f2 in ]+1,+inf[
>>> print y
<snip>
See also: select
pinv()
numpy.linalg.pinv(a, rcond=1.0000000000000001e-015)
Return the (Moore-Penrose) pseudo-inverse of a 2-d array This method computes the generalized inverse using the singular-value decomposition and all singular values larger than rcond times the largest.
>>> from numpy import *
>>> from numpy.linalg import pinv,svd,lstsq
>>> A = array([[1., 3., 5.],[2., 4., 6.]])
>>> b = array([1., 3.])
>>>
>>> # Question: find x such that ||A*x-b|| is minimal
>>> # Answer: x = pinvA * b, with pinvA the pseudo-inverse of A
>>>
>>> pinvA = pinv(A)
>>> print pinvA
[[-1.33333333 1.08333333]
[-0.33333333 0.33333333]
[ 0.66666667 -0.41666667]]
>>> x = dot(pinvA, b)
>>> print x
[ 1.91666667 0.66666667 -0.58333333]
>>>
>>> # Relation with least-squares minimisation lstsq()
>>>
>>> x,resids,rank,s = lstsq(A,b)
>>> print x # the same solution for x as above
[ 1.91666667 0.66666667 -0.58333333]
>>>
>>> # Relation with singular-value decomposition svd()
>>>
>>> U,sigma,V = svd(A)
>>> S = zeros_like(A.transpose())
>>> for n in range(len(sigma)): S[n,n] = 1. / sigma[n]
>>> dot(V.transpose(), dot(S, U.transpose())) # = pinv(A)
array([[-1.33333333, 1.08333333],
[-0.33333333, 0.33333333],
[ 0.66666667, -0.41666667]])
See also: inv, lstsq, solve, svd
pkgload()
numpy.pkgload(*packages, **options)
place()
numpy.place(arr, mask, vals)
Similar to putmask arr[mask] = vals but the 1D array vals has the same number of elements as the non-zero values of mask. Inverse of extract.
poisson()
numpy.random.poisson(...)
Poisson distribution. poisson(lam=1.0, size=None) -> random values
>>> from numpy import *
>>> from numpy.random import *
>>> poisson(lam=0.5, size=(2,3)) # poisson distribution lambda=0.5
array([[2, 0, 0],
[1, 1, 0]])
See also: random_sample, uniform, standard_normal, seed
poly()
numpy.poly(seq_of_zeros)
Return a sequence representing a polynomial given a sequence of roots. If the input is a matrix, return the characteristic polynomial. Example: >>> b = roots([1,3,1,5,6]) >>> poly(b) array([ 1., 3., 1., 5., 6.])
poly1d()
numpy.poly1d(...)
A one-dimensional polynomial class. p = poly1d([1,2,3]) constructs the polynomial x**2 + 2 x + 3 p(0.5) evaluates the polynomial at the location p.r is a list of roots p.c is the coefficient array [1,2,3] p.order is the polynomial order (after leading zeros in p.c are removed) p[k] is the coefficient on the kth power of x (backwards from sequencing the coefficient array). polynomials can be added, subtracted, multiplied and divided (returns quotient and remainder). asarray(p) will also give the coefficient array, so polynomials can be used in all functions that accept arrays. p = poly1d([1,2,3], variable='lambda') will use lambda in the string representation of p.
>>> from numpy import *
>>> p1 = poly1d([2,3],r=1) # specify polynomial by its roots
>>> print p1
2
1 x - 5 x + 6
>>> p2 = poly1d([2,3],r=0) # specify polynomial by its coefficients
>>> print p2
2 x + 3
>>> print p1+p2 # +,-,*,/ and even ** are supported
2
1 x - 3 x + 9
>>> quotient,remainder = p1/p2 # division gives a tuple with the quotient and remainder
>>> print quotient,remainder
0.5 x - 3.25
15.75
>>> p3 = p1*p2
>>> print p3
3 2
2 x - 7 x - 3 x + 18
>>> p3([1,2,3,4]) # evaluate the polynomial in the values [1,2,3,4]
array([10, 0, 0, 22])
>>> p3[2] # the coefficient of x**2
-7
>>> p3.r # the roots of the polynomial
array([-1.5, 3. , 2. ])
>>> p3.c # the coefficients of the polynomial
array([ 2, -7, -3, 18])
>>> p3.o # the order of the polynomial
3
>>> print p3.deriv(m=2) # the 2nd derivative of the polynomial
12 x - 14
>>> print p3.integ(m=2,k=[1,2]) # integrate polynomial twice and use [1,2] as integration constants
5 4 3 2
0.1 x - 0.5833 x - 0.5 x + 9 x + 1 x + 2
polyadd()
numpy.polyadd(a1, a2)
Adds two polynomials represented as sequences
polyder()
numpy.polyder(p, m=1)
Return the mth derivative of the polynomial p.
polydiv()
numpy.polydiv(u, v)
Computes q and r polynomials so that u(s) = q(s)*v(s) + r(s) and deg r < deg v.
polyfit()
numpy.polyfit(x, y, deg, rcond=None, full=False)
Least squares polynomial fit. Required arguments x -- vector of sample points y -- vector or 2D array of values to fit deg -- degree of the fitting polynomial Keyword arguments rcond -- relative condition number of the fit (default len(x)*eps) full -- return full diagnostic output (default False) Returns full == False -- coefficients full == True -- coefficients, residuals, rank, singular values, rcond. Warns RankWarning -- if rank is reduced and not full output Do a best fit polynomial of degree 'deg' of 'x' to 'y'. Return value is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for n=2 p2*x0^2 + p1*x0 + p0 = y1 p2*x1^2 + p1*x1 + p0 = y1 p2*x2^2 + p1*x2 + p0 = y2 ..... p2*xk^2 + p1*xk + p0 = yk Method: if X is a the Vandermonde Matrix computed from x (see http://mathworld.wolfram.com/VandermondeMatrix.html), then the polynomial least squares solution is given by the 'p' in X*p = y where X is a len(x) x N+1 matrix, p is a N+1 length vector, and y is a len(x) x 1 vector This equation can be solved as p = (XT*X)^-1 * XT * y where XT is the transpose of X and -1 denotes the inverse. However, this method is susceptible to rounding errors and generally the singular value decomposition is preferred and that is the method used here. The singular value method takes a paramenter, 'rcond', which sets a limit on the relative size of the smallest singular value to be used in solving the equation. This may result in lowering the rank of the Vandermonde matrix, in which case a RankWarning is issued. If polyfit issues a RankWarning, try a fit of lower degree or replace x by x - x.mean(), both of which will generally improve the condition number. The routine already normalizes the vector x by its maximum absolute value to help in this regard. The rcond parameter may also be set to a value smaller than its default, but this may result in bad fits. 
The current default value of rcond is len(x)*eps, where eps is the relative precision of the floating type being used, generally around 1e-7 and 2e-16 for IEEE single and double precision respectively. This value of rcond is fairly conservative but works pretty well when x - x.mean() is used in place of x. The warnings can be turned off by: >>> import numpy >>> import warnings >>> warnings.simplefilter('ignore',numpy.RankWarning) DISCLAIMER: Power series fits are full of pitfalls for the unwary once the degree of the fit becomes large or the interval of sample points is badly centered. The basic problem is that the powers x**n are generally a poor basis for the functions on the sample interval with the result that the Vandermonde matrix is ill conditioned and computation of the polynomial values is sensitive to coefficient error. The quality of the resulting fit should be checked against the data whenever the condition number is large, as the quality of polynomial fits *can not* be taken for granted. If all you want to do is draw a smooth curve through the y values and polyfit is not doing the job, try centering the sample range or look into scipy.interpolate, which includes some nice spline fitting functions that may be of use. For more info, see http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html, but note that the k's and n's in the superscripts and subscripts on that page. The linear algebra is correct, however. See also polyval
>>> from numpy import *
>>> x = array([1,2,3,4,5])
>>> y = array([6, 11, 18, 27, 38])
>>> polyfit(x,y,2) # fit a 2nd degree polynomial to the data, result is x**2 + 2x + 3
array([ 1., 2., 3.])
>>> polyfit(x,y,1) # fit a 1st degree polynomial (straight line), result is 8x-4
array([ 8., -4.])
See also: lstsq
polyint()
numpy.polyint(p, m=1, k=None)
Return the mth analytical integral of the polynomial p. If k is None, then zero-valued constants of integration are used. otherwise, k should be a list of length m (or a scalar if m=1) to represent the constants of integration to use for each integration (starting with k[0])
polymul()
numpy.polymul(a1, a2)
Multiplies two polynomials represented as sequences.
polysub()
numpy.polysub(a1, a2)
Subtracts two polynomials represented as sequences
polyval()
numpy.polyval(p, x)
Evaluate the polynomial p at x. If x is a polynomial then composition. Description: If p is of length N, this function returns the value: p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1] x can be a sequence and p(x) will be returned for all elements of x. or x can be another polynomial and the composite polynomial p(x) will be returned. Notice: This can produce inaccurate results for polynomials with significant variability. Use carefully.
power()
numpy.power(...)
y = power(x1,x2) computes x1**x2 elementwise.
prod()
numpy.prod(a, axis=None, dtype=None, out=None)
Return the product of the elements along the given axis. Blah, Blah.
ndarray.prod(...)
a.prod(axis=None, dtype=None)
>>> from numpy import *
>>> a = array([1,2,3])
>>> a.prod() # 1 * 2 * 3 = 6
6
>>> prod(a) # also exists
6
>>> a = array([[1,2,3],[4,5,6]])
>>> a.prod(dtype=float) # specify type of output
720.0
>>> a.prod(axis=0) # for each of the 3 columns: product
array([ 4, 10, 18])
>>> a.prod(axis=1) # for each of the two rows: product
array([ 6, 120])
product()
numpy.product(a, axis=None, dtype=None, out=None)
Product of the array elements over the given axis. *Parameters*: a : {array_like} Array containing elements whose product is desired. If a is not an array, a conversion is attempted. axis : {None, integer} Axis over which the product is taken. If None is used, then the product is over all the array elements. dtype : {None, dtype}, optional Determines the type of the returned array and of the accumulator where the elements are multiplied. If dtype has the value None and the type of a is an integer type of precision less than the default platform integer, then the default platform integer precision is used. Otherwise, the dtype is the same as that of a. out : {None, array}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. *Returns*: product_along_axis : {array, scalar}, see dtype parameter above. Returns an array whose shape is the same as a with the specified axis removed. Returns a 0d array when a is 1d or dtype=None. Returns a reference to the specified output array if specified. *See Also*: `ndarray.prod` : equivalent method *Examples* >>> product([1.,2.]) 2.0 >>> product([1.,2.], dtype=int32) 2 >>> product([[1.,2.],[3.,4.]]) 24.0 >>> product([[1.,2.],[3.,4.]], axis=1) array([ 2., 12.])
ptp()
numpy.ptp(a, axis=None, out=None)
Return maximum - minimum along the given dimension. Blah, Blah.
ndarray.ptp(...)
a.ptp(axis=None) a.max(axis)-a.min(axis)
>>> from numpy import *
>>> a = array([5,15,25])
>>> a.ptp() # peak-to-peak = maximum - minimum
20
>>> a = array([[5,15,25],[3,13,33]])
>>> a.ptp()
30
>>> a.ptp(axis=0) # peak-to-peak value for each of the 3 columns
array([2, 2, 8])
>>> a.ptp(axis=1) # peak-to-peak value for each of the 2 rows
array([20, 30])
put()
numpy.put(a, ind, v, mode='raise')
Set a[n] = v[n] for all n in ind. If v is shorter than mask it will be repeated as necessary. In particular v can be a scalar or length 1 array. The routine put is the equivalent of the following (although the loop is in C for speed): ind = array(indices, copy=False) v = array(values, copy=False).astype(a.dtype) for i in ind: a.flat[i] = v[i] a must be a contiguous numpy array.
ndarray.put(...)
a.put(indices, values, mode) sets a.flat[n] = values[n] for each n in indices. If values is shorter than indices then it will repeat.
>>> from numpy import *
>>> a = array([10,20,30,40])
>>> a.put([60,70,80], [0,3,2]) # first values, then indices
>>> a
array([60, 20, 80, 70])
>>> a[[0,3,2]] = [60,70,80] # same effect
>>> a.put([40,50], [0,3,2,1]) # if value array is too short, it is repeated
>>> a
array([40, 50, 40, 50])
>>> put(a, [0,3], [90]) # also exists, but here FIRST indices, THEN values
>>> a
array([90, 50, 40, 90])
putmask()
numpy.putmask(...)
putmask(a, mask, values) sets a.flat[n] = values[n] for each n where mask.flat[n] is true. If values is not the same size of a and mask then it will repeat. This gives different behavior than a[mask] = values.
>>> from numpy import *
>>> a = array([10,20,30,40])
>>> mask = array([True,False,True,True]) # size mask = size a
>>> a.putmask([60,70,80,90], mask) # first values, then the mask
>>> a
array([60, 20, 80, 90])
>>> a = array([10,20,30,40])
>>> a[mask] # reference
array([10, 30, 40])
>>> a[mask] = array([60,70,80,90]) # NOT exactly the same as putmask
>>> a
array([60, 20, 70, 80])
>>> a.putmask([10,90], mask) # if value array is too short, it is repeated
>>> a
array([10, 20, 10, 90])
>>> putmask(a, mask, [60,70,80,90]) # also exists, but here FIRST mask, THEN values
r_
numpy.r_
Translates slice objects to concatenation along the first axis. For example: >>> r_[array([1,2,3]), 0, 0, array([4,5,6])] array([1, 2, 3, 0, 0, 4, 5, 6])
>>> from numpy import *
>>> r_[1:5] # same as arange(1,5)
array([1, 2, 3, 4])
>>> r_[1:10:4] # same as arange(1,10,4)
array([1, 5, 9])
>>> r_[1:10:4j] # same as linspace(1,10,4), 4 equally-spaced elements between 1 and 10 inclusive
array([ 1., 4., 7., 10.])
>>> r_[1:5,7,1:10:4] # sequences separated with commas are concatenated
array([1, 2, 3, 4, 7, 1, 5, 9])
>>> r_['r', 1:3] # return a matrix. If 1-d, result is a 1xN matrix
matrix([[1, 2]])
>>> r_['c',1:3] # return a matrix. If 1-d, result is a Nx1 matrix
matrix([[1],
[2]])
>>> a = array([[1,2,3],[4,5,6]])
>>> r_[a,a] # concatenation along 1st (default) axis (row-wise, that's why it's called r_)
array([[1, 2, 3],
[4, 5, 6],
[1, 2, 3],
[4, 5, 6]])
>>> r_['-1',a,a] # concatenation along last axis, same as c_[a,a]
array([[1, 2, 3, 1, 2, 3],
[4, 5, 6, 4, 5, 6]])
See also: c_, s_, arange, linspace, hstack, vstack, column_stack, concatenate, bmat
randint()
numpy.random.randint(...)
Return random integers x such that low <= x < high. randint(low, high=None, size=None) -> random values If high is None, then 0 <= x < low.
Similar to random_integers(), but the upper bound is exclusive and the default lower bound is 0 rather than 1.
See random_integers
random_integers()
numpy.random.random_integers(...)
Return random integers x such that low <= x <= high. random_integers(low, high=None, size=None) -> random values. If high is None, then 1 <= x <= low.
>>> from numpy import *
>>> from numpy.random import *
>>> random_integers(-1,5,(2,2))
array([[ 3, -1],
[-1, 0]])
See also: random_sample, uniform, poisson, seed
random_sample()
numpy.random.random_sample(...)
Return random floats in the half-open interval [0.0, 1.0). random_sample(size=None) -> random values
>>> from numpy import *
>>> from numpy.random import *
>>> random_sample((3,2))
array([[ 0.76228008, 0.00210605],
[ 0.44538719, 0.72154003],
[ 0.22876222, 0.9452707 ]])
See also: ranf, sample, rand, seed
ranf()
numpy.random.ranf(...)
Return random floats in the half-open interval [0.0, 1.0). random_sample(size=None) -> random values
Synonym for random_sample
See random_sample, sample
rank()
numpy.rank(a)
Return the number of dimensions of a. In old Numeric, rank was the term used for the number of dimensions. If a is not already an array, a conversion is attempted. Scalars are zero dimensional. *Parameters*: a : {array_like} Array whose number of dimensions is desired. If a is not an array, a conversion is attempted. *Returns*: number_of_dimensions : {integer} Returns the number of dimensions. *See Also*: `ndim` : equivalent function `ndarray.ndim` : equivalent method `shape` : dimensions of array `ndarray.shape` : dimensions of array *Examples* >>> rank([[1,2,3],[4,5,6]]) 2 >>> rank(array([[1,2,3],[4,5,6]])) 2 >>> rank(1) 0
ravel()
numpy.ravel(a, order='C')
Return a 1d array containing the elements of a. Returns the elements of a as a 1d array. The elements in the new array are taken in the order specified by the order keyword. The new array is a view of a if possible, otherwise it is a copy. *Parameters*: a : {array_like} order : {'C','F'}, optional If order is 'C' the elements are taken in row major order. If order is 'F' they are taken in column major order. *Returns*: 1d_array : {array} *See Also*: `ndarray.flat` : 1d iterator over the array. `ndarray.flatten` : 1d array copy of the elements of a in C order. *Examples* >>> x = array([[1,2,3],[4,5,6]]) >>> x array([[1, 2, 3], [4, 5, 6]]) >>> ravel(x) array([1, 2, 3, 4, 5, 6])
ndarray.ravel(...)
a.ravel([fortran]) return a 1-d array (copy only if needed)
>>> from numpy import *
>>> a = array([[1,2],[3,4]])
>>> a.ravel() # 1-d version of a
array([1, 2, 3, 4])
>>> b = a[:,0].ravel() # a[:,0] does not occupy a single memory segment, thus b is a copy, not a reference
>>> b
array([1, 3])
>>> c = a[0,:].ravel() # a[0,:] occupies a single memory segment, thus c is a reference, not a copy
>>> c
array([1, 2])
>>> b[0] = -1
>>> c[1] = -2
>>> a
array([[ 1, -2],
[ 3, 4]])
>>> ravel(a) # also exists
See also: flatten
real() or .real
numpy.real(val)
Return the real part of val. Useful if val may be a scalar or an array.
ndarray.real
Real part of the array.
>>> from numpy import *
>>> a = array([1+2j,3+4j,5+6j])
>>> a.real
array([ 1., 3., 5.])
>>> a.real = 9
>>> a
array([ 9.+2.j, 9.+4.j, 9.+6.j])
>>> a.real = array([9,8,7])
>>> a
array([ 9.+2.j, 8.+4.j, 7.+6.j])
real_if_close()
numpy.real_if_close(a, tol=100)
If a is a complex array, return it as a real array if the imaginary part is close enough to zero. "Close enough" is defined as tol*(machine epsilon of a's element type).
recarray()
numpy.recarray(...)
numpy.core.records.recarray(...)
>>> from numpy import *
>>> num = 2
>>> a = recarray(num, formats='i4,f8,f8',names='id,x,y')
>>> a['id'] = [3,4]
>>> a['id']
array([3, 4])
>>> a = rec.fromrecords([(35,1.2,7.3),(85,9.3,3.2)], names='id,x,y') # fromrecords is in the numpy.rec submodule
>>> a['id']
array([35, 85])
reciprocal()
numpy.reciprocal(...)
y = reciprocal(x) compute 1/x
reduce
>>> from numpy import *
>>> add.reduce(array([1.,2.,3.,4.])) # computes ((((1.)+2.)+3.)+4.)
10.0
>>> multiply.reduce(array([1.,2.,3.,4.])) # works also with other operands. Computes ((((1.)*2.)*3.)*4.)
24.0
>>> add.reduce(array([[1,2,3],[4,5,6]]), axis = 0) # reduce every column separately
array([5, 7, 9])
>>> add.reduce(array([[1,2,3],[4,5,6]]), axis = 1) # reduce every row separately
array([ 6, 15])
See also: accumulate, sum, prod
remainder()
numpy.remainder(...)
y = remainder(x1,x2) computes x1-n*x2 where n is floor(x1 / x2)
repeat()
numpy.repeat(a, repeats, axis=None)
Repeat elements of an array. *Parameters*: a : {array_like} Blah. repeats : {integer, integer_array} The number of repetitions for each element. If a plain integer, then it is applied to all elements. If an array, it needs to be of the same length as the chosen axis. axis : {None, integer}, optional The axis along which to repeat values. If None, then this function will operated on the flattened array `a` and return a similarly flat result. *Returns*: repeated_array : array *See Also*: `ndarray.repeat` : equivalent method *Examples* >>> repeat([0, 1, 2], 2) array([0, 0, 1, 1, 2, 2]) >>> repeat([0, 1, 2], [2, 3, 4]) array([0, 0, 1, 1, 1, 2, 2, 2, 2])
ndarray.repeat(...)
a.repeat(repeats=, axis=none) copy elements of a, repeats times. the repeats argument must be a sequence of length a.shape[axis] or a scalar.
>>> from numpy import *
>>> repeat(7., 4)
array([ 7., 7., 7., 7.])
>>> a = array([10,20])
>>> a.repeat([3,2])
array([10, 10, 10, 20, 20])
>>> repeat(a, [3,2]) # also exists
>>> a = array([[10,20],[30,40]])
>>> a.repeat([3,2,1,1])
array([10, 10, 10, 20, 20, 30, 40])
>>> a.repeat([3,2],axis=0)
array([[10, 20],
[10, 20],
[10, 20],
[30, 40],
[30, 40]])
>>> a.repeat([3,2],axis=1)
array([[10, 10, 10, 20, 20],
[30, 30, 30, 40, 40]])
See also: tile
require()
numpy.require(a, dtype=None, requirements=None)
reshape()
numpy.reshape(a, newshape, order='C')
Returns an array containing the data of a, but with a new shape. *Parameters*: a : array Array to be reshaped. newshape : shape tuple or int The new shape should be compatible with the original shape. If an integer, then the result will be a 1D array of that length. order : {'C', 'FORTRAN'}, optional Determines whether the array data should be viewed as in C (row-major) order or FORTRAN (column-major) order. *Returns*: reshaped_array : array This will be a new view object if possible; otherwise, it will return a copy. *See Also*: `ndarray.reshape` : Equivalent method.
ndarray.reshape(...)
a.reshape(d1, d2, ..., dn, order='c') Return a new array from this one. The new array must have the same number of elements as self. Also always returns a view or raises a ValueError if that is impossible.
>>> from numpy import *
>>> x = arange(12)
>>> x.reshape(3,4) # array with 3 rows and 4 columns. 3x4=12. Total number of elements is always the same.
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.reshape(3,2,2) # 3x2x2 array; 3x2x2 = 12. x itself does _not_ change.
array([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]],
[[ 8, 9],
[10, 11]]])
>>> x.reshape(2,-1) # 'missing' -1 value n is calculated so that 2xn=12, so n=6
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11]])
>>> x.reshape(12) # reshape(1,12) is not the same as reshape(12)
array([0,1,2,3,4,5,6,7,8,9,10,11])
>>> reshape(x,(2,6)) # Separate function reshape() also exists
resize()
numpy.resize(a, new_shape)
Return a new array with the specified shape. The original array's total size can be any size. The new array is filled with repeated copies of a. Note that a.resize(new_shape) will fill the array with 0's beyond current definition of a. *Parameters*: a : {array_like} Array to be reshaped. new_shape : {tuple} Shape of reshaped array. *Returns*: reshaped_array : {array} The new array is formed from the data in the old array, repeated if necessary to fill out the required number of elements, with the new shape.
ndarray.resize(...)
a.resize(new_shape, refcheck=True, order=False) -> None. Change array shape. Change size and shape of self inplace. Array must own its own memory and not be referenced by other arrays. Returns None.
>>> from numpy import *
>>> a = array([1,2,3,4])
>>> a.resize(2,2) # changes shape of 'a' itself
>>> print a
[[1 2]
[3 4]]
>>> a.resize(3,2) # reallocates memory of 'a' to change nr of elements, fills excess elements with 0
>>> print a
[[1 2]
[3 4]
[0 0]]
>>> a.resize(2,4)
>>> print a
[[1 2 3 4]
[0 0 0 0]]
>>> a.resize(2,1) # throws away elements of 'a' to fit new shape
>>> print a
[[1]
[2]]
But, there is a caveat:
>>> b = array([1,2,3,4]) >>> c = b # c is reference to b, it doesn't 'own' its data >>> c.resize(2,2) # no problem, nr of elements doesn't change >>> c.resize(2,3) # doesn't work, c is only a reference Traceback (most recent call last): File "<stdin>", line 1, in ? ValueError: cannot resize an array that has been referenced or is referencing another array in this way. Use the resize function >>> b.resize(2,3) # doesn't work, b is referenced by another array Traceback (most recent call last): File "<stdin>", line 1, in ? ValueError: cannot resize an array that has been referenced or is referencing another array in this way. Use the resize function
and it's not always obvious what the reference is:
>>> d = arange(4) >>> d array([0, 1, 2, 3]) >>> d.resize(5) # doesn't work, but where's the reference? Traceback (most recent call last): File "<stdin>", line 1, in ? ValueError: cannot resize an array that has been referenced or is referencing another array in this way. Use the resize function >>> _ # '_' was a reference to d! array([0, 1, 2, 3]) >>> d = resize(d, 5) # this does work, however >>> d array([0, 1, 2, 3, 0])
See also: reshape
restoredot()
numpy.restoredot(...)
restoredot() restores dots to defaults.
right_shift()
numpy.right_shift(...)
y = right_shift(x1,x2) computes x1 >> x2 (x1 shifted to right by x2 bits) elementwise.
rint()
numpy.rint(...)
y = rint(x) round x elementwise to the nearest integer, round halfway cases away from zero
roll()
numpy.roll(a, shift, axis=None)
Roll the elements in the array by 'shift' positions along the given axis.
rollaxis()
numpy.rollaxis(a, axis, start=0)
Return transposed array so that axis is rolled before start. if a.shape is (3,4,5,6) rollaxis(a, 3, 1).shape is (3,6,4,5) rollaxis(a, 2, 0).shape is (5,3,4,6) rollaxis(a, 1, 3).shape is (3,5,4,6) rollaxis(a, 1, 4).shape is (3,5,6,4)
>>> from numpy import *
>>> a = arange(3*4*5).reshape(3,4,5)
>>> a.shape
(3, 4, 5)
>>> b = rollaxis(a,1,0) # transpose array so that axis 1 is 'rolled' before axis 0
>>> b.shape
(4, 3, 5)
>>> b = rollaxis(a,0,2) # transpose array so that axis 0 is 'rolled' before axis 2
>>> b.shape
(4, 3, 5)
roots()
numpy.roots(p)
Return the roots of the polynomial coefficients in p. The values in the rank-1 array p are coefficients of a polynomial. If the length of p is n+1 then the polynomial is p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
rot90()
numpy.rot90(m, k=1)
returns the array found by rotating m by k*90 degrees in the counterclockwise direction. Works on the first two dimensions of m.
>>> from numpy import *
>>> a = arange(12).reshape(4,3)
>>> a
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
>>> rot90(a) # 'rotate' the matrix 90 degrees
array([[ 2, 5, 8, 11],
[ 1, 4, 7, 10],
[ 0, 3, 6, 9]])
round()
numpy.round(a, decimals=0, out=None)
Round a to the given number of decimals. The real and imaginary parts of complex numbers are rounded separately. The result of rounding a float is a float so the type must be cast if integers are desired. Nothing is done if the input is an integer array and the decimals parameter has a value >= 0. *Parameters*: a : {array_like} Array containing numbers whose rounded values are desired. If a is not an array, a conversion is attempted. decimals : {0, integer}, optional Number of decimal places to round to. When decimals is negative it specifies the number of positions to the left of the decimal point. out : {None, array}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. *Returns*: rounded_array : {array} If out=None, returns a new array of the same type as a containing the rounded values, otherwise a reference to the output array is returned. *See Also*: `around` : equivalent function `ndarray.round` : equivalent method *Notes* Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due to the inexact representation of decimal fractions in IEEE floating point and the errors introduced when scaling by powers of ten. *Examples* >>> round_([.5, 1.5, 2.5, 3.5, 4.5]) array([ 0., 2., 2., 4., 4.]) >>> round_([1,2,3,11], decimals=1) array([ 1, 2, 3, 11]) >>> round_([1,2,3,11], decimals=-1) array([ 0, 0, 0, 10])
ndarray.round(...)
a.round(decimals=0, out=None) -> out (a). Rounds to 'decimals' places. Keyword arguments: decimals -- number of decimals to round to (default 0). May be negative. out -- existing array to use for output (default a). Return: Reference to out, where None specifies the original array a. Round to the specified number of decimals. When 'decimals' is negative it specifies the number of positions to the left of the decimal point. The real and imaginary parts of complex numbers are rounded separately. Nothing is done if the array is not of float type and 'decimals' is >= 0. The keyword 'out' may be used to specify a different array to hold the result rather than the default 'a'. If the type of the array specified by 'out' differs from that of 'a', the result is cast to the new type, otherwise the original type is kept. Floats round to floats by default. Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due to the inexact representation of decimal fractions in IEEE floating point and the errors introduced in scaling the numbers when 'decimals' is something other than 0.
round(decimals=0, out=None) -> reference to rounded values.
>>> from numpy import *
>>> array([1.2345, -1.647]).round() # rounds the items. Type remains float64.
array([ 1., -2.])
>>> array([1, -1]).round() # integer arrays stay as they are
array([ 1, -1])
>>> array([1.2345, -1.647]).round(decimals=1) # round to 1 decimal place
array([ 1.2, -1.6])
>>> array([1.2345+2.34j, -1.647-0.238j]).round() # both real and complex parts are rounded
array([ 1.+2.j, -2.-0.j])
>>> array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5]).round() # numpy rounds x.5 to nearest even.
array([ 0., 0., 1., 2., 2., 2.])
>>> a = zeros(3, dtype=int)
>>> array([1.2345, -1.647, 3.141]).round(out=a) # different output arrays may be specified
array([ 1, -2, 3])
>>> a # and the output is cast to the new type
array([ 1, -2, 3])
>>> round_(array([1.2345, -1.647])) # round_ is the functional form. -> a copy.
array([ 1., -2.])
>>> around(array([1.2345, -1.647])) # around is an alias of round_.
array([ 1., -2.])
See also: ceil, floor, fix, astype
round_()
numpy.round_(a, decimals=0, out=None)
Round a to the given number of decimals. The real and imaginary parts of complex numbers are rounded separately. The result of rounding a float is a float so the type must be cast if integers are desired. Nothing is done if the input is an integer array and the decimals parameter has a value >= 0. *Parameters*: a : {array_like} Array containing numbers whose rounded values are desired. If a is not an array, a conversion is attempted. decimals : {0, integer}, optional Number of decimal places to round to. When decimals is negative it specifies the number of positions to the left of the decimal point. out : {None, array}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. *Returns*: rounded_array : {array} If out=None, returns a new array of the same type as a containing the rounded values, otherwise a reference to the output array is returned. *See Also*: `around` : equivalent function `ndarray.round` : equivalent method *Notes* Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due to the inexact representation of decimal fractions in IEEE floating point and the errors introduced when scaling by powers of ten. *Examples* >>> round_([.5, 1.5, 2.5, 3.5, 4.5]) array([ 0., 2., 2., 4., 4.]) >>> round_([1,2,3,11], decimals=1) array([ 1, 2, 3, 11]) >>> round_([1,2,3,11], decimals=-1) array([ 0, 0, 0, 10])
row_stack()
numpy.row_stack(tup)
Stack arrays in sequence vertically (row wise) Description: Take a sequence of arrays and stack them vertically to make a single array. All arrays in the sequence must have the same shape along all but the first axis. vstack will rebuild arrays divided by vsplit. Arguments: tup -- sequence of arrays. All arrays must have the same shape. Examples: >>> import numpy >>> a = array((1,2,3)) >>> b = array((2,3,4)) >>> numpy.vstack((a,b)) array([[1, 2, 3], [2, 3, 4]]) >>> a = array([[1],[2],[3]]) >>> b = array([[2],[3],[4]]) >>> numpy.vstack((a,b)) array([[1], [2], [3], [2], [3], [4]])
s_
numpy.s_
A nicer way to build up index tuples for arrays. For any index combination, including slicing and axis insertion, 'a[indices]' is the same as 'a[index_exp[indices]]' for any array 'a'. However, 'index_exp[indices]' can be used anywhere in Python code and returns a tuple of slice objects that can be used in the construction of complex index expressions.
>>> from numpy import *
>>> s_[1:5] # easy slice generating. See r_[] examples.
slice(1, 5, None)
>>> s_[1:10:4]
slice(1, 10, 4)
>>> s_[1:10:4j]
slice(1, 10, 4j)
>>> s_['r',1:3] # to return a matrix. If 1-d, result is a 1xN matrix
('r', slice(1, 3, None))
>>> s_['c',1:3] # to return a matrix. If 1-d, result is a Nx1 matrix
('c', slice(1, 3, None))
See also: r_, c_, slice, index_exp
sample()
numpy.random.sample(...)
Return random floats in the half-open interval [0.0, 1.0). random_sample(size=None) -> random values
Synonym for random_sample
See also: random_sample, ranf
savetxt()
numpy.savetxt(fname, X, fmt='%.18e', delimiter=' ')
Save the data in X to file fname using fmt string to convert the data to strings fname can be a filename or a file handle. If the filename ends in .gz, the file is automatically saved in compressed gzip format. The loadtxt() command understands gzipped files transparently. Example usage: savetxt('test.out', X) # X is an array savetxt('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays savetxt('test2.out', x) # x is 1D savetxt('test3.out', x, fmt='%1.4e') # use exponential notation delimiter is used to separate the fields, eg delimiter ',' for comma-separated values
>>> from numpy import *
>>> savetxt("myfile.txt", data) # data is 2D array
>>> savetxt("myfile.txt", x) # x is 1D array. 1 column in file.
>>> savetxt("myfile.txt", (x,y)) # x,y are 1D arrays. 2 rows in file.
>>> savetxt("myfile.txt", transpose((x,y))) # x,y are 1D arrays. 2 columns in file.
>>> savetxt("myfile.txt", transpose((x,y)), fmt='%6.3f') # use new format instead of '%.18e'
>>> savetxt("myfile.txt", data, delimiter = ';') # use ';' to separate columns instead of space
sctype2char()
numpy.sctype2char(sctype)
searchsorted()
numpy.searchsorted(a, v, side='left')
Return indices where keys in v should be inserted to maintain order. Find the indices into a sorted array such that if the corresponding keys in v were inserted before the indices the order of a would be preserved. If side='left', then the first such index is returned. If side='right', then the last such index is returned. If there is no such index because the key is out of bounds, then the length of a is returned, i.e., the key would need to be appended. The returned index array has the same shape as v. *Parameters*: a : 1-d array Array must be sorted in ascending order. v : array or list type Array of keys to be searched for in a. side : {'left', 'right'}, optional If 'left', the index of the first location where the key could be inserted is found, if 'right', the index of the last such element is returned. If there is no such element, then either 0 or N is returned, where N is the size of the array. *Returns*: indices : integer array Array of insertion points with the same shape as v. *See Also*: `sort` : Inplace sort. `histogram` : Produce histogram from 1-d data. *Notes* The array a must be 1-d and is assumed to be sorted in ascending order. Searchsorted uses binary search to find the required insertion points. *Examples* >>> searchsorted([1,2,3,4,5],[6,4,0]) array([5, 3, 0])
ndarray.searchsorted(...)
a.searchsorted(v, side='left') -> index array. Find the indices into a sorted array such that if the corresponding keys in v were inserted before the indices the order of a would be preserved. If side='left', then the first such index is returned. If side='right', then the last such index is returned. If there is no such index because the key is out of bounds, then the length of a is returned, i.e., the key would need to be appended. The returned index array has the same shape as v. :Parameters: v : array or list type Array of keys to be searched for in a. side : string Possible values are : 'left', 'right'. Default is 'left'. Return the first or last index where the key could be inserted. :Returns: indices : integer array The returned array has the same shape as v. :SeeAlso: - sort - histogram :Notes: ------- The array a must be 1-d and is assumed to be sorted in ascending order. Searchsorted uses binary search to find the required insertion points.
searchsorted(keys, side="left")
>>> from numpy import *
>>> a = array([1,2,2,3]) # a is 1-D and in ascending order.
>>> a.searchsorted(2) # side defaults to "left"
1 # a[1] is the first element in a >= 2
>>> a.searchsorted(2, side='right') # look for the other end of the run of twos
3 # a[3] is the first element in a > 2
>>> a.searchsorted(4) # 4 is greater than any element in a
4 # the returned index is 1 past the end of a.
>>> a.searchsorted([[1,2],[2,3]]) # whoa, fancy keys
array([[0, 1], # the returned array has the same shape as the keys
[1, 3]])
>>> searchsorted(a,2) # there is a functional form
1
seed()
numpy.random.seed(...)
Seed the generator. seed(seed=None) seed can be an integer, an array (or other sequence) of integers of any length, or None. If seed is None, then RandomState will try to read data from /dev/urandom (or the Windows analogue) if available or seed from the clock otherwise.
>>> seed([1]) # seed the pseudo-random number generator
>>> rand(3)
array([ 0.13436424, 0.84743374, 0.76377462])
>>> seed([1])
>>> rand(3)
array([ 0.13436424, 0.84743374, 0.76377462])
>>> rand(3)
array([ 0.25506903, 0.49543509, 0.44949106])
select()
numpy.select(condlist, choicelist, default=0)
Return an array composed of different elements in choicelist, depending on the list of conditions. :Parameters: condlist : list of N boolean arrays of length M The conditions C_0 through C_(N-1) which determine from which vector the output elements are taken. choicelist : list of N arrays of length M The vectors V_0 through V_(N-1), from which the output elements are chosen. :Returns: output : 1-dimensional array of length M The output at position m is the m-th element of the first vector V_n for which C_n[m] is non-zero. Note that the output depends on the order of conditions, since the first satisfied condition is used. Equivalent to: output = [] for m in range(M): output += [V[m] for V,C in zip(values,cond) if C[m]] or [default]
>>> from numpy import *
>>> x = array([5., -2., 1., 0., 4., -1., 3., 10.])
>>> select([x < 0, x == 0, x <= 5], [x-0.1, 0.0, x+0.2], default = 100.)
array([ 5.2, -2.1, 1.2, 0. , 4.2, -1.1, 3.2, 100. ])
>>>
>>> # This is how it works:
>>>
>>> result = zeros_like(x)
>>> for n in range(len(x)):
... if x[n] < 0: result[n] = x[n]-0.1 # The order of the conditions matters. The first one that
... elif x[n] == 0: result[n] = 0.0 # matches, will be 'selected'.
... elif x[n] <= 5: result[n] = x[n]+0.2
... else: result[n] = 100. # The default is used when none of the conditions match
...
>>> result
array([ 5.2, -2.1, 1.2, 0. , 4.2, -1.1, 3.2, 100. ])
set_numeric_ops()
numpy.set_numeric_ops(...)
set_numeric_ops(op=func, ...) Set some or all of the number methods for all array objects. Do not forget **dict can be used as the argument list. Return the functions that were replaced, which can be stored and set later.
set_printoptions()
numpy.set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None)
Set options associated with printing. :Parameters: precision : int Number of digits of precision for floating point output (default 8). threshold : int Total number of array elements which trigger summarization rather than full repr (default 1000). edgeitems : int Number of array items in summary at beginning and end of each dimension (default 3). linewidth : int The number of characters per line for the purpose of inserting line breaks (default 75). suppress : bool Whether or not suppress printing of small floating point values using scientific notation (default False). nanstr : string String representation of floating point not-a-number (default nan). infstr : string String representation of floating point infinity (default inf).
>>> from numpy import *
>>> x = array([pi, 1.e-200])
>>> x
array([ 3.14159265e+000, 1.00000000e-200])
>>> set_printoptions(precision=3, suppress=True) # 3 digits behind decimal point + suppress small values
>>> x
array([ 3.142, 0. ])
>>>
>>> help(set_printoptions) # see help() for keywords 'threshold','edgeitems' and 'linewidth'
set_string_function()
numpy.set_string_function(...)
set_string_function(f, repr=1) Set the python function f to be the function used to obtain a pretty printable string version of an array whenever an array is printed. f(M) should expect an array argument M, and should return a string consisting of the desired representation of M for printing.
setbufsize()
numpy.setbufsize(size)
Set the size of the buffer used in ufuncs.
setdiff1d()
numpy.setdiff1d(ar1, ar2)
Set difference of 1D arrays with unique elements. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. :Parameters: - `ar1` : array - `ar2` : array :Returns: - `difference` : array The values in ar1 that are not in ar2. :See also: numpy.lib.arraysetops has a number of other functions for performing set operations on arrays.
seterr()
numpy.seterr(all=None, divide=None, over=None, under=None, invalid=None)
Set how floating-point errors are handled. Valid values for each type of error are the strings "ignore", "warn", "raise", and "call". Returns the old settings. If 'all' is specified, values that are not otherwise specified will be set to 'all', otherwise they will retain their old values. Note that operations on integer scalar types (such as int16) are handled like floating point, and are affected by these settings. Example: >>> seterr(over='raise') # doctest: +SKIP {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'} >>> seterr(all='warn', over='raise') # doctest: +SKIP {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'} >>> int16(32000) * int16(3) # doctest: +SKIP Traceback (most recent call last): File "<stdin>", line 1, in ? FloatingPointError: overflow encountered in short_scalars >>> seterr(all='ignore') # doctest: +SKIP {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
seterrcall()
numpy.seterrcall(func)
Set the callback function used when a floating-point error handler is set to 'call' or the object with a write method for use when the floating-point error handler is set to 'log' 'func' should be a function that takes two arguments. The first is type of error ("divide", "over", "under", or "invalid"), and the second is the status flag (= divide + 2*over + 4*under + 8*invalid). Returns the old handler.
seterrobj()
numpy.seterrobj(...)
setfield()
ndarray.setfield(...)
m.setfield(value, dtype, offset) -> None. places val into field of the given array defined by the data type and offset.
setflags()
ndarray.setflags(...)
a.setflags(write=None, align=None, uic=None)
setmember1d()
numpy.setmember1d(ar1, ar2)
Return a boolean array of shape of ar1 containing True where the elements of ar1 are in ar2 and False otherwise. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. :Parameters: - `ar1` : array - `ar2` : array :Returns: - `mask` : bool array The values ar1[mask] are in ar2. :See also: numpy.lib.arraysetops has a number of other functions for performing set operations on arrays.
setxor1d()
numpy.setxor1d(ar1, ar2)
Set exclusive-or of 1D arrays with unique elements. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. :Parameters: - `ar1` : array - `ar2` : array :Returns: - `xor` : array The values that are only in one, but not both, of the input arrays. :See also: numpy.lib.arraysetops has a number of other functions for performing set operations on arrays.
shape() or .shape
numpy.shape(a)
Return the shape of a. *Parameters*: a : {array_like} Array whose shape is desired. If a is not an array, a conversion is attempted. *Returns*: tuple_of_integers : The elements of the tuple are the length of the corresponding array dimension. *Examples* >>> shape(eye(3)) (3, 3) >>> shape([[1,2]]) (1, 2)
ndarray.shape
Tuple of array dimensions.
>>> from numpy import *
>>> x = arange(12)
>>> x.shape
(12,)
>>> x.shape = (3,4) # array with 3 rows and 4 columns. 3x4=12. Total number of elements is always the same.
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.shape = (3,2,2) # 3x2x2 array; 3x2x2 = 12. x itself _does_ change, unlike reshape().
>>> x
array([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]],
[[ 8, 9],
[10, 11]]])
>>> x.shape = (2,-1) # 'missing' -1 value n is calculated so that 2xn=12, so n=6
>>> x
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11]])
>>> x.shape = 12 # x.shape = (1,12) is not the same as x.shape = 12
>>> x
array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
See also: reshape
show_config()
numpy.show_config()
shuffle()
numpy.random.shuffle(...)
Modify a sequence in-place by shuffling its contents. shuffle(x)
>>> from numpy import *
>>> from numpy.random import shuffle
>>> x = array([1,50,-1,3])
>>> shuffle(x) # shuffle the elements of x
>>> print x
[-1 3 50 1]
>>> x = ['a','b','c','z']
>>> shuffle(x) # works with any sequence
>>> print x
['a', 'c', 'z', 'b']
See also: permutation, bytes
sign()
numpy.sign(...)
y = sign(x) returns -1 if x < 0 and 0 if x==0 and 1 if x > 0
signbit()
numpy.signbit(...)
y = signbit(x) returns True where signbit of x is set (x<0).
sin()
numpy.sin(...)
y = sin(x) sine elementwise.
sinc()
numpy.sinc(x)
sinc(x) returns sin(pi*x)/(pi*x) at all points of array x.
sinh()
numpy.sinh(...)
y = sinh(x) hyperbolic sine elementwise.
size() or .size
numpy.size(a, axis=None)
Return the number of elements along given axis. *Parameters*: a : {array_like} Array whose axis size is desired. If a is not an array, a conversion is attempted. axis : {None, integer}, optional Axis along which the elements are counted. None means all elements in the array. *Returns*: element_count : {integer} Count of elements along specified axis. *See Also*: `shape` : dimensions of array `ndarray.shape` : dimensions of array `ndarray.size` : number of elements in array *Examples* >>> a = array([[1,2,3],[4,5,6]]) >>> size(a) 6 >>> size(a,1) 3 >>> size(a,0) 2
ndarray.size
Number of elements in the array.
slice
>>> s = slice(3,9,2) # slice objects exist outside numpy
>>> from numpy import *
>>> a = arange(20)
>>> a[s]
array([3, 5, 7])
>>> a[3:9:2] # same thing
array([3, 5, 7])
See also: [], ..., newaxis, s_, ix_, indices, index_exp
solve()
numpy.linalg.solve(a, b)
Return the solution of a*x = b
>>> from numpy import *
>>> from numpy.linalg import solve
>>>
>>> # The system of equations we want to solve for (x0,x1,x2):
>>> # 3 * x0 + 1 * x1 + 5 * x2 = 6
>>> # 1 * x0 + 8 * x2 = 7
>>> # 2 * x0 + 1 * x1 + 4 * x2 = 8
>>>
>>> a = array([[3,1,5],[1,0,8],[2,1,4]])
>>> b = array([6,7,8])
>>> x = solve(a,b)
>>> print x # This is our solution
[-3.28571429 9.42857143 1.28571429]
>>>
>>> dot(a,x) # Just checking if we indeed obtain the righthand side
array([ 6., 7., 8.])
See also: inv
sometrue()
numpy.sometrue(a, axis=None, out=None)
Perform a logical_or over the given axis. *See Also*: `ndarray.any` : equivalent method
>>> from numpy import *
>>> b = array([True, False, True, True])
>>> sometrue(b)
True
>>> a = array([1, 5, 2, 7])
>>> sometrue(a >= 5)
True
sort()
numpy.sort(a, axis=-1, kind='quicksort', order=None)
Return copy of 'a' sorted along the given axis. Perform an inplace sort along the given axis using the algorithm specified by the kind keyword. *Parameters*: a : array Array to be sorted. axis : {None, int} optional Axis along which to sort. None indicates that the flattened array should be used. kind : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm to use. order : {None, list type}, optional When a is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. *Returns*: sorted_array : array of same type as a *See Also*: `argsort` : Indirect sort. `lexsort` : Indirect stable sort on multiple keys. `searchsorted` : Find keys in sorted array. *Notes* The various sorts are characterized by average speed, worst case performance, need for work space, and whether they are stable. A stable sort keeps items with the same key in the same relative order. The three available algorithms have the following properties: +-----------+-------+-------------+------------+-------+ | kind | speed | worst case | work space | stable| +===========+=======+=============+============+=======+ | quicksort | 1 | O(n^2) | 0 | no | +-----------+-------+-------------+------------+-------+ | mergesort | 2 | O(n*log(n)) | ~n/2 | yes | +-----------+-------+-------------+------------+-------+ | heapsort | 3 | O(n*log(n)) | 0 | no | +-----------+-------+-------------+------------+-------+ All the sort algorithms make temporary copies of the data when the sort is not along the last axis. Consequently, sorts along the last axis are faster and use less space than sorts along other axis.
ndarray.sort(...)
a.sort(axis=-1, kind='quicksort', order=None) -> None. Perform an inplace sort along the given axis using the algorithm specified by the kind keyword. :Parameters: axis : integer Axis to be sorted along. None indicates that the flattened array should be used. Default is -1. kind : string Sorting algorithm to use. Possible values are 'quicksort', 'mergesort', or 'heapsort'. Default is 'quicksort'. order : list type or None When a is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. :Returns: None :SeeAlso: - argsort : indirect sort - lexsort : indirect stable sort on multiple keys - searchsorted : find keys in sorted array :Notes: ------ The various sorts are characterized by average speed, worst case performance, need for work space, and whether they are stable. A stable sort keeps items with the same key in the same relative order. The three available algorithms have the following properties: |------------------------------------------------------| | kind | speed | worst case | work space | stable| |------------------------------------------------------| |'quicksort'| 1 | O(n^2) | 0 | no | |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | |'heapsort' | 3 | O(n*log(n)) | 0 | no | |------------------------------------------------------| All the sort algorithms make temporary copies of the data when the sort is not along the last axis. Consequently, sorts along the last axis are faster and use less space than sorts along other axis.
sort(axis=-1, kind="quicksort")
>>> from numpy import *
>>> a = array([2,0,8,4,1])
>>> a.sort() # in-place sorting with quicksort (default)
>>> a
array([0, 1, 2, 4, 8])
>>> a.sort(kind='mergesort') # algorithm options are 'quicksort', 'mergesort' and 'heapsort'
>>> a = array([[8,4,1],[2,0,9]])
>>> a.sort(axis=0)
>>> a
array([[2, 0, 1],
[8, 4, 9]])
>>> a = array([[8,4,1],[2,0,9]])
>>> a.sort(axis=1) # default axis = -1
>>> a
array([[1, 4, 8],
[0, 2, 9]])
>>> sort(a) # there is a functional form
sort_complex()
numpy.sort_complex(a)
Sort 'a' as a complex array using the real part first and then the imaginary part if the real part is equal (the default sort order for complex arrays). This function is a wrapper ensuring a complex return type.
source()
numpy.source(object, output=<open file '<stdout>', mode 'w' at 0x00A5F068>)
Write source for this object to output.
split()
numpy.split(ary, indices_or_sections, axis=0)
Divide an array into a list of sub-arrays. Description: Divide ary into a list of sub-arrays along the specified axis. If indices_or_sections is an integer, ary is divided into that many equally sized arrays. If it is impossible to make an equal split, an error is raised. This is the only way this function differs from the array_split() function. If indices_or_sections is a list of sorted integers, its entries define the indexes where ary is split. Arguments: ary -- N-D array. Array to be divided into sub-arrays. indices_or_sections -- integer or 1D array. If integer, defines the number of (close to) equal sized sub-arrays. If it is a 1D array of sorted indices, it defines the indexes at which ary is divided. An empty list results in a single sub-array equal to the original array. axis -- integer. default=0. Specifies the axis along which to split ary. Caveats: Currently, the default for axis is 0. This means a 2D array is divided into multiple groups of rows. This seems like the appropriate default
>>> from numpy import *
>>> a = array([[1,2,3,4],[5,6,7,8]])
>>> split(a,2,axis=0) # split a in 2 parts. row-wise
[array([[1, 2, 3, 4]]), array([[5, 6, 7, 8]])]
>>> split(a,4,axis=1) # split a in 4 parts, column-wise
[array([[1],
[5]]), array([[2],
[6]]), array([[3],
[7]]), array([[4],
[8]])]
>>> split(a,3,axis=1) # impossible to split in 3 equal parts -> error (SEE: array_split)
Traceback (most recent call last):
<snip>
ValueError: array split does not result in an equal division
>>> split(a,[2,3],axis=1) # make a split before the 2nd and the 3rd column
[array([[1, 2],
[5, 6]]), array([[3],
[7]]), array([[4],
[8]])]
See also: dsplit, hsplit, vsplit, array_split, concatenate
sqrt()
numpy.sqrt(...)
y = sqrt(x) square-root elementwise. For real x, the domain is restricted to x>=0.
square()
numpy.square(...)
y = square(x) compute x**2.
squeeze()
numpy.squeeze(a)
Remove single-dimensional entries from the shape of a. *Examples* >>> x = array([[[1,1,1],[2,2,2],[3,3,3]]]) >>> x array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]) >>> x.shape (1, 3, 3) >>> squeeze(x).shape (3, 3)
ndarray.squeeze(...)
m.squeeze() eliminate all length-1 dimensions
>>> from numpy import *
>>> a = arange(6)
>>> a = a.reshape(1,2,1,1,3,1)
>>> a
array([[[[[[0],
[1],
[2]]]],
[[[[3],
[4],
[5]]]]]])
>>> a.squeeze() # result has shape 2x3, all dimensions with length 1 are removed
array([[0, 1, 2],
[3, 4, 5]])
>>> squeeze(a) # also exists
standard_normal()
numpy.random.standard_normal(...)
Standard Normal distribution (mean=0, stdev=1). standard_normal(size=None) -> random values
>>> standard_normal((2,3))
array([[ 1.12557608, -0.13464922, -0.35682992],
[-1.54090277, 1.21551589, -1.82854551]])
See also: randn, uniform, poisson, seed
std()
numpy.std(a, axis=None, dtype=None, out=None)
Compute the standard deviation along the specified axis. Returns the standard deviation of the array elements, a measure of the spread of a distribution. The standard deviation is computed for the flattened array by default, otherwise over the specified axis. *Parameters*: a : {array_like} Array containing numbers whose standard deviation is desired. If a is not an array, a conversion is attempted. axis : {None, integer}, optional Axis along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. dtype : {None, dtype}, optional Type to use in computing the standard deviation. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : {None, array}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. *Returns*: standard_deviation : {array, scalar}, see dtype parameter above. If out=None, returns a new array containing the standard deviation, otherwise a reference to the output array is returned. *See Also*: `var` : Variance `mean` : Average *Notes* The standard deviation is the square root of the average of the squared deviations from the mean, i.e. var = sqrt(mean((x - x.mean())**2)). The computed standard deviation is biased, i.e., the mean is computed by dividing by the number of elements, N, rather than by N-1. *Examples* >>> a = array([[1,2],[3,4]]) >>> std(a) 1.1180339887498949 >>> std(a,0) array([ 1., 1.]) >>> std(a,1) array([ 0.5, 0.5])
ndarray.std(...)
a.std(axis=None, dtype=None, out=None) -> standard deviation. Returns the standard deviation of the array elements, a measure of the spread of a distribution. The standard deviation is computed for the flattened array by default, otherwise over the specified axis. :Parameters: axis : integer Axis along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. dtype : type Type to use in computing the standard deviation. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : ndarray Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. :Returns: standard deviation : The return type varies, see above. A new array holding the result is returned unless out is specified, in which case a reference to out is returned. :SeeAlso: - var : variance - mean : average Notes ----- The standard deviation is the square root of the average of the squared deviations from the mean, i.e. var = sqrt(mean((x - x.mean())**2)). The computed standard deviation is biased, i.e., the mean is computed by dividing by the number of elements, N, rather than by N-1.
>>> from numpy import *
>>> a = array([1.,2,7])
>>> a.std() # normalized by N (not N-1)
2.6246692913372702
>>> a = array([[1.,2,7],[4,9,6]])
>>> a.std()
2.793842435706702
>>> a.std(axis=0) # standard deviation of each of the 3 columns
array([ 1.5, 3.5, 0.5])
>>> a.std(axis=1) # standard deviation of each of the 2 rows
array([ 2.62466929, 2.05480467])
subtract()
numpy.subtract(...)
y = subtract(x1,x2) subtracts the arguments elementwise.
sum()
numpy.sum(a, axis=None, dtype=None, out=None)
Sum the array over the given axis. *Parameters*: a : {array_type} Array containing elements whose sum is desired. If a is not an array, a conversion is attempted. axis : {None, integer} Axis over which the sum is taken. If None is used, then the sum is over all the array elements. dtype : {None, dtype}, optional Determines the type of the returned array and of the accumulator where the elements are summed. If dtype has the value None and the type of a is an integer type of precision less than the default platform integer, then the default platform integer precision is used. Otherwise, the dtype is the same as that of a. out : {None, array}, optional Array into which the sum can be placed. Its type is preserved and it must be of the right shape to hold the output. *Returns*: sum_along_axis : {array, scalar}, see dtype parameter above. Returns an array whose shape is the same as a with the specified axis removed. Returns a 0d array when a is 1d or axis=None. Returns a reference to the specified output array if specified. *See Also*: `ndarray.sum` : equivalent method *Examples* >>> sum([0.5, 1.5]) 2.0 >>> sum([0.5, 1.5], dtype=N.int32) 1 >>> sum([[0, 1], [0, 5]]) 6 >>> sum([[0, 1], [0, 5]], axis=1) array([1, 5])
ndarray.sum(...)
a.sum(axis=None, dtype=None) -> Sum of array over given axis. Sum the array over the given axis. If the axis is None, sum over all dimensions of the array. The optional dtype argument is the data type for the returned value and intermediate calculations. The default is to upcast (promote) smaller integer types to the platform-dependent int. For example, on 32-bit platforms: a.dtype default sum dtype --------------------------------------------------- bool, int8, int16, int32 int32 Warning: The arithmetic is modular and no error is raised on overflow. Examples: >>> array([0.5, 1.5]).sum() 2.0 >>> array([0.5, 1.5]).sum(dtype=int32) 1 >>> array([[0, 1], [0, 5]]).sum(axis=0) array([0, 6]) >>> array([[0, 1], [0, 5]]).sum(axis=1) array([1, 5]) >>> ones(128, dtype=int8).sum(dtype=int8) # overflow! -128
>>> from numpy import *
>>> a = array([1,2,3])
>>> a.sum()
6
>>> sum(a) # also exists
6
>>> a = array([[1,2,3],[4,5,6]])
>>> a.sum()
21
>>> a.sum(dtype=float) # specify type of output
21.0
>>> a.sum(axis=0) # sum over rows for each of the 3 columns
array([5, 7, 9])
>>> a.sum(axis=1) # sum over columns for each of the 2 rows
array([ 6, 15])
See also: accumulate, nan, cumsum, prod
svd()
numpy.linalg.svd(a, full_matrices=1, compute_uv=1)
Singular Value Decomposition. u,s,vh = svd(a) If a is an M x N array, then the svd produces a factoring of the array into two unitary (orthogonal) 2-d arrays u (MxM) and vh (NxN) and a min(M,N)-length array of singular values such that a == dot(u,dot(S,vh)) where S is an MxN array of zeros whose main diagonal is s. if compute_uv == 0, then return only the singular values if full_matrices == 0, then only part of either u or vh is returned so that it is MxN
>>> from numpy import *
>>> from numpy.linalg import svd
>>> A = array([[1., 3., 5.],[2., 4., 6.]]) # A is a (2x3) matrix
>>> U,sigma,V = svd(A)
>>> print U # U is a (2x2) unitary matrix
[[-0.61962948 -0.78489445]
[-0.78489445 0.61962948]]
>>> print sigma # non-zero diagonal elements of Sigma
[ 9.52551809 0.51430058]
>>> print V # V is a (3x3) unitary matrix
[[-0.2298477 -0.52474482 -0.81964194]
[ 0.88346102 0.24078249 -0.40189603]
[ 0.40824829 -0.81649658 0.40824829]]
>>> Sigma = zeros_like(A) # constructing Sigma from sigma
>>> n = min(A.shape)
>>> Sigma[:n,:n] = diag(sigma)
>>> print dot(U,dot(Sigma,V)) # A = U * Sigma * V
[[ 1. 3. 5.]
[ 2. 4. 6.]]
See also: pinv
swapaxes()
numpy.swapaxes(a, axis1, axis2)
Return array a with axis1 and axis2 interchanged.
ndarray.swapaxes(...)
a.swapaxes(axis1, axis2) -> new view with axes swapped.
>>> from numpy import *
>>> a = arange(30)
>>> a = a.reshape(2,3,5)
>>> a
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = a.swapaxes(1,2) # swap the 2nd and the 3rd axis
>>> b
array([[[ 0, 5, 10],
[ 1, 6, 11],
[ 2, 7, 12],
[ 3, 8, 13],
[ 4, 9, 14]],
[[15, 20, 25],
[16, 21, 26],
[17, 22, 27],
[18, 23, 28],
[19, 24, 29]]])
>>> b.shape
(2, 5, 3)
>>> b[0,0,0] = -1 # be aware that b is a reference, not a copy
>>> print a[0,0,0]
-1
take()
numpy.take(a, indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of a at the given indices. This function does the same thing as "fancy" indexing; however, it can be easier to use if you need to specify a given axis. *Parameters*: a : array The source array indices : int array The indices of the values to extract. axis : {None, int}, optional The axis over which to select values. None signifies that the operation should be performed over the flattened array. out : {None, array}, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. 'raise' -- raise an error 'wrap' -- wrap around 'clip' -- clip to the range *Returns*: subarray : array The returned array has the same type as a. *See Also*: `ndarray.take` : equivalent method
ndarray.take(...)
a.take(indices, axis=None, out=None, mode='raise') -> new array. The new array is formed from the elements of a indexed by indices along the given axis.
>>> from numpy import *
>>> a= array([10,20,30,40])
>>> a.take([0,0,3]) # [0,0,3] is a set of indices
array([10, 10, 40])
>>> a[[0,0,3]] # the same effect
array([10, 10, 40])
>>> a.take([[0,1],[0,1]]) # shape of return array depends on shape of indices array
array([[10, 20],
[10, 20]])
>>> a = array([[10,20,30],[40,50,60]])
>>> a.take([0,2],axis=1)
array([[10, 30],
[40, 60]])
>>> take(a,[0,2],axis=1) # also exists
See also: [], put, putmask, compress, choose
tan()
numpy.tan(...)
y = tan(x) tangent elementwise.
tanh()
numpy.tanh(...)
y = tanh(x) hyperbolic tangent elementwise.
tensordot()
numpy.tensordot(a, b, axes=2)
tensordot returns the product for any (ndim >= 1) arrays. r_{xxx, yyy} = \sum_k a_{xxx,k} b_{k,yyy} where the axes to be summed over are given by the axes argument. the first element of the sequence determines the axis or axes in arr1 to sum over, and the second element in axes argument sequence determines the axis or axes in arr2 to sum over. When there is more than one axis to sum over, the corresponding arguments to axes should be sequences of the same length with the first axis to sum over given first in both sequences, the second axis second, and so forth. If the axes argument is an integer, N, then the last N dimensions of a and first N dimensions of b are summed over.
>>> from numpy import *
>>> a = arange(60.).reshape(3,4,5)
>>> b = arange(24.).reshape(4,3,2)
>>> c = tensordot(a,b, axes=([1,0],[0,1])) # sum over the 1st and 2nd dimensions
>>> c.shape
(5, 2)
>>> # A slower but equivalent way of computing the same:
>>> c = zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... c[i,j] += a[k,n,i] * b[n,k,j]
...
See also: dot
test()
numpy.test(*args, **kw)
Run Numpy module test suite with level and verbosity. level: None --- do nothing, return None < 0 --- scan for tests of level=abs(level), don't run them, return TestSuite-list > 0 --- scan for tests of level, run them, return TestRunner > 10 --- run all tests (same as specifying all=True). (backward compatibility). verbosity: >= 0 --- show information messages > 1 --- show warnings on missing tests all: True --- run all test files (like self.testall()) False (default) --- only run test files associated with a module sys_argv --- replacement of sys.argv[1:] during running tests. testcase_pattern --- run only tests that match given pattern. It is assumed (when all=False) that package tests suite follows the following convention: for each package module, there exists file <packagepath>/tests/test_<modulename>.py that defines TestCase classes (with names having prefix 'test_') with methods (with names having prefixes 'check_' or 'bench_'); each of these methods are called when running unit tests.
tile()
numpy.tile(A, reps)
Repeat an array the number of times given in the integer tuple, reps. If reps has length d, the result will have dimension of max(d, A.ndim). If reps is scalar it is treated as a 1-tuple. If A.ndim < d, A is promoted to be d-dimensional by prepending new axes. So a shape (3,) array is promoted to (1,3) for 2-D replication, or shape (1,1,3) for 3-D replication. If this is not the desired behavior, promote A to d-dimensions manually before calling this function. If d < A.ndim, tup is promoted to A.ndim by pre-pending 1's to it. Thus for an A.shape of (2,3,4,5), a tup of (2,2) is treated as (1,1,2,2) Examples: >>> a = array([0,1,2]) >>> tile(a,2) array([0, 1, 2, 0, 1, 2]) >>> tile(a,(1,2)) array([[0, 1, 2, 0, 1, 2]]) >>> tile(a,(2,2)) array([[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) >>> tile(a,(2,1,2)) array([[[0, 1, 2, 0, 1, 2]], <BLANKLINE> [[0, 1, 2, 0, 1, 2]]]) See Also: repeat
>>> from numpy import *
>>> a = array([10,20])
>>> tile(a, (3,2)) # concatenate 3x2 copies of a together
array([[10, 20, 10, 20],
[10, 20, 10, 20],
[10, 20, 10, 20]])
>>> tile(42.0, (3,2)) # works for scalars, too
array([[ 42., 42.],
[ 42., 42.],
[ 42., 42.]])
>>> tile([[1,2],[4,8]], (2,3)) # works for 2-d arrays and list literals, too
array([[1, 2, 1, 2, 1, 2],
[4, 8, 4, 8, 4, 8],
[1, 2, 1, 2, 1, 2],
[4, 8, 4, 8, 4, 8]])
See also: hstack, vstack, r_, c_, concatenate, repeat
tofile()
ndarray.tofile(...)
a.tofile(fid, sep="", format="%s") -> None. Write the data to a file. Required arguments: file -- an open file object or a string containing a filename Keyword arguments: sep -- separator for text output. Write binary if empty (default "") format -- format string for text file output (default "%s") A convenience function for quick storage of array data. Information on endianess and precision is lost, so this method is not a good choice for files intended to archive data or transport data between machines with different endianess. Some of these problems can be overcome by outputting the data as text files at the expense of speed and file size. If 'sep' is empty this method is equivalent to file.write(a.tostring()). If 'sep' is not empty each data item is converted to the nearest Python type and formatted using "format"%item. The resulting strings are written to the file separated by the contents of 'sep'. The data is always written in "C" (row major) order independent of the order of 'a'. The data produced by this method can be recovered by using the function fromfile().
>>> from numpy import *
>>> x = arange(10.)
>>> y = x**2
>>> y.tofile("myfile.dat") # binary format
>>> y.tofile("myfile.txt", sep=' ', format = "%e") # ascii format, one row, exp notation, values separated by 1 space
>>> y.tofile("myfile.txt", sep='\n', format = "%e") # ascii format, one column, exponential notation
See also: fromfile, loadtxt, savetxt
tolist()
ndarray.tolist(...)
a.tolist() -> Array as hierarchical list. Copy the data portion of the array to a hierarchical python list and return that list. Data items are converted to the nearest compatible Python type.
>>> from numpy import *
>>> a = array([[1,2],[3,4]])
>>> a.tolist() # convert to a standard python list
[[1, 2], [3, 4]]
tostring()
ndarray.tostring(...)
a.tostring(order='C') -> raw copy of array data as a Python string. Keyword arguments: order -- order of the data item in the copy {"C","F","A"} (default "C") Construct a Python string containing the raw bytes in the array. The order of the data in arrays with ndim > 1 is specified by the 'order' keyword and this keyword overrides the order of the array. The choices are: "C" -- C order (row major) "Fortran" -- Fortran order (column major) "Any" -- Current order of array. None -- Same as "Any"
trace()
numpy.trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array. If a is 2-d, returns the sum along the diagonal of self with the given offset, i.e., the collection of elements of the form a[i,i+offset]. If a has more than two dimensions, then the axes specified by axis1 and axis2 are used to determine the 2-d subarray whose trace is returned. The shape of the resulting array can be determined by removing axis1 and axis2 and appending an index to the right equal to the size of the resulting diagonals. Arrays of integer type are summed. *Parameters*: a : {array_like} Array from which the diagonals are taken. offset : {0, integer}, optional Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to main diagonal. axis1 : {0, integer}, optional Axis to be used as the first axis of the 2-d subarrays from which the diagonals should be taken. Defaults to first axis. axis2 : {1, integer}, optional Axis to be used as the second axis of the 2-d subarrays from which the diagonals should be taken. Defaults to second axis. dtype : {None, dtype}, optional Determines the type of the returned array and of the accumulator where the elements are summed. If dtype has the value None and a is of integer type of precision less than the default integer precision, then the default integer precision is used. Otherwise, the precision is the same as that of a. out : {None, array}, optional Array into which the sum can be placed. Its type is preserved and it must be of the right shape to hold the output. *Returns*: sum_along_diagonals : array If a is 2-d, a 0-d array containing the diagonal is returned. If a has larger dimensions, then an array of diagonals is returned. *Examples* >>> trace(eye(3)) 3.0 >>> a = arange(8).reshape((2,2,2)) >>> trace(a) array([6, 8])
ndarray.trace(...)
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) return the sum along the offset diagonal of the array's indicated axis1 and axis2.
>>> from numpy import *
>>> a = arange(12).reshape(3,4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> a.diagonal()
array([ 0, 5, 10])
>>> a.trace()
15
>>> a.diagonal(offset=1)
array([ 1, 6, 11])
>>> a.trace(offset=1)
18
transpose()
numpy.transpose(a, axes=None)
Return a view of the array with dimensions permuted. Permutes axis according to list axes. If axes is None (default) returns array with dimensions reversed.
ndarray.transpose(...)
a.transpose(*axes) Returns a view of 'a' with axes transposed. If no axes are given, or None is passed, switches the order of the axes. For a 2-d array, this is the usual matrix transpose. If axes are given, they describe how the axes are permuted. Example: >>> a = array([[1,2],[3,4]]) >>> a array([[1, 2], [3, 4]]) >>> a.transpose() array([[1, 3], [2, 4]]) >>> a.transpose((1,0)) array([[1, 3], [2, 4]]) >>> a.transpose(1,0) array([[1, 3], [2, 4]])
A very simple example:
>>> a = array([[1,2,3],[4,5,6]])
>>> print a.shape
(2, 3)
>>> b = a.transpose()
>>> print b
[[1 4]
[2 5]
[3 6]]
>>> print b.shape
(3, 2)
From this, a more elaborate example can be understood:
>>> a = arange(30)
>>> a = a.reshape(2,3,5)
>>> a
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = a.transpose()
>>> b
array([[[ 0, 15],
[ 5, 20],
[10, 25]],
[[ 1, 16],
[ 6, 21],
[11, 26]],
[[ 2, 17],
[ 7, 22],
[12, 27]],
[[ 3, 18],
[ 8, 23],
[13, 28]],
[[ 4, 19],
[ 9, 24],
[14, 29]]])
>>> b.shape
(5, 3, 2)
>>> b = a.transpose(1,0,2) # First axis 1, then axis 0, then axis 2
>>> b
array([[[ 0, 1, 2, 3, 4],
[15, 16, 17, 18, 19]],
[[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24]],
[[10, 11, 12, 13, 14],
[25, 26, 27, 28, 29]]])
>>> b.shape
(3, 2, 5)
>>> b = transpose(a, (1,0,2)) # A separate transpose() function also exists
See also: T, swapaxes, rollaxis
trapz()
numpy.trapz(y, x=None, dx=1.0, axis=-1)
Integrate y(x) using samples along the given axis and the composite trapezoidal rule. If x is None, spacing given by dx is assumed.
tri()
numpy.tri(N, M=None, k=0, dtype=<type 'float'>)
returns an N-by-M array where all the diagonals starting from lower left corner up to the k-th are all ones.
>>> from numpy import *
>>> tri(3,4,k=0,dtype=float) # 3x4 matrix of Floats, triangular, the k=0-th diagonal and below is 1, the upper part is 0
array([[ 1., 0., 0., 0.],
[ 1., 1., 0., 0.],
[ 1., 1., 1., 0.]])
>>> tri(3,4,k=1,dtype=int)
array([[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]])
tril()
numpy.tril(m, k=0)
returns the elements on and below the k-th diagonal of m. k=0 is the main diagonal, k > 0 is above and k < 0 is below the main diagonal.
>>> from numpy import *
>>> a = arange(10,100,10).reshape(3,3)
>>> a
array([[10, 20, 30],
[40, 50, 60],
[70, 80, 90]])
>>> tril(a,k=0)
array([[10, 0, 0],
[40, 50, 0],
[70, 80, 90]])
>>> tril(a,k=1)
array([[10, 20, 0],
[40, 50, 60],
[70, 80, 90]])
trim_zeros()
numpy.trim_zeros(filt, trim='fb')
Trim the leading and trailing zeros from a 1D array. Example: >>> import numpy >>> a = array((0, 0, 0, 1, 2, 3, 2, 1, 0)) >>> numpy.trim_zeros(a) array([1, 2, 3, 2, 1])
>>> from numpy import *
>>> x = array([0, 0, 0, 1, 2, 3, 0, 0])
>>> trim_zeros(x,'f') # remove zeros at the front
array([1, 2, 3, 0, 0])
>>> trim_zeros(x,'b') # remove zeros at the back
array([0, 0, 0, 1, 2, 3])
>>> trim_zeros(x,'bf') # remove zeros at the back and the front
array([1, 2, 3])
See also: compress
triu()
numpy.triu(m, k=0)
returns the elements on and above the k-th diagonal of m. k=0 is the main diagonal, k > 0 is above and k < 0 is below the main diagonal.
>>> from numpy import *
>>> a = arange(10,100,10).reshape(3,3)
>>> a
array([[10, 20, 30],
[40, 50, 60],
[70, 80, 90]])
>>> triu(a,k=0)
array([[10, 20, 30],
[ 0, 50, 60],
[ 0, 0, 90]])
>>> triu(a,k=1)
array([[ 0, 20, 30],
[ 0, 0, 60],
[ 0, 0, 0]])
true_divide()
numpy.true_divide(...)
y = true_divide(x1,x2) true divides the arguments elementwise.
typeDict
numpy.typeDict
dict() -> new empty dictionary. dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs. dict(seq) -> new dictionary initialized as if via: d = {} for k, v in seq: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: dict(one=1, two=2)
>>> from numpy import *
>>> typeDict['short']
<type 'numpy.int16'>
>>> typeDict['uint16']
<type 'numpy.uint16'>
>>> typeDict['void']
<type 'numpy.void'>
>>> typeDict['S']
<type 'numpy.string_'>
typename()
numpy.typename(char)
Return an English description for the given data type character.
uniform()
numpy.random.uniform(...)
Uniform distribution over [low, high). uniform(low=0.0, high=1.0, size=None) -> random values
>>> from numpy import *
>>> from numpy.random import *
>>> uniform(low=0,high=10,size=(2,3)) # uniform numbers in range [0,10)
array([[ 6.66689951, 4.50623001, 4.69973967],
[ 6.52977732, 3.24688284, 5.01917021]])
See also: standard_normal, poisson, seed
union1d()
numpy.union1d(ar1, ar2)
Union of 1D arrays with unique elements. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. :Parameters: - `ar1` : array - `ar2` : array :Returns: - `union` : array :See also: numpy.lib.arraysetops has a number of other functions for performing set operations on arrays.
unique()
numpy.unique(x)
Return sorted unique items from an array or sequence. Example: >>> unique([5,2,4,0,4,4,2,2,1]) array([0, 1, 2, 4, 5])
>>> from numpy import *
>>> x = array([2,3,2,1,0,3,4,0])
>>> unique(x) # remove double values
array([0, 1, 2, 3, 4])
See also: compress
unique1d()
numpy.unique1d(ar1, return_index=False)
Find the unique elements of 1D array. Most of the other array set operations operate on the unique arrays generated by this function. :Parameters: - `ar1` : array This array will be flattened if it is not already 1D. - `return_index` : bool, optional If True, also return the indices against ar1 that result in the unique array. :Returns: - `unique` : array The unique values. - `unique_indices` : int array, optional The indices of the unique values. Only provided if return_index is True. :See also: numpy.lib.arraysetops has a number of other functions for performing set operations on arrays.
unravel_index()
numpy.unravel_index(x, dims)
Convert a flat index into an index tuple for an array of given shape. e.g. for a 2x2 array, unravel_index(2,(2,2)) returns (1,0). Example usage: p = x.argmax() idx = unravel_index(p,x.shape) x[idx] == x.max() Note: x.flat[p] == x.max() Thus, it may be easier to use flattened indexing than to re-map the index to a tuple.
unwrap()
numpy.unwrap(p, discont=3.1415926535897931, axis=-1)
Unwrap radian phase p by changing absolute jumps greater than 'discont' to their 2*pi complement along the given axis.
vander()
numpy.vander(x, N=None)
X = vander(x,N=None) The Vandermonde matrix of vector x. The i-th column of X is the i-th power of x. N is the maximum power to compute; if N is None it defaults to len(x).
>>> from numpy import *
>>> x = array([1,2,3,5])
>>> N=3
>>> vander(x,N) # Vandermonde matrix of the vector x
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> column_stack([x**(N-1-i) for i in range(N)]) # to understand what a Vandermonde matrix contains
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
var()
numpy.var(a, axis=None, dtype=None, out=None)
Compute the variance along the specified axis. Returns the variance of the array elements, a measure of the spread of a distribution. The variance is computed for the flattened array by default, otherwise over the specified axis. *Parameters*: a : {array_like} Array containing numbers whose variance is desired. If a is not an array, a conversion is attempted. axis : {None, integer}, optional Axis along which the variance is computed. The default is to compute the variance of the flattened array. dtype : {None, dtype}, optional Type to use in computing the variance. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : {None, array}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. *Returns*: variance : {array, scalar}, see dtype parameter above If out=None, returns a new array containing the variance, otherwise a reference to the output array is returned. *See Also*: `std` : Standard deviation `mean` : Average *Notes* The variance is the average of the squared deviations from the mean, i.e. var = mean((x - x.mean())**2). The computed variance is biased, i.e., the mean is computed by dividing by the number of elements, N, rather than by N-1. *Examples* >>> a = array([[1,2],[3,4]]) >>> var(a) 1.25 >>> var(a,0) array([ 1., 1.]) >>> var(a,1) array([ 0.25, 0.25])
ndarray.var(...)
a.var(axis=None, dtype=None, out=None) -> variance Returns the variance of the array elements, a measure of the spread of a distribution. The variance is computed for the flattened array by default, otherwise over the specified axis. :Parameters: axis : integer Axis along which the variance is computed. The default is to compute the variance of the flattened array. dtype : type Type to use in computing the variance. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : ndarray Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. :Returns: variance : The return type varies, see above. A new array holding the result is returned unless out is specified, in which case a reference to out is returned. :SeeAlso: - std : standard deviation - mean: average Notes ----- The variance is the average of the squared deviations from the mean, i.e. var = mean((x - x.mean())**2). The computed variance is biased, i.e., the mean is computed by dividing by the number of elements, N, rather than by N-1.
>>> from numpy import *
>>> a = array([1,2,7])
>>> a.var() # normalised with N (not N-1)
6.8888888888888875
>>> a = array([[1,2,7],[4,9,6]])
>>> a.var()
7.8055555555555571
>>> a.var(axis=0) # the variance of each of the 3 columns
array([ 2.25, 12.25, 0.25])
>>> a.var(axis=1) # the variance of each of the 2 rows
array([ 6.88888889, 4.22222222])
vdot()
numpy.vdot(...)
vdot(a,b) Returns the dot product of a and b for scalars and vectors of floating point and complex types. The first argument, a, is conjugated.
>>> from numpy import *
>>> x = array([1+2j,3+4j])
>>> y = array([5+6j,7+8j])
>>> vdot(x,y) # conj(x) * y = (1-2j)*(5+6j)+(3-4j)*(7+8j)
(70-8j)
See also: dot, inner, cross, outer
vectorize()
numpy.vectorize(...)
vectorize(somefunction, otypes=None, doc=None) Generalized Function class. Description: Define a vectorized function which takes nested sequence of objects or numpy arrays as inputs and returns a numpy array as output, evaluating the function over successive tuples of the input arrays like the python map function except it uses the broadcasting rules of numpy. Data-type of output of vectorized is determined by calling the function with the first element of the input. This can be avoided by specifying the otypes argument as either a string of typecode characters or a list of data-types specifiers. There should be one data-type specifier for each output. Input: somefunction -- a Python function or method Example: >>> def myfunc(a, b): ... if a > b: ... return a-b ... else: ... return a+b >>> vfunc = vectorize(myfunc) >>> vfunc([1, 2, 3, 4], 2) array([3, 4, 1, 2])
>>> from numpy import *
>>> def myfunc(x):
... if x >= 0: return x**2
... else: return -x
...
>>> myfunc(2.) # works fine
4.0
>>> myfunc(array([-2,2])) # doesn't work, try it...
<snip>
>>> vecfunc = vectorize(myfunc, otypes=[float]) # declare the return type as float
>>> vecfunc(array([-2,2])) # works fine!
array([ 2., 4.])
See also: apply_along_axis, apply_over_axes
view()
ndarray.view(...)
a.view(<type>) -> new view of array with same data. Type can be either a new sub-type object or a data-descriptor object
>>> from numpy import *
>>> a = array([1., 2.])
>>> a.view() # new array referring to the same data as 'a'
array([ 1., 2.])
>>> a.view(complex) # pretend that a is made up of complex numbers
array([ 1.+2.j])
>>> a.view(int) # view(type) is NOT the same as astype(type)!
array([ 0, 1072693248, 0, 1073741824])
>>>
>>> mydescr = dtype({'names': ['gender','age'], 'formats': ['S1', 'i2']})
>>> a = array([('M',25),('F',30)], dtype = mydescr) # array with records
>>> b = a.view(recarray) # convert to a record array, names are now attributes
>>> a['age'] # works with 'a' but not with 'b'
array([25, 30], dtype=int16)
>>> b.age # works with 'b' but not with 'a'
array([25, 30], dtype=int16)
See also: copy
vonmises()
numpy.random.vonmises(...)
von Mises circular distribution with mode mu and dispersion parameter kappa on [-pi, pi]. vonmises(mu, kappa, size=None)
>>> from numpy import *
>>> from numpy.random import *
>>> vonmises(mu=1,kappa=1,size=(2,3)) # Von Mises distribution mean=1.0, kappa=1
array([[ 0.81960554, 1.37470839, -0.15700173],
[ 1.2974554 , 2.94229797, 0.32462307]])
>>> from pylab import * # histogram plot example
>>> hist(vonmises(1,1,(10000)), 50)
See also: random_sample, uniform, standard_normal, seed
vsplit()
numpy.vsplit(ary, indices_or_sections)
Split ary into multiple rows of sub-arrays Description: Split a single array into multiple sub arrays. The array is divided into groups of rows. If indices_or_sections is an integer, ary is divided into that many equally sized sub arrays. If it is impossible to make the sub-arrays equally sized, the operation throws a ValueError exception. See array_split and split for other options on indices_or_sections. Arguments: ary -- N-D array. Array to be divided into sub-arrays. indices_or_sections -- integer or 1D array. If integer, defines the number of (close to) equal sized sub-arrays. If it is a 1D array of sorted indices, it defines the indexes at which ary is divided. Any empty list results in a single sub-array equal to the original array. Returns: sequence of sub-arrays. The returned arrays have the same number of dimensions as the input array. Caveats: How should we handle 1D arrays here? I am currently raising an error when I encounter them. Any better approach? Should we reduce the returned array to their minimum dimensions by getting rid of any dimensions that are 1? Related: vstack, split, array_split, hsplit, dsplit. Examples: import numpy >>> a = array([[1,2,3,4], ... [1,2,3,4]]) >>> numpy.vsplit(a,2) [array([[1, 2, 3, 4]]), array([[1, 2, 3, 4]])]
>>> from numpy import *
>>> a = array([[1,2],[3,4],[5,6],[7,8]])
>>> vsplit(a,2) # split, row-wise, in 2 equal parts
[array([[1, 2],
[3, 4]]), array([[5, 6],
[7, 8]])]
>>> vsplit(a,[1,2]) # split, row-wise, before row 1 and before row 2
[array([[1, 2]]), array([[3, 4]]), array([[5, 6],
[7, 8]])]
See also: split, array_split, dsplit, hsplit, vstack
vstack()
numpy.vstack(tup)
Stack arrays in sequence vertically (row wise) Description: Take a sequence of arrays and stack them vertically to make a single array. All arrays in the sequence must have the same shape along all but the first axis. vstack will rebuild arrays divided by vsplit. Arguments: tup -- sequence of arrays. All arrays must have the same shape. Examples: >>> import numpy >>> a = array((1,2,3)) >>> b = array((2,3,4)) >>> numpy.vstack((a,b)) array([[1, 2, 3], [2, 3, 4]]) >>> a = array([[1],[2],[3]]) >>> b = array([[2],[3],[4]]) >>> numpy.vstack((a,b)) array([[1], [2], [3], [2], [3], [4]])
>>> from numpy import *
>>> a =array([1,2])
>>> b = array([[3,4],[5,6]])
>>> vstack((a,b,a)) # only the first dimension of the arrays is allowed to be different
array([[1, 2],
[3, 4],
[5, 6],
[1, 2]])
See also: hstack, column_stack, concatenate, dstack, vsplit
weibull()
numpy.random.weibull(...)
Weibull distribution. weibull(a, size=None)
>>> from numpy import *
>>> from numpy.random import *
>>> weibull(a=1,size=(2,3)) # I think a is the shape parameter
array([[ 0.08303065, 3.41486412, 0.67430149],
[ 0.41383893, 0.93577601, 0.45431195]])
>>> from pylab import * # histogram plot example
>>> hist(weibull(5, (1000)), 50)
See also: random_sample, uniform, standard_normal, seed
where()
numpy.where(...)
where(condition, x, y) or where(condition) Return elements from `x` or `y`, depending on `condition`. *Parameters*: condition : array of bool When True, yield x, otherwise yield y. x,y : 1-dimensional arrays Values from which to choose. *Notes* This is equivalent to [xv if c else yv for (c,xv,yv) in zip(condition,x,y)] The result is shaped like `condition` and has elements of `x` or `y` where `condition` is respectively True or False. In the special case, where only `condition` is given, the tuple condition.nonzero() is returned, instead. *Examples* >>> where([True,False,True],[1,2,3],[4,5,6]) array([1, 5, 3])
>>> from numpy import *
>>> a = array([3,5,7,9])
>>> b = array([10,20,30,40])
>>> c = array([2,4,6,8])
>>> where(a <= 6, b, c)
array([10, 20, 6, 8])
>>> where(a <= 6, b, -1)
array([10, 20, -1, -1])
>>> indices = where(a <= 6) # returns a tuple; the array contains indices.
>>> indices
(array([0, 1]),)
>>> b[indices]
array([10, 20])
>>> b[a <= 6] # an alternative syntax
array([10, 20])
>>> d = array([[3,5,7,9],[2,4,6,8]])
>>> where(d <= 6) # tuple with first all the row indices, then all the column indices
(array([0, 0, 1, 1, 1]), array([0, 1, 0, 1, 2]))
Be aware of the difference between x[list of bools] and x[list of integers]!
>>> from numpy import *
>>> x = arange(5,0,-1)
>>> print x
[5 4 3 2 1]
>>> criterium = (x <= 2) | (x >= 5)
>>> criterium
array([True, False, False, True, True], dtype=bool)
>>> indices = where(criterium, 1, 0)
>>> print indices
[1 0 0 1 1]
>>> x[indices] # integers!
array([4, 5, 5, 4, 4])
>>> x[criterium] # bools!
array([5, 2, 1])
>>> indices = where(criterium)
>>> print indices
(array([0, 3, 4]),)
>>> x[indices]
array([5, 2, 1])
who()
numpy.who(vardict=None)
Print the Numpy arrays in the given dictionary (or globals() if None).
zeros()
numpy.zeros(...)
zeros((d1,...,dn),dtype=float,order='C') Return a new array of shape (d1,...,dn) and type typecode with all it's entries initialized to zero.
>>> from numpy import *
>>> zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> zeros((2,3), int)
array([[0, 0, 0],
[0, 0, 0]])
See also: zeros_like, ones, empty, eye, identity
zeros_like()
numpy.zeros_like(a)
Return an array of zeros of the shape and typecode of a. If you don't explicitly need the array to be zeroed, you should instead use empty_like(), which is faster as it only allocates memory.
>>> from numpy import *
>>> a = array([[1,2,3],[4,5,6]])
>>> zeros_like(a) # with zeros initialised array with the same shape and datatype as 'a'
array([[0, 0, 0],
[0, 0, 0]])