Reference link: numpy.degrees and rad2deg in Python

The following are code examples showing how to use numpy.ptp(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the ones you don't like. You can also save this page to your account.
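Before the project examples below, here is a minimal, self-contained sketch of what np.ptp computes (the array values are made up for illustration): peak-to-peak is simply max minus min, over the flattened array by default or along a chosen axis.

import numpy as np

x = np.array([[3.0, 8.0, 1.0],
              [7.0, 2.0, 5.0]])

# Peak-to-peak = max - min; flattened by default, or per-axis.
print(np.ptp(x))          # 7.0  (8.0 - 1.0 over the whole array)
print(np.ptp(x, axis=0))  # [4. 6. 4.]  column-wise range
print(np.ptp(x, axis=1))  # [7. 5.]     row-wise range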

Example 1

def compute_group(cls, data, scales, **params):
    n = len(data)
    if n < 3:
        return pd.DataFrame()

    weight = data.get('weight')

    if params['trim']:
        range_y = data['y'].min(), data['y'].max()
    else:
        range_y = scales.y.dimension()

    dens = compute_density(data['y'], weight, range_y, **params)
    dens['y'] = dens['x']
    dens['x'] = np.mean([data['x'].min(), data['x'].max()])

    # Compute width if x has multiple values
    if len(np.unique(data['x'])) > 1:
        dens['width'] = np.ptp(data['x']) * 0.9

    return dens

Example 2

def draw_group(data, panel_params, coord, ax, **params):
    data = coord.transform(data, panel_params)
    fill = to_rgba(data['fill'], data['alpha'])
    color = to_rgba(data['color'], data['alpha'])
    ranges = coord.range(panel_params)

    # For perfect circles the width/height of the circle(ellipse)
    # should factor in the dimensions of axes
    bbox = ax.get_window_extent().transformed(
        ax.figure.dpi_scale_trans.inverted())
    ax_width, ax_height = bbox.width, bbox.height

    factor = ((ax_width/ax_height) *
              np.ptp(ranges.y)/np.ptp(ranges.x))
    size = data.loc[0, 'binwidth'] * params['dotsize']
    offsets = data['stackpos'] * params['stackratio']

    if params['binaxis'] == 'x':
        width, height = size, size*factor
        xpos, ypos = data['x'], data['y'] + height*offsets
    elif params['binaxis'] == 'y':
        width, height = size/factor, size
        xpos, ypos = data['x'] + width*offsets, data['y']

    circles = []
    for xy in zip(xpos, ypos):
        patch = mpatches.Ellipse(xy, width=width, height=height)
        circles.append(patch)

    coll = mcoll.PatchCollection(circles,
                                 edgecolors=color,
                                 facecolors=fill)
    ax.add_collection(coll)

Example 3

def fit(self, X, y=None):
    """Fit it.

    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        The data for one channel.
    y : None
        Redundant. Necessary to be compatible with sklearn
        API.
    """
    deltas = np.ptp(X, axis=1)
    self.deltas_ = deltas
    keep = deltas <= self.thresh
    # XXX: actually go over all the folds before setting the min
    # in skopt. Otherwise, may confuse skopt.
    if self.thresh < np.min(np.ptp(X, axis=1)):
        assert np.sum(keep) == 0
        keep = deltas <= np.min(np.ptp(X, axis=1))
    self.mean_ = _slicemean(X, keep, axis=0)
    return self

Example 4

def _vote_bad_epochs(self, epochs):
    """Each channel votes for an epoch as good or bad.

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs object for which bad epochs must be found.
    """
    n_epochs = len(epochs)
    picks = _handle_picks(info=epochs.info, picks=self.picks)

    drop_log = np.zeros((n_epochs, len(epochs.ch_names)))
    bad_sensor_counts = np.zeros((len(epochs), ))

    ch_names = [epochs.ch_names[p] for p in picks]
    deltas = np.ptp(epochs.get_data()[:, picks], axis=-1).T
    threshes = [self.threshes_[ch_name] for ch_name in ch_names]
    for ch_idx, (delta, thresh) in enumerate(zip(deltas, threshes)):
        bad_epochs_idx = np.where(delta > thresh)[0]
        # TODO: combine for different ch types
        bad_sensor_counts[bad_epochs_idx] += 1
        drop_log[bad_epochs_idx, picks[ch_idx]] = 1
    return drop_log, bad_sensor_counts

Example 5

def extend_limits(values, fraction=0.10, tolerance=1e-2):
    """ Extend the values of a list by a fractional amount """

    values = np.array(values)
    finite_indices = np.isfinite(values)

    if np.sum(finite_indices) == 0:
        raise ValueError("no finite values provided")

    lower_limit, upper_limit = np.min(values[finite_indices]), np.max(values[finite_indices])
    ptp_value = np.ptp([lower_limit, upper_limit])

    new_limits = lower_limit - fraction * ptp_value, ptp_value * fraction + upper_limit

    if np.abs(new_limits[0] - new_limits[1]) < tolerance:
        if np.abs(new_limits[0]) < tolerance:
            # Arbitrary limits, since we've just been passed zeros
            offset = 1
        else:
            offset = np.abs(new_limits[0]) * fraction

        new_limits = new_limits[0] - offset, offset + new_limits[0]

    return np.array(new_limits)

Example 6

def calculate_fractional_overlap(interest_range, comparison_range):
    """
    Calculate how much of the range of interest overlaps with the comparison
    range.
    """
    if not (interest_range[-1] >= comparison_range[0] \
            and comparison_range[-1] >= interest_range[0]):
        return 0.0  # No overlap

    elif (interest_range[0] >= comparison_range[0] \
            and interest_range[-1] <= comparison_range[-1]):
        return 1.0  # Total overlap

    else:
        # Some overlap. Which side?
        if interest_range[0] < comparison_range[0]:
            # Left hand side
            width = interest_range[-1] - comparison_range[0]
        else:
            # Right hand side
            width = comparison_range[-1] - interest_range[0]

        return width/np.ptp(interest_range)  # Fractional overlap

Example 7

def update_roi_xy_size(self):
    """ Update the cursor size showing the optimizer scan area for the XY image.
    """
    hpos = self.roi_xy.pos()[0]
    vpos = self.roi_xy.pos()[1]
    hsize = self.roi_xy.size()[0]
    vsize = self.roi_xy.size()[1]
    hcenter = hpos + 0.5 * hsize
    vcenter = vpos + 0.5 * vsize

    if self.adjust_cursor_roi:
        newsize = self._optimizer_logic.refocus_XY_size
    else:
        viewrange = self.xy_image.getViewBox().viewRange()
        newsize = np.sqrt(np.sum(np.ptp(viewrange, axis=1)**2)) / 20

    self.roi_xy.setSize([newsize, newsize])
    self.roi_xy.setPos([hcenter - newsize / 2, vcenter - newsize / 2])

Example 8

def update_roi_depth_size(self):
    """ Update the cursor size showing the optimizer scan area for the X-depth image.
    """
    hpos = self.roi_depth.pos()[0]
    vpos = self.roi_depth.pos()[1]
    hsize = self.roi_depth.size()[0]
    vsize = self.roi_depth.size()[1]
    hcenter = hpos + 0.5 * hsize
    vcenter = vpos + 0.5 * vsize

    if self.adjust_cursor_roi:
        newsize_h = self._optimizer_logic.refocus_XY_size
        newsize_v = self._optimizer_logic.refocus_Z_size
    else:
        viewrange = self.depth_image.getViewBox().viewRange()
        newsize = np.sqrt(np.sum(np.ptp(viewrange, axis=1)**2)) / 20
        newsize_h = newsize
        newsize_v = newsize

    self.roi_depth.setSize([newsize_h, newsize_v])
    self.roi_depth.setPos([hcenter - newsize_h / 2, vcenter - newsize_v / 2])

Example 9

def plane_fit(points, tolerance=None):
    '''
    Given a set of points, find an origin and normal using least squares

    Arguments
    ---------
    points: (n,3)
    tolerance: how non-planar the result can be without raising an error

    Returns
    ---------
    C: (3) point on the plane
    N: (3) normal vector
    '''
    C = points[0]
    x = points - C
    M = np.dot(x.T, x)
    N = np.linalg.svd(M)[0][:,-1]
    if not (tolerance is None):
        normal_range = np.ptp(np.dot(N, points.T))
        if normal_range > tol.planar:
            log.error('Points have peak to peak of %f', normal_range)
            raise ValueError('Plane outside tolerance!')
    return C, N

Example 10

def plot_epipolar_line(p1, p2, F, show_epipole=False):
    """ Plot the epipole and epipolar line F*x=0
    in an image given the corresponding points.
    F is the fundamental matrix and p2 are the points in the other image.
    """
    lines = np.dot(F, p2)
    pad = np.ptp(p1, 1) * 0.01
    mins = np.min(p1, 1)
    maxes = np.max(p1, 1)

    # epipolar line parameter and values
    xpts = np.linspace(mins[0] - pad[0], maxes[0] + pad[0], 100)
    for line in lines.T:
        ypts = np.asarray([(line[2] + line[0] * p) / (-line[1]) for p in xpts])
        valid_idx = ((ypts >= mins[1] - pad[1]) & (ypts <= maxes[1] + pad[1]))
        plt.plot(xpts[valid_idx], ypts[valid_idx], linewidth=1)
    plt.plot(p1[0], p1[1], 'ro')

    if show_epipole:
        epipole = compute_epipole(F)
        plt.plot(epipole[0] / epipole[2], epipole[1] / epipole[2], 'r*')

Example 11

def startModulation(self,
                    radiusInMilliRad,
                    frequencyInHz,
                    centerInMilliRad):
    self._origTargetPosition= centerInMilliRad
    self.stopModulation()
    periodInSec= 1./ frequencyInHz
    assert np.ptp(self._ctrl.getWaveGeneratorTableRate()) == 0, \
        "wave generator table rate must be the same for every table"
    wgtr= self._ctrl.getWaveGeneratorTableRate()[0]
    timestep= self._ctrl.getServoUpdateTimeInSeconds() * wgtr
    lengthInPoints= periodInSec/ timestep
    peakOfTheSineCurve= self._milliRadToGcsUnits(
        self.getTargetPosition() + radiusInMilliRad)
    offsetOfTheSineCurve= self._milliRadToGcsUnits(
        self.getTargetPosition() - radiusInMilliRad)
    amplitudeOfTheSineCurve= peakOfTheSineCurve - offsetOfTheSineCurve
    wavelengthOfTheSineCurveInPoints= periodInSec/ timestep
    startPoint= np.array([0, 0.25])* wavelengthOfTheSineCurveInPoints
    curveCenterPoint= 0.5* wavelengthOfTheSineCurveInPoints
    self._ctrl.clearWaveTableData([1, 2, 3])
    self._ctrl.setSinusoidalWaveform(
        1, WaveformGenerator.CLEAR, lengthInPoints,
        amplitudeOfTheSineCurve[0], offsetOfTheSineCurve[0],
        wavelengthOfTheSineCurveInPoints, startPoint[0], curveCenterPoint)
    self._ctrl.setSinusoidalWaveform(
        2, WaveformGenerator.CLEAR, lengthInPoints,
        amplitudeOfTheSineCurve[1], offsetOfTheSineCurve[1],
        wavelengthOfTheSineCurveInPoints, startPoint[1], curveCenterPoint)
    self._ctrl.setConnectionOfWaveTableToWaveGenerator([1, 2], [1, 2])
    self._ctrl.setWaveGeneratorStartStopMode([1, 1, 0])
    self._modulationEnabled= True

Example 12

def compute_group(cls, data, scales, **params):
    labels = ['x', 'y']
    X = np.array(data[labels])
    res = boxplot_stats(X, whis=params['coef'], labels=labels)[1]

    try:
        n = data['weight'].sum()
    except KeyError:
        n = len(data['y'])

    if len(np.unique(data['x'])) > 1:
        width = np.ptp(data['x']) * 0.9
    else:
        width = params['width']

    if pdtypes.is_categorical(data['x']):
        x = data['x'].iloc[0]
    else:
        x = np.mean([data['x'].min(), data['x'].max()])

    d = {'ymin': res['whislo'],
         'lower': res['q1'],
         'middle': [res['med']],
         'upper': res['q3'],
         'ymax': res['whishi'],
         'outliers': [res['fliers']],
         'notchupper': res['med']+1.58*res['iqr']/np.sqrt(n),
         'notchlower': res['med']-1.58*res['iqr']/np.sqrt(n),
         'x': x,
         'width': width,
         'relvarwidth': np.sqrt(n)}
    return pd.DataFrame(d)

Example 13

def test_ptp(self):
    a = [3, 4, 5, 10, -3, -5, 6.0]
    assert_equal(np.ptp(a, axis=0), 15.0)

Example 14

def _phampcheck(self, pha, amp, axis):
    """Check phase and amplitude values."""
    # Shape checking :
    if pha.ndim != amp.ndim:
        raise ValueError("pha and amp must have the same number of "
                         "dimensions.")
    # Force phase / amplitude to be at least (1, N) :
    if (pha.ndim == 1) and (amp.ndim == 1):
        pha = pha.reshape(1, -1)
        amp = amp.reshape(1, -1)
        axis = 1
    # Check if the phase is in radians :
    if np.ptp(pha) > 2 * np.pi:
        raise ValueError("Your phase is probably in degrees and should be"
                         " converted in radians using either np.degrees or"
                         " np.deg2rad.")
    # Check if the phase/amplitude have the same number of points on axis:
    if pha.shape[axis] != amp.shape[axis]:
        phan, ampn = pha.shape[axis], amp.shape[axis]
        raise ValueError("The phase (" + str(phan) + ") and the amplitude "
                         "(" + str(ampn) + ") do not have the same number"
                         " of points on the specified axis (" +
                         str(axis) + ").")
    # Force the phase to be in [-pi, pi] :
    pha = (pha + np.pi) % (2 * np.pi) - np.pi
    return pha, amp, axis

###########################################################################
# PROPERTIES
###########################################################################
# ----------- IDPAC -----------
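The error message in Example 14 points at NumPy's angle-conversion helpers, which are also the topic of the reference link at the top of this page. As a small aside (the angle value below is chosen purely for illustration), np.deg2rad/np.radians convert degrees to radians, while np.rad2deg/np.degrees convert back:

import numpy as np

angle_deg = 180.0
angle_rad = np.deg2rad(angle_deg)  # pi; np.radians(angle_deg) is equivalent
back_deg = np.rad2deg(angle_rad)   # 180.0; np.degrees(angle_rad) is equivalent
print(angle_rad, back_deg)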

Example 15

def _postprocess_contours(self, index, times, freqs, salience):
    """Remove contours that are too short.

    Parameters
    ----------
    index : np.array
        array of contour numbers
    times : np.array
        array of contour times
    freqs : np.array
        array of contour frequencies
    salience : np.array
        array of contour salience values

    Returns
    -------
    index_pruned : np.array
        Pruned array of contour numbers
    times_pruned : np.array
        Pruned array of contour times
    freqs_pruned : np.array
        Pruned array of contour frequencies
    salience_pruned : np.array
        Pruned array of contour salience values

    """
    keep_index = np.ones(times.shape).astype(bool)
    for i in set(index):
        this_idx = (index == i)
        if np.ptp(times[this_idx]) <= self.min_contour_len:
            keep_index[this_idx] = False

    return (index[keep_index], times[keep_index],
            freqs[keep_index], salience[keep_index])

Example 16

def test_ptp(self):
    a = [3, 4, 5, 10, -3, -5, 6.0]
    assert_equal(np.ptp(a, axis=0), 15.0)

Example 17

def fit(self, X, y=None):
    """Fit it."""
    if self.n_channels is None or self.n_times is None:
        raise ValueError('Cannot fit without knowing n_channels'
                         ' and n_times')
    X = X.reshape(-1, self.n_channels, self.n_times)
    deltas = np.array([np.ptp(d, axis=1) for d in X])
    epoch_deltas = deltas.max(axis=1)
    keep = epoch_deltas <= self.thresh
    self.mean_ = _slicemean(X, keep, axis=0)
    return self

Example 18

def _get_epochs_interpolation(self, epochs, drop_log,
                              ch_type, verbose='progressbar'):
    """Interpolate the bad epochs."""
    # 1: bad segment, # 2: interpolated
    fix_log = drop_log.copy()
    ch_names = epochs.ch_names
    non_picks = np.setdiff1d(range(epochs.info['nchan']), self.picks)
    interp_channels = list()
    n_interpolate = self.n_interpolate[ch_type]
    for epoch_idx in range(len(epochs)):
        n_bads = drop_log[epoch_idx, self.picks].sum()
        if n_bads == 0:
            continue
        else:
            if n_bads <= n_interpolate:
                interp_chs_mask = drop_log[epoch_idx] == 1
            else:
                # get peak-to-peak for channels in that epoch
                data = epochs[epoch_idx].get_data()[0]
                peaks = np.ptp(data, axis=-1)
                peaks[non_picks] = -np.inf
                # find channels which are bad by rejection threshold
                interp_chs_mask = drop_log[epoch_idx] == 1
                # ignore good channels
                peaks[~interp_chs_mask] = -np.inf
                # find the ordering of channels amongst the bad channels
                sorted_ch_idx_picks = np.argsort(peaks)[::-1]
                # then select only the worst n_interpolate channels
                interp_chs_mask[
                    sorted_ch_idx_picks[n_interpolate:]] = False

        fix_log[epoch_idx][interp_chs_mask] = 2
        interp_chs = np.where(interp_chs_mask)[0]
        interp_chs = [ch_name for idx, ch_name in enumerate(ch_names)
                      if idx in interp_chs]
        interp_channels.append(interp_chs)
    return interp_channels, fix_log

Example 19

def normalizeData(X):
    mean = []
    data_range = []
    mean.append(np.mean(X[:,1]))
    mean.append(np.mean(X[:,2]))
    data_range = np.ptp(X,axis=0)[-2:]
    #print(mean,data_range)
    for i in range(len(X)):
        X[:,1][i] = (X[:,1][i] - float(mean[0]))/float(data_range[0])
        X[:,2][i] = (X[:,2][i] - float(mean[1]))/float(data_range[1])
    return X

Example 20

def normalizeData(X):
    mean = []
    data_range = []
    mean.append(np.mean(X[:,1]))
    mean.append(np.mean(X[:,2]))
    data_range = np.ptp(X,axis=0)[-2:]
    #print(mean,data_range)
    for i in range(len(X)):
        X[:,1][i] = (X[:,1][i] - float(mean[0]))/float(data_range[0])
        X[:,2][i] = (X[:,2][i] - float(mean[1]))/float(data_range[1])
    return X

Example 21

def FeatureScaling(X):
    mean = []
    data_range = []
    X1 = np.zeros((len(X),X.shape[1]))
    mean.append(np.mean(X[:,1]))
    mean.append(np.mean(X[:,2]))
    data_range = np.ptp(X,axis=0)[-2:]
    #print(mean)
    print(data_range)
    for i in range(len(X)):
        X1[:,0][i] = (X[:,0][i] - mean[0])/data_range[0]
        X1[:,1][i] = (X[:,1][i] - mean[1])/data_range[1]
    return X1

Example 22

def neighbours(self, effective_temperature, surface_gravity, metallicity, N,
               scales=None):
    """
    Return indices of the `N`th-nearest neighbours in the grid. The three
    parameters are scaled by the peak-to-peak range in the grid, unless
    `scales` is given.

    :param effective_temperature:
        The effective temperature of the star.

    :param surface_gravity:
        The surface gravity of the star.

    :param metallicity:
        The metallicity of the star.

    :param N:
        The number of neighbouring indices to return.

    :returns:
        An array of length `N` that contains the indices of the closest
        neighbours in the grid.
    """
    point = np.array([effective_temperature, surface_gravity, metallicity])
    if scales is None:
        scales = np.ptp(self._grid, axis=0)
    distance = np.sum(((self._grid - point)/scales)**2, axis=1)
    return np.argsort(distance)[:N]

Example 23

def nearest_neighbours(self, point, n):
    """
    Return the indices of the n nearest neighbours to the point.
    """
    stellar_parameters = _recarray_to_array(self.stellar_parameters)
    distances = np.sum(((point - stellar_parameters) \
        / np.ptp(stellar_parameters, axis=0))**2, axis=1)
    return distances.argsort()[:n]

Example 24

def figure_mouse_pick(self, event):
    """
    Trigger for when the mouse is used to select an item in the figure.

    :param event:
        The matplotlib event.
    """
    ycol = "abundance"
    xcol = {
        self.ax_excitation_twin: "expot",
        self.ax_line_strength_twin: "reduced_equivalent_width"
    }[event.inaxes]

    xscale = np.ptp(event.inaxes.get_xlim())
    yscale = np.ptp(event.inaxes.get_ylim())
    try:
        distance = np.sqrt(
            ((self._state_transitions[ycol] - event.ydata)/yscale)**2 \
            + ((self._state_transitions[xcol] - event.xdata)/xscale)**2)
    except AttributeError:
        # Stellar parameters have not been measured yet
        return None

    index = np.nanargmin(distance)

    # Because the state transitions are linked to the parent source model of
    # the table view, we will have to get the proxy index.
    proxy_index = self.table_view.model().mapFromSource(
        self.proxy_spectral_models.sourceModel().createIndex(index, 0)).row()

    self.table_view.selectRow(proxy_index)
    return None

Example 25

def normalize(vec):
    """
    Given an input vector normalize the vector

    Parameters
    ==========
    vec : array_like
        input vector to normalize

    Returns
    =======
    out : array_like
        normalized vector

    Examples
    ========
    >>> import spacepy.toolbox as tb
    >>> tb.normalize([1,2,3])
    [0.0, 0.5, 1.0]
    """
    # check to see if vec is numpy array, this is fastest
    if isinstance(vec, np.ndarray):
        out = (vec - vec.min())/np.ptp(vec)
    else:
        vecmin = np.min(vec)
        ptp = np.ptp(vec)
        out = [(val - vecmin)/ptp for val in vec]
    return out

Example 26

def test_ptp(self):
    N = 1000
    arr = np.random.randn(N)
    ser = Series(arr)
    self.assertEqual(np.ptp(ser), np.ptp(arr))

    # GH11163
    s = Series([3, 5, np.nan, -3, 10])
    self.assertEqual(s.ptp(), 13)
    self.assertTrue(pd.isnull(s.ptp(skipna=False)))

    mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
    s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)

    expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
    self.assert_series_equal(s.ptp(level=0), expected)

    expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
    self.assert_series_equal(s.ptp(level=0, skipna=False), expected)

    with self.assertRaises(ValueError):
        s.ptp(axis=1)

    s = pd.Series(['a', 'b', 'c', 'd', 'e'])
    with self.assertRaises(TypeError):
        s.ptp()

    with self.assertRaises(NotImplementedError):
        s.ptp(numeric_only=True)

Example 27

def _get_indice(cls, w, flux, blue, red, band=None, unit='ew', degree=1,
                **kwargs):
    """ compute spectral index after continuum subtraction

    Parameters
    ----------
    w: ndarray (nw, )
        array of wavelengths in AA
    flux: ndarray (N, nw)
        array of flux values for different spectra in the series
    blue: tuple(2)
        selection for blue continuum estimate
    red: tuple(2)
        selection for red continuum estimate
    band: tuple(2), optional
        select region in this band only.
        default is band = (min(blue), max(red))
    unit: str
        `ew` or `mag`, whether equivalent width or magnitude
    degree: int (default 1)
        degree of the polynomial fit to the continuum

    Returns
    -------
    ew: ndarray (N,)
        equivalent width array
    """
    wi, fi = cls.continuum_normalized_region_around_line(w, flux, blue,
                                                         red, band=band,
                                                         degree=degree)
    if unit in (0, 'ew', 'EW'):
        return np.trapz(1. - fi, wi, axis=-1)
    else:
        m = np.trapz(fi, wi, axis=-1)
        m = -2.5 * np.log10(m / np.ptp(wi))
        return m

Example 28

def test_basic(self):
    a = [3, 4, 5, 10, -3, -5, 6.0]
    assert_equal(np.ptp(a, axis=0), 15.0)
    b = [[3, 6.0, 9.0],
         [4, 10.0, 5.0],
         [8, 3.0, 2.0]]
    assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0])
    assert_equal(np.ptp(b, axis=-1), [6.0, 6.0, 6.0])

Example 29

def plot_cdf(x, copy=True, fractional=True, **kwargs):
    """
    Add a log-log CCDF plot to the current axes.

    Arguments
    ---------
    x : array_like
        The data to plot
    copy : boolean
        copy input array in a new object before sorting it. If the data is
        *very* large, the copy can be avoided by passing False to this
        parameter.
    fractional : boolean
        compress the data by means of fractional ranking. This collapses the
        ranks from multiple, identical observations into their midpoint, thus
        producing smaller figures. Note that the resulting plot will NOT be the
        exact CCDF function, but an approximation.

    Additional keyword arguments are passed to `matplotlib.pyplot.loglog`.
    Returns a matplotlib axes object.
    """
    N = float(len(x))
    if copy:
        x = x.copy()
    x.sort()
    if fractional:
        t = []
        for x, chunk in groupby(enumerate(x, 1), itemgetter(1)):
            xranks, _ = zip(*list(chunk))
            t.append((float(x), xranks[0] + np.ptp(xranks) / 2.0))
        t = np.asarray(t)
    else:
        t = np.c_[np.asfarray(x), np.arange(N) + 1]
    if 'ax' not in kwargs:
        ax = plt.gca()
    else:
        ax = kwargs.pop('ax')
    ax.loglog(t[:, 0], (N - t[:, 1]) / N, 'ow', **kwargs)
    return ax

Example 30

def test_integrate():
    subslice = slice(100,200)
    wvln = np.linspace(1000., 4000., 1024)
    flux = np.zeros_like(wvln)
    flux[subslice] = 1./np.ptp(wvln[subslice])  # so the integral is 1

    s = Spectrum(wvln*u.angstrom, flux*u.erg/u.cm**2/u.angstrom)

    # the integration grid is a sub-section of the full wavelength array
    wvln_grid = s.wavelength[subslice]

    i_flux = s.integrate(wvln_grid)
    assert np.allclose(i_flux.value, 1.)  # "close" because this is float comparison

Example 31

def is_circle(points, scale, verbose=True):
    '''
    Given a set of points, quickly determine if they represent
    a circle or not.
    '''
    # make sure input is a numpy array
    points = np.asanyarray(points)
    scale = float(scale)

    # can only be a circle if the first and last point are the
    # same (AKA is a closed path)
    if np.linalg.norm(points[0] - points[-1]) > tol.merge:
        return None

    # the bounding box size of the points
    box = points.ptp(axis=0)

    # check aspect ratio as an early exit if the path is not a circle
    aspect = np.divide(*box)
    if np.abs(aspect - 1.0) > tol.aspect_frac:
        return None

    # fit a circle with tolerance checks
    CR = fit_circle_check(points, scale=scale)
    if CR is None:
        return None

    # return the circle as three control points
    control = angles_to_threepoint([0, np.pi*.5], *CR)
    return control

Example 32

def test_ptp(self):
    a = [3, 4, 5, 10, -3, -5, 6.0]
    assert_equal(np.ptp(a, axis=0), 15.0)

Example 33

def print_confidence_interval(ci, tabs=''):
    """Pretty print confidence interval information"""
    ci = list(ci)
    ci += [np.ptp(ci)]
    print(tabs + 'Value: {1:.04f}'.format(*ci))
    print(tabs + '95% Confidence Interval: ({0:.04f}, {2:.04f})'.format(*ci))
    print(tabs + '\tCI Width: {3:.05f}'.format(*ci))

Example 34

def test_ptp(self):
    a = [3, 4, 5, 10, -3, -5, 6.0]
    assert_equal(np.ptp(a, axis=0), 15.0)

Example 35

def ptp(a, axis=None, out=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for 'peak to peak'.

    Parameters
    ----------
    a : array_like
        Input values.
    axis : int, optional
        Axis along which to find the peaks. By default, flatten the
        array.
    out : array_like
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type of the output values will be cast if necessary.

    Returns
    -------
    ptp : ndarray
        A new array holding the result, unless `out` was
        specified, in which case a reference to `out` is returned.

    Examples
    --------
    >>> x = np.arange(4).reshape((2,2))
    >>> x
    array([[0, 1],
           [2, 3]])

    >>> np.ptp(x, axis=0)
    array([2, 2])

    >>> np.ptp(x, axis=1)
    array([1, 1])

    """
    return _wrapfunc(a, 'ptp', axis=axis, out=out)

Example 36

def test_scalar(self):
    """
    Should return 0 for all scalar
    """
    x = scalar('x')
    p = ptp(x)
    f = theano.function([x], p)

    y = numpy.asarray(rand() * 2000 - 1000, dtype=config.floatX)
    result = f(y)
    numpyResult = numpy.ptp(y)

    self.assertTrue(numpy.array_equal(result, numpyResult))

Example 37

def test_vector(self):
    x = vector('x')
    p = ptp(x, 0)
    f = theano.function([x], p)

    y = rand_ranged(-1000, 1000, [100])
    result = f(y)
    numpyResult = numpy.ptp(y, 0)

    self.assertTrue(numpy.array_equal(result, numpyResult))

Example 38

def test_matrix_first_axis(self):
    x = matrix('x')
    p = ptp(x, 1)
    f = theano.function([x], p)

    y = rand_ranged(-1000, 1000, [100, 100])
    result = f(y)
    numpyResult = numpy.ptp(y, 1)

    self.assertTrue(numpy.array_equal(result, numpyResult))

Example 39

def test_matrix_second_axis(self):
    x = matrix('x')
    p = ptp(x, 0)
    f = theano.function([x], p)

    y = rand_ranged(-1000, 1000, [100, 100])
    result = f(y)
    numpyResult = numpy.ptp(y, 0)

    self.assertTrue(numpy.array_equal(result, numpyResult))

Example 40

def test_matrix_neg_axis(self):
    x = matrix('x')
    p = ptp(x, -1)
    f = theano.function([x], p)

    y = rand_ranged(-1000, 1000, [100, 100])
    result = f(y)
    numpyResult = numpy.ptp(y, -1)

    self.assertTrue(numpy.array_equal(result, numpyResult))

Example 41

def test_interface(self):
    x = matrix('x')
    p = x.ptp(1)
    f = theano.function([x], p)

    y = rand_ranged(-1000, 1000, [100, 100])
    result = f(y)
    numpyResult = numpy.ptp(y, 1)

    self.assertTrue(numpy.array_equal(result, numpyResult))

Example 42

def has_constant(x):
    """
    Parameters
    ----------
    x: ndarray
        Array to be checked for a constant (n,k)

    Returns
    -------
    const : bool
        Flag indicating whether x contains a constant or has column span with
        a constant
    loc : int
        Column location of constant
    """
    if np.any(np.all(x == 1, axis=0)):
        loc = np.argwhere(np.all(x == 1, axis=0))
        return True, int(loc)

    if np.any((np.ptp(x, axis=0) == 0) & ~np.all(x == 0, axis=0)):
        loc = np.any((np.ptp(x, axis=0) == 0) & ~np.all(x == 0, axis=0))
        loc = np.argwhere(loc)
        return True, int(loc)

    n = x.shape[0]
    aug_rank = matrix_rank(np.c_[np.ones((n, 1)), x])
    rank = matrix_rank(x)
    has_const = bool(aug_rank == rank)
    loc = None
    if has_const:
        out = np.linalg.lstsq(x, np.ones((n, 1)))
        beta = out[0].ravel()
        loc = np.argmax(np.abs(beta) * x.var(0))
    return has_const, loc

Example 43

def test_ids(panel):
    data = PanelData(panel)
    eids = data.entity_ids
    assert eids.shape == (77, 1)
    assert len(np.unique(eids)) == 11
    for i in range(0, len(eids), 7):
        assert np.ptp(eids[i:i + 7]) == 0
        assert np.all((eids[i + 8:] - eids[i]) != 0)

    tids = data.time_ids
    assert tids.shape == (77, 1)
    assert len(np.unique(tids)) == 7
    for i in range(0, 11):
        assert np.ptp(tids[i::7]) == 0

Example 44

def test_neighbors_accuracy_with_n_candidates():
    # Checks whether accuracy increases as `n_candidates` increases.
    n_candidates_values = np.array([.1, 50, 500])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)

    for i, n_candidates in enumerate(n_candidates_values):
        lshf = LSHForest(n_candidates=n_candidates)
        ignore_warnings(lshf.fit)(X)
        for j in range(n_iter):
            query = X[rng.randint(0, n_samples)].reshape(1, -1)
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]

            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio

        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")

Example 45

def test_neighbors_accuracy_with_n_estimators():
    # Checks whether accuracy increases as `n_estimators` increases.
    n_estimators = np.array([1, 10, 100])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_estimators.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)

    for i, t in enumerate(n_estimators):
        lshf = LSHForest(n_candidates=500, n_estimators=t)
        ignore_warnings(lshf.fit)(X)
        for j in range(n_iter):
            query = X[rng.randint(0, n_samples)].reshape(1, -1)
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]

            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio

        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")

Example 46

def test_ptp(self):
    a = [3, 4, 5, 10, -3, -5, 6.0]
    assert_equal(np.ptp(a, axis=0), 15.0)

Example 47

def ptp(a, axis=None, out=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for 'peak to peak'.

    Parameters
    ----------
    a : array_like
        Input values.
    axis : int, optional
        Axis along which to find the peaks. By default, flatten the
        array.
    out : array_like
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type of the output values will be cast if necessary.

    Returns
    -------
    ptp : ndarray
        A new array holding the result, unless `out` was
        specified, in which case a reference to `out` is returned.

    Examples
    --------
    >>> x = np.arange(4).reshape((2,2))
    >>> x
    array([[0, 1],
           [2, 3]])

    >>> np.ptp(x, axis=0)
    array([2, 2])

    >>> np.ptp(x, axis=1)
    array([1, 1])

    """
    try:
        ptp = a.ptp
    except AttributeError:
        return _wrapit(a, 'ptp', axis, out)
    return ptp(axis, out)

Example 48

def ptp(a, axis=None, out=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for 'peak to peak'.

    Parameters
    ----------
    a : array_like
        Input values.
    axis : int, optional
        Axis along which to find the peaks. By default, flatten the
        array.
    out : array_like
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type of the output values will be cast if necessary.

    Returns
    -------
    ptp : ndarray
        A new array holding the result, unless `out` was
        specified, in which case a reference to `out` is returned.

    Examples
    --------
    >>> x = np.arange(4).reshape((2,2))
    >>> x
    array([[0, 1],
           [2, 3]])

    >>> np.ptp(x, axis=0)
    array([2, 2])

    >>> np.ptp(x, axis=1)
    array([1, 1])

    """
    try:
        ptp = a.ptp
    except AttributeError:
        return _wrapit(a, 'ptp', axis, out)
    return ptp(axis, out)

Example 49

def _plot_histogram(params):
    """Function for plotting histogram of peak-to-peak values."""
    import matplotlib.pyplot as plt
    epochs = params['epochs']
    p2p = np.ptp(epochs.get_data(), axis=2)
    types = list()
    data = list()
    if 'eeg' in params['types']:
        eegs = np.array([p2p.T[i] for i,
                         x in enumerate(params['types']) if x == 'eeg'])
        data.append(eegs.ravel())
        types.append('eeg')
    if 'mag' in params['types']:
        mags = np.array([p2p.T[i] for i,
                         x in enumerate(params['types']) if x == 'mag'])
        data.append(mags.ravel())
        types.append('mag')
    if 'grad' in params['types']:
        grads = np.array([p2p.T[i] for i,
                          x in enumerate(params['types']) if x == 'grad'])
        data.append(grads.ravel())
        types.append('grad')
    params['histogram'] = plt.figure()
    scalings = _handle_default('scalings')
    units = _handle_default('units')
    titles = _handle_default('titles')
    colors = _handle_default('color')
    for idx in range(len(types)):
        ax = plt.subplot(len(types), 1, idx + 1)
        plt.xlabel(units[types[idx]])
        plt.ylabel('count')
        color = colors[types[idx]]
        rej = None
        if epochs.reject is not None and types[idx] in epochs.reject.keys():
            rej = epochs.reject[types[idx]] * scalings[types[idx]]
            rng = [0., rej * 1.1]
        else:
            rng = None
        plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
                 range=rng)
        if rej is not None:
            ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
        plt.title(titles[types[idx]])
    params['histogram'].suptitle('Peak-to-peak histogram', y=0.99)
    params['histogram'].subplots_adjust(hspace=0.6)
    try:
        params['histogram'].show(warn=False)
    except Exception:
        pass
    if params['fig_proj'] is not None:
        params['fig_proj'].canvas.draw()

Example 50

def relim_axes(axes, percent=20):
    """
    Generate new axes for a matplotlib axes based on the collections present.

    :param axes:
        The matplotlib axes.

    :param percent: [optional]
        The percent of the data to extend past the minimum and maximum data
        points.

    :returns:
        A two-length tuple containing the lower and upper limits in the x- and
        y-axis, respectively.
    """
    data = np.vstack([item.get_offsets() for item in axes.collections \
                      if isinstance(item, PathCollection)])
    if data.size == 0:
        return (None, None)

    data = data.reshape(-1, 2)
    x, y = data[:,0], data[:, 1]

    # Only use finite values.
    finite = np.isfinite(x*y)
    x, y = x[finite], y[finite]

    if x.size > 1:
        xlim = [
            np.min(x) - np.ptp(x) * percent/100.,
            np.max(x) + np.ptp(x) * percent/100.,
        ]
    elif x.size == 0:
        xlim = None
    else:
        xlim = (x[0] - 1, x[0] + 1)

    if y.size > 1:
        ylim = [
            np.min(y) - np.ptp(y) * percent/100.,
            np.max(y) + np.ptp(y) * percent/100.
        ]
    elif y.size == 0:
        ylim = None
    else:
        ylim = (y[0] - 1, y[0] + 1)

    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    return (xlim, ylim)
