Python numpy.polyint() Examples
The following are 24 code examples of numpy.polyint(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
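Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what numpy.polyint does: given polynomial coefficients ordered from highest to lowest degree, it returns the coefficients of an antiderivative, so evaluating that result with numpy.polyval at two points gives a definite integral.

import numpy as np

# p(x) = 3x^2 + 2x + 1, coefficients ordered from highest to lowest degree
p = np.array([3.0, 2.0, 1.0])

# Antiderivative P(x) = x^3 + x^2 + x; polyint appends an integration constant of 0.
P = np.polyint(p)
print(P)  # [1. 1. 1. 0.]

# Definite integral of p over [0, 2]: P(2) - P(0)
print(np.polyval(P, 2.0) - np.polyval(P, 0.0))  # 14.0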
Example #1
Source File: test_collocation.py From pySDC with BSD 2-Clause "Simplified" License | 6 votes |
def check_partialquadraturewithQ(collclass, t_start, t_end):
    for M in range(2, 13):
        coll = collclass(M, t_start, t_end)
        Q = coll.Qmat[1:, 1:]
        # as in TEST 1, create and integrate a polynomial with random coefficients,
        # but now of degree M-1 (or less for splines)
        degree = min(coll.order, M - 1)
        poly_coeff = np.random.rand(degree)
        poly_vals = np.polyval(poly_coeff, coll.nodes)
        poly_int_coeff = np.polyint(poly_coeff)
        for i in range(0, M):
            int_ex = np.polyval(poly_int_coeff, coll.nodes[i]) - np.polyval(poly_int_coeff, t_start)
            int_coll = np.dot(poly_vals, Q[i, :])
            assert abs(int_ex - int_coll) < 1e-12, \
                "For node type " + coll.__class__.__name__ + \
                ", partial quadrature from Qmat rule failed to integrate polynomial of degree M-1 exactly for M = " + str(M)

# TEST 3:
# Check that the partial quadrature rules from Smat entries have order equal to number of nodes M
# -----------------------------------------------------------------------------------------------
Example #2
Source File: test_regression.py From mxnet-lambda with Apache License 2.0 | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=np.complex)
    assert_(np.polyint(x).dtype == np.complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=np.int)
    assert_(np.polyint(x).dtype == np.float, msg)
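Several of the regression tests on this page check the same dtype rule. A standalone sketch of that rule (using the builtin dtype spellings complex and int rather than the deprecated np.complex/np.int aliases that some of the older copies use) might look like this: integrating complex coefficients keeps them complex, while integer coefficients are promoted to float because each coefficient gets divided by its new exponent.

import numpy as np

# Complex coefficients stay complex after integration.
c = np.ones(3, dtype=complex)
assert np.polyint(c).dtype == np.complex128

# Integer coefficients are promoted to float, since polyint divides by the exponents.
i = np.ones(3, dtype=int)
assert np.polyint(i).dtype == np.float64

print("dtype checks passed")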
Example #3
Source File: test_regression.py From keras-lambda with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=np.complex)
    assert_(np.polyint(x).dtype == np.complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=np.int)
    assert_(np.polyint(x).dtype == np.float, msg)
Example #4
Source File: test_regression.py From twitter-stock-recommendation with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #5
Source File: test_regression.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #6
Source File: test_regression.py From coffeegrindsize with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #7
Source File: test_regression.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=np.complex)
    assert_(np.polyint(x).dtype == np.complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=np.int)
    assert_(np.polyint(x).dtype == np.float, msg)
Example #8
Source File: test_regression.py From ImageFusion with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=np.complex)
    assert_(np.polyint(x).dtype == np.complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=np.int)
    assert_(np.polyint(x).dtype == np.float, msg)
Example #9
Source File: test_collocation.py From pySDC with BSD 2-Clause "Simplified" License | 5 votes |
def check_partialquadraturewithS(collclass, t_start, t_end):
    for M in range(2, 13):
        coll = collclass(M, t_start, t_end)
        S = coll.Smat[1:, 1:]
        # as in TEST 1, create and integrate a polynomial with random coefficients,
        # but now of degree M-1 (or less for splines)
        degree = min(coll.order, M - 1)
        poly_coeff = np.random.rand(degree)
        poly_vals = np.polyval(poly_coeff, coll.nodes)
        poly_int_coeff = np.polyint(poly_coeff)
        for i in range(1, M):
            int_ex = np.polyval(poly_int_coeff, coll.nodes[i]) - np.polyval(poly_int_coeff, coll.nodes[i - 1])
            int_coll = np.dot(poly_vals, S[i, :])
            assert abs(int_ex - int_coll) < 1e-12, \
                "For node type " + coll.__class__.__name__ + \
                ", partial quadrature rule from Smat failed to integrate polynomial of degree M-1 exactly for M = " + str(M)
Example #10
Source File: test_collocation.py From pySDC with BSD 2-Clause "Simplified" License | 5 votes |
def check_canintegratepolynomials(collclass, t_start, t_end):
    for M in range(2, 13):
        coll = collclass(M, t_start, t_end)
        # some basic consistency tests
        assert np.size(coll.nodes) == np.size(coll.weights), \
            "For node type " + coll.__class__.__name__ + ", number of entries in nodes and weights is different"
        assert np.size(coll.nodes) == M, \
            "For node type " + coll.__class__.__name__ + ", requesting M nodes did not produce M entries in nodes and weights"
        # generate random set of polynomial coefficients
        poly_coeff = np.random.rand(coll.order - 1)
        # evaluate polynomial at collocation nodes
        poly_vals = np.polyval(poly_coeff, coll.nodes)
        # use numpy's polyint function to compute anti-derivative of polynomial
        poly_int_coeff = np.polyint(poly_coeff)
        # compute integral from t_start to t_end
        int_ex = np.polyval(poly_int_coeff, t_end) - np.polyval(poly_int_coeff, t_start)
        # use quadrature rule to compute integral
        int_coll = coll.evaluate(coll.weights, poly_vals)
        # for large values of M, substantial differences due to round-off error have to be expected
        assert abs(int_ex - int_coll) < 1e-13, \
            "For node type " + coll.__class__.__name__ + ", failed to integrate polynomial of degree " + \
            str(coll.order - 1) + " exactly. Error: %5.3e" % abs(int_ex - int_coll)

# TEST 2:
# Check that the Qmat entries are equal to the sum of Smat entries
# ----------------------------------------------------------------
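Examples #1, #9, and #10 all rely on the same verification pattern: evaluate a random polynomial at the quadrature nodes, apply the weights, and compare against the exact integral computed from numpy.polyint. The following self-contained sketch reproduces that pattern with NumPy's own Gauss-Legendre nodes and weights instead of a pySDC collocation class; the substitution is an assumption of this illustration, not code from the project.

import numpy as np

# Gauss-Legendre rule with M nodes on [-1, 1] is exact for polynomials of degree <= 2M - 1.
M = 5
nodes, weights = np.polynomial.legendre.leggauss(M)

# Random polynomial of degree 2M - 1, coefficients ordered highest degree first.
poly_coeff = np.random.rand(2 * M)

# Quadrature approximation: weighted sum of polynomial values at the nodes.
int_quad = np.dot(weights, np.polyval(poly_coeff, nodes))

# Exact integral over [-1, 1] via the antiderivative from polyint.
poly_int_coeff = np.polyint(poly_coeff)
int_exact = np.polyval(poly_int_coeff, 1.0) - np.polyval(poly_int_coeff, -1.0)

assert abs(int_exact - int_quad) < 1e-12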
Example #11
Source File: test_regression.py From pySINDy with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #12
Source File: test_regression.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #13
Source File: test_regression.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #14
Source File: test_regression.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #15
Source File: test_regression.py From Computable with MIT License | 5 votes |
def test_polyint_type(self):
    """Ticket #944"""
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=np.complex)
    assert_(np.polyint(x).dtype == np.complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=np.int)
    assert_(np.polyint(x).dtype == np.float, msg)
Example #16
Source File: test_regression.py From vnpy_crypto with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #17
Source File: test_regression.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=np.complex)
    assert_(np.polyint(x).dtype == np.complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=np.int)
    assert_(np.polyint(x).dtype == np.float, msg)
Example #18
Source File: test_regression.py From lambda-packs with MIT License | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=np.complex)
    assert_(np.polyint(x).dtype == np.complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=np.int)
    assert_(np.polyint(x).dtype == np.float, msg)
Example #19
Source File: test_regression.py From recruit with Apache License 2.0 | 5 votes |
def test_polyint_type(self):
    # Ticket #944
    msg = "Wrong type, should be complex"
    x = np.ones(3, dtype=complex)
    assert_(np.polyint(x).dtype == complex, msg)
    msg = "Wrong type, should be float"
    x = np.ones(3, dtype=int)
    assert_(np.polyint(x).dtype == float, msg)
Example #20
Source File: graph_metrics.py From compare-codecs with Apache License 2.0 | 4 votes |
def BdRate(group1, group2):
    """Compute the BD-rate between two score groups.

    The returned object also contains the range of PSNR values used to
    compute the result.

    Bjontegaard's metric allows to compute the average % saving in bitrate
    between two rate-distortion curves [1].

    rate1,psnr1 - RD points for curve 1
    rate2,psnr2 - RD points for curve 2

    adapted from code from: (c) 2010 Giuseppe Valenzise
    copied from code by jzern@google.com, jimbankoski@google.com
    """
    # pylint: disable=too-many-locals
    metric_set1 = group1.dataPoints()
    metric_set2 = group2.dataPoints()

    # numpy plays games with its exported functions.
    # pylint: disable=no-member
    # pylint: disable=bad-builtin
    psnr1 = [x[1] for x in metric_set1]
    psnr2 = [x[1] for x in metric_set2]
    log_rate1 = map(math.log, [x[0] for x in metric_set1])
    log_rate2 = map(math.log, [x[0] for x in metric_set2])

    # Best cubic poly fit for graph represented by log_ratex, psnr_x.
    poly1 = numpy.polyfit(psnr1, log_rate1, 3)
    poly2 = numpy.polyfit(psnr2, log_rate2, 3)

    # Integration interval.
    min_int = max([min(psnr1), min(psnr2)])
    max_int = min([max(psnr1), max(psnr2)])

    # Find integral.
    p_int1 = numpy.polyint(poly1)
    p_int2 = numpy.polyint(poly2)

    # Calculate the integrated value over the interval we care about.
    int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
    int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

    # Calculate the average improvement.
    avg_exp_diff = (int2 - int1) / (max_int - min_int)

    # In really badly formed data the exponent can grow too large; clamp it.
    if avg_exp_diff > 200:
        avg_exp_diff = 200

    # Convert to a percentage.
    avg_diff = (math.exp(avg_exp_diff) - 1) * 100

    return {'difference': avg_diff, 'psnr': [min_int, max_int]}
Example #21
Source File: metrics.py From pcc_geo_cnn with MIT License | 4 votes |
def bdsnr(metric_set1, metric_set2):
    """
    BJONTEGAARD    Bjontegaard metric calculation
    Bjontegaard's metric allows to compute the average gain in psnr between
    two rate-distortion curves [1].

    rate1,psnr1 - RD points for curve 1
    rate2,psnr2 - RD points for curve 2

    returns the calculated Bjontegaard metric 'dsnr'

    code adapted from code written by: (c) 2010 Giuseppe Valenzise
    http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
    """
    # pylint: disable=too-many-locals
    # numpy seems to do tricks with its exports.
    # pylint: disable=no-member
    # map() is recommended against.
    # pylint: disable=bad-builtin
    rate1 = [x[0] for x in metric_set1]
    psnr1 = [x[1] for x in metric_set1]
    rate2 = [x[0] for x in metric_set2]
    psnr2 = [x[1] for x in metric_set2]

    log_rate1 = list(map(math.log, rate1))
    log_rate2 = list(map(math.log, rate2))

    # Best cubic poly fit for graph represented by log_ratex, psnr_x.
    poly1 = numpy.polyfit(log_rate1, psnr1, 3)
    poly2 = numpy.polyfit(log_rate2, psnr2, 3)

    # Integration interval.
    min_int = max([min(log_rate1), min(log_rate2)])
    max_int = min([max(log_rate1), max(log_rate2)])

    # Integrate poly1 and poly2.
    p_int1 = numpy.polyint(poly1)
    p_int2 = numpy.polyint(poly2)

    # Calculate the integrated value over the interval we care about.
    int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
    int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

    # Calculate the average improvement.
    if max_int != min_int:
        avg_diff = (int2 - int1) / (max_int - min_int)
    else:
        avg_diff = 0.0

    return avg_diff
Example #22
Source File: metrics.py From pcc_geo_cnn with MIT License | 4 votes |
def bdrate(metric_set1, metric_set2):
    """
    BJONTEGAARD    Bjontegaard metric calculation
    Bjontegaard's metric allows to compute the average % saving in bitrate
    between two rate-distortion curves [1].

    rate1,psnr1 - RD points for curve 1
    rate2,psnr2 - RD points for curve 2

    adapted from code from: (c) 2010 Giuseppe Valenzise
    """
    # numpy plays games with its exported functions.
    # pylint: disable=no-member
    # pylint: disable=too-many-locals
    # pylint: disable=bad-builtin
    rate1 = [x[0] for x in metric_set1]
    psnr1 = [x[1] for x in metric_set1]
    rate2 = [x[0] for x in metric_set2]
    psnr2 = [x[1] for x in metric_set2]

    log_rate1 = list(map(math.log, rate1))
    log_rate2 = list(map(math.log, rate2))

    # Best cubic poly fit for graph represented by log_ratex, psnr_x.
    poly1 = numpy.polyfit(psnr1, log_rate1, 3)
    poly2 = numpy.polyfit(psnr2, log_rate2, 3)

    # Integration interval.
    min_int = max([min(psnr1), min(psnr2)])
    max_int = min([max(psnr1), max(psnr2)])

    # Find integral.
    p_int1 = numpy.polyint(poly1)
    p_int2 = numpy.polyint(poly2)

    # Calculate the integrated value over the interval we care about.
    int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
    int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

    # Calculate the average improvement.
    avg_exp_diff = (int2 - int1) / (max_int - min_int)

    # In really badly formed data the exponent can grow too large; clamp it.
    if avg_exp_diff > 200:
        avg_exp_diff = 200

    # Convert to a percentage.
    avg_diff = (math.exp(avg_exp_diff) - 1) * 100

    return avg_diff
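The Bjontegaard-metric functions in Examples #20-#22 (and #23-#24 below) follow the same recipe: fit a cubic polynomial to each rate-distortion curve, integrate the fits with numpy.polyint, and convert the average difference of the integrals over the overlapping interval into a rate saving or PSNR gain. The following standalone sketch walks through the BD-rate case with made-up (bitrate, PSNR) points; the data and variable names are illustrative, not taken from either project.

import math
import numpy

# Hypothetical (bitrate, PSNR) measurements for two codecs.
curve1 = [(100, 30.0), (200, 33.0), (400, 36.0), (800, 39.0)]
curve2 = [(90, 30.5), (180, 33.5), (360, 36.5), (720, 39.5)]

psnr1 = [p for _, p in curve1]
psnr2 = [p for _, p in curve2]
log_rate1 = [math.log(r) for r, _ in curve1]
log_rate2 = [math.log(r) for r, _ in curve2]

# Cubic fit of log-rate as a function of PSNR, as in bdrate() above.
poly1 = numpy.polyfit(psnr1, log_rate1, 3)
poly2 = numpy.polyfit(psnr2, log_rate2, 3)

# Overlapping PSNR interval and the antiderivatives of both fits.
lo = max(min(psnr1), min(psnr2))
hi = min(max(psnr1), max(psnr2))
p_int1 = numpy.polyint(poly1)
p_int2 = numpy.polyint(poly2)
int1 = numpy.polyval(p_int1, hi) - numpy.polyval(p_int1, lo)
int2 = numpy.polyval(p_int2, hi) - numpy.polyval(p_int2, lo)

# Average log-rate difference, converted to a percentage bitrate saving.
avg_exp_diff = (int2 - int1) / (hi - lo)
print("BD-rate: %.2f%%" % ((math.exp(avg_exp_diff) - 1) * 100))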
Example #23
Source File: visual_metrics.py From compare-codecs with Apache License 2.0 | 4 votes |
def bdrate(metric_set1, metric_set2):
    """
    BJONTEGAARD    Bjontegaard metric calculation
    Bjontegaard's metric allows to compute the average % saving in bitrate
    between two rate-distortion curves [1].

    rate1,psnr1 - RD points for curve 1
    rate2,psnr2 - RD points for curve 2

    adapted from code from: (c) 2010 Giuseppe Valenzise
    """
    # numpy plays games with its exported functions.
    # pylint: disable=no-member
    # pylint: disable=too-many-locals
    # pylint: disable=bad-builtin
    rate1 = [x[0] for x in metric_set1]
    psnr1 = [x[1] for x in metric_set1]
    rate2 = [x[0] for x in metric_set2]
    psnr2 = [x[1] for x in metric_set2]

    log_rate1 = map(math.log, rate1)
    log_rate2 = map(math.log, rate2)

    # Best cubic poly fit for graph represented by log_ratex, psnr_x.
    poly1 = numpy.polyfit(psnr1, log_rate1, 3)
    poly2 = numpy.polyfit(psnr2, log_rate2, 3)

    # Integration interval.
    min_int = max([min(psnr1), min(psnr2)])
    max_int = min([max(psnr1), max(psnr2)])

    # Find integral.
    p_int1 = numpy.polyint(poly1)
    p_int2 = numpy.polyint(poly2)

    # Calculate the integrated value over the interval we care about.
    int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
    int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

    # Calculate the average improvement.
    avg_exp_diff = (int2 - int1) / (max_int - min_int)

    # In really badly formed data the exponent can grow too large; clamp it.
    if avg_exp_diff > 200:
        avg_exp_diff = 200

    # Convert to a percentage.
    avg_diff = (math.exp(avg_exp_diff) - 1) * 100

    return avg_diff
Example #24
Source File: visual_metrics.py From compare-codecs with Apache License 2.0 | 4 votes |
def bdsnr(metric_set1, metric_set2):
    """
    BJONTEGAARD    Bjontegaard metric calculation
    Bjontegaard's metric allows to compute the average gain in psnr between
    two rate-distortion curves [1].

    rate1,psnr1 - RD points for curve 1
    rate2,psnr2 - RD points for curve 2

    returns the calculated Bjontegaard metric 'dsnr'

    code adapted from code written by: (c) 2010 Giuseppe Valenzise
    http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
    """
    # pylint: disable=too-many-locals
    # numpy seems to do tricks with its exports.
    # pylint: disable=no-member
    # map() is recommended against.
    # pylint: disable=bad-builtin
    rate1 = [x[0] for x in metric_set1]
    psnr1 = [x[1] for x in metric_set1]
    rate2 = [x[0] for x in metric_set2]
    psnr2 = [x[1] for x in metric_set2]

    log_rate1 = map(math.log, rate1)
    log_rate2 = map(math.log, rate2)

    # Best cubic poly fit for graph represented by log_ratex, psnr_x.
    poly1 = numpy.polyfit(log_rate1, psnr1, 3)
    poly2 = numpy.polyfit(log_rate2, psnr2, 3)

    # Integration interval.
    min_int = max([min(log_rate1), min(log_rate2)])
    max_int = min([max(log_rate1), max(log_rate2)])

    # Integrate poly1 and poly2.
    p_int1 = numpy.polyint(poly1)
    p_int2 = numpy.polyint(poly2)

    # Calculate the integrated value over the interval we care about.
    int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
    int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

    # Calculate the average improvement.
    if max_int != min_int:
        avg_diff = (int2 - int1) / (max_int - min_int)
    else:
        avg_diff = 0.0

    return avg_diff