#!/usr/bin/env python
# coding: utf-8
import math
import numpy as np
from . import IS_stat
# this method actually belongs to MinEnergyCensoredSampling,
# but it can come in handy here as well
def get_events(sb, simplices): #simplices = bx.tri.simplices
"""
    Assigns an event to every simplex:
0=success, 1=failure, 2=mix
"""
in_failure = np.isin(simplices, sb.failure_points)
has_failure = in_failure.any(axis=1)
all_failure = in_failure.all(axis=1)
return np.int8(np.where(has_failure, np.where(all_failure, 1, 2), 0))
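# A minimal usage sketch. Assumption: `sb` only needs to expose a `failure_points`
# attribute with the indices of failure points, as used above; the stub below is
# hypothetical and merely stands in for the real sample box object.
def _demo_get_events():
    from collections import namedtuple
    StubSampleBox = namedtuple('StubSampleBox', ['failure_points'])
    sb = StubSampleBox(failure_points=np.array([2, 3, 5]))
    simplices = np.array([[0, 1, 4],   # no failure points   -> 0 (success)
                          [2, 3, 5],   # only failure points -> 1 (failure)
                          [0, 2, 4]])  # both kinds          -> 2 (mix)
    return get_events(sb, simplices)   # expected: [0, 1, 2]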
def get_TRI_estimation(siss, simplex_events):
siss.get_estimations()
simplices = np.array(tuple(siss.estimations.keys()))
probabilities = np.array(tuple(siss.estimations.values()))
estimation = dict()
estimation[-1] = np.sum(probabilities[simplices == -1])
    # the events could just as well be handled in-place (no copy is made here)
events = simplices[simplices != -1]
probabilities = probabilities[simplices != -1]
    # roughly: get_events() returns an array with the event number for each simplex, starting from zero
    # the slicing below picks, for every simplex found, its corresponding event
events = simplex_events[events]
    for i in range(3): # is a loop really worth it just for 0, 1, 2?
estimation[i] = np.sum(probabilities[events == i])
return estimation
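# A hedged sketch of the `siss` interface assumed above: after get_estimations()
# it exposes an `estimations` dict mapping simplex indices to probabilities,
# with the key -1 reserved for the probability outside the triangulation.
# The stub class and the numbers are purely illustrative.
class _StubSiss:
    def __init__(self, estimations):
        self.estimations = estimations
    def get_estimations(self):
        pass  # the real object would (re)compute self.estimations here
def _demo_get_TRI_estimation():
    siss = _StubSiss({-1: 0.1, 0: 0.5, 1: 0.3, 2: 0.1})
    simplex_events = np.array([0, 2, 1], dtype=np.int8)  # e.g. the output of get_events()
    return get_TRI_estimation(siss, simplex_events)
    # expected: {-1: 0.1, 0: 0.5, 1: 0.1, 2: 0.3}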
def is_outside(convex_hull, node_coordinates):
x = node_coordinates
#E [normal, offset] forming the hyperplane equation of the facet (see Qhull documentation for more)
A = convex_hull.equations[:,:-1]
b = convex_hull.equations[:,-1]
# N=nsim
NxN = A @ x.T + np.atleast_2d(b).T
mask = np.any(NxN > 0, axis=0)
return mask
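# A minimal sketch of is_outside() with a scipy.spatial.ConvexHull over the unit
# square: the first node lies inside the hull, the second outside.
def _demo_is_outside():
    from scipy.spatial import ConvexHull
    hull = ConvexHull(np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]))
    nodes = np.array([[0.5, 0.5], [2., 2.]])
    return is_outside(hull, nodes)  # expected: [False, True]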
def get_sub_simplex(convex_hull):
"""
    returns coordinates of the sub-simplex nodes (one centroid per convex hull facet),
truly yours, C.O.
"""
    # without weights for now
#simplices -- ndarray of ints, shape (nfacet, ndim)
    return np.mean(convex_hull.points[convex_hull.simplices], axis=1)
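# Usage sketch: in 2D the hull facets are edges, so the returned "sub-simplex"
# nodes are simply the midpoints of the hull edges.
def _demo_get_sub_simplex():
    from scipy.spatial import ConvexHull
    hull = ConvexHull(np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]))
    return get_sub_simplex(hull)  # the four edge midpoints of the unit square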
def get_COBYLA_constraints(convex_hull):
constraints = []
#E [normal, offset] forming the hyperplane equation of the facet (see Qhull documentation for more)
A = convex_hull.equations[:,:-1]
b = convex_hull.equations[:,-1]
def fungen(A_i, b_i):
def fun(x):
constrain = -(A_i @ x + b_i)
#print(x, A_i, b_i, constrain)
return constrain
return fun
for i in range(len(b)):
# taken from COBYLA sources:
# "the constraint functions should become
# nonnegative eventually, at least to the precision of RHOEND"
constraints.append({'type':'ineq', 'fun':fungen(A[i], b[i])})
return constraints
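# A hedged sketch of feeding the generated constraints to scipy.optimize.minimize
# with method='COBYLA', here used to find the in-hull point closest to a target
# outside the hull. The objective and the target point are illustrative only.
def _demo_get_COBYLA_constraints():
    from scipy.spatial import ConvexHull
    from scipy import optimize
    hull = ConvexHull(np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]))
    target = np.array([2., 2.])
    fun = lambda x: np.sum(np.square(x - target))  # squared distance to the target
    res = optimize.minimize(fun, x0=np.array([0.5, 0.5]), method='COBYLA',
                            constraints=get_COBYLA_constraints(hull))
    return res.x  # expected: close to the nearest hull corner [1., 1.]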
#
# DEPRECATED
#
# unbelievable: I was unable to find a package to calculate the second moments of a simplex
def points_inertia_tensor(vertices, masses=1):
"""
coordinates of vertices
"""
nsim, nvar = np.shape(vertices)
inertia_tensor = np.empty((nvar, nvar))
for i in range(nvar):
for j in range(i + 1):
if i==j:
inertia_tensor[i,j] = np.sum( masses * (np.sum(np.square(vertices), axis=1) - np.square(vertices[:, i])))
else:
inertia_tensor[i,j] = inertia_tensor[j,i] = - np.sum(masses * vertices[:, i]*vertices[:, j])
return inertia_tensor
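# Quick sanity check of the deprecated helper: two unit masses at (+1, 0) and
# (-1, 0) have zero inertia about the x-axis and inertia 2 about the y-axis.
def _demo_points_inertia_tensor():
    return points_inertia_tensor(np.array([[1., 0.], [-1., 0.]]))
    # expected: [[0., 0.],
    #            [0., 2.]]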
def simplex_volume(vertices):
"""
coordinates of vertices
"""
nsim, nvar = np.shape(vertices)
    return abs(np.linalg.det(vertices[1:] - vertices[0])) / math.factorial(nvar)
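# Sanity check: the unit right triangle (a 2D simplex) has volume 1/2! = 0.5.
def _demo_simplex_volume():
    return simplex_volume(np.array([[0., 0.], [1., 0.], [0., 1.]]))  # expected: 0.5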
def simplex_barycenter_inertia_tensor(vertices, masses=1):
"""
Returns the inertia matrix relative to the center of mass
coordinates of vertices
"""
nsim, nvar = np.shape(vertices)
return points_inertia_tensor(vertices, masses) /(nvar+1)/(nvar+2) * simplex_volume(vertices)
def simplex_inertia_tensor(vertices, masses=1):
"""
coordinates of vertices
"""
nsim, nvar = np.shape(vertices)
masses = masses * np.append(np.full(nsim, 1/(nvar+1)/(nvar+2)), (nvar+1)/(nvar+2))
barycenter = np.mean(vertices, axis=0)
    # the barycenter gets added into every row of the tensor anyway
#_tensor = points_inertia_tensor(vertices, masses)/(nvar+1) + np.diag(np.square(barycenter))#*(nvar+1))
_tensor = points_inertia_tensor(np.vstack((vertices, barycenter)), masses=masses)
#return _tensor / (nvar+2) * simplex_volume(vertices)
return _tensor * simplex_volume(vertices)
# don't ask me what it is
def simplex_covariance_matrix(vertices, weights=1):
"""
coordinates of vertices
"""
nsim, nvar = np.shape(vertices)
    # normalization
weights = weights / np.mean(weights)
barycenter = np.mean((vertices.T*weights).T, axis=0)
vertices = vertices - barycenter
covariance_matrix = np.empty((nvar, nvar))
for i in range(nvar):
for j in range(i + 1):
if i==j:
covariance_matrix[i,j] = np.sum(weights * np.square(vertices[:, i]))
else:
covariance_matrix[i,j] = covariance_matrix[j,i] = np.sum(weights * vertices[:, i]*vertices[:, j])
    # dividing by nsim, as in the variance formula - do we need it?
    # the /(nvar+1)/(nvar+2) factor comes from the simplex inertia tensor solution
    # same as simplex_volume, but it looks like it shouldn't be here
return covariance_matrix /(nvar+1)/(nvar+2) #* simplex_volume(vertices)
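# A hedged check of the deprecated helper: with unit weights on the unit right
# triangle the result matches the covariance of a uniform distribution over
# that simplex (variance 1/18 on the diagonal, covariance -1/36 off-diagonal).
def _demo_simplex_covariance_matrix():
    return simplex_covariance_matrix(np.array([[0., 0.], [1., 0.], [0., 1.]]))
    # expected: [[ 1/18, -1/36],
    #            [-1/36,  1/18]]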