iam-git / WellMet (public) (License: MIT) (since 2021-08-31) (hash sha1)
WellMet is a pure Python framework for spatial structural reliability analysis, or, more specifically, for "failure probability estimation and detection of failure surfaces by adaptive sequential decomposition of the design domain".
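For orientation, the crudest baseline for failure probability estimation is a plain Monte Carlo ratio of failed samples. The sketch below illustrates only that baseline concept, not WellMet's API (the sample_box and estimation interfaces appear in the diff further down); the limit state g and all names here are hypothetical:

import numpy as np
from scipy import stats

# Hypothetical limit state: failure when g(x) <= 0 (illustration only, not WellMet code).
def g(x):
    return 3.0 - x.sum(axis=1) / np.sqrt(x.shape[1])

rng = np.random.default_rng(42)
x = rng.standard_normal((100_000, 2))   # two standard normal input variables
failsi = g(x) <= 0                      # failure indicators ("failsi" in the diff below)
pf = failsi.mean()                      # crude Monte Carlo failure probability
print(f"pf ~ {pf:.5f}, exact {stats.norm.cdf(-3):.5f}")

The commit below implements a far more sample-efficient approach: importance sampling over the Voronoi cells of the points already evaluated.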
List of commits:
Subject Hash Author Date (UTC)
estimation: WIP, Voronoi_2_point worked 364c89027063f0bd3a1af80d7d53db059184dd68 Олёш 2020-07-10 02:51:33
IS_stat: one more IS implementation a23ab3ff5eac9a039f415b9281a02ebbbea778f2 Олёш 2020-07-09 03:09:14
f_models: .pdf() and .sample_pdf() redesigned 40dfc578e14f630c1735a279a3ec41d169cc7606 Олёш 2020-07-08 01:18:29
f_models: linear transformations and .pdf function added 7ce23a722836eaa3488329870691a332641a92b1 Олёш 2020-07-07 03:36:07
estimation: WIP 602b24888d5b22e7a994a14119af3a12c53a374d Alex 2020-06-29 08:58:54
g_models: ConicSection added e603e051a42dec4694766d536e71300dc62cb3a8 Олёш 2020-06-27 11:11:31
qt_plot: slider and triangulation fix 04a8bf659873bed50b82dc94497f2fc473c883dd Олёш 2020-06-26 19:56:58
qt_plot: show triangulation 0aa36d61be95b6f40f4a01e77a61a701c78789ce Олёш 2020-06-26 10:10:39
f_models: added the missing pdfs 1f3c99bcd1eaeed9b8b84175be9189449a7dec5f Олёш 2020-06-25 20:28:33
qt_plot: connection to BlackBox feature added fcff3099dc61e568981e1f1236c29b5326547fd5 Олёш 2020-06-23 09:52:56
qt_plot: estimation graphs are implemented a31c5b3c344893f0f0b7022ee54544493e97c79a Олёш 2020-06-21 22:21:55
qt_plot: WIP 7cf97855991f717873d02ce65058790b324a319d Олёш 2020-06-21 10:17:04
qt_plot: minor changes 931ca9629256f37588e7ed4cb497f71c93954a7b Олёш 2020-06-15 06:01:10
candybox introduced, stores additional data 2d7e227d114a60ab47456bfc62308b16dba4eacf Олёш 2020-06-11 23:54:54
qt_plot: finally something interactive... 2bcc8842afe87d1e44e066fe168fcfedb8043947 Олёш 2020-06-07 07:35:57
WIP: matplotlib and QtGui ce3d38363636ba60fcddc0ebfe45c1cefadfc7a5 Олёш 2020-06-07 01:57:04
tri_estimations_graph added (by plot.ly) 1d307b1a8e26277c6259596eb4c912b3262d3ee7 Олёш 2020-06-04 15:03:57
directory structure changed d67e975a7f4c7b01388f469324f635c74cf17995 Олёш 2020-06-03 21:17:35
Commit 364c89027063f0bd3a1af80d7d53db059184dd68 - estimation: WIP, Voronoi_2_point worked
Author: Олёш
Author date (UTC): 2020-07-10 02:51
Committer name: Олёш
Committer date (UTC): 2020-07-10 02:51
Parent(s): a23ab3ff5eac9a039f415b9281a02ebbbea778f2
Signer:
Signing key:
Signing status: N
Tree: 415ff69de1bf884759027ed2c7466c68377b94ec
File Lines added Lines deleted
estimation.py 141 136
File estimation.py changed (mode: 100644) (index 9137ac9..221ae6f)
... ... import scipy.stats as stats
12 12 from scipy.spatial import cKDTree
13 13 from scipy import interpolate
14 14
15 import collections # for defaultdict
15 16
16 #%%
17
17 from .IS_stat import IS
18 from .candybox import CandyBox
18 19
19 20 # the national words are disappearing :(
20 21 # gradient must be a function!
 
... ... def progress(estimations, sampled_plan_R, Z, gradient, f_i, nis=500, budget=2000
69 70
70 71
71 72
72 # In[66]:
73 73
74 74
75 75 #
 
... ... def L2_Voronoi_cKDTree_failure_rate(sample_box, space='Rn', nis=50000):
214 214
215 215
216 216 #
217 # l1 Voronoi 2_point estimation
217 # Voronoi 2_point estimation
218 218 #
219 def Voronoi_2_point_estimations(sampled_plan_R, failsi, gradient, f_i, budget=20000, L2_metric=False):
219 def Voronoi_2_point_estimation(sample_box, model_space='Rn', sampling_space=None, p_norm=1, gradient=None, budget=20000, callback=None):
220 220 """ """
221 221 Voronoi_2_point estimations Voronoi_2_point estimations
222 222 budget=20000 budget=20000
223 L2_metric=False - calculate L2 estimations for comparison
224
225 223 """ """
226 #sampled_plan_R = sampled_plan_R_full[:nsim]
227 #failsi = failsi_full[:nsim]
228 nsim, nvar = np.shape(sampled_plan_R)
229 224
230 nis = round(budget/nsim)
231 # simpler to just compute it
232 PDF = np.prod([f_i[j].pdf(sampled_plan_R[:, j]) for j in range(nvar)], axis=0)
225 # so, to be blunt
226 # no little legs, no cartoons (no gradient - fall back to the cKDTree variant)
227 if gradient is None:
228 return Voronoi_2_point_cKDTree(sample_box, model_space=model_space,sampling_space=sampling_space,\
229 p_norm=p_norm, budget=budget, callback=callback)
230 if callback is None:
231 callback = lambda *_, **__: None
232
233 # this is information for the callback
234 estimation={'method': "Voronoi_2_point_estimation", 'p_norm':p_norm, 'nis':nis}
235 estimation['model_space'] = model_space
236 estimation['sampling_space'] = sampling_space
233 237
234 sampled_plan_R_ma = np.ma.asarray(sampled_plan_R)
235 238
236 # here I would use Rd
237 grad = np.abs(gradient([0 for j in range(nvar)]))
238 tree = cKDTree(sampled_plan_R * grad)
239 nsim = sample_box.nsim
240 nis = max(round(budget/nsim), 100)
241
242 # pull the distribution out of the sample_box
243 f = sample_box.sampled_plan
244
245 # I know the sample box recalculates failsi every time
246 failsi = sample_box.failsi
247
248 PDF = sample_box.pdf(model_space)
249
250 # here I split things into the space in which we sample
251 # and the "model" space in which, essentially, we measure distances
252 sampled_plan_model = getattr(sample_box, model_space)
253
254 if sampling_space is None:
255 sampling_space = model_space
256 # sing like sampling
257 sampled_plan_sing = sampled_plan_model
258 else:
259 sampled_plan_sing = getattr(sample_box, sampling_space)
260
261 sampled_plan_sing_ma = np.ma.asarray(sampled_plan_sing)
262
263
264
265 if sampling_space is None:
266 sampling_space = model_space
267 # sing like sampling
268 sampled_plan_sing = sampled_plan_model
269
270
271
272 tree = cKDTree(sampled_plan_model)
239 273
274 global_stats = collections.defaultdict(int)
275
240 276
241 #nis = 500
242 # initialization
243 L1_2_point_Voronoi_upper_bound = 0
244 L1_2_point_Voronoi_failure_rate = 0
245 L1_2_point_Voronoi_pure_failure_rate = 0
246 L1_2_point_Voronoi_lower_bound = 0
247 L1_Voronoi_failure_rate = 0
248
249 L2_2_point_Voronoi_upper_bound = 0
250 L2_2_point_Voronoi_failure_rate = 0
251 L2_2_point_Voronoi_pure_failure_rate = 0
252 L2_2_point_Voronoi_lower_bound = 0
253 L2_Voronoi_failure_rate = 0
254
255 Voronoi_2_point_test = 0
256 #heat_estimation = 0
257 277
278 # here we go through all points (i.e. samples) one by one in a loop
279 # and restrict ourselves to the nearest points only
280 # this is how we ensure disjointness
281 # so we can simply sum up all the collected probabilities
258 282 for i in range(nsim): # loop over all points, not failing points only
259 283
260 sampled_plan_R_ma.mask = ma.nomask
261 sampled_plan_R_ma[i] = ma.masked
284 sampled_plan_sing_ma.mask = ma.nomask
285 sampled_plan_sing_ma[i] = ma.masked
286
287 # here I don't use KDTree, because I would have to rebuild the tree from the masked array every time
288 # but the questions of the p-norm and of the standard deviation are very much open to discussion here.
262 289
263 # did not prove itself
264 #==============================================================================
265 # delta_Rd_matrix = sampled_plan_Rd_ma - sampled_plan_Rd[i]
266 # mindist = [np.min(np.abs(np.where(delta_Rd_matrix[:,j] < 0,delta_Rd_matrix[:,j], f_i[j].std() ))) + np.min(np.abs(np.where(delta_Rd_matrix[:,j] > 0,delta_Rd_matrix[:,j], f_i[j].std() ))) for j in range(nvar)]
267 #
268 # # set the minimum distance as the standard deviation of IS density
269 # h_i = [stats.norm(sampled_plan_Rd[i,j], mindist[j] ) for j in range(nvar)] #! plug in standard deviation to taste
270 #==============================================================================
271 290 # find distance to the nearest sampling point (from all points)
272 mindist = np.min(np.sum(np.square(sampled_plan_R_ma - sampled_plan_R[i]), axis=1))**0.5
273
274 # set the minimum distance as the standard deviation of IS density
275 h_i = [stats.norm(sampled_plan_R[i,j], 2*mindist ) for j in range(nvar)] #! plug in standard deviation to taste
276
291 mindist_sing = np.min(np.sum(np.square(sampled_plan_sing_ma - sampled_plan_sing[i]), axis=1))**0.5
292
277 293 # use IS sampling density with center equal to the current "red" point
294 # set the minimum distance as the standard deviation of IS density
295 h_i = [stats.norm(sampled_plan_sing[i,j], 2*mindist_sing) for j in range(nvar)] #! plug in standard deviation to taste
296 h = f_models.UnCorD(h_i)
297
278 298
279 # select nis = 100 points from IS density and
280 # if the point's nearest neighbor is a red point from the sampled_plan,
281
282 h_plan = np.zeros((nis, nvar))
283 for j in range(nvar):
284 h_plan[:, j] = h_i[j].ppf(np.random.random(nis)) # realization of the random variable's weighting function
285
286 # variance-corrected IS
287 weights_sim = np.prod([f_i[j].pdf(h_plan[:, j]) / h_i[j].pdf(h_plan[:, j]) for j in range(nvar)], axis=0) # [f1/h1, ..., fn/hn]
288
289
290
291
292 dd, ii = tree.query(h_plan * grad)
293
299 # select nis = 100 points from IS density
300 # it is called h_plan, but it carries the distribution and the densities in f
301 h_plan = IS(f, h, space_from_h='R', space_to_f=sampling_space, Nsim=nis)
302
303 # no point checking the sum of the weights; it won't be one, it just won't come out
304
305 """
306 # dd - The distances to the nearest neighbors
307 # ii - The locations of the neighbors in self.data
308 # k - The list of k-th nearest neighbors to return.
309 # If k is an integer it is treated as a list of [1, … k] (range(1, k+1)).
310 # Note that the counting starts from 1
311 # p - Which Minkowski p-norm to use.
312 # 1 is the sum-of-absolute-values “Manhattan” distance 2 is the usual Euclidean distance
313 # infinity is the maximum-coordinate-difference distance
314 """
315 h_plan_model = getattr(h_plan, model_space)
316 dd, ii = tree.query(h_plan_model, k=1, p=p_norm)
317
318 # I'll keep it capitalized
294 319 Vor_mask = np.where(ii==i, True, False)
295 # must equal one
296 Voronoi_2_point_test += np.sum(weights_sim[Vor_mask]) / nis
320 h_plan_model_ma = h_plan_model[Vor_mask]
321
297 322
298 node_pf_estimations = np.empty(len(h_plan[Vor_mask]))
299 node_pf_pure_estimations = np.empty(len(h_plan[Vor_mask]))# pure distance estimation
300 node_failsi = np.empty(len(h_plan[Vor_mask]), dtype=np.bool) # for L1 Voronoi
301 323
302 for node_idx in range(len(h_plan[Vor_mask])):
303 node = h_plan[Vor_mask][node_idx]
304 inode2points_Rd_matrix = np.sum(np.abs((sampled_plan_R - node) * gradient(node)), axis=1)
324 # how many points are nearest to my sample
325 # np.empty() has dtype=float by default
326 # I keep this nonsense only for the callback's sake
327 node_pf_estimations = np.empty(len(h_plan_model_ma))
328 node_pf_pure_estimations = np.empty(len(h_plan_model_ma))# pure distance estimation
329 node_failsi = np.empty(len(h_plan_model_ma), dtype=np.bool) # for L1 Voronoi
330
331 # go through every single point
332 for node_idx in range(len(h_plan_model_ma)):
333 # KDTree was used only for the split into disjoint cells; everything else happens here
334 # and all of that because of the gradient
335 node = h_plan_model_ma[node_idx]
336 # axis=1 - sums all directions together, the result is a 1D array of size nsim
337 inode2points_model_matrix = np.sum(np.abs(((sampled_plan_model - node) * gradient(node))**p_norm), axis=1)
305 338 #print(inode2points_Rd_matrix)
306 idx = np.argpartition(inode2points_Rd_matrix, 2)
339
340 """
341 partition -
342 Creates a copy of the array with its elements rearranged in such a way that
343 the value of the element in k-th position is in the position it would be in a sorted array.
344 All elements smaller than the k-th element are moved before this element
345 and all equal or greater are moved behind it. The ordering of the elements in the two partitions is undefined.
346 """
347 idx = np.argpartition(inode2points_model_matrix, 2)
307 348 node_failsi[node_idx] = failsi[idx[0]]
308 349
309 350
310 points_weight = PDF[idx[0]] / inode2points_Rd_matrix[idx[0]] + PDF[idx[1]] / inode2points_Rd_matrix[idx[1]]
311 points_distances = 1 / inode2points_Rd_matrix[idx[0]] + 1 / inode2points_Rd_matrix[idx[1]]
351 points_weight = PDF[idx[0]] / inode2points_model_matrix[idx[0]] + PDF[idx[1]] / inode2points_model_matrix[idx[1]]
352 points_distances = 1 / inode2points_model_matrix[idx[0]] + 1 / inode2points_model_matrix[idx[1]]
312 353
313 failure_weight = int(failsi[idx[0]]) * PDF[idx[0]] / inode2points_Rd_matrix[idx[0]]
314 failure_weight += int(failsi[idx[1]]) * PDF[idx[1]] / inode2points_Rd_matrix[idx[1]]
354 failure_weight = int(failsi[idx[0]]) * PDF[idx[0]] / inode2points_model_matrix[idx[0]]
355 failure_weight += int(failsi[idx[1]]) * PDF[idx[1]] / inode2points_model_matrix[idx[1]]
315 356
316 failure_distance = int(failsi[idx[0]]) / inode2points_Rd_matrix[idx[0]] + int(failsi[idx[1]]) / inode2points_Rd_matrix[idx[1]]
357 failure_distance = int(failsi[idx[0]]) / inode2points_model_matrix[idx[0]] + int(failsi[idx[1]]) / inode2points_model_matrix[idx[1]]
317 358
318 359
319 360 node_pf_estimations[node_idx] = failure_weight/points_weight
320 361 node_pf_pure_estimations[node_idx] = failure_distance/points_distances
321 362
322 363
323 #for k in range(len(ii)):
324 # points_weigths[ii[k]] = points_weigths[ii[k]] + weights_sim[k] / nis
325 # near_neighbors[ii[k]] = near_neighbors[ii[k]] + 1
326 # Vor_mask[k] = failsi[ii[k]]
327
328 L1_2_point_Voronoi_upper_bound += np.sum(weights_sim[Vor_mask]*np.ceil(node_pf_estimations)) / nis
329 L1_2_point_Voronoi_failure_rate += np.sum(weights_sim[Vor_mask]*node_pf_estimations) / nis
330 L1_2_point_Voronoi_pure_failure_rate += np.sum(weights_sim[Vor_mask]*node_pf_pure_estimations) / nis
331 L1_2_point_Voronoi_lower_bound += np.sum(weights_sim[Vor_mask]*np.floor(node_pf_estimations)) / nis
332
333
334 L1_Voronoi_failure_rate += np.sum(weights_sim[Vor_mask]*node_failsi) / nis
335 364
336
337 365
338 366
339 if L2_metric:
340 node_pf_estimations = np.empty(len(h_plan[Vor_mask]))
341 node_pf_pure_estimations = np.empty(len(h_plan[Vor_mask]))# pure distance estimation
342 node_failsi = np.empty(len(h_plan[Vor_mask]), dtype=np.bool) # for L1 Voronoi
343
344 for node_idx in range(len(h_plan[Vor_mask])):
345 node = h_plan[Vor_mask][node_idx]
346 inode2points_Rd_matrix = np.sum(np.square((sampled_plan_R - node) * gradient(node)), axis=1)
347 idx = np.argpartition(inode2points_Rd_matrix, 2)
348 node_failsi[node_idx] = failsi[idx[0]]
349
350
351 points_weight = PDF[idx[0]] / inode2points_Rd_matrix[idx[0]] + PDF[idx[1]] / inode2points_Rd_matrix[idx[1]]
352 points_distances = 1 / inode2points_Rd_matrix[idx[0]] + 1 / inode2points_Rd_matrix[idx[1]]
353
354 failure_weight = int(failsi[idx[0]]) * PDF[idx[0]] / inode2points_Rd_matrix[idx[0]]
355 failure_weight += int(failsi[idx[1]]) * PDF[idx[1]] / inode2points_Rd_matrix[idx[1]]
356
357 failure_distance = int(failsi[idx[0]]) / inode2points_Rd_matrix[idx[0]] + int(failsi[idx[1]]) / inode2points_Rd_matrix[idx[1]]
358
359
360 node_pf_estimations[node_idx] = failure_weight/points_weight
361 node_pf_pure_estimations[node_idx] = failure_distance/points_distances
362
363
367 cell_stats = dict()
368 # must (in the end) equal one
369 # we really do divide by nis, as in ordinary IS,
370 # not by the number of accepted points,
371 # because the average weight is a much less meaningful metric
372 cell_stats['cell_probability'] = np.sum(h_plan.w[Vor_mask]) / nis
373 cell_stats['Voronoi_2_point_upper_bound'] = np.sum(h_plan.w[Vor_mask]*np.ceil(node_pf_estimations)) / nis
374 cell_stats['Voronoi_2_point_failure_rate'] = np.sum(h_plan.w[Vor_mask]*node_pf_estimations) / nis
375 cell_stats['Voronoi_2_point_pure_failure_rate'] = np.sum(h_plan.w[Vor_mask]*node_pf_pure_estimations) / nis
376 cell_stats['Voronoi_2_point_lower_bound'] = np.sum(h_plan.w[Vor_mask]*np.floor(node_pf_estimations)) / nis
377 cell_stats['Voronoi_failure_rate'] = np.sum(h_plan.w[Vor_mask]*node_failsi) / nis
364 378
365 L2_2_point_Voronoi_upper_bound += np.sum(weights_sim[Vor_mask]*np.ceil(node_pf_estimations)) / nis
366 L2_2_point_Voronoi_failure_rate += np.sum(weights_sim[Vor_mask]*node_pf_estimations) / nis
367 L2_2_point_Voronoi_pure_failure_rate += np.sum(weights_sim[Vor_mask]*node_pf_pure_estimations) / nis
368 L2_2_point_Voronoi_lower_bound += np.sum(weights_sim[Vor_mask]*np.floor(node_pf_estimations)) / nis
369
370 L2_Voronoi_failure_rate += np.sum(weights_sim[Vor_mask]*node_failsi) / nis
379 for key, value in cell_stats.items():
380 global_stats[key] += value
371 381
382 nodes=CandyBox(h_plan[Vor_mask], w=h_plan.w[Vor_mask], node_pf_estimations=node_pf_estimations,\
383 node_pf_pure_estimations=node_pf_pure_estimations, node_failsi=node_failsi)
372 384
385 # it cracks
386 callback(estimation=estimation, nodes=nodes, cell_stats=cell_stats)
373 387
374 388
375
376 L1_Voronoi_2_point_estimators = {'L1_Voronoi_failure_rate':L1_Voronoi_failure_rate}
377 L1_Voronoi_2_point_estimators['L1_2_point_Voronoi_upper_bound'] = L1_2_point_Voronoi_upper_bound
378 L1_Voronoi_2_point_estimators['L1_2_point_Voronoi_failure_rate'] = L1_2_point_Voronoi_failure_rate
379 L1_Voronoi_2_point_estimators['L1_2_point_Voronoi_pure_failure_rate'] = L1_2_point_Voronoi_pure_failure_rate
380 L1_Voronoi_2_point_estimators['L1_2_point_Voronoi_lower_bound'] = L1_2_point_Voronoi_lower_bound
381 L1_Voronoi_2_point_estimators['Voronoi_2_point_test'] = Voronoi_2_point_test
382
383 if L2_metric:
384 L1_Voronoi_2_point_estimators['L2_Voronoi_failure_rate'] = L2_Voronoi_failure_rate
385 L1_Voronoi_2_point_estimators['L2_2_point_Voronoi_upper_bound'] = L2_2_point_Voronoi_upper_bound
386 L1_Voronoi_2_point_estimators['L2_2_point_Voronoi_failure_rate'] = L2_2_point_Voronoi_failure_rate
387 L1_Voronoi_2_point_estimators['L2_2_point_Voronoi_pure_failure_rate'] = L2_2_point_Voronoi_pure_failure_rate
388 L1_Voronoi_2_point_estimators['L2_2_point_Voronoi_lower_bound'] = L2_2_point_Voronoi_lower_bound
389 389
390 return L1_Voronoi_2_point_estimators
390 return global_stats
391 391
392 392
393 # legacy
394 #for k in range(len(ii)):
395 # points_weigths[ii[k]] = points_weigths[ii[k]] + weights_sim[k] / nis
396 # near_neighbors[ii[k]] = near_neighbors[ii[k]] + 1
397 # Vor_mask[k] = failsi[ii[k]]
393 398
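To make the arithmetic of the committed Voronoi_2_point_estimation concrete, here is a minimal standalone sketch of its two core steps, written against hypothetical toy data rather than the sample_box/IS/CandyBox machinery above: the importance-sampling weight of a node is the density ratio f/h, and the node's failure probability is interpolated from its two nearest simulation points using inverse-distance weights scaled by the PDF (shown for p_norm=1).

import numpy as np
from scipy import stats

rng = np.random.default_rng(1)

# hypothetical toy data: 10 evaluated points with known failure indicators
sampled_plan = rng.standard_normal((10, 2))
failsi = sampled_plan.sum(axis=1) > 1.0
PDF = np.prod(stats.norm.pdf(sampled_plan), axis=1)   # target density f at the points

# draw one IS node around point i; its weight is w = f(node) / h(node)
i = 0
h = stats.norm(loc=sampled_plan[i], scale=0.5)        # per-coordinate IS density
node = h.rvs(random_state=rng)
w = np.prod(stats.norm.pdf(node)) / np.prod(h.pdf(node))

# two-point estimate with p_norm=1: inverse-distance, PDF-scaled weights
d = np.sum(np.abs(sampled_plan - node), axis=1)       # L1 distances to all points
idx = np.argpartition(d, 2)[:2]                       # the two nearest points
points_weight = np.sum(PDF[idx] / d[idx])
failure_weight = np.sum(failsi[idx] * PDF[idx] / d[idx])
node_pf = failure_weight / points_weight              # lies between 0 and 1
print(f"node weight {w:.3f}, node_pf {node_pf:.3f}")

Summing w*node_pf over all nodes accepted into a cell and dividing by nis gives that cell's contribution to the failure rate, which the committed code accumulates in cell_stats and global_stats.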
Hints:
Before your first commit, do not forget to set up your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/iam-git/WellMet

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/iam-git/WellMet

Clone this repository using git:
git clone git://git.rocketgit.com/user/iam-git/WellMet

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main